python_code (stringlengths 0-679k) | repo_name (stringlengths 9-41) | file_path (stringlengths 6-149) |
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.f3.cellnet.core_cell import CellAgent
from nvflare.fuel.f3.message import Message
def cell_connected_cb_signature(connected_cell: CellAgent, *args, **kwargs):
"""
This is the signature of the cell_connected callback.
Args:
connected_cell: the cell that just got connected
*args:
**kwargs:
Returns:
"""
pass
def cell_disconnected_cb_signature(disconnected_cell: CellAgent, *args, **kwargs):
pass
def request_cb_signature(request: Message, *args, **kwargs) -> Message:
pass
def message_interceptor_signature(message: Message, *args, **kwargs) -> Message:
pass
def filter_cb_signature(message: Message, *args, **kwargs) -> Message:
pass
def cleanup_cb_signature(*args, **kwargs):
pass
| NVFlare-main | nvflare/fuel/f3/cellnet/cbs.py |
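The sketch below shows callbacks shaped to match the signatures defined in cbs.py above; the bodies are illustrative only, and the cell-side registration API is not part of this file, so registration is left out.
# Hedged usage sketch (not part of the repository file): callbacks matching
# cell_connected_cb_signature and request_cb_signature from cbs.py.
from nvflare.fuel.f3.cellnet.core_cell import CellAgent
from nvflare.fuel.f3.message import Message


def on_cell_connected(connected_cell: CellAgent, *args, **kwargs):
    # Same shape as cell_connected_cb_signature: invoked when a peer cell connects.
    print(f"cell connected: {connected_cell}")


def on_request(request: Message, *args, **kwargs) -> Message:
    # Same shape as request_cb_signature: a request handler must return a reply Message.
    # Echoing the request back avoids assuming the Message constructor's exact signature.
    return request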
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.f3.cellnet.net_agent import NetAgent
from nvflare.fuel.f3.stats_pool import VALID_HIST_MODES, parse_hist_mode
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.server.constants import ConnProps
from nvflare.security.logging import secure_format_exception
def _to_int(s: str):
try:
return int(s)
except Exception as ex:
return f"'{s}' is not a valid number: {secure_format_exception(ex)}"
class NetManager(CommandModule):
def __init__(self, agent: NetAgent, diagnose=False):
self.agent = agent
self.diagnose = diagnose
def get_spec(self) -> CommandModuleSpec:
return CommandModuleSpec(
name="cellnet",
cmd_specs=[
CommandSpec(
name="cells",
description="get system cells info",
usage="cells",
handler_func=self._cmd_cells,
visible=self.diagnose,
),
CommandSpec(
name="route",
description="send message to a cell and show route",
usage="route to_cell [from_cell]",
handler_func=self._cmd_route,
visible=self.diagnose,
),
CommandSpec(
name="peers",
description="show connected peers of a cell",
usage="peers target_cell",
handler_func=self._cmd_peers,
visible=self.diagnose,
),
CommandSpec(
name="conns",
description="show connectors of a cell",
usage="conns target_cell",
handler_func=self._cmd_connectors,
visible=self.diagnose,
),
CommandSpec(
name="url_use",
description="show use of a url",
usage="url_use url",
handler_func=self._cmd_url_use,
visible=self.diagnose,
),
CommandSpec(
name="speed",
description="test communication speed between cells",
usage="speed from_fqcn to_fqcn [num_tries] [payload_size]",
handler_func=self._cmd_speed_test,
visible=self.diagnose,
),
CommandSpec(
name="stress",
description="stress test communication among cells",
usage="stress [num_tries] [timeout]",
handler_func=self._cmd_stress_test,
visible=self.diagnose,
),
CommandSpec(
name="bulk",
description="test bulk messaging - each client sends a bulk to server",
usage="bulk [bulk_size]",
handler_func=self._cmd_bulk_test,
visible=self.diagnose,
enabled=self.diagnose,
),
CommandSpec(
name="change_root",
description="change to a new root server",
usage="change_root url",
handler_func=self._cmd_change_root,
visible=self.diagnose,
enabled=self.diagnose,
),
CommandSpec(
name="msg_stats",
description="show request stats",
usage="msg_stats target [mode]",
handler_func=self._cmd_msg_stats,
visible=self.diagnose,
),
CommandSpec(
name="list_pools",
description="list stats pools",
usage="list_pools target",
handler_func=self._cmd_list_pools,
visible=self.diagnose,
),
CommandSpec(
name="show_pool",
description="show stats pool detail",
usage="show_pool target pool_name [mode]",
handler_func=self._cmd_show_pool,
visible=self.diagnose,
),
CommandSpec(
name="show_comm_config",
description="show communication config",
usage="show_comm_config target",
handler_func=self._cmd_show_comm_config,
visible=self.diagnose,
),
CommandSpec(
name="show_config_vars",
description="show all defined config var values",
usage="show_config_vars target",
handler_func=self._cmd_show_config_vars,
visible=self.diagnose,
),
CommandSpec(
name="process_info",
description="show process information",
usage="process_info target",
handler_func=self._cmd_process_info,
visible=self.diagnose,
),
CommandSpec(
name="stop_cell",
description="stop a cell and its children",
usage="stop_cell target",
handler_func=self._cmd_stop_cell,
visible=self.diagnose,
enabled=self.diagnose,
),
CommandSpec(
name="stop_net",
description="stop the whole cellnet",
usage="stop_net",
handler_func=self._cmd_stop_net,
visible=self.diagnose,
enabled=self.diagnose,
),
],
)
def _cmd_cells(self, conn: Connection, args: [str]):
err, cell_fqcns = self.agent.request_cells_info()
if err:
conn.append_error(err)
total_cells = 0
if cell_fqcns:
for c in cell_fqcns:
conn.append_string(c)
err = FQCN.validate(c)
if not err:
total_cells += 1
conn.append_string(f"Total Cells: {total_cells}")
def _cmd_url_use(self, conn: Connection, args: [str]):
if len(args) != 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
url = args[1]
results = self.agent.get_url_use(url)
useless_cells = []
for k, v in results.items():
if v == "none":
useless_cells.append(k)
for k in useless_cells:
results.pop(k)
if not results:
conn.append_string(f"No cell uses {url}")
else:
conn.append_dict(results)
def _cmd_route(self, conn: Connection, args: [str]):
if len(args) < 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target_fqcn = args[1]
from_fqcn = "server"
if len(args) > 2:
from_fqcn = args[2]
err, reply_headers, req_headers = self.agent.start_route(from_fqcn, target_fqcn)
conn.append_string(f"Route Info from {from_fqcn} to {target_fqcn}")
if err:
conn.append_error(err)
if req_headers:
conn.append_string("Request Headers:")
conn.append_dict(req_headers)
if reply_headers:
conn.append_string("Reply Headers:")
conn.append_dict(reply_headers)
def _cmd_peers(self, conn: Connection, args: [str]):
if len(args) != 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target_fqcn = args[1]
err_dict, agents = self.agent.get_peers(target_fqcn)
if err_dict:
conn.append_dict(err_dict)
if agents:
for a in agents:
conn.append_string(a)
conn.append_string(f"Total Agents: {len(agents)}")
else:
conn.append_string("No peers")
def _cmd_connectors(self, conn: Connection, args: [str]):
if len(args) != 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target_fqcn = args[1]
err_dict, result = self.agent.get_connectors(target_fqcn)
if err_dict:
conn.append_dict(err_dict)
if result:
conn.append_dict(result)
def _cmd_speed_test(self, conn: Connection, args: [str]):
if len(args) < 3:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
from_fqcn = args[1]
to_fqcn = args[2]
num_tries = 100
payload_size = 1000
if len(args) > 3:
num_tries = _to_int(args[3])
if not isinstance(num_tries, int):
conn.append_error(num_tries)
return
if len(args) > 4:
payload_size = _to_int(args[4])
if not isinstance(payload_size, int):
conn.append_error(payload_size)
return
result = self.agent.speed_test(
from_fqcn=from_fqcn, to_fqcn=to_fqcn, num_tries=num_tries, payload_size=payload_size
)
conn.append_dict(result)
def _cmd_stress_test(self, conn: Connection, args: [str]):
num_tries = 10
timeout = 5.0
if len(args) > 1:
num_tries = _to_int(args[1])
if not isinstance(num_tries, int):
conn.append_error(num_tries)
return
if len(args) > 2:
timeout = _to_int(args[2])
if not isinstance(timeout, int):
conn.append_error(timeout)
return
err, targets = self.agent.request_cells_info()
if err:
conn.append_error(err)
if not targets:
conn.append_error("no targets to test")
conn.append_string(f"starting stress test on {targets}", flush=True)
result = self.agent.start_stress_test(targets=targets, num_rounds=num_tries, timeout=timeout)
total_errors = 0
for t, v in result.items():
if not isinstance(v, dict):
continue
err_dict = v.get("errors")
cell_errs = 0
for _, c in err_dict.items():
cell_errs += c
total_errors += cell_errs
if cell_errs == 0:
v.pop("errors")
conn.append_dict(result)
conn.append_string(f"total errors: {total_errors}")
def _cmd_bulk_test(self, conn: Connection, args: [str]):
bulk_size = 1
if len(args) > 1:
bulk_size = _to_int(args[1])
if not isinstance(bulk_size, int):
conn.append_error(bulk_size)
return
err, targets = self.agent.request_cells_info()
if err:
conn.append_error(err)
if not targets:
conn.append_error("no targets to test")
conn.append_string(f"starting bulk test on {targets}", flush=True)
result = self.agent.start_bulk_test(targets, bulk_size)
conn.append_dict(result)
@staticmethod
def _show_table_dict(conn: Connection, d: dict):
t = conn.append_table(d.get("headers"))
rows = d.get("rows")
for r in rows:
t.add_row(r)
def _cmd_msg_stats(self, conn: Connection, args: [str]):
if len(args) < 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target = args[1]
mode = ""
if len(args) > 2:
mode = args[2]
parsed_mode = parse_hist_mode(mode)
if not parsed_mode:
conn.append_error(f"invalid mode '{mode}': must be one of {VALID_HIST_MODES}")
return
reply = self.agent.get_msg_stats_table(target, parsed_mode)
if isinstance(reply, str):
conn.append_error(reply)
return
if not isinstance(reply, dict):
conn.append_error(f"expect dict bt got {type(reply)}")
return
self._show_table_dict(conn, reply)
def _cmd_show_pool(self, conn: Connection, args: [str]):
if len(args) < 3:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target = args[1]
pool_name = args[2]
mode = ""
if len(args) > 3:
mode = args[3]
parsed_mode = parse_hist_mode(mode)
if not parsed_mode:
conn.append_error(f"invalid mode '{mode}': must be one of {VALID_HIST_MODES}")
return
reply = self.agent.show_pool(target, pool_name, parsed_mode)
if isinstance(reply, str):
conn.append_error(reply)
return
if not isinstance(reply, dict):
conn.append_error(f"expect dict bt got {type(reply)}")
return
self._show_table_dict(conn, reply)
def _cmd_list_pools(self, conn: Connection, args: [str]):
if len(args) < 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target = args[1]
reply = self.agent.get_pool_list(target)
if isinstance(reply, str):
conn.append_error(reply)
return
if not isinstance(reply, dict):
conn.append_error(f"expect dict bt got {type(reply)}")
return
self._show_table_dict(conn, reply)
def _cmd_show_comm_config(self, conn: Connection, args: [str]):
if len(args) < 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target = args[1]
reply = self.agent.get_comm_config(target)
if isinstance(reply, str):
conn.append_error(reply)
return
if not isinstance(reply, dict):
conn.append_error(f"expect dict bt got {type(reply)}")
return
conn.append_dict(reply)
def _cmd_show_config_vars(self, conn: Connection, args: [str]):
if len(args) < 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target = args[1]
reply = self.agent.get_config_vars(target)
if isinstance(reply, str):
conn.append_error(reply)
return
if not isinstance(reply, dict):
conn.append_error(f"expect dict bt got {type(reply)}")
return
conn.append_dict(reply)
def _cmd_process_info(self, conn: Connection, args: [str]):
if len(args) < 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target = args[1]
reply = self.agent.get_process_info(target)
if isinstance(reply, str):
conn.append_error(reply)
return
if not isinstance(reply, dict):
conn.append_error(f"expect dict bt got {type(reply)}")
return
self._show_table_dict(conn, reply)
def _cmd_change_root(self, conn: Connection, args: [str]):
if len(args) < 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
url = args[1]
self.agent.change_root(url)
conn.append_string("Asked to change root")
def _cmd_stop_net(self, conn: Connection, args: [str]):
self.agent.stop()
conn.append_shutdown("Cellnet Stopped")
def _cmd_stop_cell(self, conn: Connection, args: [str]):
if len(args) < 2:
cmd_entry = conn.get_prop(ConnProps.CMD_ENTRY)
conn.append_string(f"Usage: {cmd_entry.usage}")
return
target = args[1]
reply = self.agent.stop_cell(target)
conn.append_string(f"Asked {target} to stop: {reply}")
| NVFlare-main | nvflare/fuel/f3/cellnet/net_manager.py |
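A short sketch of how the command registry above might be inspected. The NetAgent construction is not shown in this dump, so the agent argument is assumed to be an already-built NetAgent; everything else comes from NetManager.get_spec() as defined above.
# Hedged sketch (not part of the repository file): list the admin commands NetManager exposes.
from nvflare.fuel.f3.cellnet.net_manager import NetManager


def list_cellnet_commands(agent):
    # "agent" is assumed to be a constructed NetAgent; diagnose=True makes the commands visible.
    manager = NetManager(agent, diagnose=True)
    spec = manager.get_spec()
    for cmd in spec.cmd_specs:
        print(f"{cmd.name}: {cmd.description} (usage: {cmd.usage})")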
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import threading
import time
from typing import Any, Dict, List
import grpc
from nvflare.fuel.f3.comm_config import CommConfigurator
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.connection import BytesAlike, Connection
from nvflare.fuel.f3.drivers.aio_context import AioContext
from nvflare.fuel.f3.drivers.driver import ConnectorInfo
from nvflare.fuel.f3.drivers.grpc.streamer_pb2_grpc import (
StreamerServicer,
StreamerStub,
add_StreamerServicer_to_server,
)
from nvflare.fuel.utils.obj_utils import get_logger
from nvflare.security.logging import secure_format_exception, secure_format_traceback
from .base_driver import BaseDriver
from .driver_params import DriverCap, DriverParams
from .grpc.streamer_pb2 import Frame
from .net_utils import MAX_FRAME_SIZE, get_address, get_tcp_urls, ssl_required
GRPC_DEFAULT_OPTIONS = [
("grpc.max_send_message_length", MAX_FRAME_SIZE),
("grpc.max_receive_message_length", MAX_FRAME_SIZE),
("grpc.keepalive_time_ms", 120000),
("grpc.http2.max_pings_without_data", 0),
]
class _ConnCtx:
def __init__(self):
self.conn = None
self.error = None
self.waiter = threading.Event()
class AioStreamSession(Connection):
seq_num = 0
def __init__(self, aio_ctx: AioContext, connector: ConnectorInfo, conn_props: dict, context=None, channel=None):
super().__init__(connector)
self.aio_ctx = aio_ctx
self.logger = get_logger(self)
self.oq = asyncio.Queue(16)
self.closing = False
self.conn_props = conn_props
self.context = context # for server side
self.channel = channel # for client side
self.lock = threading.Lock()
def get_conn_properties(self) -> dict:
return self.conn_props
def close(self):
self.closing = True
with self.lock:
if self.context:
self.aio_ctx.run_coro(self.context.abort(grpc.StatusCode.CANCELLED, "service closed"))
self.context = None
if self.channel:
self.aio_ctx.run_coro(self.channel.close())
self.channel = None
def send_frame(self, frame: BytesAlike):
try:
AioStreamSession.seq_num += 1
seq = AioStreamSession.seq_num
f = Frame(seq=seq, data=bytes(frame))
self.aio_ctx.run_coro(self.oq.put(f))
except Exception as ex:
self.logger.debug(f"exception send_frame: {self}: {secure_format_exception(ex)}")
if not self.closing:
raise CommError(CommError.ERROR, f"Error sending frame on conn {self}: {secure_format_exception(ex)}")
async def read_loop(self, msg_iter):
ct = threading.current_thread()
self.logger.debug(f"{self}: started read_loop in thread {ct.name}")
try:
async for f in msg_iter:
if self.closing:
return
self.process_frame(f.data)
except grpc.aio.AioRpcError as error:
if not self.closing:
if error.code() == grpc.StatusCode.CANCELLED:
self.logger.debug(f"Connection {self} is closed by peer")
else:
self.logger.debug(f"Connection {self} Error: {error.details()}")
self.logger.debug(secure_format_traceback())
else:
self.logger.debug(f"Connection {self} is closed locally")
except Exception as ex:
if not self.closing:
self.logger.debug(f"{self}: exception {type(ex)} in read_loop: {secure_format_exception(ex)}")
self.logger.debug(secure_format_traceback())
self.logger.debug(f"{self}: in {ct.name}: done read_loop")
async def generate_output(self):
ct = threading.current_thread()
self.logger.debug(f"{self}: generate_output in thread {ct.name}")
try:
while True:
item = await self.oq.get()
yield item
except Exception as ex:
if self.closing:
self.logger.debug(f"{self}: connection closed by {type(ex)}: {secure_format_exception(ex)}")
else:
self.logger.debug(f"{self}: generate_output exception {type(ex)}: {secure_format_exception(ex)}")
self.logger.debug(secure_format_traceback())
self.logger.debug(f"{self}: done generate_output")
class Servicer(StreamerServicer):
def __init__(self, server, aio_ctx: AioContext):
self.server = server
self.aio_ctx = aio_ctx
self.logger = get_logger(self)
async def _write_loop(self, connection, grpc_context):
self.logger.debug("started _write_loop")
try:
while True:
f = await connection.oq.get()
await grpc_context.write(f)
except Exception as ex:
self.logger.debug(f"_write_loop except: {type(ex)}: {secure_format_exception(ex)}")
self.logger.debug("finished _write_loop")
async def Stream(self, request_iterator, context):
connection = None
ct = threading.current_thread()
try:
self.logger.debug(f"SERVER started Stream CB in thread {ct.name}")
conn_props = {
DriverParams.PEER_ADDR.value: context.peer(),
DriverParams.LOCAL_ADDR.value: get_address(self.server.connector.params),
}
cn_names = context.auth_context().get("x509_common_name")
if cn_names:
conn_props[DriverParams.PEER_CN.value] = cn_names[0].decode("utf-8")
connection = AioStreamSession(
aio_ctx=self.aio_ctx,
connector=self.server.connector,
conn_props=conn_props,
context=context,
)
self.logger.debug(f"SERVER created connection in thread {ct.name}")
self.server.driver.add_connection(connection)
try:
await asyncio.gather(self._write_loop(connection, context), connection.read_loop(request_iterator))
except asyncio.CancelledError:
self.logger.debug("SERVER: RPC cancelled")
except Exception as ex:
self.logger.debug(f"await gather except: {type(ex)}: {secure_format_exception(ex)}")
self.logger.debug(f"SERVER: done await gather in thread {ct.name}")
except Exception as ex:
self.logger.debug(f"Connection closed due to error: {secure_format_exception(ex)}")
finally:
if connection:
with connection.lock:
connection.context = None
self.logger.debug(f"SERVER: closing connection {connection.name}")
self.server.driver.close_connection(connection)
self.logger.debug(f"SERVER: cleanly finished Stream CB in thread {ct.name}")
class Server:
def __init__(self, driver, connector, aio_ctx: AioContext, options, conn_ctx: _ConnCtx):
self.logger = get_logger(self)
self.driver = driver
self.connector = connector
self.grpc_server = grpc.aio.server(options=options)
servicer = Servicer(self, aio_ctx)
add_StreamerServicer_to_server(servicer, self.grpc_server)
params = connector.params
host = params.get(DriverParams.HOST.value)
if not host:
host = "0.0.0.0"
port = int(params.get(DriverParams.PORT.value))
addr = f"{host}:{port}"
try:
self.logger.debug(f"SERVER: connector params: {params}")
secure = ssl_required(params)
if secure:
credentials = AioGrpcDriver.get_grpc_server_credentials(params)
self.grpc_server.add_secure_port(addr, server_credentials=credentials)
else:
self.grpc_server.add_insecure_port(addr)
except Exception as ex:
conn_ctx.error = f"cannot listen on {addr}: {type(ex)}: {secure_format_exception(ex)}"
self.logger.debug(conn_ctx.error)
async def start(self, conn_ctx: _ConnCtx):
self.logger.debug("starting grpc server")
try:
await self.grpc_server.start()
await self.grpc_server.wait_for_termination()
except Exception as ex:
conn_ctx.error = f"cannot start server: {type(ex)}: {secure_format_exception(ex)}"
raise ex
async def shutdown(self):
try:
await self.grpc_server.stop(grace=0.5)
except Exception as ex:
self.logger.debug(f"exception shutdown server: {secure_format_exception(ex)}")
class AioGrpcDriver(BaseDriver):
aio_ctx = None
def __init__(self):
super().__init__()
self.server = None
self.options = GRPC_DEFAULT_OPTIONS
self.logger = get_logger(self)
configurator = CommConfigurator()
config = configurator.get_config()
if config:
my_params = config.get("grpc")
if my_params:
self.options = my_params.get("options")
self.logger.debug(f"GRPC Config: options={self.options}")
self.closing = False
@staticmethod
def supported_transports() -> List[str]:
return ["grpc", "grpcs"]
@staticmethod
def capabilities() -> Dict[str, Any]:
return {DriverCap.SEND_HEARTBEAT.value: True, DriverCap.SUPPORT_SSL.value: True}
async def _start_server(self, connector: ConnectorInfo, aio_ctx: AioContext, conn_ctx: _ConnCtx):
self.connector = connector
self.server = Server(self, connector, aio_ctx, options=self.options, conn_ctx=conn_ctx)
if not conn_ctx.error:
try:
conn_ctx.conn = True
await self.server.start(conn_ctx)
except Exception as ex:
if not self.closing:
self.logger.debug(secure_format_traceback())
conn_ctx.error = f"failed to start server: {type(ex)}: {secure_format_exception(ex)}"
conn_ctx.waiter.set()
def listen(self, connector: ConnectorInfo):
self.logger.debug(f"listen called from thread {threading.current_thread().name}")
self.connector = connector
aio_ctx = AioContext.get_global_context()
conn_ctx = _ConnCtx()
aio_ctx.run_coro(self._start_server(connector, aio_ctx, conn_ctx))
while not conn_ctx.conn and not conn_ctx.error:
time.sleep(0.1)
if conn_ctx.error:
raise CommError(code=CommError.ERROR, message=conn_ctx.error)
self.logger.debug("SERVER: waiting for server to finish")
conn_ctx.waiter.wait()
self.logger.debug("SERVER: server is done")
async def _start_connect(self, connector: ConnectorInfo, aio_ctx: AioContext, conn_ctx: _ConnCtx):
self.logger.debug("Started _start_connect coro")
self.connector = connector
params = connector.params
address = get_address(params)
self.logger.debug(f"CLIENT: trying to connect {address}")
try:
secure = ssl_required(params)
if secure:
grpc_channel = grpc.aio.secure_channel(
address, options=self.options, credentials=self.get_grpc_client_credentials(params)
)
else:
grpc_channel = grpc.aio.insecure_channel(address, options=self.options)
async with grpc_channel as channel:
self.logger.debug(f"CLIENT: connected to {address}")
stub = StreamerStub(channel)
conn_props = {DriverParams.PEER_ADDR.value: address}
if secure:
conn_props[DriverParams.PEER_CN.value] = "N/A"
connection = AioStreamSession(
aio_ctx=aio_ctx, connector=connector, conn_props=conn_props, channel=channel
)
try:
self.logger.debug(f"CLIENT: start streaming on connection {connection}")
msg_iter = stub.Stream(connection.generate_output())
conn_ctx.conn = connection
await connection.read_loop(msg_iter)
except asyncio.CancelledError as error:
self.logger.debug(f"CLIENT: RPC cancelled: {error}")
except Exception as ex:
if self.closing:
self.logger.debug(
f"Connection {connection} closed by {type(ex)}: {secure_format_exception(ex)}"
)
else:
self.logger.debug(
f"Connection {connection} client read exception {type(ex)}: {secure_format_exception(ex)}"
)
self.logger.debug(secure_format_traceback())
with connection.lock:
connection.channel = None
connection.close()
except asyncio.CancelledError:
self.logger.debug("CLIENT: RPC cancelled")
except Exception as ex:
conn_ctx.error = f"connection {connection} error: {type(ex)}: {secure_format_exception(ex)}"
self.logger.debug(conn_ctx.error)
self.logger.debug(secure_format_traceback())
conn_ctx.waiter.set()
def connect(self, connector: ConnectorInfo):
self.logger.debug("CLIENT: connect called")
aio_ctx = AioContext.get_global_context()
conn_ctx = _ConnCtx()
aio_ctx.run_coro(self._start_connect(connector, aio_ctx, conn_ctx))
time.sleep(0.2)
while not conn_ctx.conn and not conn_ctx.error:
time.sleep(0.1)
self.logger.debug("CLIENT: connect completed")
if conn_ctx.error:
raise CommError(CommError.ERROR, conn_ctx.error)
self.add_connection(conn_ctx.conn)
conn_ctx.waiter.wait()
self.close_connection(conn_ctx.conn)
def shutdown(self):
if self.closing:
return
self.closing = True
self.close_all()
if self.server:
aio_ctx = AioContext.get_global_context()
aio_ctx.run_coro(self.server.shutdown())
@staticmethod
def get_urls(scheme: str, resources: dict) -> (str, str):
secure = resources.get(DriverParams.SECURE)
if secure:
scheme = "grpcs"
return get_tcp_urls(scheme, resources)
@staticmethod
def get_grpc_client_credentials(params: dict):
root_cert = AioGrpcDriver.read_file(params.get(DriverParams.CA_CERT.value))
cert_chain = AioGrpcDriver.read_file(params.get(DriverParams.CLIENT_CERT.value))
private_key = AioGrpcDriver.read_file(params.get(DriverParams.CLIENT_KEY.value))
return grpc.ssl_channel_credentials(
certificate_chain=cert_chain, private_key=private_key, root_certificates=root_cert
)
@staticmethod
def get_grpc_server_credentials(params: dict):
root_cert = AioGrpcDriver.read_file(params.get(DriverParams.CA_CERT.value))
cert_chain = AioGrpcDriver.read_file(params.get(DriverParams.SERVER_CERT.value))
private_key = AioGrpcDriver.read_file(params.get(DriverParams.SERVER_KEY.value))
return grpc.ssl_server_credentials(
[(private_key, cert_chain)],
root_certificates=root_cert,
require_client_auth=True,
)
@staticmethod
def read_file(file_name: str):
if not file_name:
return None
with open(file_name, "rb") as f:
return f.read()
| NVFlare-main | nvflare/fuel/f3/drivers/aio_grpc_driver.py |
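A minimal sketch of the static driver API shown above: querying supported schemes and asking the driver for a connect/listen URL pair. The resources keys follow what get_open_tcp_port() reads (DriverParams.PORTS); the host and port range values are arbitrary.
# Hedged usage sketch (not part of the repository file): URL generation for the aio gRPC driver.
from nvflare.fuel.f3.drivers.aio_grpc_driver import AioGrpcDriver
from nvflare.fuel.f3.drivers.driver_params import DriverParams

print(AioGrpcDriver.supported_transports())  # ['grpc', 'grpcs']
print(AioGrpcDriver.capabilities())          # heartbeat and SSL capability flags

connect_url, listening_url = AioGrpcDriver.get_urls(
    "grpc", {"host": "localhost", DriverParams.PORTS: "8002-8100"}
)
print(connect_url, listening_url)  # e.g. grpc://localhost:8002 grpc://0:8002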
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
from socketserver import BaseRequestHandler
from typing import Any, Union
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.connection import BytesAlike, Connection
from nvflare.fuel.f3.drivers.driver import ConnectorInfo
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.drivers.net_utils import MAX_FRAME_SIZE
from nvflare.fuel.f3.sfm.prefix import PREFIX_LEN, Prefix
from nvflare.fuel.hci.security import get_certificate_common_name
from nvflare.security.logging import secure_format_exception
log = logging.getLogger(__name__)
class SocketConnection(Connection):
def __init__(self, sock: Any, connector: ConnectorInfo, secure: bool = False):
super().__init__(connector)
self.sock = sock
self.secure = secure
self.closing = False
self.conn_props = self._get_socket_properties()
def get_conn_properties(self) -> dict:
return self.conn_props
def close(self):
self.closing = True
if self.sock:
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError as error:
log.debug(f"Connection {self} is already closed: {error}")
self.sock.close()
def send_frame(self, frame: BytesAlike):
try:
self.sock.sendall(frame)
except Exception as ex:
if not self.closing:
raise CommError(CommError.ERROR, f"Error sending frame on conn {self}: {secure_format_exception(ex)}")
def read_loop(self):
try:
self.read_frame_loop()
except CommError as error:
if error.code == CommError.CLOSED:
log.debug(f"Connection {self.name} is closed by peer")
else:
log.debug(f"Connection {self.name} is closed due to error: {error}")
except Exception as ex:
if self.closing:
log.debug(f"Connection {self.name} is closed")
else:
log.debug(f"Connection {self.name} is closed due to error: {secure_format_exception(ex)}")
def read_frame_loop(self):
# read_frame raises an exception on a stale/bad connection, so this is not an infinite loop
while not self.closing:
frame = self.read_frame()
self.process_frame(frame)
def read_frame(self) -> BytesAlike:
prefix_buf = bytearray(PREFIX_LEN)
self.read_into(prefix_buf, 0, PREFIX_LEN)
prefix = Prefix.from_bytes(prefix_buf)
if prefix.length == PREFIX_LEN:
return prefix_buf
if prefix.length > MAX_FRAME_SIZE:
raise CommError(CommError.BAD_DATA, f"Frame exceeds limit ({prefix.length} > {MAX_FRAME_SIZE})")
frame = bytearray(prefix.length)
frame[0:PREFIX_LEN] = prefix_buf
self.read_into(frame, PREFIX_LEN, prefix.length - PREFIX_LEN)
return frame
def read_into(self, buffer: BytesAlike, offset: int, length: int):
if isinstance(buffer, memoryview):
view = buffer
else:
view = memoryview(buffer)
if offset:
view = view[offset:]
remaining = length
while remaining:
n = self.sock.recv_into(view, remaining)
if n == 0:
raise CommError(CommError.CLOSED, f"Connection {self.name} is closed by peer")
view = view[n:]
remaining -= n
@staticmethod
def _format_address(addr: Union[str, tuple], fileno: int) -> str:
if isinstance(addr, tuple):
result = f"{addr[0]}:{addr[1]}"
else:
result = f"{addr}:{fileno}"
return result
def _get_socket_properties(self) -> dict:
conn_props = {}
try:
peer = self.sock.getpeername()
fileno = self.sock.fileno()
except OSError as ex:
peer = "N/A"
fileno = 0
log.debug(f"getpeername() error: {secure_format_exception(ex)}")
conn_props[DriverParams.PEER_ADDR.value] = self._format_address(peer, fileno)
local = self.sock.getsockname()
conn_props[DriverParams.LOCAL_ADDR.value] = self._format_address(local, fileno)
if self.secure:
cert = self.sock.getpeercert()
if cert:
cn = get_certificate_common_name(cert)
else:
cn = "N/A"
conn_props[DriverParams.PEER_CN.value] = cn
return conn_props
class ConnectionHandler(BaseRequestHandler):
def handle(self):
# noinspection PyUnresolvedReferences
connection = SocketConnection(self.request, self.server.connector, self.server.ssl_context)
# noinspection PyUnresolvedReferences
driver = self.server.driver
driver.add_connection(connection)
connection.read_loop()
driver.close_connection(connection)
| NVFlare-main | nvflare/fuel/f3/drivers/socket_conn.py |
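The read_into() method above accumulates exactly `length` bytes even though recv_into() may return short reads. The standalone sketch below re-implements that pattern against a fake socket to show why the memoryview is advanced in a loop; it is an illustration, not repository code.
# Hedged illustration of the short-read handling used by SocketConnection.read_into().
class _FakeSock:
    """Stand-in for a socket: serves recv_into() from a buffer in small chunks."""

    def __init__(self, data: bytes, chunk: int = 3):
        self._data = data
        self._chunk = chunk

    def recv_into(self, view, nbytes):
        n = min(nbytes, self._chunk, len(self._data))
        view[:n] = self._data[:n]
        self._data = self._data[n:]
        return n


def read_exactly(sock, length: int) -> bytearray:
    buf = bytearray(length)
    view = memoryview(buf)
    remaining = length
    while remaining:
        n = sock.recv_into(view, remaining)
        if n == 0:
            raise EOFError("peer closed the connection")
        view = view[n:]  # advance past the bytes already received
        remaining -= n
    return buf


print(read_exactly(_FakeSock(b"hello frame bytes"), 11))  # bytearray(b'hello frame')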
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import threading
import time
from nvflare.fuel.utils.obj_utils import get_logger
from nvflare.security.logging import secure_format_exception
class AioContext:
"""Asyncio context. Used to share the asyncio event loop among multiple classes"""
_ctx_lock = threading.Lock()
_global_ctx = None
def __init__(self, name):
self.closed = False
self.name = name
self.loop = None
self.ready = threading.Event()
self.logger = get_logger(self)
self.logger.debug(f"{os.getpid()}: ******** Created AioContext {name}")
def get_event_loop(self):
t = threading.current_thread()
if not self.ready.is_set():
self.logger.debug(f"{os.getpid()} {t.name}: {self.name}: waiting for loop to be ready")
self.ready.wait()
return self.loop
def run_aio_loop(self):
self.logger.debug(f"{self.name}: started AioContext in thread {threading.current_thread().name}")
# self.loop = asyncio.get_event_loop()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.logger.debug(f"{self.name}: got loop: {id(self.loop)}")
self.ready.set()
try:
self.loop.run_forever()
pending_tasks = asyncio.all_tasks(self.loop)
for t in [t for t in pending_tasks if not (t.done() or t.cancelled())]:
# give canceled tasks the last chance to run
self.loop.run_until_complete(t)
# self.loop.run_until_complete(self.loop.shutdown_asyncgens())
except Exception as ex:
self.logger.error(f"error running aio loop: {secure_format_exception(ex)}")
raise ex
finally:
self.logger.debug(f"{self.name}: AIO Loop run done!")
self.loop.close()
self.logger.debug(f"{self.name}: AIO Loop Completed!")
def run_coro(self, coro):
event_loop = self.get_event_loop()
return asyncio.run_coroutine_threadsafe(coro, event_loop)
def stop_aio_loop(self, grace=1.0):
self.logger.debug("Cancelling pending tasks")
pending_tasks = asyncio.all_tasks(self.loop)
for task in pending_tasks:
self.logger.debug(f"{self.name}: cancelled a task")
try:
# task.cancel()
self.loop.call_soon_threadsafe(task.cancel)
except Exception as ex:
self.logger.debug(f"{self.name}: error cancelling task {type(ex)}")
# wait until all pending tasks are done
start = time.time()
while asyncio.all_tasks(self.loop):
if time.time() - start > grace:
self.logger.debug(f"pending tasks are not cancelled in {grace} seconds")
break
time.sleep(0.1)
self.logger.debug("Stopping AIO loop")
try:
self.loop.call_soon_threadsafe(self.loop.stop)
except Exception as ex:
self.logger.debug(f"Loop stopping error: {secure_format_exception(ex)}")
start = time.time()
while self.loop.is_running():
self.logger.debug("looping still running ...")
time.sleep(0.1)
if time.time() - start > grace:
break
if self.loop.is_running():
self.logger.error("could not stop AIO loop")
else:
self.logger.debug("stopped loop!")
@classmethod
def get_global_context(cls):
with cls._ctx_lock:
if not cls._global_ctx:
cls._global_ctx = AioContext(f"Ctx_{os.getpid()}")
t = threading.Thread(target=cls._global_ctx.run_aio_loop, name="aio_ctx")
t.daemon = True
t.start()
return cls._global_ctx
@classmethod
def close_global_context(cls):
with cls._ctx_lock:
if cls._global_ctx:
cls._global_ctx.stop_aio_loop()
cls._global_ctx = None
| NVFlare-main | nvflare/fuel/f3/drivers/aio_context.py |
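A small sketch of the shared-event-loop pattern AioContext implements: run_coro() schedules a coroutine on the shared loop from any thread and returns a concurrent future, so the caller can block on .result(). The coroutine and delay are arbitrary.
# Hedged usage sketch (not part of the repository file).
import asyncio

from nvflare.fuel.f3.drivers.aio_context import AioContext


async def _ping(delay: float) -> str:
    await asyncio.sleep(delay)
    return "pong"


aio_ctx = AioContext.get_global_context()  # lazily starts the daemon loop thread
future = aio_ctx.run_coro(_ping(0.1))      # returns a concurrent.futures.Future
print(future.result(timeout=5))            # "pong"
AioContext.close_global_context()          # stops the shared loop when no longer needed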
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from nvflare.fuel.f3.drivers.net_utils import short_url
from nvflare.fuel.utils.constants import Mode
@dataclass
class ConnectorInfo:
"""Connector information"""
handle: str
# noinspection PyUnresolvedReferences
driver: "Driver"
params: dict
mode: Mode
total_conns: int
curr_conns: int
started: bool
stopping: bool
def __str__(self):
url = short_url(self.params)
return f"[{self.handle} {self.mode.name} {url}]"
| NVFlare-main | nvflare/fuel/f3/drivers/connector_info.py |
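A sketch of constructing the ConnectorInfo dataclass by hand. In real use the connection manager fills these fields; the handle and URL here are placeholders, and driver is left as None to keep the example self-contained.
# Hedged usage sketch (not part of the repository file).
from nvflare.fuel.f3.drivers.connector_info import ConnectorInfo, Mode
from nvflare.fuel.f3.drivers.net_utils import parse_url

info = ConnectorInfo(
    handle="CH00001",
    driver=None,  # would be a Driver instance in real use
    params=parse_url("tcp://localhost:8002"),
    mode=Mode.ACTIVE,
    total_conns=0,
    curr_conns=0,
    started=False,
    stopping=False,
)
print(info)  # [CH00001 ACTIVE tcp://localhost:8002]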
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import socket
import ssl
from ssl import SSLContext
from typing import Any, Optional
from urllib.parse import parse_qsl, urlencode, urlparse
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.utils.argument_utils import str2bool
from nvflare.security.logging import secure_format_exception
log = logging.getLogger(__name__)
LO_PORT = 1025
HI_PORT = 65535
MAX_ITER_SIZE = 10
RANDOM_TRIES = 20
BIND_TIME_OUT = 5
SECURE_SCHEMES = {"https", "wss", "grpcs", "stcp", "satcp"}
# gRPC can't handle frames over 2GB, so the limit is set to 2GB - 2MB
MAX_FRAME_SIZE = 2 * 1024 * 1024 * 1024 - (2 * 1024 * 1024)
MAX_HEADER_SIZE = 1024 * 1024
MAX_PAYLOAD_SIZE = MAX_FRAME_SIZE - 16 - MAX_HEADER_SIZE
def ssl_required(params: dict) -> bool:
"""Check if SSL is required"""
scheme = params.get(DriverParams.SCHEME.value, None)
return scheme in SECURE_SCHEMES or str2bool(params.get(DriverParams.SECURE.value))
def get_ssl_context(params: dict, ssl_server: bool) -> Optional[SSLContext]:
if not ssl_required(params):
return None
ca_path = params.get(DriverParams.CA_CERT.value)
if ssl_server:
cert_path = params.get(DriverParams.SERVER_CERT.value)
key_path = params.get(DriverParams.SERVER_KEY.value)
else:
cert_path = params.get(DriverParams.CLIENT_CERT.value)
key_path = params.get(DriverParams.CLIENT_KEY.value)
if not all([ca_path, cert_path, key_path]):
scheme = params.get(DriverParams.SCHEME.value, "Unknown")
role = "Server" if ssl_server else "Client"
raise CommError(CommError.BAD_CONFIG, f"{role} certificate parameters are missing for scheme {scheme}")
if ssl_server:
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
else:
ctx = ssl.create_default_context()
ctx.minimum_version = ssl.TLSVersion.TLSv1_2
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = False
ctx.load_verify_locations(ca_path)
ctx.load_cert_chain(certfile=cert_path, keyfile=key_path)
return ctx
def get_address(params: dict) -> str:
host = params.get(DriverParams.HOST.value, "0.0.0.0")
port = params.get(DriverParams.PORT.value, 0)
return f"{host}:{port}"
def parse_port_range(entry: Any):
if isinstance(entry, int):
return range(entry, entry + 1)
parts = entry.split("-")
if len(parts) == 1:
num = int(parts[0])
return range(num, num + 1)
lo = int(parts[0]) if parts[0] else LO_PORT
hi = int(parts[1]) if parts[1] else HI_PORT
return range(lo, hi + 1)
def parse_port_list(ranges: Any) -> list:
all_ranges = []
if isinstance(ranges, list):
for r in ranges:
all_ranges.append(parse_port_range(r))
else:
all_ranges.append(parse_port_range(ranges))
return all_ranges
def check_tcp_port(port) -> bool:
result = False
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(BIND_TIME_OUT)
try:
s.bind(("", port))
result = True
except Exception as e:
log.debug(f"Port {port} binding error: {secure_format_exception(e)}")
finally:
s.close()
return result
def get_open_tcp_port(resources: dict) -> Optional[int]:
port = resources.get(DriverParams.PORT)
if port:
return port
ports = resources.get(DriverParams.PORTS)
if ports:
all_ports = parse_port_list(ports)
else:
all_ports = [range(LO_PORT, HI_PORT + 1)]
for port_range in all_ports:
if len(port_range) <= MAX_ITER_SIZE:
for port in port_range:
if check_tcp_port(port):
return port
else:
for i in range(RANDOM_TRIES):
port = random.randint(port_range.start, port_range.stop - 1)
if check_tcp_port(port):
return port
return None
def parse_url(url: str) -> dict:
"""Parse URL into a dictionary, saving original URL also"""
if not url:
return {}
params = {DriverParams.URL.value: url}
parsed_url = urlparse(url)
params[DriverParams.SCHEME.value] = parsed_url.scheme
parts = parsed_url.netloc.split(":")
if len(parts) >= 1:
host = parts[0]
# Host is required in URL. 0 is used as the placeholder for empty host
if host == "0":
host = ""
params[DriverParams.HOST.value] = host
if len(parts) >= 2:
params[DriverParams.PORT.value] = parts[1]
params[DriverParams.PATH.value] = parsed_url.path
params[DriverParams.PARAMS.value] = parsed_url.params
params[DriverParams.QUERY.value] = parsed_url.query
params[DriverParams.FRAG.value] = parsed_url.fragment
if parsed_url.query:
for k, v in parse_qsl(parsed_url.query):
# Only last one is saved if duplicate keys
params[k] = v
return params
def encode_url(params: dict) -> str:
temp = params.copy()
# Original URL is not needed
temp.pop(DriverParams.URL.value, None)
scheme = temp.pop(DriverParams.SCHEME.value, None)
host = temp.pop(DriverParams.HOST.value, None)
if not host:
host = "0"
port = temp.pop(DriverParams.PORT.value, None)
path = temp.pop(DriverParams.PATH.value, None)
parameters = temp.pop(DriverParams.PARAMS.value, None)
# Encoded query is not needed
temp.pop(DriverParams.QUERY.value, None)
frag = temp.pop(DriverParams.FRAG.value, None)
url = f"{scheme}://{host}"
if port:
url += ":" + str(port)
if path:
url += path
if parameters:
url += ";" + parameters
if temp:
url += "?" + urlencode(temp)
if frag:
url += "#" + frag
return url
def short_url(params: dict) -> str:
"""Get a short url to be used in logs"""
url = params.get(DriverParams.URL.value)
if url:
return url
subset = {
k: params[k]
for k in {DriverParams.SCHEME.value, DriverParams.HOST.value, DriverParams.PORT.value, DriverParams.PATH.value}
}
return encode_url(subset)
def get_tcp_urls(scheme: str, resources: dict) -> (str, str):
"""Generate URL pairs for connecting and listening for TCP-based protocols
Args:
scheme: The transport scheme
resources: The resource restrictions like port ranges
Returns:
a tuple with connecting and listening URL
Raises:
CommError: If no open port can be found in the specified range
"""
host = resources.get("host") if resources else None
if not host:
host = "localhost"
port = get_open_tcp_port(resources)
if not port:
raise CommError(CommError.BAD_CONFIG, "Can't find an open port in the specified range")
# Always listen on all interfaces
listening_url = f"{scheme}://0:{port}"
connect_url = f"{scheme}://{host}:{port}"
return connect_url, listening_url
| NVFlare-main | nvflare/fuel/f3/drivers/net_utils.py |
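A round-trip sketch for the URL helpers above: parse_url() flattens query parameters into the returned dict, encode_url() rebuilds the URL from its parts, and get_tcp_urls() picks an open port and returns a connect/listen pair. The URL, query key, and port range are arbitrary.
# Hedged usage sketch (not part of the repository file).
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.drivers.net_utils import encode_url, get_tcp_urls, parse_url

params = parse_url("grpc://localhost:8002/path?timeout=30")
print(params[DriverParams.SCHEME.value], params[DriverParams.HOST.value], params[DriverParams.PORT.value])
print(params["timeout"])   # query params are flattened into the dict -> "30"
print(encode_url(params))  # rebuilt from the parts, e.g. grpc://localhost:8002/path?timeout=30

connect_url, listening_url = get_tcp_urls("tcp", {"host": "localhost", DriverParams.PORTS: "18000-18010"})
print(connect_url, listening_url)  # e.g. tcp://localhost:18000 tcp://0:18000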
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
from typing import Any, Dict, List
import websockets
from websockets.exceptions import ConnectionClosedOK
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.connection import BytesAlike, Connection
from nvflare.fuel.f3.drivers import net_utils
from nvflare.fuel.f3.drivers.aio_context import AioContext
from nvflare.fuel.f3.drivers.base_driver import BaseDriver
from nvflare.fuel.f3.drivers.driver import ConnectorInfo
from nvflare.fuel.f3.drivers.driver_params import DriverCap, DriverParams
from nvflare.fuel.f3.drivers.net_utils import MAX_FRAME_SIZE, get_tcp_urls
from nvflare.fuel.f3.sfm.conn_manager import Mode
from nvflare.fuel.hci.security import get_certificate_common_name
from nvflare.security.logging import secure_format_exception
log = logging.getLogger(__name__)
THREAD_POOL_SIZE = 8
class WsConnection(Connection):
def __init__(self, websocket: Any, aio_context: AioContext, connector: ConnectorInfo, secure: bool):
super().__init__(connector)
self.websocket = websocket
self.aio_context = aio_context
self.closing = False
self.secure = secure
self.conn_props = self._get_socket_properties()
def get_conn_properties(self) -> dict:
return self.conn_props
def close(self):
self.closing = True
self.aio_context.run_coro(self.websocket.close())
def send_frame(self, frame: BytesAlike):
self.aio_context.run_coro(self._async_send_frame(frame))
def _get_socket_properties(self) -> dict:
conn_props = {}
addr = self.websocket.remote_address
if addr:
conn_props[DriverParams.PEER_ADDR.value] = f"{addr[0]}:{addr[1]}"
addr = self.websocket.local_address
if addr:
conn_props[DriverParams.LOCAL_ADDR.value] = f"{addr[0]}:{addr[1]}"
peer_cert = self.websocket.transport.get_extra_info("peercert")
if peer_cert:
cn = get_certificate_common_name(peer_cert)
else:
if self.secure:
cn = "N/A"
else:
cn = None
if cn:
conn_props[DriverParams.PEER_CN.value] = cn
return conn_props
async def _async_send_frame(self, frame: BytesAlike):
try:
await self.websocket.send(frame)
# This is to yield control. See bug: https://github.com/aaugustin/websockets/issues/865
await asyncio.sleep(0)
except Exception as ex:
log.error(f"Error sending frame for connection {self}: {secure_format_exception(ex)}")
class AioHttpDriver(BaseDriver):
"""Async HTTP driver using websocket extension"""
def __init__(self):
super().__init__()
self.aio_context = AioContext.get_global_context()
self.stop_event = None
self.ssl_context = None
@staticmethod
def supported_transports() -> List[str]:
return ["http", "https", "ws", "wss"]
@staticmethod
def capabilities() -> Dict[str, Any]:
return {DriverCap.SEND_HEARTBEAT.value: True, DriverCap.SUPPORT_SSL.value: True}
def listen(self, connector: ConnectorInfo):
self._event_loop(Mode.PASSIVE, connector)
def connect(self, connector: ConnectorInfo):
self._event_loop(Mode.ACTIVE, connector)
def shutdown(self):
self.close_all()
if self.stop_event:
self.stop_event.set_result(None)
@staticmethod
def get_urls(scheme: str, resources: dict) -> (str, str):
secure = resources.get(DriverParams.SECURE)
if secure:
scheme = "https"
return get_tcp_urls(scheme, resources)
# Internal methods
def _event_loop(self, mode: Mode, connector: ConnectorInfo):
self.connector = connector
if mode != connector.mode:
raise CommError(CommError.ERROR, f"Connector mode doesn't match driver mode for {self.connector}")
self.aio_context.run_coro(self._async_event_loop(mode)).result()
async def _async_event_loop(self, mode: Mode):
self.stop_event = self.aio_context.get_event_loop().create_future()
params = self.connector.params
host = params.get(DriverParams.HOST.value)
port = params.get(DriverParams.PORT.value)
if mode == Mode.ACTIVE:
coroutine = self._async_connect(host, port)
else:
coroutine = self._async_listen(host, port)
await coroutine
async def _async_connect(self, host, port):
self.ssl_context = net_utils.get_ssl_context(self.connector.params, False)
if self.ssl_context:
scheme = "wss"
else:
scheme = "ws"
async with websockets.connect(
f"{scheme}://{host}:{port}", ssl=self.ssl_context, ping_interval=None, max_size=MAX_FRAME_SIZE
) as ws:
await self._handler(ws)
async def _async_listen(self, host, port):
self.ssl_context = net_utils.get_ssl_context(self.connector.params, True)
async with websockets.serve(
self._handler, host, port, ssl=self.ssl_context, ping_interval=None, max_size=MAX_FRAME_SIZE
):
await self.stop_event
async def _handler(self, websocket):
conn = None
try:
conn = WsConnection(websocket, self.aio_context, self.connector, self.ssl_context)
self.add_connection(conn)
await self._read_loop(conn)
self.close_connection(conn)
except ConnectionClosedOK as ex:
conn_info = str(conn) if conn else "N/A"
log.debug(f"Connection {conn_info} is closed by peer: {secure_format_exception(ex)}")
@staticmethod
async def _read_loop(conn: WsConnection):
while not conn.closing:
# Reading from websocket and call receiver CB
frame = await conn.websocket.recv()
conn.process_frame(frame)
| NVFlare-main | nvflare/fuel/f3/drivers/aio_http_driver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/f3/drivers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
from concurrent.futures import CancelledError
from typing import Any, Dict, List
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.drivers.aio_conn import AioConnection
from nvflare.fuel.f3.drivers.aio_context import AioContext
from nvflare.fuel.f3.drivers.base_driver import BaseDriver
from nvflare.fuel.f3.drivers.connector_info import ConnectorInfo, Mode
from nvflare.fuel.f3.drivers.driver_params import DriverCap, DriverParams
from nvflare.fuel.f3.drivers.net_utils import get_ssl_context
from nvflare.fuel.f3.drivers.tcp_driver import TcpDriver
log = logging.getLogger(__name__)
class AioTcpDriver(BaseDriver):
def __init__(self):
super().__init__()
self.aio_ctx = AioContext.get_global_context()
self.server = None
self.ssl_context = None
@staticmethod
def supported_transports() -> List[str]:
return ["atcp", "satcp"]
@staticmethod
def capabilities() -> Dict[str, Any]:
return {DriverCap.SEND_HEARTBEAT.value: True, DriverCap.SUPPORT_SSL.value: True}
def listen(self, connector: ConnectorInfo):
self._run(connector, Mode.PASSIVE)
def connect(self, connector: ConnectorInfo):
self._run(connector, Mode.ACTIVE)
def shutdown(self):
self.close_all()
if self.server:
self.server.close()
# This will wake up the event loop to end the server
self.aio_ctx.run_coro(asyncio.sleep(0))
@staticmethod
def get_urls(scheme: str, resources: dict) -> (str, str):
return TcpDriver.get_urls(scheme, resources)
# Internal methods
def _run(self, connector: ConnectorInfo, mode: Mode):
self.connector = connector
if mode != self.connector.mode:
raise CommError(CommError.ERROR, f"Connector mode doesn't match driver mode for {self.connector}")
try:
self.aio_ctx.run_coro(self._async_run(mode)).result()
except CancelledError:
log.debug(f"Connector {self.connector} is cancelled")
async def _async_run(self, mode: Mode):
params = self.connector.params
host = params.get(DriverParams.HOST.value)
port = params.get(DriverParams.PORT.value)
if mode == Mode.ACTIVE:
coroutine = self._tcp_connect(host, port)
else:
coroutine = self._tcp_listen(host, port)
await coroutine
async def _tcp_connect(self, host, port):
self.ssl_context = get_ssl_context(self.connector.params, ssl_server=False)
reader, writer = await asyncio.open_connection(host, port, ssl=self.ssl_context)
await self._create_connection(reader, writer)
async def _tcp_listen(self, host, port):
self.ssl_context = get_ssl_context(self.connector.params, ssl_server=True)
self.server = await asyncio.start_server(self._create_connection, host, port, ssl=self.ssl_context)
async with self.server:
await self.server.serve_forever()
async def _create_connection(self, reader, writer):
conn = AioConnection(self.connector, self.aio_ctx, reader, writer, self.ssl_context is not None)
self.add_connection(conn)
await conn.read_loop()
self.close_connection(conn)
| NVFlare-main | nvflare/fuel/f3/drivers/aio_tcp_driver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import socket
from socketserver import TCPServer, ThreadingTCPServer
from typing import Any, Dict, List
from nvflare.fuel.f3.drivers.base_driver import BaseDriver
from nvflare.fuel.f3.drivers.driver import ConnectorInfo, Driver
from nvflare.fuel.f3.drivers.driver_params import DriverCap, DriverParams
from nvflare.fuel.f3.drivers.net_utils import get_ssl_context, get_tcp_urls
from nvflare.fuel.f3.drivers.socket_conn import ConnectionHandler, SocketConnection
from nvflare.security.logging import secure_format_exception
log = logging.getLogger(__name__)
class TcpStreamServer(ThreadingTCPServer):
TCPServer.allow_reuse_address = True
def __init__(self, driver: Driver, connector: ConnectorInfo):
self.driver = driver
self.connector = connector
params = connector.params
self.ssl_context = get_ssl_context(params, ssl_server=True)
host = params.get(DriverParams.HOST.value)
port = int(params.get(DriverParams.PORT.value))
self.local_addr = f"{host}:{port}"
TCPServer.__init__(self, (host, port), ConnectionHandler, False)
if self.ssl_context:
self.socket = self.ssl_context.wrap_socket(self.socket, server_side=True)
try:
self.server_bind()
self.server_activate()
except Exception as ex:
log.error(f"{os.getpid()}: Error binding to {host}:{port}: {secure_format_exception(ex)}")
self.server_close()
raise
class TcpDriver(BaseDriver):
def __init__(self):
super().__init__()
self.server = None
@staticmethod
def supported_transports() -> List[str]:
return ["tcp", "stcp"]
@staticmethod
def capabilities() -> Dict[str, Any]:
return {DriverCap.SEND_HEARTBEAT.value: True, DriverCap.SUPPORT_SSL.value: True}
def listen(self, connector: ConnectorInfo):
self.connector = connector
self.server = TcpStreamServer(self, connector)
self.server.serve_forever()
def connect(self, connector: ConnectorInfo):
self.connector = connector
params = connector.params
host = params.get(DriverParams.HOST.value)
port = int(params.get(DriverParams.PORT.value))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
context = get_ssl_context(params, ssl_server=False)
if context:
sock = context.wrap_socket(sock)
sock.connect((host, port))
connection = SocketConnection(sock, connector, bool(context))
self.add_connection(connection)
connection.read_loop()
self.close_connection(connection)
def shutdown(self):
self.close_all()
if self.server:
self.server.shutdown()
@staticmethod
def get_urls(scheme: str, resources: dict) -> (str, str):
secure = resources.get(DriverParams.SECURE)
if secure:
scheme = "stcp"
return get_tcp_urls(scheme, resources)
| NVFlare-main | nvflare/fuel/f3/drivers/tcp_driver.py |
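A small sketch showing how TcpDriver.get_urls() switches the scheme to "stcp" when the resources mark the connection as secure; the host and port range are arbitrary.
# Hedged usage sketch (not part of the repository file).
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.drivers.tcp_driver import TcpDriver

connect_url, listening_url = TcpDriver.get_urls(
    "tcp", {"host": "localhost", DriverParams.SECURE: True, DriverParams.PORTS: "18000-18010"}
)
print(connect_url)    # e.g. stcp://localhost:18000
print(listening_url)  # e.g. stcp://0:18000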
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import logging
import os
import sys
from typing import Optional, Type
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.drivers.driver import Driver
log = logging.getLogger(__name__)
class DriverManager:
"""Transport driver manager"""
def __init__(self):
self.drivers = {}
self.class_cache = set()
def register(self, driver_class: Type[Driver]):
"""Register a driver with Driver Manager
Args:
driver_class: Driver class to be registered. It must be a subclass of Driver
"""
if not inspect.isclass(driver_class):
raise CommError(CommError.ERROR, f"Registrant must be class, not instance: {type(driver_class)}")
if not issubclass(driver_class, Driver):
raise CommError(CommError.ERROR, f"Class {driver_class.__name__} is not a transport driver")
for scheme in driver_class.supported_transports():
key = scheme.lower()
if key in self.drivers:
log.error(f"Driver for scheme {scheme} is already registered, ignored")
else:
self.drivers[key] = driver_class
log.debug(f"Driver {driver_class.__name__} is registered for {scheme}")
def search_folder(self, folder: str, package: Optional[str]):
"""Search the folder recursively and register all drivers
Args:
folder: The folder to scan
package: The root package for all the drivers. If none, the folder is the
root of the packages
"""
if package is None and folder not in sys.path:
sys.path.append(folder)
for root, dirs, files in os.walk(folder):
for filename in files:
if filename.endswith(".py"):
module = filename[:-3]
sub_folder = root[len(folder) :]
if sub_folder:
sub_folder = sub_folder.strip("/").replace("/", ".")
if sub_folder:
module = sub_folder + "." + module
if package:
module = package + "." + module
imported = importlib.import_module(module)
for _, cls_obj in inspect.getmembers(imported, inspect.isclass):
if cls_obj.__name__ in self.class_cache:
continue
self.class_cache.add(cls_obj.__name__)
if issubclass(cls_obj, Driver) and not inspect.isabstract(cls_obj):
spec = inspect.getfullargspec(cls_obj.__init__)
if len(spec.args) == 1:
self.register(cls_obj)
else:
# Can't handle argument in constructor
log.warning(f"Invalid driver, __init__ with extra arguments: {module}")
def find_driver_class(self, scheme_or_url: str) -> Optional[Type[Driver]]:
"""Find the driver class based on scheme or URL
Args:
scheme_or_url: The scheme or the url
Returns:
The driver class or None if not found
"""
index = scheme_or_url.find(":")
if index > 0:
scheme = scheme_or_url[0:index]
else:
scheme = scheme_or_url
return self.drivers.get(scheme.lower())
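# --- Hedged usage sketch (illustration only, not part of driver_manager.py) ---
# Registers one concrete driver and resolves it by scheme or by full URL.
# TcpDriver's import path is taken from this repo's layout.
def _example_find_tcp_driver():
    from nvflare.fuel.f3.drivers.tcp_driver import TcpDriver

    manager = DriverManager()
    manager.register(TcpDriver)  # TcpDriver declares "tcp" and "stcp"
    assert manager.find_driver_class("tcp://localhost:8002") is TcpDriver
    assert manager.find_driver_class("STCP") is TcpDriver  # scheme lookup is case-insensitive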
| NVFlare-main | nvflare/fuel/f3/drivers/driver_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from asyncio import CancelledError, IncompleteReadError, StreamReader, StreamWriter
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.connection import BytesAlike, Connection
from nvflare.fuel.f3.drivers.aio_context import AioContext
from nvflare.fuel.f3.drivers.connector_info import ConnectorInfo
from nvflare.fuel.f3.drivers.driver_params import DriverParams
from nvflare.fuel.f3.drivers.net_utils import MAX_FRAME_SIZE
from nvflare.fuel.f3.sfm.prefix import PREFIX_LEN, Prefix
from nvflare.fuel.hci.security import get_certificate_common_name
from nvflare.security.logging import secure_format_exception
log = logging.getLogger(__name__)
class AioConnection(Connection):
def __init__(
self,
connector: ConnectorInfo,
aio_ctx: AioContext,
reader: StreamReader,
writer: StreamWriter,
secure: bool = False,
):
super().__init__(connector)
self.reader = reader
self.writer = writer
self.aio_ctx = aio_ctx
self.closing = False
self.secure = secure
self.conn_props = self._get_aio_properties()
def get_conn_properties(self) -> dict:
return self.conn_props
def close(self):
self.closing = True
if not self.writer:
return
self.writer.close()
self.aio_ctx.run_coro(self.writer.wait_closed())
def send_frame(self, frame: BytesAlike):
try:
self.aio_ctx.run_coro(self._async_send_frame(frame))
except Exception as ex:
log.error(f"Error calling send coroutine for connection {self}: {secure_format_exception(ex)}")
async def read_loop(self):
try:
while not self.closing:
frame = await self._async_read_frame()
self.process_frame(frame)
except IncompleteReadError:
if log.isEnabledFor(logging.DEBUG):
closer = "locally" if self.closing else "by peer"
log.debug(f"Connection {self} is closed {closer}")
except CancelledError as error:
log.debug(f"Connection {self} is closed by peer: {error}")
except Exception as ex:
log.error(f"Read error for connection {self}: {secure_format_exception(ex)}")
# Internal methods
async def _async_send_frame(self, frame: BytesAlike):
try:
self.writer.write(frame)
await self.writer.drain()
except Exception as ex:
if not self.closing:
log.error(f"Error sending frame for connection {self}: {secure_format_exception(ex)}")
async def _async_read_frame(self):
prefix_buf = await self.reader.readexactly(PREFIX_LEN)
prefix = Prefix.from_bytes(prefix_buf)
# Prefix only message
if prefix.length == PREFIX_LEN:
return prefix_buf
if prefix.length > MAX_FRAME_SIZE:
raise CommError(CommError.BAD_DATA, f"Frame exceeds limit ({prefix.length} > {MAX_FRAME_SIZE}")
remaining = await self.reader.readexactly(prefix.length - PREFIX_LEN)
return prefix_buf + remaining
def _get_aio_properties(self) -> dict:
conn_props = {}
if not self.writer:
return conn_props
fileno = 0
local_addr = self.writer.get_extra_info("sockname", "")
if isinstance(local_addr, tuple):
local_addr = f"{local_addr[0]}:{local_addr[1]}"
else:
sock = self.writer.get_extra_info("socket", None)
if sock:
fileno = sock.fileno()
local_addr = f"{local_addr}:{fileno}"
peer_addr = self.writer.get_extra_info("peername", "")
if isinstance(peer_addr, tuple):
peer_addr = f"{peer_addr[0]}:{peer_addr[1]}"
else:
peer_addr = f"{peer_addr}:{fileno}"
conn_props[DriverParams.LOCAL_ADDR.value] = local_addr
conn_props[DriverParams.PEER_ADDR.value] = peer_addr
peer_cert = self.writer.get_extra_info("peercert")
if peer_cert:
cn = get_certificate_common_name(peer_cert)
else:
if self.secure:
cn = "N/A"
else:
cn = None
if cn:
conn_props[DriverParams.PEER_CN.value] = cn
return conn_props
| NVFlare-main | nvflare/fuel/f3/drivers/aio_conn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any, Dict, List
from nvflare.fuel.f3.connection import Connection, ConnState
from nvflare.fuel.f3.drivers.connector_info import ConnectorInfo
class ConnMonitor(ABC):
@abstractmethod
def state_change(self, connection: Connection):
"""Driver state change notification, including new connections
Args:
connection: The connection that state has changed
Raises:
CommError: If any error happens while processing the frame
"""
pass
class Driver(ABC):
"""Transport driver spec
A transport driver is responsible for establishing connections. The connections are used
to transport frames to the remote endpoint.
The frame is opaque to the driver, except for the length (the first 4 bytes), which may be needed to
determine the frame boundary on stream-based transports like TCP or sockets.
"""
def __init__(self):
self.state = ConnState.IDLE
self.conn_monitor = None
def get_name(self) -> str:
"""Return the name of the driver, used for logging
By default, it returns class name
"""
return self.__class__.__name__
@staticmethod
@abstractmethod
def supported_transports() -> List[str]:
"""Return a list of transports supported by this driver, for example
["http", "https", "ws", "wss"]
"""
pass
@staticmethod
@abstractmethod
def capabilities() -> Dict[str, Any]:
"""Return a dictionary of capabilities of the driver."""
pass
@abstractmethod
def listen(self, connector: ConnectorInfo):
"""Start the driver in passive mode
Args:
connector: Connector with parameters
Raises:
CommError: If any errors
"""
pass
@abstractmethod
def connect(self, connector: ConnectorInfo):
"""Start the driver in active mode
Args:
connector: Connector with parameters
Raises:
CommError: If any errors
"""
pass
@staticmethod
@abstractmethod
def get_urls(scheme: str, resources: dict) -> (str, str):
"""Get active and passive URL pair based on resources
Args:
scheme: A scheme supported by the driver, like http or https
resources: User specified resources like host and port ranges.
Returns:
A tuple with active and passive URLs
Raises:
CommError: If no free port can be found
"""
pass
@abstractmethod
def shutdown(self):
"""Stop driver and disconnect all the connections created by it
Raises:
CommError: If any errors
"""
pass
def register_conn_monitor(self, monitor: ConnMonitor):
"""Register a monitor for connection state change, including new connections"""
self.conn_monitor = monitor
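# --- Hedged sketch (illustration only, not part of driver.py) -----------------
# The smallest concrete Driver satisfying the abstract interface above. It does
# no real I/O; it only illustrates what a custom transport must implement.
class _NoOpDriver(Driver):
    @staticmethod
    def supported_transports() -> List[str]:
        return ["noop"]

    @staticmethod
    def capabilities() -> Dict[str, Any]:
        return {}

    def listen(self, connector: ConnectorInfo):
        # A real driver would accept connections here and report each one
        # through self.conn_monitor.state_change()
        pass

    def connect(self, connector: ConnectorInfo):
        # A real driver would dial out here and report the new connection
        # through self.conn_monitor.state_change()
        pass

    @staticmethod
    def get_urls(scheme: str, resources: dict) -> (str, str):
        # Placeholder URLs; a real driver would pick a free port from resources
        return f"{scheme}://localhost:0", f"{scheme}://localhost:0"

    def shutdown(self):
        pass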
| NVFlare-main | nvflare/fuel/f3/drivers/driver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from abc import ABC
from typing import Dict, Optional
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.connection import Connection, ConnState
from nvflare.fuel.f3.drivers.driver import ConnectorInfo, Driver
log = logging.getLogger(__name__)
class BaseDriver(Driver, ABC):
"""Common base class for all drivers
It contains all the common connection management code
"""
def __init__(self):
super().__init__()
self.connections: Dict[str, Connection] = {}
self.connector: Optional[ConnectorInfo] = None
self.conn_lock = threading.Lock()
def add_connection(self, conn: Connection):
with self.conn_lock:
self.connections[conn.name] = conn
conn.state = ConnState.CONNECTED
self._notify_monitor(conn)
log.debug(f"Connection created: {self.get_name()}:{conn}")
def close_connection(self, conn: Connection):
log.debug(f"Connection removed: {self.get_name()}:{conn}")
conn.state = ConnState.CLOSED
self._notify_monitor(conn)
with self.conn_lock:
if not self.connections.pop(conn.name, None):
log.debug(f"{conn.name} is already removed from driver")
def close_all(self):
with self.conn_lock:
for name in sorted(self.connections.keys()):
conn = self.connections[name]
log.debug(f"Closing connection: {self.get_name()}:{conn}")
conn.close()
def _notify_monitor(self, conn: Connection):
if not self.conn_monitor:
raise CommError(CommError.ERROR, f"Connection monitor not registered for driver {self.get_name()}")
self.conn_monitor.state_change(conn)
| NVFlare-main | nvflare/fuel/f3/drivers/base_driver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class DriverParams(str, Enum):
# URL components. Those parameters are part of the URL, no need to be included in query string
# URL = SCHEME://HOST:PORT/PATH;PARAMS?QUERY#FRAG
URL = "url"
SCHEME = "scheme"
HOST = "host"
PORT = "port"
PATH = "path"
PARAMS = "params"
FRAG = "frag"
QUERY = "query"
# Other parameters
CA_CERT = "ca_cert"
SERVER_CERT = "server_cert"
SERVER_KEY = "server_key"
CLIENT_CERT = "client_cert"
CLIENT_KEY = "client_key"
SECURE = "secure"
PORTS = "ports"
SOCKET = "socket"
LOCAL_ADDR = "local_addr"
PEER_ADDR = "peer_addr"
PEER_CN = "peer_cn"
class DriverCap(str, Enum):
SEND_HEARTBEAT = "send_heartbeat"
SUPPORT_SSL = "support_ssl"
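# --- Hedged example (illustration only, not part of driver_params.py) ---------
# A plausible params dict for a TCP listener, keyed by the enum values above.
# Which keys are actually present depends on how the URL is parsed elsewhere in
# F3; the values here are illustrative only.
_EXAMPLE_TCP_PARAMS = {
    DriverParams.URL.value: "tcp://localhost:8002",
    DriverParams.SCHEME.value: "tcp",
    DriverParams.HOST.value: "localhost",
    DriverParams.PORT.value: "8002",
}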
| NVFlare-main | nvflare/fuel/f3/drivers/driver_params.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: streamer.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0estreamer.proto\x12\x08streamer\"\"\n\x05\x46rame\x12\x0b\n\x03seq\x18\x01 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x32<\n\x08Streamer\x12\x30\n\x06Stream\x12\x0f.streamer.Frame\x1a\x0f.streamer.Frame\"\x00(\x01\x30\x01\x42\x06\xa2\x02\x03RTGb\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'streamer_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\242\002\003RTG'
_FRAME._serialized_start=28
_FRAME._serialized_end=62
_STREAMER._serialized_start=64
_STREAMER._serialized_end=124
# @@protoc_insertion_point(module_scope)
| NVFlare-main | nvflare/fuel/f3/drivers/grpc/streamer_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from .streamer_pb2 import Frame
class StreamerStub(object):
"""Interface exported by the server.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Stream = channel.stream_stream(
'/streamer.Streamer/Stream',
request_serializer=Frame.SerializeToString,
response_deserializer=Frame.FromString,
)
class StreamerServicer(object):
"""Interface exported by the server.
"""
def Stream(self, request_iterator, context):
"""A Bidirectional streaming RPC.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_StreamerServicer_to_server(servicer, server):
rpc_method_handlers = {
'Stream': grpc.stream_stream_rpc_method_handler(
servicer.Stream,
request_deserializer=Frame.FromString,
response_serializer=Frame.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'streamer.Streamer', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Streamer(object):
"""Interface exported by the server.
"""
@staticmethod
def Stream(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(request_iterator, target, '/streamer.Streamer/Stream',
Frame.SerializeToString,
Frame.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| NVFlare-main | nvflare/fuel/f3/drivers/grpc/streamer_pb2_grpc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/f3/drivers/grpc/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.f3.cellnet.net_manager import NetManager
from nvflare.fuel.f3.mpm import MainProcessMonitor
from nvflare.fuel.hci.security import hash_password
from nvflare.fuel.hci.server.builtin import new_command_register_with_builtin_module
from nvflare.fuel.hci.server.hci import AdminServer
from nvflare.fuel.hci.server.login import LoginModule, SessionManager, SimpleAuthenticator
from .cell_runner import CellRunner, NetConfig
class Server(CellRunner):
def __init__(self, config_path: str, config_file: str, log_level: str):
self._name = self.__class__.__name__
self.logger = logging.getLogger(self._name)
net_config = NetConfig(config_file)
admin_host, admin_port = net_config.get_admin()
if not admin_host or not admin_port:
raise RuntimeError("missing admin host/port in net config")
CellRunner.__init__(
self, config_path=config_path, config_file=config_file, my_name=FQCN.ROOT_SERVER, log_level=log_level
)
net_mgr = NetManager(self.agent, diagnose=True)
# set up admin server
users = {"admin": hash_password("admin")}
cmd_reg = new_command_register_with_builtin_module(app_ctx=self)
authenticator = SimpleAuthenticator(users)
sess_mgr = SessionManager()
login_module = LoginModule(authenticator, sess_mgr)
cmd_reg.register_module(login_module)
cmd_reg.register_module(sess_mgr)
cmd_reg.register_module(net_mgr)
self.sess_mgr = sess_mgr
self.admin = AdminServer(cmd_reg=cmd_reg, host=admin_host, port=int(admin_port))
MainProcessMonitor.add_cleanup_cb(self._clean_up)
def start(self, start_all=True):
super().start(start_all)
self.admin.start()
def _clean_up(self):
# self.sess_mgr.shutdown()
self.logger.debug(f"{self.cell.get_fqcn()}: Closed session manager")
self.admin.stop()
self.logger.debug(f"{self.cell.get_fqcn()}: Stopped Admin Server")
| NVFlare-main | nvflare/fuel/f3/qat/server.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import cmd
import json
from nvflare.fuel.f3.stats_pool import VALID_HIST_MODES, StatsPoolManager, parse_hist_mode
from nvflare.fuel.hci.table import Table
class StatsViewer(cmd.Cmd):
def __init__(self, pools: dict, prompt: str = "> "):
cmd.Cmd.__init__(self)
self.intro = "Type help or ? to list commands.\n"
self.prompt = prompt
self.pools = pools
StatsPoolManager.from_dict(pools)
def do_list_pools(self, arg):
headers, rows = StatsPoolManager.get_table()
self._show_table(headers, rows)
def do_show_pool(self, arg: str):
args = arg.split()
if len(args) < 1:
self.write_string("Error: missing pool name")
return
name = args[0]
mode = ""
if len(args) > 1:
mode = args[1]
mode = parse_hist_mode(mode)
if not mode:
self.write_string(f"Error: invalid model {args[1]} - must be one of {VALID_HIST_MODES}")
return
pool = StatsPoolManager.get_pool(name)
if not pool:
self.write_string(f"Error: pool '{name}' does not exist")
return
headers, rows = pool.get_table(mode)
self._show_table(headers, rows)
def _show_table(self, headers, rows):
t = Table(headers)
for r in rows:
t.add_row(r)
t.write(self.stdout)
def do_bye(self, arg):
return True
def emptyline(self):
return
def run(self):
self.cmdloop(self.intro)
def _write(self, content: str):
self.stdout.write(content)
def write_string(self, data: str):
content = data + "\n"
self._write(content)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--stats_file", "-f", type=str, help="stats file name", required=True)
args = parser.parse_args()
with open(args.stats_file) as f:
d = json.load(f)
viewer = StatsViewer(d)
viewer.run()
if __name__ == "__main__":
main()
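# Hedged usage example (invocation path assumed from this repo layout):
#   python -m nvflare.fuel.f3.qat.stats_viewer -f server_stats.json
# where the JSON file is a stats summary such as the "<fqcn>_stats.json" dumped
# by cell_runner. At the prompt, try "list_pools", "show_pool <name> [mode]", "bye".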
| NVFlare-main | nvflare/fuel/f3/qat/stats_viewer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--num_clients", "-c", type=int, help="number of clients", required=False, default=2)
parser.add_argument("--num_jobs", "-j", type=int, help="number of jobs", required=False, default=1)
parser.add_argument("--scheme", "-s", type=str, help="scheme of the root url", required=False, default="grpc")
args = parser.parse_args()
num_clients = args.num_clients
if num_clients <= 0:
print(f"invalid num_clients {num_clients}: must be > 0")
num_jobs = args.num_jobs
if num_jobs <= 0:
print(f"invalid num_jobs {num_jobs}: must be > 0")
clients = [f"c{i+1}" for i in range(num_clients)]
jobs = [f"j{i+1}" for i in range(num_jobs)]
server_jobs = [f"s_{j}" for j in jobs]
config = {
"root_url": f"{args.scheme}://localhost:8002",
"admin": {"host": "localhost", "port": "8003"},
"server": {"children": server_jobs, "clients": clients},
}
for c in clients:
cc = {c: {"children": [f"{c}_{j}" for j in jobs]}}
config.update(cc)
file_name = f"net_config_c{num_clients}_j{num_jobs}.json"
json_object = json.dumps(config, indent=4)
with open(file_name, "w") as outfile:
outfile.write(json_object)
print(f"Config file created: {file_name}")
if __name__ == "__main__":
main()
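# Hedged usage example (invocation path assumed):
#   python make_net_config.py -c 2 -j 1 -s grpc
# writes net_config_c2_j1.json with root_url "grpc://localhost:8002",
# server children ["s_j1"] and clients ["c1", "c2"].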
| NVFlare-main | nvflare/fuel/f3/qat/make_net_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from nvflare.fuel.f3.mpm import MainProcessMonitor as Mpm
from nvflare.fuel.f3.qat.cell_runner import CellRunner
from nvflare.fuel.utils.config_service import ConfigService
def main():
"""
Script to launch a cell runner for the named cell defined in the net config.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--config_dir", "-c", type=str, help="config folder", required=False, default=".")
parser.add_argument(
"--config_file", "-f", type=str, help="config file name", required=False, default="net_config.json"
)
parser.add_argument("--name", "-n", type=str, help="my cell name", required=True)
parser.add_argument("--parent_fqcn", "-pn", type=str, help="parent cell name", required=False, default="")
parser.add_argument("--parent_url", "-pu", type=str, help="parent cell url", required=False, default="")
parser.add_argument("--log_level", "-l", type=str, help="log level", required=False, default="info")
parser.add_argument("--self_only", "-s", help="self only - don't start subs", default=False, action="store_true")
args = parser.parse_args()
logging.basicConfig()
log_level = logging.INFO
if args.log_level in ["debug", "d"]:
log_level = logging.DEBUG
elif args.log_level in ["error", "err", "e"]:
log_level = logging.ERROR
logging.getLogger().setLevel(log_level)
ConfigService.initialize(section_files={}, config_path=[args.config_dir])
runner = CellRunner(
config_path=args.config_dir,
config_file=args.config_file,
my_name=args.name,
parent_url=args.parent_url,
parent_fqcn=args.parent_fqcn,
log_level=args.log_level,
)
start_all = not args.self_only
runner.start(start_all)
runner.run()
if __name__ == "__main__":
Mpm.run(main)
| NVFlare-main | nvflare/fuel/f3/qat/run_cell.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.f3.qat.net_config import NetConfig
from nvflare.fuel.hci.client.cli import AdminClient, CredentialType
from nvflare.fuel.hci.client.static_service_finder import StaticServiceFinder
from nvflare.fuel.utils.config_service import ConfigService
def main():
"""
Script to launch the admin client to issue admin commands to the server.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--config_dir", "-c", type=str, help="config folder", required=False, default=".")
parser.add_argument(
"--config_file", "-f", type=str, help="config file name", required=False, default="net_config.json"
)
args = parser.parse_args()
ConfigService.initialize(section_files={}, config_path=[args.config_dir])
net_config = NetConfig(args.config_file)
admin_host, admin_port = net_config.get_admin()
if not admin_host or not admin_port:
raise ConfigError("missing admin host or port in net_config")
service_finder = StaticServiceFinder(host=admin_host, port=int(admin_port))
client = AdminClient(
credential_type=CredentialType.PASSWORD,
service_finder=service_finder,
)
client.run()
if __name__ == "__main__":
main()
| NVFlare-main | nvflare/fuel/f3/qat/admin.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from nvflare.fuel.f3.stats_pool import CsvRecordReader
"""
This tool can be used to compute the total time spent on communication for a job.
NOTE: if all processes (server and clients) are run on the same host, then the computed results are accurate.
If processes are run on different hosts, then these hosts must be synchronized with NTP (Network Time Protocol).
Before starting this tool, you must collect the stats_pool_records.csv files of all the processes into a folder.
These files must all have the suffix of ".csv". For FL clients, these files are located in their workspaces.
For the FL server, you need to download the job first (using the admin console or the FLARE API) and then find it in the
downloaded workspace of the job.
Since these files have the same name in their workspaces, you must rename them when copying them into the same folder.
You can simply use the client names for the clients and "server" for the server file.
Once you have all the csv files in the same folder, you can start this tool with the following args:
-d: the directory that contains the csv files. Required.
-o: the output file that will contain the result. Optional.
If the output file name is not specified, it defaults to "comm.txt".
The result is printed to the screen and written to the output file.
The output file will be placed into the same folder that contains the csv files.
Do not name your output file with the suffix ".csv"!
"""
def _print(data: str, out_file):
print(data)
if out_file is not None:
out_file.write(data + "\n")
def _compute_time(file_name: str, pool_name: str, out_file):
result = 0.0
max_time = 0.0
min_time = 1000
count = 0
_print(f"Processing record file: {file_name}", out_file)
reader = CsvRecordReader(file_name)
for rec in reader:
if rec.pool_name != pool_name:
continue
count += 1
result += rec.value
if max_time < rec.value:
max_time = rec.value
if min_time > rec.value:
min_time = rec.value
_print(f" Max={max_time}; Min={min_time}; Avg={result/count}; Count={count}; Total={result}", out_file)
return result
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--stats_dir", "-d", type=str, help="directory that contains stats record files", required=True)
parser.add_argument(
"--out_file",
"-o",
type=str,
help="directory that contains stats record files",
required=False,
default="comm.txt",
)
args = parser.parse_args()
stats_dir = args.stats_dir
files = os.listdir(stats_dir)
if not files:
print(f"No stats files in {stats_dir}")
return -1
out_file = None
if args.out_file:
out_file = open(os.path.join(stats_dir, args.out_file), "w")
total = 0.0
for fn in files:
if not fn.endswith(".csv"):
continue
t = _compute_time(file_name=os.path.join(stats_dir, fn), pool_name="msg_travel", out_file=out_file)
total += t
_print(f"Total comm time: {total}", out_file)
if out_file is not None:
out_file.close()
if __name__ == "__main__":
main()
| NVFlare-main | nvflare/fuel/f3/qat/time_comm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
import subprocess
import sys
import threading
import time
from nvflare.fuel.f3.cellnet.core_cell import CellAgent, CoreCell, Message, MessageHeaderKey, MessageType
from nvflare.fuel.f3.cellnet.fqcn import FQCN
from nvflare.fuel.f3.cellnet.net_agent import NetAgent
from nvflare.fuel.f3.mpm import MainProcessMonitor
from nvflare.fuel.f3.stats_pool import StatsPoolManager
from .net_config import NetConfig
class _RunnerInfo:
def __init__(self, name: str, fqcn: str, process):
self.name = name
self.fqcn = fqcn
self.process = process
class CellRunner:
def __init__(
self,
config_path: str,
config_file: str,
my_name: str,
parent_url: str = "",
parent_fqcn: str = "",
log_level: str = "info",
):
self.new_root_url = None
self.config_path = config_path
self.config_file = config_file
self.log_level = log_level
self.waiter = threading.Event()
if not parent_fqcn:
my_fqcn = my_name
else:
my_fqcn = FQCN.join([parent_fqcn, my_name])
net_config = NetConfig(config_file)
self.root_url = net_config.get_root_url()
self.children = net_config.get_children(my_name)
self.clients = net_config.get_clients()
self.create_internal_listener = self.children and len(self.children) > 0
self.cell = CoreCell(
fqcn=my_fqcn,
root_url=self.root_url,
secure=False,
credentials={},
create_internal_listener=self.create_internal_listener,
parent_url=parent_url,
)
self.agent = NetAgent(
self.cell,
self._change_root,
self._agent_closed,
)
self.child_runners = {}
self.client_runners = {}
self.cell.set_cell_connected_cb(cb=self._cell_connected)
self.cell.set_cell_disconnected_cb(cb=self._cell_disconnected)
self.cell.add_incoming_reply_filter(channel="*", topic="*", cb=self._filter_incoming_reply)
self.cell.add_incoming_request_filter(channel="*", topic="*", cb=self._filter_incoming_request)
self.cell.add_outgoing_reply_filter(channel="*", topic="*", cb=self._filter_outgoing_reply)
self.cell.add_outgoing_request_filter(channel="*", topic="*", cb=self._filter_outgoing_request)
self.cell.set_message_interceptor(cb=self._inspect_message)
# MainProcessMonitor.add_run_monitor(self._check_new_root)
def _inspect_message(self, message: Message):
header_name = "inspected_by"
inspectors = message.get_header(header_name)
if not inspectors:
inspectors = []
message.set_header(header_name, inspectors)
inspectors.append(self.cell.get_fqcn())
def _cell_connected(self, connected_cell: CellAgent):
self.cell.logger.info(f"{self.cell.get_fqcn()}: Cell {connected_cell.get_fqcn()} connected")
def _cell_disconnected(self, disconnected_cell: CellAgent):
self.cell.logger.info(f"{self.cell.get_fqcn()}: Cell {disconnected_cell.get_fqcn()} disconnected")
def _filter_incoming_reply(self, message: Message):
channel = message.get_header(MessageHeaderKey.CHANNEL, "")
topic = message.get_header(MessageHeaderKey.TOPIC, "")
msg_type = message.get_header(MessageHeaderKey.MSG_TYPE)
destination = message.get_header(MessageHeaderKey.DESTINATION, "")
assert len(channel) > 0
assert len(topic) > 0
assert msg_type == MessageType.REPLY
assert destination == self.cell.get_fqcn()
self.cell.logger.debug(f"{self.cell.get_fqcn()}: _filter_incoming_reply called")
def _filter_incoming_request(self, message: Message):
channel = message.get_header(MessageHeaderKey.CHANNEL, "")
topic = message.get_header(MessageHeaderKey.TOPIC, "")
msg_type = message.get_header(MessageHeaderKey.MSG_TYPE)
destination = message.get_header(MessageHeaderKey.DESTINATION, "")
assert len(channel) > 0
assert len(topic) > 0
assert msg_type == MessageType.REQ
assert destination == self.cell.get_fqcn()
self.cell.logger.debug(f"{self.cell.get_fqcn()}: _filter_incoming_request called")
def _filter_outgoing_reply(self, message: Message):
channel = message.get_header(MessageHeaderKey.CHANNEL, "")
topic = message.get_header(MessageHeaderKey.TOPIC, "")
msg_type = message.get_header(MessageHeaderKey.MSG_TYPE)
origin = message.get_header(MessageHeaderKey.ORIGIN, "")
assert len(channel) > 0
assert len(topic) > 0
assert msg_type == MessageType.REPLY
assert origin == self.cell.get_fqcn()
self.cell.logger.debug(f"{self.cell.get_fqcn()}: _filter_outgoing_reply called")
def _filter_outgoing_request(self, message: Message):
channel = message.get_header(MessageHeaderKey.CHANNEL, "")
topic = message.get_header(MessageHeaderKey.TOPIC, "")
msg_type = message.get_header(MessageHeaderKey.MSG_TYPE)
origin = message.get_header(MessageHeaderKey.ORIGIN, "")
assert len(channel) > 0
assert len(topic) > 0
assert msg_type == MessageType.REQ
assert origin == self.cell.get_fqcn()
self.cell.logger.debug(f"{self.cell.get_fqcn()}: _filter_outgoing_request called")
def _create_subprocess(self, name: str, parent_fqcn: str, parent_url: str, start_it=True):
time.sleep(0.2)
parts = [
f"{sys.executable} -m run_cell",
f"-c {self.config_path}",
f"-f {self.config_file}",
f"-n {name}",
f"-l {self.log_level}",
]
if parent_fqcn:
parts.append(f"-pn {parent_fqcn}")
if parent_url:
parts.append(f"-pu {parent_url}")
command = " ".join(parts)
print(f"Start Cell Command: {command}")
if start_it:
return subprocess.Popen(shlex.split(command), preexec_fn=os.setsid, env=os.environ.copy())
else:
return None
def start(self, start_all=True):
self.cell.start()
if self.create_internal_listener:
# create children
int_url = self.cell.get_internal_listener_url()
for child_name in self.children:
p = self._create_subprocess(
name=child_name, parent_url=int_url, parent_fqcn=self.cell.get_fqcn(), start_it=start_all
)
child_fqcn = FQCN.join([self.cell.get_fqcn(), child_name])
info = _RunnerInfo(child_name, child_fqcn, p)
self.child_runners[child_name] = info
if self.cell.get_fqcn() == FQCN.ROOT_SERVER and self.clients:
# I'm the server root: create clients
time.sleep(1.0)
for client_name in self.clients:
p = self._create_subprocess(name=client_name, parent_url="", parent_fqcn="", start_it=start_all)
self.client_runners[client_name] = _RunnerInfo(client_name, client_name, p)
def stop(self):
# self.agent.stop()
self.waiter.set()
def _agent_closed(self):
self.stop()
def _change_root(self, url: str):
self.cell.change_server_root(url)
def dump_stats(self):
StatsPoolManager.dump_summary(f"{self.cell.get_fqcn()}_stats.json")
def run(self):
MainProcessMonitor.set_name(self.cell.get_fqcn())
MainProcessMonitor.add_cleanup_cb(self.dump_stats)
MainProcessMonitor.add_cleanup_cb(self.cell.stop)
self.waiter.wait()
| NVFlare-main | nvflare/fuel/f3/qat/cell_runner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.utils.config_service import ConfigService
class NetConfig:
def __init__(self, config_file_name="net_config.json"):
self.config = ConfigService.load_config_dict(config_file_name)
if not self.config:
raise RuntimeError(f"cannot load {config_file_name}")
def get_root_url(self):
return self.config.get("root_url")
def get_children(self, me: str):
my_config = self.config.get(me)
if my_config:
return my_config.get("children", [])
else:
return []
def get_clients(self):
server_config = self.config.get("server")
if server_config:
return server_config.get("clients", [])
else:
return []
def get_admin(self) -> (str, str):
admin_config = self.config.get("admin")
if admin_config:
return admin_config.get("host"), admin_config.get("port")
return "", ""
| NVFlare-main | nvflare/fuel/f3/qat/net_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from nvflare.fuel.f3.mpm import MainProcessMonitor
from nvflare.fuel.f3.qat.server import Server
from nvflare.fuel.utils.config_service import ConfigService
def main():
"""
Script to launch the root server cell together with its admin server.
"""
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--config_dir", "-c", type=str, help="config folder", required=False, default=".")
parser.add_argument(
"--config_file", "-f", type=str, help="config file name", required=False, default="net_config.json"
)
parser.add_argument("--self_only", "-s", help="self only - don't start subs", default=False, action="store_true")
parser.add_argument("--log_level", "-l", type=str, help="log level", required=False, default="info")
args = parser.parse_args()
logging.basicConfig()
log_level = logging.INFO
if args.log_level in ["debug", "d"]:
log_level = logging.DEBUG
elif args.log_level in ["error", "err", "e"]:
log_level = logging.ERROR
logging.getLogger().setLevel(log_level)
ConfigService.initialize(section_files={}, config_path=[args.config_dir])
server = Server(config_path=args.config_dir, config_file=args.config_file, log_level=args.log_level)
start_all = not args.self_only
server.start(start_all)
server.run()
if __name__ == "__main__":
MainProcessMonitor.run(main)
| NVFlare-main | nvflare/fuel/f3/qat/run_server.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from dataclasses import dataclass
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.connection import BytesAlike
PREFIX_STRUCT = struct.Struct(">IHBBHHHH")
PREFIX_LEN = PREFIX_STRUCT.size
@dataclass
class Prefix:
"""Prefix is the 16-byte fixed header for the SFM frame, every frame must have this prefix.
Besides all the other attributes, it provides framing for the message. Framing is needed if the
frame is sent over byte streams like TCP or sockets.
The 8 fields in the prefix are all integers encoded in big-endian:
1. length(4): Total length of the frame.
2. header_len(2): Length of the encoded headers
3. type(1): Frame type (DATA, HELLO etc)
4. reserved(1): Not used, 0
5. flags(2): Attribute of the frame (OOB, ACK etc).
6. app_id(2): Application ID to support multiple apps
7. stream_id(2): Stream ID to connect all fragments of a stream
8. sequence(2): A sequence number for each frame. Used to detect lost frames.
"""
length: int = 0
header_len: int = 0
type: int = 0
reserved: int = 0
flags: int = 0
app_id: int = 0
stream_id: int = 0
sequence: int = 0
@staticmethod
def from_bytes(buffer: bytes) -> "Prefix":
if len(buffer) < PREFIX_LEN:
raise CommError(CommError.BAD_DATA, "Prefix too short")
return Prefix(*PREFIX_STRUCT.unpack_from(buffer, 0))
def to_buffer(self, buffer: BytesAlike, offset: int):
PREFIX_STRUCT.pack_into(
buffer,
offset,
self.length,
self.header_len,
self.type,
self.reserved,
self.flags,
self.app_id,
self.stream_id,
self.sequence,
)
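# --- Hedged usage sketch (illustration only, not part of prefix.py) -----------
# Round-trips the 16-byte prefix through pack/unpack using only the API above.
# The numeric type/app/stream values are arbitrary placeholders.
def _example_prefix_round_trip():
    out = Prefix(length=PREFIX_LEN, header_len=0, type=1, reserved=0, flags=0, app_id=3, stream_id=7, sequence=42)
    buf = bytearray(PREFIX_LEN)
    out.to_buffer(buf, 0)
    back = Prefix.from_bytes(bytes(buf))
    assert back == out  # Prefix is a dataclass, so field-wise equality holds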
| NVFlare-main | nvflare/fuel/f3/sfm/prefix.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Optional
import msgpack
from nvflare.fuel.f3.comm_error import CommError
from nvflare.fuel.f3.connection import BytesAlike, Connection, ConnState, FrameReceiver
from nvflare.fuel.f3.drivers.connector_info import ConnectorInfo, Mode
from nvflare.fuel.f3.drivers.driver import ConnMonitor, Driver
from nvflare.fuel.f3.drivers.driver_params import DriverCap, DriverParams
from nvflare.fuel.f3.drivers.net_utils import ssl_required
from nvflare.fuel.f3.endpoint import Endpoint, EndpointMonitor, EndpointState
from nvflare.fuel.f3.message import Message, MessageReceiver
from nvflare.fuel.f3.sfm.constants import HandshakeKeys, Types
from nvflare.fuel.f3.sfm.heartbeat_monitor import HeartbeatMonitor
from nvflare.fuel.f3.sfm.prefix import PREFIX_LEN, Prefix
from nvflare.fuel.f3.sfm.sfm_conn import SfmConnection
from nvflare.fuel.f3.sfm.sfm_endpoint import SfmEndpoint
from nvflare.fuel.f3.stats_pool import StatsPoolManager
from nvflare.security.logging import secure_format_exception, secure_format_traceback
FRAME_THREAD_POOL_SIZE = 100
CONN_THREAD_POOL_SIZE = 16
INIT_WAIT = 1
MAX_WAIT = 60
SILENT_RECONNECT_TIME = 5
SELF_ADDR = "0.0.0.0:0"
log = logging.getLogger(__name__)
handle_lock = threading.Lock()
handle_count = 0
def get_handle():
global handle_lock, handle_count
with handle_lock:
handle_count += 1
return "CH%05d" % handle_count
class ConnManager(ConnMonitor):
"""SFM connection manager
The class is responsible for maintaining state of SFM connections and pumping data through them
"""
def __init__(self, local_endpoint: Endpoint):
self.local_endpoint = local_endpoint
# Active connectors
self.connectors: Dict[str, ConnectorInfo] = {}
# A dict of SFM connections, key is connection name
self.sfm_conns: Dict[str, SfmConnection] = {}
# A dict of SfmEndpoint for finding endpoint by name
self.sfm_endpoints: Dict[str, SfmEndpoint] = {}
# A list of Endpoint monitors
self.monitors: List[EndpointMonitor] = []
# App/receiver mapping
self.receivers: Dict[int, MessageReceiver] = {}
self.started = False
self.conn_mgr_executor = ThreadPoolExecutor(CONN_THREAD_POOL_SIZE, "conn_mgr")
self.frame_mgr_executor = ThreadPoolExecutor(FRAME_THREAD_POOL_SIZE, "frame_mgr")
self.lock = threading.Lock()
self.null_conn = NullConnection()
stats = StatsPoolManager.get_pool("sfm_send_frame")
if not stats:
stats = StatsPoolManager.add_time_hist_pool(
"sfm_send_frame", "SFM send_frame time in secs", scope=local_endpoint.name
)
self.send_frame_stats = stats
self.heartbeat_monitor = HeartbeatMonitor(self.sfm_conns)
def add_connector(self, driver: Driver, params: dict, mode: Mode) -> str:
# Validate parameters
capabilities = driver.capabilities()
support_ssl = capabilities.get(DriverCap.SUPPORT_SSL, False)
if ssl_required(params) and not support_ssl:
scheme = params.get(DriverParams.SCHEME.value, "Unknown")
raise CommError(
CommError.BAD_CONFIG,
f"Connector with scheme {scheme} requires SSL but " f"driver {driver.get_name()} doesn't support it",
)
handle = get_handle()
connector = ConnectorInfo(handle, driver, params, mode, 0, 0, False, False)
driver.register_conn_monitor(self)
with self.lock:
self.connectors[handle] = connector
log.debug(f"Connector {connector} is created")
if self.started:
self.start_connector(connector)
return handle
def remove_connector(self, handle: str):
with self.lock:
connector = self.connectors.pop(handle, None)
if connector:
connector.stopping = True
connector.driver.shutdown()
log.debug(f"Connector {connector} is removed")
else:
log.error(f"Unknown connector handle: {handle}")
def start(self):
with self.lock:
for handle in sorted(self.connectors.keys()):
connector = self.connectors[handle]
if not connector.started:
self.start_connector(connector)
self.heartbeat_monitor.start()
self.started = True
def stop(self):
self.heartbeat_monitor.stop()
with self.lock:
for handle in sorted(self.connectors.keys()):
connector = self.connectors[handle]
connector.stopping = True
connector.driver.shutdown()
self.conn_mgr_executor.shutdown(True)
self.frame_mgr_executor.shutdown(True)
def find_endpoint(self, name: str) -> Optional[Endpoint]:
sfm_endpoint = self.sfm_endpoints.get(name)
if not sfm_endpoint:
log.debug(f"Endpoint {name} doesn't exist")
return None
return sfm_endpoint.endpoint
def remove_endpoint(self, name: str):
sfm_endpoint = self.sfm_endpoints.get(name)
if not sfm_endpoint:
log.debug(f"Endpoint {name} doesn't exist or already removed")
return
for sfm_conn in sfm_endpoint.connections:
sfm_conn.conn.close()
self.sfm_endpoints.pop(name)
log.debug(f"Endpoint {name} is removed")
def get_connections(self, name: str) -> Optional[List[SfmConnection]]:
sfm_endpoint = self.sfm_endpoints.get(name)
if not sfm_endpoint:
log.debug("Endpoint {name} doesn't exist")
return None
return sfm_endpoint.connections
def send_message(self, endpoint: Endpoint, app_id: int, headers: Optional[dict], payload: BytesAlike):
"""Send a message to endpoint for app
The message is asynchronous, no response is expected.
Args:
endpoint: An endpoint to send the message to
app_id: Application ID
headers: headers, optional
payload: message payload, optional
Raises:
CommError: If any error happens while sending the data
"""
if endpoint.name == self.local_endpoint.name:
self.send_loopback_message(endpoint, app_id, headers, payload)
return
sfm_endpoint = self.sfm_endpoints.get(endpoint.name)
if not sfm_endpoint:
raise CommError(CommError.CLOSED, f"Endpoint {endpoint.name} not available, may be disconnected")
state = sfm_endpoint.endpoint.state
if state != EndpointState.READY:
raise CommError(CommError.NOT_READY, f"Endpoint {endpoint.name} is not ready: {state}")
stream_id = sfm_endpoint.next_stream_id()
# When multiple connections, round-robin by stream ID
sfm_conn = sfm_endpoint.get_connection(stream_id)
if not sfm_conn:
log.error("Logic error, ready endpoint has no connections")
raise CommError(CommError.ERROR, f"Endpoint {endpoint.name} has no connection")
# TODO: If multiple connections, should retry a diff connection on errors
start = time.perf_counter()
sfm_conn.send_data(app_id, stream_id, headers, payload)
self.send_frame_stats.record_value(
category=sfm_conn.conn.connector.driver.get_name(), value=time.perf_counter() - start
)
def register_message_receiver(self, app_id: int, receiver: MessageReceiver):
if self.receivers.get(app_id):
raise CommError(CommError.BAD_CONFIG, f"Receiver for app {app_id} is already registered")
self.receivers[app_id] = receiver
def add_endpoint_monitor(self, monitor: EndpointMonitor):
self.monitors.append(monitor)
# Internal methods
def start_connector(self, connector: ConnectorInfo):
"""Start connector in a new thread"""
if connector.started:
return
log.info(f"Connector {connector} is starting")
self.conn_mgr_executor.submit(self.start_connector_task, connector)
@staticmethod
def start_connector_task(connector: ConnectorInfo):
"""Start connector in a new thread
This function will loop as long as connector is not stopped
"""
connector.started = True
if connector.mode == Mode.ACTIVE:
starter = connector.driver.connect
else:
starter = connector.driver.listen
wait = INIT_WAIT
while not connector.stopping:
start_time = time.time()
try:
starter(connector)
except Exception as ex:
fail_msg = (
f"Connector {connector} failed with exception {type(ex).__name__}: {secure_format_exception(ex)}"
)
if wait < SILENT_RECONNECT_TIME:
log.debug(fail_msg)
else:
log.error(fail_msg)
if connector.stopping:
log.debug(f"Connector {connector} has stopped")
break
# After a long run, resetting wait
run_time = time.time() - start_time
if run_time > MAX_WAIT:
log.debug(f"Driver for {connector} had a long run ({run_time} sec), resetting wait")
wait = INIT_WAIT
reconnect_msg = f"Retrying {connector} in {wait} seconds"
# First few retries may happen in normal shutdown, show it as debug
if wait < SILENT_RECONNECT_TIME:
log.debug(reconnect_msg)
else:
log.info(reconnect_msg)
time.sleep(wait)
# Exponential backoff
wait *= 2
if wait > MAX_WAIT:
wait = MAX_WAIT
def state_change(self, connection: Connection):
try:
state = connection.state
connector = connection.connector
if state == ConnState.CONNECTED:
self.handle_new_connection(connection)
with self.lock:
connector.total_conns += 1
connector.curr_conns += 1
elif state == ConnState.CLOSED:
self.close_connection(connection)
with self.lock:
connector.curr_conns -= 1
else:
log.error(f"Unknown state: {state}")
except Exception as ex:
log.error(f"Error handling state change: {secure_format_exception(ex)}")
log.debug(secure_format_traceback())
def process_frame_task(self, sfm_conn: SfmConnection, frame: BytesAlike):
try:
prefix = Prefix.from_bytes(frame)
log.debug(f"Received frame: {prefix} on {sfm_conn.conn}")
if prefix.header_len == 0:
headers = None
else:
headers = msgpack.unpackb(frame[PREFIX_LEN : PREFIX_LEN + prefix.header_len])
if prefix.type in (Types.HELLO, Types.READY):
if prefix.type == Types.HELLO:
sfm_conn.send_handshake(Types.READY)
data = self.get_dict_payload(prefix, frame)
self.update_endpoint(sfm_conn, data)
elif prefix.type == Types.PING:
sfm_conn.send_heartbeat(Types.PONG)
elif prefix.type == Types.PONG:
log.debug(f"PONG received for {sfm_conn.conn}")
# No action is needed for PONG. The last_activity is already updated
elif prefix.type == Types.DATA:
if prefix.length > PREFIX_LEN + prefix.header_len:
payload = frame[PREFIX_LEN + prefix.header_len :]
else:
payload = None
message = Message(headers, payload)
receiver = self.receivers.get(prefix.app_id)
if receiver:
receiver.process_message(sfm_conn.sfm_endpoint.endpoint, sfm_conn.conn, prefix.app_id, message)
else:
log.debug(f"No receiver registered for App ID {prefix.app_id}, message ignored")
else:
log.error(f"Received unsupported frame type {prefix.type} on {sfm_conn.get_name()}")
except Exception as ex:
log.error(f"Error processing frame: {secure_format_exception(ex)}")
log.debug(secure_format_traceback())
def process_frame(self, sfm_conn: SfmConnection, frame: BytesAlike):
self.frame_mgr_executor.submit(self.process_frame_task, sfm_conn, frame)
def update_endpoint(self, sfm_conn: SfmConnection, data: dict):
endpoint_name = data.pop(HandshakeKeys.ENDPOINT_NAME)
if not endpoint_name:
raise CommError(CommError.BAD_DATA, f"Handshake without endpoint name for connection {sfm_conn.get_name()}")
if endpoint_name == self.local_endpoint.name:
raise CommError(
CommError.BAD_DATA, f"Duplicate endpoint name {endpoint_name} for connection {sfm_conn.get_name()}"
)
endpoint = Endpoint(endpoint_name, data)
endpoint.state = EndpointState.READY
conn_props = sfm_conn.conn.get_conn_properties()
if conn_props:
endpoint.conn_props.update(conn_props)
sfm_endpoint = self.sfm_endpoints.get(endpoint_name)
if sfm_endpoint:
old_state = sfm_endpoint.endpoint.state
sfm_endpoint.endpoint = endpoint
else:
old_state = EndpointState.IDLE
sfm_endpoint = SfmEndpoint(endpoint)
sfm_endpoint.add_connection(sfm_conn)
sfm_conn.sfm_endpoint = sfm_endpoint
self.sfm_endpoints[endpoint_name] = sfm_endpoint
if endpoint.state != old_state:
self.notify_monitors(endpoint)
def notify_monitors(self, endpoint: Endpoint):
if not self.monitors:
log.debug("No endpoint monitor registered")
return
for monitor in self.monitors:
monitor.state_change(endpoint)
@staticmethod
def get_dict_payload(prefix, frame):
mv = memoryview(frame)
return msgpack.unpackb(mv[(PREFIX_LEN + prefix.header_len) :])
def handle_new_connection(self, connection: Connection):
sfm_conn = SfmConnection(connection, self.local_endpoint)
with self.lock:
self.sfm_conns[sfm_conn.get_name()] = sfm_conn
connection.register_frame_receiver(SfmFrameReceiver(self, sfm_conn))
if connection.connector.mode == Mode.ACTIVE:
sfm_conn.send_handshake(Types.HELLO)
def close_connection(self, connection: Connection):
with self.lock:
name = connection.name
if name not in self.sfm_conns:
log.debug(f"Connection {name} has closed with no endpoint assigned")
return
sfm_conn = self.sfm_conns.pop(name)
sfm_endpoint = sfm_conn.sfm_endpoint
if sfm_endpoint is None:
log.debug(f"Connection {name} is closed before SFM handshake")
return
old_state = sfm_endpoint.endpoint.state
sfm_endpoint.remove_connection(sfm_conn)
state = EndpointState.READY if sfm_endpoint.connections else EndpointState.DISCONNECTED
sfm_endpoint.endpoint.state = state
if old_state != state:
self.notify_monitors(sfm_endpoint.endpoint)
def send_loopback_message(self, endpoint: Endpoint, app_id: int, headers: Optional[dict], payload: BytesAlike):
"""Send message to itself"""
# Call receiver in a different thread to avoid deadlock
self.frame_mgr_executor.submit(self.loopback_message_task, endpoint, app_id, headers, payload)
def loopback_message_task(self, endpoint: Endpoint, app_id: int, headers: Optional[dict], payload: BytesAlike):
receiver = self.receivers.get(app_id)
if not receiver:
log.debug(f"No receiver registered for App ID {app_id}, loopback message ignored")
return
try:
receiver.process_message(endpoint, self.null_conn, app_id, Message(headers, payload))
except Exception as ex:
log.error(f"Loopback message error: {secure_format_exception(ex)}")
class SfmFrameReceiver(FrameReceiver):
def __init__(self, conn_manager: ConnManager, conn: SfmConnection):
self.conn_manager = conn_manager
self.conn = conn
def process_frame(self, frame: BytesAlike):
self.conn.last_activity = time.time()
try:
self.conn_manager.process_frame(self.conn, frame)
except Exception as ex:
log.error(f"Error processing frame: {secure_format_exception(ex)}")
log.debug(secure_format_traceback())
class NullConnection(Connection):
"""A mock connection used for loopback messages"""
def __init__(self):
connector = ConnectorInfo("Null", None, {}, Mode.ACTIVE, 0, 0, False, False)
super().__init__(connector)
def get_conn_properties(self) -> dict:
return {DriverParams.LOCAL_ADDR.value: SELF_ADDR, DriverParams.PEER_ADDR.value: SELF_ADDR}
def close(self):
pass
def send_frame(self, frame: BytesAlike):
raise CommError(CommError.NOT_SUPPORTED, "Can't send data on Null connection")
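# A minimal sketch (not part of the production code path) of how the HELLO/READY
# handshake payload travels. The sender packs a dict with msgpack (see
# SfmConnection.send_handshake); update_endpoint() above pops the endpoint name and
# keeps the remaining keys as endpoint properties. The "fqcn" key is a hypothetical property.
def _handshake_payload_demo():
    hello = {HandshakeKeys.ENDPOINT_NAME: "site-1", HandshakeKeys.TIMESTAMP: 0.0, "fqcn": "site-1"}
    packed = msgpack.packb(hello)  # the bytes that follow the 16-byte prefix on the wire
    data = msgpack.unpackb(packed)  # what get_dict_payload() yields on the receiving side
    endpoint_name = data.pop(HandshakeKeys.ENDPOINT_NAME)
    return endpoint_name, data  # ("site-1", {"timestamp": 0.0, "fqcn": "site-1"})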
| NVFlare-main | nvflare/fuel/f3/sfm/conn_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Types:
DATA = 1
FRAG = 2
ACK = 3
HELLO = 4
READY = 5
PING = 6
PONG = 7
class HandshakeKeys:
ENDPOINT_NAME = "endpoint_name"
TIMESTAMP = "timestamp"
class Flags:
# Out of band message
OOB = 0x8000
# ACK requested
ACK = 0x4000
# Request, message-id in the header
REQ = 0x2000
# Response, message-id in the header
RESP = 0x1000
# PUB/SUB message, topic is in the header
PUB_SUB = 0x0800
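# A minimal sketch (not part of the original module) showing how the Flags bits are
# meant to be combined and tested; they occupy the 2-byte "flags" field of the frame prefix.
def _flags_demo():
    flags = Flags.REQ | Flags.ACK  # a request that also asks for an ACK
    return bool(flags & Flags.REQ), bool(flags & Flags.OOB)  # (True, False)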
| NVFlare-main | nvflare/fuel/f3/sfm/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from typing import Optional
import msgpack
from nvflare.fuel.f3.connection import BytesAlike, Connection
from nvflare.fuel.f3.endpoint import Endpoint
from nvflare.fuel.f3.sfm.constants import HandshakeKeys, Types
from nvflare.fuel.f3.sfm.prefix import PREFIX_LEN, Prefix
log = logging.getLogger(__name__)
class SfmConnection:
"""A wrapper of driver connection.
Driver connection deals with frame. This connection handles messages.
The frame format:
.. code-block::
+--------------------------------------------------------+
| length (4 bytes) |
+----------------------------+---------------------------+
| header_len (2) | type (1) | reserved |
+----------------------------+---------------------------+
| flags (2) | app_id (2) |
+----------------------------+---------------------------+
| stream_id (2) | sequence (2) |
+--------------------------------------------------------+
| Headers |
| header_len bytes |
+--------------------------------------------------------+
| |
| Payload |
| (length-header_len-16) bytes |
| |
+--------------------------------------------------------+
"""
def __init__(self, conn: Connection, local_endpoint: Endpoint):
self.conn = conn
self.local_endpoint = local_endpoint
self.sfm_endpoint = None
self.last_activity = 0
self.sequence = 0
self.lock = threading.Lock()
def get_name(self) -> str:
return self.conn.name
def next_sequence(self) -> int:
"""Get next sequence number for the connection.
Sequence is used to detect lost frames.
"""
with self.lock:
self.sequence = (self.sequence + 1) & 0xFFFF
return self.sequence
def send_handshake(self, frame_type: int):
"""Send HELLO/READY frame"""
data = {HandshakeKeys.ENDPOINT_NAME: self.local_endpoint.name, HandshakeKeys.TIMESTAMP: time.time()}
if self.local_endpoint.properties:
data.update(self.local_endpoint.properties)
self.send_dict(frame_type, 1, data)
def send_heartbeat(self, frame_type: int, data: Optional[dict] = None):
"""Send Ping or Pong"""
if frame_type not in (Types.PING, Types.PONG):
log.error(f"Heartbeat type must be PING or PONG, not {frame_type}")
return
if not self.sfm_endpoint:
log.error("Trying to send heartbeat before SFM Endpoint is established")
return
stream_id = self.sfm_endpoint.next_stream_id()
self.send_dict(frame_type, stream_id, data)
def send_data(self, app_id: int, stream_id: int, headers: Optional[dict], payload: BytesAlike):
"""Send user data"""
prefix = Prefix(0, 0, Types.DATA, 0, 0, app_id, stream_id, 0)
self.send_frame(prefix, headers, payload)
def send_dict(self, frame_type: int, stream_id: int, data: dict):
"""Send a dict as payload"""
prefix = Prefix(0, 0, frame_type, 0, 0, 0, stream_id, 0)
payload = msgpack.packb(data)
self.send_frame(prefix, None, payload)
def send_frame(self, prefix: Prefix, headers: Optional[dict], payload: Optional[BytesAlike]):
headers_bytes = self.headers_to_bytes(headers)
header_len = len(headers_bytes) if headers_bytes else 0
length = PREFIX_LEN + header_len
if payload:
length += len(payload)
prefix.length = length
prefix.header_len = header_len
prefix.sequence = self.next_sequence()
buffer: bytearray = bytearray(length)
offset = 0
prefix.to_buffer(buffer, offset)
offset += PREFIX_LEN
if headers_bytes:
buffer[offset:] = headers_bytes
offset += header_len
if payload:
buffer[offset:] = payload
log.debug(f"Sending frame: {prefix} on {self.conn}")
# Only one thread can send data on a connection. Otherwise, the frames may interleave.
with self.lock:
self.conn.send_frame(buffer)
@staticmethod
def headers_to_bytes(headers: Optional[dict]) -> Optional[bytes]:
if headers:
return msgpack.packb(headers)
else:
return None
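# A minimal sketch (not part of the production code path) walking through the frame
# layout documented above: 16-byte prefix, msgpack-encoded headers, then the payload.
# The positional Prefix arguments mirror the call in send_data(); treat that ordering
# as an assumption about the Prefix constructor rather than a documented API.
def _frame_roundtrip_demo():
    headers = msgpack.packb({"topic": "demo"})  # hypothetical header dict
    payload = b"hello"
    prefix = Prefix(0, 0, Types.DATA, 0, 0, 1, 1, 0)
    prefix.header_len = len(headers)
    prefix.length = PREFIX_LEN + len(headers) + len(payload)
    frame = bytearray(prefix.length)
    prefix.to_buffer(frame, 0)
    frame[PREFIX_LEN : PREFIX_LEN + len(headers)] = headers
    frame[PREFIX_LEN + len(headers) :] = payload
    # Receiver side (see ConnManager.process_frame_task): parse the prefix, then split.
    parsed = Prefix.from_bytes(frame)
    parsed_headers = msgpack.unpackb(frame[PREFIX_LEN : PREFIX_LEN + parsed.header_len])
    parsed_payload = bytes(frame[PREFIX_LEN + parsed.header_len :])
    return parsed_headers, parsed_payload  # ({"topic": "demo"}, b"hello")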
| NVFlare-main | nvflare/fuel/f3/sfm/sfm_conn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/f3/sfm/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from typing import List, Optional
from nvflare.fuel.f3.endpoint import Endpoint
from nvflare.fuel.f3.sfm.sfm_conn import SfmConnection
# Hard-coded stream ID to be used by packets before handshake
RESERVED_STREAM_ID = 16
MAX_CONN_PER_ENDPOINT = 1
log = logging.getLogger(__name__)
class SfmEndpoint:
"""An endpoint wrapper to keep SFM internal data"""
def __init__(self, endpoint: Endpoint):
self.endpoint = endpoint
self.stream_id: int = RESERVED_STREAM_ID
self.lock = threading.Lock()
self.connections: List[SfmConnection] = []
def add_connection(self, sfm_conn: SfmConnection):
with self.lock:
while len(self.connections) >= MAX_CONN_PER_ENDPOINT:
first_conn = self.connections[0]
first_conn.conn.close()
self.connections.pop(0)
                log.info(
                    f"Connection {first_conn.get_name()} is evicted to make room for {sfm_conn.get_name()} "
                    f"on endpoint {self.endpoint.name} (connection limit: {MAX_CONN_PER_ENDPOINT})"
                )
self.connections.append(sfm_conn)
def remove_connection(self, sfm_conn: SfmConnection):
if not self.connections:
log.debug(
f"Connection {sfm_conn.get_name()} is already removed. "
f"No connections for endpoint {self.endpoint.name}"
)
return
with self.lock:
found_index = next(
(index for index, conn in enumerate(self.connections) if conn.get_name() == sfm_conn.get_name()), None
)
if found_index is not None:
self.connections.pop(found_index)
log.debug(f"Connection {sfm_conn.get_name()} is removed from endpoint {self.endpoint.name}")
else:
log.debug(f"Connection {sfm_conn.get_name()} is already removed from endpoint {self.endpoint.name}")
def get_connection(self, stream_id: int) -> Optional[SfmConnection]:
if not self.connections:
return None
index = stream_id % len(self.connections)
return self.connections[index]
def next_stream_id(self) -> int:
"""Get next stream_id for the endpoint
stream_id is used to assemble fragmented data
"""
with self.lock:
self.stream_id = (self.stream_id + 1) & 0xFFFF
return self.stream_id
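# A minimal sketch (not part of the production code path) of stream-id allocation and
# connection selection. Endpoint("demo", {}) assumes the (name, properties) constructor
# used by ConnManager.update_endpoint; no real driver connections are created here.
def _sfm_endpoint_demo():
    sfm_ep = SfmEndpoint(Endpoint("demo", {}))
    first = sfm_ep.next_stream_id()  # RESERVED_STREAM_ID + 1 == 17
    sfm_ep.stream_id = 0xFFFF
    wrapped = sfm_ep.next_stream_id()  # wraps at 16 bits, matching the 2-byte prefix field
    # get_connection() picks a connection by stream_id % len(connections); with no
    # connections registered it simply returns None.
    return first, wrapped, sfm_ep.get_connection(wrapped)  # (17, 0, None)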
| NVFlare-main | nvflare/fuel/f3/sfm/sfm_endpoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from threading import Event, Thread
from typing import Dict
from nvflare.fuel.f3.comm_config import CommConfigurator
from nvflare.fuel.f3.drivers.driver_params import DriverCap
from nvflare.fuel.f3.sfm.constants import Types
from nvflare.fuel.f3.sfm.sfm_conn import SfmConnection
log = logging.getLogger(__name__)
HEARTBEAT_TICK = 5
DEFAULT_HEARTBEAT_INTERVAL = 60
class HeartbeatMonitor(Thread):
def __init__(self, conns: Dict[str, SfmConnection]):
        Thread.__init__(self, name="hb_mon")
self.conns = conns
self.stopped = Event()
self.curr_time = 0
self.interval = CommConfigurator().get_heartbeat_interval(DEFAULT_HEARTBEAT_INTERVAL)
if self.interval < HEARTBEAT_TICK:
log.warning(f"Heartbeat interval is too small ({self.interval} < {HEARTBEAT_TICK})")
def stop(self):
self.stopped.set()
def run(self):
while not self.stopped.is_set():
try:
self.curr_time = time.time()
self._check_heartbeat()
except Exception as ex:
log.error(f"Heartbeat check failed: {ex}")
self.stopped.wait(HEARTBEAT_TICK)
log.debug("Heartbeat monitor stopped")
def _check_heartbeat(self):
for sfm_conn in self.conns.values():
driver = sfm_conn.conn.connector.driver
caps = driver.capabilities()
if caps and not caps.get(DriverCap.SEND_HEARTBEAT.value, False):
continue
if self.curr_time - sfm_conn.last_activity > self.interval:
sfm_conn.send_heartbeat(Types.PING)
log.debug(f"Heartbeat sent to connection: {sfm_conn.conn}")
| NVFlare-main | nvflare/fuel/f3/sfm/heartbeat_monitor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from collections import deque
from typing import Callable, Dict, Tuple
from nvflare.fuel.f3.cellnet.core_cell import CoreCell
from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey
from nvflare.fuel.f3.cellnet.registry import Callback, Registry
from nvflare.fuel.f3.connection import BytesAlike
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.streaming.stream_const import (
EOS,
STREAM_ACK_TOPIC,
STREAM_CHANNEL,
STREAM_DATA_TOPIC,
StreamDataType,
StreamHeaderKey,
)
from nvflare.fuel.f3.streaming.stream_types import Stream, StreamError, StreamFuture
from nvflare.fuel.f3.streaming.stream_utils import stream_thread_pool
log = logging.getLogger(__name__)
MAX_OUT_SEQ_CHUNKS = 16
# 1/4 of the window size
ACK_INTERVAL = 1024 * 1024 * 4
READ_TIMEOUT = 60
class RxTask:
"""Receiving task for ByteStream"""
def __init__(self, sid: int, origin: str):
self.sid = sid
self.origin = origin
self.channel = None
self.topic = None
self.headers = None
self.size = 0
# The reassembled buffer in a double-ended queue
self.buffers = deque()
# Out-of-sequence buffers to be assembled
self.out_seq_buffers: Dict[int, Tuple[bool, BytesAlike]] = {}
self.stream_future = None
self.next_seq = 0
self.offset = 0
self.offset_ack = 0
self.eos = False
self.waiter = threading.Event()
self.task_lock = threading.Lock()
self.last_chunk_received = False
def __str__(self):
return f"Rx[SID:{self.sid} from {self.origin} for {self.channel}/{self.topic}]"
class RxStream(Stream):
"""A stream that's used to read streams from the buffer"""
def __init__(self, byte_receiver: "ByteReceiver", task: RxTask):
super().__init__(task.size, task.headers)
self.byte_receiver = byte_receiver
self.task = task
def read(self, chunk_size: int) -> bytes:
if self.closed:
raise StreamError("Read from closed stream")
if (not self.task.buffers) and self.task.eos:
return EOS
# Block if buffers are empty
count = 0
while not self.task.buffers:
if count > 0:
log.debug(f"Read block is unblocked multiple times: {count}")
self.task.waiter.clear()
if not self.task.waiter.wait(READ_TIMEOUT):
error = StreamError(f"{self.task} read timed out after {READ_TIMEOUT} seconds")
self.byte_receiver.stop_task(self.task, error)
raise error
count += 1
with self.task.task_lock:
last_chunk, buf = self.task.buffers.popleft()
if buf is None:
buf = bytes(0)
if 0 < chunk_size < len(buf):
result = buf[0:chunk_size]
# Put leftover to the head of the queue
self.task.buffers.appendleft((last_chunk, buf[chunk_size:]))
else:
result = buf
if last_chunk:
self.task.eos = True
self.task.offset += len(result)
if not self.task.last_chunk_received and (self.task.offset - self.task.offset_ack > ACK_INTERVAL):
# Send ACK
message = Message()
message.add_headers(
{
StreamHeaderKey.STREAM_ID: self.task.sid,
StreamHeaderKey.DATA_TYPE: StreamDataType.ACK,
StreamHeaderKey.OFFSET: self.task.offset,
}
)
self.byte_receiver.cell.fire_and_forget(STREAM_CHANNEL, STREAM_ACK_TOPIC, self.task.origin, message)
self.task.offset_ack = self.task.offset
self.task.stream_future.set_progress(self.task.offset)
return result
def close(self):
if not self.task.stream_future.done():
self.task.stream_future.set_result(self.task.offset)
self.closed = True
class ByteReceiver:
def __init__(self, cell: CoreCell):
self.cell = cell
self.cell.register_request_cb(channel=STREAM_CHANNEL, topic=STREAM_DATA_TOPIC, cb=self._data_handler)
self.registry = Registry()
self.rx_task_map = {}
self.map_lock = threading.Lock()
def register_callback(self, channel: str, topic: str, stream_cb: Callable, *args, **kwargs):
if not callable(stream_cb):
raise StreamError(f"specified stream_cb {type(stream_cb)} is not callable")
self.registry.set(channel, topic, Callback(stream_cb, args, kwargs))
def stop_task(self, task: RxTask, error: StreamError = None, notify=True):
with self.map_lock:
self.rx_task_map.pop(task.sid, None)
if error:
log.error(f"Stream error: {error}")
task.stream_future.set_exception(error)
if notify:
message = Message()
message.add_headers(
{
StreamHeaderKey.STREAM_ID: task.sid,
StreamHeaderKey.DATA_TYPE: StreamDataType.ERROR,
StreamHeaderKey.ERROR_MSG: str(error),
}
)
self.cell.fire_and_forget(STREAM_CHANNEL, STREAM_ACK_TOPIC, task.origin, message)
task.eos = True
def _data_handler(self, message: Message):
sid = message.get_header(StreamHeaderKey.STREAM_ID)
origin = message.get_header(MessageHeaderKey.ORIGIN)
seq = message.get_header(StreamHeaderKey.SEQUENCE)
error = message.get_header(StreamHeaderKey.ERROR_MSG, None)
payload = message.payload
with self.map_lock:
task = self.rx_task_map.get(sid, None)
if not task:
if error:
log.debug(f"Received error for non-existing stream: SID {sid} from {origin}")
return
task = RxTask(sid, origin)
self.rx_task_map[sid] = task
if error:
self.stop_task(task, StreamError(f"Received error from {origin}: {error}"), notify=False)
return
if seq == 0:
# Handle new stream
task.channel = message.get_header(StreamHeaderKey.CHANNEL)
task.topic = message.get_header(StreamHeaderKey.TOPIC)
task.headers = message.headers
task.stream_future = StreamFuture(sid, message.headers)
task.size = message.get_header(StreamHeaderKey.SIZE, 0)
task.stream_future.set_size(task.size)
# Invoke callback
callback = self.registry.find(task.channel, task.topic)
if not callback:
self.stop_task(task, StreamError(f"No callback is registered for {task.channel}/{task.topic}"))
return
stream_thread_pool.submit(self._callback_wrapper, task, callback)
with task.task_lock:
data_type = message.get_header(StreamHeaderKey.DATA_TYPE)
last_chunk = data_type == StreamDataType.FINAL
if last_chunk:
task.last_chunk_received = True
if seq == task.next_seq:
self._append(task, (last_chunk, payload))
task.next_seq += 1
# Try to reassemble out-of-seq buffers
while task.next_seq in task.out_seq_buffers:
chunk = task.out_seq_buffers.pop(task.next_seq)
self._append(task, chunk)
task.next_seq += 1
else:
# Out-of-seq chunk reassembly
if len(task.out_seq_buffers) >= MAX_OUT_SEQ_CHUNKS:
self.stop_task(task, StreamError(f"Too many out-of-sequence chunks: {len(task.out_seq_buffers)}"))
return
else:
task.out_seq_buffers[seq] = last_chunk, payload
# If all chunks are lined up, the task can be deleted
if not task.out_seq_buffers and task.buffers:
last_chunk, _ = task.buffers[-1]
if last_chunk:
self.stop_task(task)
def _callback_wrapper(self, task: RxTask, callback: Callback):
"""A wrapper to catch all exceptions in the callback"""
try:
stream = RxStream(self, task)
return callback.cb(task.stream_future, stream, False, *callback.args, **callback.kwargs)
except Exception as ex:
msg = f"{task} callback {callback.cb} throws exception: {ex}"
log.error(msg)
self.stop_task(task, StreamError(msg))
@staticmethod
def _append(task: RxTask, buf: Tuple[bool, BytesAlike]):
if not buf:
return
task.buffers.append(buf)
# Wake up blocking read()
if not task.waiter.is_set():
task.waiter.set()
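# A minimal sketch (not part of the production code path) of registering a stream
# callback. The callback signature mirrors _callback_wrapper above:
# (stream_future, stream, resume, *args, **kwargs). Channel/topic names are hypothetical.
def _register_stream_cb_demo(receiver: ByteReceiver):
    def on_stream(future: StreamFuture, stream: Stream, resume: bool) -> int:
        total = 0
        while True:
            chunk = stream.read(1024 * 1024)
            if not chunk:  # EOS is an empty bytes object
                break
            total += len(chunk)
        return total
    receiver.register_callback("demo_channel", "demo_topic", on_stream)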
| NVFlare-main | nvflare/fuel/f3/streaming/byte_receiver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Callable, Optional
from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.streaming.blob_streamer import BlobStreamer
from nvflare.fuel.f3.streaming.stream_const import StreamHeaderKey
from nvflare.fuel.f3.streaming.stream_types import ObjectIterator, ObjectStreamFuture, StreamFuture
from nvflare.fuel.f3.streaming.stream_utils import gen_stream_id, stream_thread_pool
log = logging.getLogger(__name__)
class ObjectTxTask:
def __init__(self, channel: str, topic: str, target: str, headers: dict, iterator: ObjectIterator):
self.obj_sid = gen_stream_id()
self.index = 0
self.channel = channel
self.topic = topic
self.target = target
self.headers = headers if headers else {}
self.iterator = iterator
self.object_future = None
self.stop = False
def __str__(self):
return f"ObjTx[SID:{self.obj_sid}/{self.index} to {self.target} for {self.channel}/{self.topic}]"
class ObjectRxTask:
def __init__(self, obj_sid: int, channel: str, topic: str, origin: str, headers: dict):
self.obj_sid = obj_sid
self.index = 0
self.channel = channel
self.topic = topic
self.origin = origin
self.headers = headers
self.object_future: Optional[ObjectStreamFuture] = None
def __str__(self):
return f"ObjRx[SID:{self.obj_sid}/{self.index} from {self.origin} for {self.channel}/{self.topic}]"
class ObjectHandler:
def __init__(self, object_stream_cb: Callable, object_cb: Callable, obj_tasks: dict):
self.object_stream_cb = object_stream_cb
self.object_cb = object_cb
self.obj_tasks = obj_tasks
def object_done(self, future: StreamFuture, obj_sid: int, index: int, *args, **kwargs):
blob = future.result()
self.object_cb(obj_sid, index, Message(future.get_headers(), blob), *args, **kwargs)
def handle_object(self, future: StreamFuture, *args, **kwargs):
headers = future.get_headers()
obj_sid = headers.get(StreamHeaderKey.OBJECT_STREAM_ID, None)
if obj_sid is None:
return
task = self.obj_tasks.get(obj_sid, None)
if not task:
# Handle new object stream
origin = headers.get(MessageHeaderKey.ORIGIN)
channel = headers.get(StreamHeaderKey.CHANNEL)
topic = headers.get(StreamHeaderKey.TOPIC)
task = ObjectRxTask(obj_sid, channel, topic, origin, headers)
task.object_future = ObjectStreamFuture(obj_sid, headers)
stream_thread_pool.submit(self.object_stream_cb, task.object_future, *args, **kwargs)
task.object_future.set_index(task.index)
task.index += 1
        future.add_done_callback(self.object_done, task.obj_sid, task.index)
class ObjectStreamer:
def __init__(self, blob_streamer: BlobStreamer):
self.blob_streamer = blob_streamer
self.obj_tasks = {}
def stream_objects(
self, channel: str, topic: str, target: str, headers: dict, iterator: ObjectIterator
) -> ObjectStreamFuture:
tx_task = ObjectTxTask(channel, topic, target, headers, iterator)
tx_task.object_future = ObjectStreamFuture(tx_task.obj_sid, headers)
stream_thread_pool.submit(self._streaming_task, tx_task)
return tx_task.object_future
def register_object_callbacks(
self, channel, topic, object_stream_cb: Callable, object_cb: Callable, *args, **kwargs
):
handler = ObjectHandler(object_stream_cb, object_cb, self.obj_tasks)
self.blob_streamer.register_blob_callback(channel, topic, handler.handle_object, *args, **kwargs)
def _streaming_task(self, task: ObjectTxTask):
for obj in task.iterator:
task.object_future.set_index(task.index)
task.headers.update(
{
StreamHeaderKey.OBJECT_STREAM_ID: task.obj_sid,
StreamHeaderKey.OBJECT_INDEX: task.index,
}
)
            msg = Message(task.headers, obj)
            blob_future = self.blob_streamer.send(task.channel, task.topic, task.target, msg, secure=False)
# Wait till it's done
bytes_sent = blob_future.result()
log.debug(f"Stream {task.obj_sid} Object {task.index} is sent ({bytes_sent}")
task.index += 1
task.object_future.set_result(task.index)
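# A minimal sketch (not part of the production code path): an ObjectIterator that yields
# a few byte blobs, which _streaming_task() sends one at a time. The channel, topic and
# target cell names are hypothetical.
class _ListIterator(ObjectIterator):
    def __init__(self, blobs):
        super().__init__()
        self._blobs = list(blobs)
    def __next__(self):
        if not self._blobs:
            raise StopIteration
        return self._blobs.pop(0)
def _stream_objects_demo(streamer: ObjectStreamer) -> ObjectStreamFuture:
    it = _ListIterator([b"obj-1", b"obj-2", b"obj-3"])
    return streamer.stream_objects("demo_channel", "demo_topic", "receiver_cell", {}, it)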
| NVFlare-main | nvflare/fuel/f3/streaming/object_streamer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Callable, Optional
from nvflare.fuel.f3.connection import BytesAlike
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.streaming.byte_receiver import ByteReceiver
from nvflare.fuel.f3.streaming.byte_streamer import ByteStreamer
from nvflare.fuel.f3.streaming.stream_const import EOS
from nvflare.fuel.f3.streaming.stream_types import Stream, StreamError, StreamFuture
from nvflare.fuel.f3.streaming.stream_utils import FastBuffer, stream_thread_pool, wrap_view
from nvflare.security.logging import secure_format_traceback
log = logging.getLogger(__name__)
class BlobStream(Stream):
def __init__(self, blob: BytesAlike, headers: Optional[dict]):
super().__init__(len(blob), headers)
self.blob_view = wrap_view(blob)
def read(self, chunk_size: int) -> BytesAlike:
if self.pos >= self.get_size():
return EOS
next_pos = self.pos + chunk_size
if next_pos > self.get_size():
next_pos = self.get_size()
buf = self.blob_view[self.pos : next_pos]
self.pos = next_pos
return buf
class BlobTask:
def __init__(self, future: StreamFuture, stream: Stream):
self.future = future
self.stream = stream
self.size = stream.get_size()
self.pre_allocated = self.size > 0
if self.pre_allocated:
self.buffer = wrap_view(bytearray(self.size))
else:
self.buffer = FastBuffer()
class BlobHandler:
def __init__(self, blob_cb: Callable):
self.blob_cb = blob_cb
def handle_blob_cb(self, future: StreamFuture, stream: Stream, resume: bool, *args, **kwargs) -> int:
if resume:
log.warning("Resume is not supported, ignored")
blob_task = BlobTask(future, stream)
stream_thread_pool.submit(self._read_stream, blob_task)
self.blob_cb(future, *args, **kwargs)
return 0
@staticmethod
def _read_stream(blob_task: BlobTask):
try:
# It's most efficient to use the same chunk size as the stream
chunk_size = ByteStreamer.get_chunk_size()
buf_size = 0
while True:
buf = blob_task.stream.read(chunk_size)
if not buf:
break
length = len(buf)
if blob_task.pre_allocated:
blob_task.buffer[buf_size : buf_size + length] = buf
else:
blob_task.buffer.append(buf)
buf_size += length
if blob_task.size and blob_task.size != buf_size:
log.warning(
f"Stream {blob_task.future.get_stream_id()} size doesn't match: " f"{blob_task.size} <> {buf_size}"
)
if blob_task.pre_allocated:
result = blob_task.buffer
else:
result = blob_task.buffer.to_bytes()
blob_task.future.set_result(result)
except Exception as ex:
log.error(f"Stream {blob_task.future.get_stream_id()} read error: {ex}")
log.debug(secure_format_traceback())
blob_task.future.set_exception(ex)
class BlobStreamer:
def __init__(self, byte_streamer: ByteStreamer, byte_receiver: ByteReceiver):
self.byte_streamer = byte_streamer
self.byte_receiver = byte_receiver
def send(self, channel: str, topic: str, target: str, message: Message, secure: bool) -> StreamFuture:
if message.payload is None:
message.payload = bytes(0)
if not isinstance(message.payload, (bytes, bytearray, memoryview)):
raise StreamError(f"BLOB is invalid type: {type(message.payload)}")
blob_stream = BlobStream(message.payload, message.headers)
return self.byte_streamer.send(channel, topic, target, message.headers, blob_stream, secure)
def register_blob_callback(self, channel, topic, blob_cb: Callable, *args, **kwargs):
handler = BlobHandler(blob_cb)
self.byte_receiver.register_callback(channel, topic, handler.handle_blob_cb, *args, **kwargs)
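# A minimal sketch (not part of the production code path) of sending and receiving a
# whole BLOB. The receiver callback is invoked when the stream starts; BlobHandler
# reassembles the bytes in the background and delivers them via future.result().
# Channel, topic and target names are hypothetical.
def _blob_demo(streamer: BlobStreamer) -> StreamFuture:
    def on_blob(future: StreamFuture):
        log.info(f"blob stream {future.get_stream_id()} started, size={future.get_size()}")
    streamer.register_blob_callback("demo_channel", "demo_topic", on_blob)
    msg = Message({"name": "demo"}, b"x" * 1024)
    return streamer.send("demo_channel", "demo_topic", "receiver_cell", msg, secure=False)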
| NVFlare-main | nvflare/fuel/f3/streaming/blob_streamer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from abc import ABC, abstractmethod
from collections.abc import Iterator
from typing import Any, Callable, Optional
from nvflare.fuel.f3.connection import BytesAlike
from nvflare.fuel.f3.streaming.stream_utils import gen_stream_id
log = logging.getLogger(__name__)
class StreamError(Exception):
"""All stream API throws this error"""
pass
class StreamCancelled(StreamError):
"""Streaming is cancelled by sender"""
pass
class Stream(ABC):
"""A raw, read-only, seekable binary byte stream"""
def __init__(self, size: int = 0, headers: Optional[dict] = None):
"""Constructor for stream
Args:
size: The total size of stream. 0 if unknown
headers: Optional headers to be passed to the receiver
"""
self.size = size
self.pos = 0
self.headers = headers
self.closed = False
def get_size(self) -> int:
return self.size
def get_pos(self):
return self.pos
def get_headers(self) -> Optional[dict]:
return self.headers
@abstractmethod
def read(self, chunk_size: int) -> BytesAlike:
"""Read and return up to chunk_size bytes. It can return less but not more than the chunk_size.
An empty bytes object is returned if the stream reaches the end.
Args:
chunk_size: Up to (but maybe less) this many bytes will be returned
Returns:
Binary data. If empty, it means the stream is depleted (EOF)
"""
pass
def close(self):
"""Close the stream"""
self.closed = True
def seek(self, offset: int):
"""Change the stream position to the given byte offset.
Args:
offset: Offset relative to the start of the stream
Exception:
StreamError: If the stream is not seekable
"""
self.pos = offset
class ObjectIterator(Iterator, ABC):
"""An object iterator that returns next object
The __next__() method must be defined to return next object.
"""
def __init__(self, headers: Optional[dict] = None):
self.sid = gen_stream_id()
self.headers = headers
self.index = 0
def get_headers(self) -> Optional[dict]:
return self.headers
def stream_id(self) -> int:
return self.sid
def get_index(self) -> int:
return self.index
def set_index(self, index: int):
self.index = index
class StreamFuture:
"""Future class for all stream calls.
Fashioned after concurrent.futures.Future
"""
def __init__(self, stream_id: int, headers: Optional[dict] = None):
self.stream_id = stream_id
self.headers = headers
self.waiter = threading.Event()
self.lock = threading.Lock()
self.error: Optional[StreamError] = None
self.value = None
self.size = 0
self.progress = 0
self.done_callbacks = []
def get_stream_id(self) -> int:
return self.stream_id
def get_headers(self) -> Optional[dict]:
return self.headers
def get_size(self) -> int:
return self.size
def set_size(self, size: int):
self.size = size
def get_progress(self) -> int:
return self.progress
def set_progress(self, progress: int):
self.progress = progress
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self.lock:
            if self.error or self.waiter.is_set():
return False
self.error = StreamCancelled(f"Stream {self.stream_id} is cancelled")
return True
def cancelled(self):
with self.lock:
return isinstance(self.error, StreamCancelled)
def running(self):
"""Return True if the future is currently executing."""
with self.lock:
            return not self.waiter.is_set()
    def done(self):
        """Return True if the future was cancelled or finished executing."""
        with self.lock:
            return self.error or self.waiter.is_set()
def add_done_callback(self, done_cb: Callable, *args, **kwargs):
"""Attaches a callable that will be called when the future finishes.
Args:
            done_cb: A callable that will be called when this future completes
"""
with self.lock:
self.done_callbacks.append((done_cb, args, kwargs))
def result(self, timeout=None) -> Any:
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The final result
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
if not self.waiter.wait(timeout):
raise TimeoutError(f"Future timed out waiting result after {timeout} seconds")
if self.error:
raise self.error
return self.value
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
if not self.waiter.wait(timeout):
raise TimeoutError(f"Future timed out waiting exception after {timeout} seconds")
return self.error
def set_result(self, value: Any):
"""Sets the return value of work associated with the future."""
with self.lock:
if self.error:
raise StreamError("Invalid state, future already failed")
self.value = value
self.waiter.set()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception."""
with self.lock:
self.error = exception
self.waiter.set()
self._invoke_callbacks()
def _invoke_callbacks(self):
for callback, args, kwargs in self.done_callbacks:
try:
                callback(self, *args, **kwargs)
except Exception as ex:
log.error(f"Exception calling callback for {callback}: {ex}")
class ObjectStreamFuture(StreamFuture):
def __init__(self, stream_id: int, headers: Optional[dict] = None):
super().__init__(stream_id, headers)
self.index = 0
def get_index(self) -> int:
"""Current object index, which is only available for ObjectStream"""
return self.index
def set_index(self, index: int):
"""Set current object index"""
self.index = index
def get_progress(self):
return self.index
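# A minimal sketch (not part of the original module): the smallest useful Stream
# subclass, a read-only in-memory stream. It mirrors BlobStream in blob_streamer.py and
# shows the read() contract: return at most chunk_size bytes, and b"" at end of stream.
class BytesStream(Stream):
    def __init__(self, data: bytes, headers: Optional[dict] = None):
        super().__init__(len(data), headers)
        self._data = data
    def read(self, chunk_size: int) -> BytesAlike:
        if self.pos >= self.size:
            return b""  # empty result signals EOF
        chunk = self._data[self.pos : self.pos + chunk_size]
        self.pos += len(chunk)
        return chunk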
| NVFlare-main | nvflare/fuel/f3/streaming/stream_types.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/f3/streaming/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from typing import Optional
from nvflare.fuel.f3.cellnet.core_cell import CoreCell
from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.streaming.stream_const import (
STREAM_ACK_TOPIC,
STREAM_CHANNEL,
STREAM_DATA_TOPIC,
StreamDataType,
StreamHeaderKey,
)
from nvflare.fuel.f3.streaming.stream_types import Stream, StreamError, StreamFuture
from nvflare.fuel.f3.streaming.stream_utils import gen_stream_id, stream_thread_pool, wrap_view
STREAM_CHUNK_SIZE = 1024 * 1024
STREAM_WINDOW_SIZE = 16 * STREAM_CHUNK_SIZE
STREAM_ACK_WAIT = 10
log = logging.getLogger(__name__)
class TxTask:
def __init__(self, channel: str, topic: str, target: str, headers: dict, stream: Stream, secure: bool):
self.sid = gen_stream_id()
self.buffer = bytearray(STREAM_CHUNK_SIZE)
# Optimization to send the original buffer without copying
self.direct_buf: Optional[bytes] = None
self.buffer_size = 0
self.channel = channel
self.topic = topic
self.target = target
self.headers = headers
self.stream = stream
self.stream_future = None
self.task_future = None
self.ack_waiter = threading.Event()
self.seq = 0
self.offset = 0
self.offset_ack = 0
self.secure = secure
def __str__(self):
return f"Tx[SID:{self.sid} to {self.target} for {self.channel}/{self.topic}]"
class ByteStreamer:
def __init__(self, cell: CoreCell):
self.cell = cell
self.cell.register_request_cb(channel=STREAM_CHANNEL, topic=STREAM_ACK_TOPIC, cb=self._ack_handler)
self.tx_task_map = {}
self.map_lock = threading.Lock()
@staticmethod
def get_chunk_size():
return STREAM_CHUNK_SIZE
def send(self, channel: str, topic: str, target: str, headers: dict, stream: Stream, secure=False) -> StreamFuture:
tx_task = TxTask(channel, topic, target, headers, stream, secure)
with self.map_lock:
self.tx_task_map[tx_task.sid] = tx_task
future = StreamFuture(tx_task.sid)
future.set_size(stream.get_size())
tx_task.stream_future = future
tx_task.task_future = stream_thread_pool.submit(self._transmit_task, tx_task)
return future
def _transmit_task(self, task: TxTask):
while True:
buf = task.stream.read(STREAM_CHUNK_SIZE)
if not buf:
# End of Stream
self._transmit(task, final=True)
self._stop_task(task)
return
# Flow control
window = task.offset - task.offset_ack
# It may take several ACKs to clear up the window
while window > STREAM_WINDOW_SIZE:
log.debug(f"{task} window size {window} exceeds limit: {STREAM_WINDOW_SIZE}")
task.ack_waiter.clear()
if not task.ack_waiter.wait(timeout=STREAM_ACK_WAIT):
self._stop_task(task, StreamError(f"{task} ACK timeouts after {STREAM_ACK_WAIT} seconds"))
return
window = task.offset - task.offset_ack
size = len(buf)
if size > STREAM_CHUNK_SIZE:
raise StreamError(f"Stream returns invalid size: {size} for {task}")
if size + task.buffer_size > STREAM_CHUNK_SIZE:
self._transmit(task)
if size == STREAM_CHUNK_SIZE:
task.direct_buf = buf
else:
task.buffer[task.buffer_size : task.buffer_size + size] = buf
task.buffer_size += size
def _transmit(self, task: TxTask, final=False):
if task.buffer_size == 0:
payload = bytes(0)
elif task.buffer_size == STREAM_CHUNK_SIZE:
if task.direct_buf:
payload = task.direct_buf
else:
payload = task.buffer
else:
payload = wrap_view(task.buffer)[0 : task.buffer_size]
message = Message(None, payload)
if task.offset == 0:
# User headers are only included in the first chunk
if task.headers:
message.add_headers(task.headers)
message.add_headers(
{
StreamHeaderKey.CHANNEL: task.channel,
StreamHeaderKey.TOPIC: task.topic,
StreamHeaderKey.SIZE: task.stream.get_size(),
}
)
message.add_headers(
{
StreamHeaderKey.STREAM_ID: task.sid,
StreamHeaderKey.DATA_TYPE: StreamDataType.FINAL if final else StreamDataType.CHUNK,
StreamHeaderKey.SEQUENCE: task.seq,
StreamHeaderKey.OFFSET: task.offset,
}
)
errors = self.cell.fire_and_forget(STREAM_CHANNEL, STREAM_DATA_TOPIC, task.target, message, secure=task.secure)
error = errors.get(task.target)
if error:
msg = f"Message sending error to target {task.target}: {error}"
log.debug(msg)
self._stop_task(task, StreamError(msg))
return
# Update state
task.seq += 1
task.offset += task.buffer_size
task.buffer_size = 0
task.direct_buf = None
# Update future
task.stream_future.set_progress(task.offset)
def _stop_task(self, task: TxTask, error: StreamError = None, notify=True):
with self.map_lock:
self.tx_task_map.pop(task.sid, None)
if error:
log.debug(f"Stream error: {error}")
task.stream_future.set_exception(error)
if notify:
message = Message(None, None)
message.add_headers(
{
StreamHeaderKey.STREAM_ID: task.sid,
StreamHeaderKey.DATA_TYPE: StreamDataType.ERROR,
StreamHeaderKey.OFFSET: task.offset,
StreamHeaderKey.ERROR_MSG: str(error),
}
)
self.cell.fire_and_forget(STREAM_CHANNEL, STREAM_DATA_TOPIC, task.target, message, secure=task.secure)
else:
# Result is the number of bytes streamed
task.stream_future.set_result(task.offset)
def _ack_handler(self, message: Message):
origin = message.get_header(MessageHeaderKey.ORIGIN)
sid = message.get_header(StreamHeaderKey.STREAM_ID)
offset = message.get_header(StreamHeaderKey.OFFSET, None)
with self.map_lock:
task = self.tx_task_map.get(sid, None)
if not task:
# Last few ACKs always arrive late so this is normal
log.debug(f"ACK for stream {sid} received late from {origin} with offset {offset}")
return
error = message.get_header(StreamHeaderKey.ERROR_MSG, None)
if error:
self._stop_task(task, StreamError(f"Received error from {origin}: {error}"), notify=False)
return
if offset > task.offset_ack:
task.offset_ack = offset
if not task.ack_waiter.is_set():
task.ack_waiter.set()
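# A minimal sketch (not part of the production code path) of the flow-control arithmetic
# used in _transmit_task(): the sender may run ahead of the last ACKed offset by at most
# STREAM_WINDOW_SIZE bytes before it blocks on the ACK waiter.
def _window_is_open(offset: int, offset_ack: int) -> bool:
    return (offset - offset_ack) <= STREAM_WINDOW_SIZE
# Example: with the default 16 MB window, a sender at offset 20 MB whose last ACK covered
# 3 MB has 17 MB in flight, so _window_is_open(20 * 2**20, 3 * 2**20) is False and it waits.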
| NVFlare-main | nvflare/fuel/f3/streaming/byte_streamer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from nvflare.fuel.f3.connection import BytesAlike
from nvflare.fuel.f3.mpm import MainProcessMonitor
STREAM_THREAD_POOL_SIZE = 128
stream_thread_pool = ThreadPoolExecutor(STREAM_THREAD_POOL_SIZE, "stm")
lock = threading.Lock()
sid_base = int((time.time() + os.getpid()) * 1000000) # microseconds
stream_count = 0
def wrap_view(buffer: BytesAlike) -> memoryview:
if isinstance(buffer, memoryview):
view = buffer
else:
view = memoryview(buffer)
return view
def gen_stream_id() -> int:
global lock, stream_count, sid_base
with lock:
stream_count += 1
return sid_base + stream_count
class FastBuffer:
"""A buffer with fast appending"""
def __init__(self, buf: BytesAlike = None):
if not buf:
self.capacity = 1024
else:
self.capacity = len(buf)
self.buffer = bytearray(self.capacity)
if buf:
self.buffer[:] = buf
self.size = len(buf)
else:
self.size = 0
def to_bytes(self) -> BytesAlike:
"""Return bytes-like object.
Once this method is called, append() may not work any longer, since the buffer may have been exported"""
if self.capacity == self.size:
result = self.buffer
else:
view = wrap_view(self.buffer)
result = view[0 : self.size]
return result
def append(self, buf: BytesAlike):
"""Fast append by doubling the size of the buffer when it runs out"""
if not buf:
return self
length = len(buf)
remaining = self.capacity - self.size
if length > remaining:
# Expanding the array as least twice the current capacity
new_cap = max(length + self.size, 2 * self.capacity)
self.buffer = self.buffer.ljust(new_cap, b"\x00")
self.capacity = new_cap
self.buffer[self.size :] = buf
self.size += length
return self
def __len__(self):
return self.size
def stream_shutdown():
stream_thread_pool.shutdown(wait=True)
MainProcessMonitor.add_cleanup_cb(stream_shutdown)
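# A minimal sketch (not part of the production code path) of FastBuffer usage: chunks are
# appended with amortized copying thanks to capacity doubling, and to_bytes() may return
# the underlying bytearray or a memoryview slice, so wrap it in bytes() for an immutable copy.
def _fast_buffer_demo() -> bytes:
    fb = FastBuffer()
    for chunk in (b"hello ", b"streaming ", b"world"):
        fb.append(chunk)
    assert len(fb) == 21
    return bytes(fb.to_bytes())  # b"hello streaming world"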
| NVFlare-main | nvflare/fuel/f3/streaming/stream_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from pathlib import Path
from typing import Callable, Optional
from nvflare.fuel.f3.connection import BytesAlike
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.streaming.byte_receiver import ByteReceiver
from nvflare.fuel.f3.streaming.byte_streamer import ByteStreamer
from nvflare.fuel.f3.streaming.stream_const import StreamHeaderKey
from nvflare.fuel.f3.streaming.stream_types import Stream, StreamFuture
from nvflare.fuel.f3.streaming.stream_utils import stream_thread_pool
log = logging.getLogger(__name__)
class FileStream(Stream):
def __init__(self, file_name: str, headers: Optional[dict]):
self.file = open(file_name, "rb")
size = self.file.seek(0, os.SEEK_END)
self.file.seek(0, os.SEEK_SET)
super().__init__(size, headers)
def read(self, chunk_size: int) -> BytesAlike:
return self.file.read(chunk_size)
def close(self):
self.closed = True
self.file.close()
class FileHandler:
def __init__(self, file_cb: Callable):
self.file_cb = file_cb
self.size = 0
self.file_name = None
def handle_file_cb(self, future: StreamFuture, stream: Stream, resume: bool, *args, **kwargs) -> int:
if resume:
log.warning("Resume is not supported, ignored")
self.size = stream.get_size()
original_name = future.headers.get(StreamHeaderKey.FILE_NAME)
file_name = self.file_cb(future, original_name, *args, **kwargs)
stream_thread_pool.submit(self._write_to_file, file_name, future, stream)
return 0
def _write_to_file(self, file_name: str, future: StreamFuture, stream: Stream):
file = open(file_name, "wb")
chunk_size = ByteStreamer.get_chunk_size()
file_size = 0
while True:
buf = stream.read(chunk_size)
if not buf:
break
file_size += len(buf)
file.write(buf)
file.close()
if self.size and (self.size != file_size):
log.warning(f"Size doesn't match: {self.size} <> {file_size}")
future.set_result(file_name)
class FileStreamer:
def __init__(self, byte_streamer: ByteStreamer, byte_receiver: ByteReceiver):
self.byte_streamer = byte_streamer
self.byte_receiver = byte_receiver
def send(self, channel: str, topic: str, target: str, message: Message, secure=False) -> StreamFuture:
file_name = Path(message.payload).name
file_stream = FileStream(message.payload, message.headers)
message.add_headers(
{
StreamHeaderKey.SIZE: file_stream.get_size(),
StreamHeaderKey.FILE_NAME: file_name,
}
)
return self.byte_streamer.send(channel, topic, target, message.headers, file_stream, secure)
def register_file_callback(self, channel, topic, file_cb: Callable, *args, **kwargs):
handler = FileHandler(file_cb)
self.byte_receiver.register_callback(channel, topic, handler.handle_file_cb, *args, **kwargs)
| NVFlare-main | nvflare/fuel/f3/streaming/file_streamer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
STREAM_PREFIX = "sm__"
STREAM_CHANNEL = STREAM_PREFIX + "STREAM"
STREAM_DATA_TOPIC = STREAM_PREFIX + "DATA"
STREAM_ACK_TOPIC = STREAM_PREFIX + "ACK"
STREAM_CERT_TOPIC = STREAM_PREFIX + "CERT"
# End of Stream indicator
EOS = bytes()
class StreamDataType:
# Payload chunk
CHUNK = 1
# Final chunk, end of stream
FINAL = 2
# ACK with last received offset
ACK = 3
# Resume request
RESUME = 4
# Resume ack with offset to start
RESUME_ACK = 5
# Streaming failed
ERROR = 6
class StreamHeaderKey:
# Try to keep the key small to reduce the overhead
STREAM_ID = STREAM_PREFIX + "id"
DATA_TYPE = STREAM_PREFIX + "dt"
SIZE = STREAM_PREFIX + "sz"
SEQUENCE = STREAM_PREFIX + "sq"
OFFSET = STREAM_PREFIX + "os"
ERROR_MSG = STREAM_PREFIX + "em"
CHANNEL = STREAM_PREFIX + "ch"
FILE_NAME = STREAM_PREFIX + "fn"
TOPIC = STREAM_PREFIX + "tp"
OBJECT_STREAM_ID = STREAM_PREFIX + "os"
OBJECT_INDEX = STREAM_PREFIX + "oi"
STREAM_REQ_ID = STREAM_PREFIX + "ri"
PAYLOAD_ENCODING = STREAM_PREFIX + "pe"
| NVFlare-main | nvflare/fuel/f3/streaming/stream_const.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from nvflare.fuel.f3.cellnet.core_cell import CoreCell
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.stream_cell import StreamCell
from nvflare.fuel.f3.streaming.stream_types import StreamFuture
from nvflare.fuel.f3.streaming.tools.utils import (
BUF_SIZE,
RX_CELL,
TEST_CHANNEL,
TEST_TOPIC,
TX_CELL,
make_buffer,
setup_log,
)
class Sender:
"""Test BLOB sending"""
def __init__(self, url: str):
core_cell = CoreCell(TX_CELL, url, secure=False, credentials={})
self.stream_cell = StreamCell(core_cell)
core_cell.start()
def send(self, blob: bytes) -> StreamFuture:
return self.stream_cell.send_blob(TEST_CHANNEL, TEST_TOPIC, RX_CELL, Message(None, blob))
if __name__ == "__main__":
setup_log(logging.INFO)
connect_url = "tcp://localhost:1234"
sender = Sender(connect_url)
time.sleep(2)
print("Creating buffer ...")
start = time.time()
buffer = make_buffer(BUF_SIZE)
print(f"Buffer done, took {time.time()-start} seconds")
start = time.time()
fut = sender.send(buffer)
last = 0
while not fut.done():
progress = fut.get_progress()
print(f"{fut.get_stream_id()} Progress: {progress} Delta:{progress - last}")
last = progress
time.sleep(1)
n = fut.result()
print(f"Time to send {time.time()-start} seconds")
print(f"Bytes sent: {n}")
| NVFlare-main | nvflare/fuel/f3/streaming/tools/sender.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from nvflare.fuel.f3.cellnet.core_cell import CoreCell
from nvflare.fuel.f3.stream_cell import StreamCell
from nvflare.fuel.f3.streaming.stream_types import StreamFuture
from nvflare.fuel.f3.streaming.tools.utils import BUF_SIZE, RX_CELL, TEST_CHANNEL, TEST_TOPIC, make_buffer, setup_log
class Receiver:
"""Test BLOB receiving"""
def __init__(self, listening_url: str):
cell = CoreCell(RX_CELL, listening_url, secure=False, credentials={})
cell.start()
self.stream_cell = StreamCell(cell)
self.stream_cell.register_blob_cb(TEST_CHANNEL, TEST_TOPIC, self.blob_cb)
self.futures = {}
def get_futures(self) -> dict:
return self.futures
def blob_cb(self, stream_future: StreamFuture, *args, **kwargs):
sid = stream_future.get_stream_id()
print(f"Stream {sid} received")
self.futures[sid] = stream_future
if __name__ == "__main__":
setup_log(logging.INFO)
url = "tcp://localhost:1234"
receiver = Receiver(url)
time.sleep(2)
result = None
last = 0
while True:
        if receiver.get_futures():
for sid, fut in receiver.get_futures().items():
if fut.done():
result = fut.result()
break
else:
progress = fut.get_progress()
print(f"{sid} Progress: {progress} Delta:{progress - last}")
last = progress
time.sleep(1)
if result:
break
print("Recreating buffer ...")
start = time.time()
buffer = make_buffer(BUF_SIZE)
print(f"Buffer done, took {time.time()-start} seconds")
if buffer == result:
print("Result is correct")
else:
print("Result is wrong")
| NVFlare-main | nvflare/fuel/f3/streaming/tools/receiver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/f3/streaming/tools/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from nvflare.fuel.f3.streaming.stream_utils import wrap_view
BUF_SIZE = 64 * 1024 * 1024 + 1
TEST_CHANNEL = "stream"
TEST_TOPIC = "test"
TX_CELL = "sender"
RX_CELL = "server"
def make_buffer(size: int) -> bytearray:
buf = wrap_view(bytearray(size))
buf_len = 0
n = 0
while True:
temp = n.to_bytes(8, "big", signed=False)
temp_len = len(temp)
if (buf_len + temp_len) > size:
temp_len = size - buf_len
buf[buf_len : buf_len + temp_len] = temp[0:temp_len]
buf_len += temp_len
n += 1
if buf_len >= size:
break
return buf
def setup_log(level):
logging.basicConfig(level=level)
formatter = logging.Formatter(
fmt="%(relativeCreated)6d [%(threadName)-12s] [%(levelname)-5s] %(name)s: %(message)s"
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
root_log = logging.getLogger()
root_log.handlers.clear()
root_log.addHandler(handler)
| NVFlare-main | nvflare/fuel/f3/streaming/tools/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import time
from nvflare.fuel.f3.cellnet.core_cell import CoreCell
from nvflare.fuel.f3.stream_cell import StreamCell
from nvflare.fuel.f3.streaming.stream_types import StreamFuture
from nvflare.fuel.f3.streaming.stream_utils import stream_thread_pool
from nvflare.fuel.f3.streaming.tools.utils import RX_CELL, TEST_CHANNEL, TEST_TOPIC, setup_log
class FileReceiver:
"""Utility to receive files sent from another cell"""
def __init__(self, listening_url: str, out_folder: str):
self.cell = CoreCell(RX_CELL, listening_url, secure=False, credentials={})
self.stream_cell = StreamCell(self.cell)
self.stream_cell.register_file_cb(TEST_CHANNEL, TEST_TOPIC, self.file_cb)
self.cell.start()
self.out_folder = out_folder
self.file_received = 0
def stop(self):
self.cell.stop()
def file_cb(self, future: StreamFuture, original_name: str):
out_file = os.path.join(self.out_folder, original_name)
stream_thread_pool.submit(self.monitor_status, future)
print(f"Received file {original_name}, writing to {out_file} ...")
return out_file
def monitor_status(self, future: StreamFuture):
start = time.time()
while True:
if future.done():
break
progress = future.get_progress()
percent = progress * 100.0 / future.get_size()
print(f"Received {progress} bytes {percent:.2f}% done")
time.sleep(1)
name = future.result()
print(f"Time elapsed: {(time.time() - start):.3f} seconds")
        print(f"File {name} is received")
self.file_received += 1
return name
if __name__ == "__main__":
setup_log(logging.INFO)
if len(sys.argv) != 3:
print(f"Usage: {sys.argv[0]} listening_url out_folder")
sys.exit(1)
listening_url = sys.argv[1]
out_folder = sys.argv[2]
receiver = FileReceiver(listening_url, out_folder)
while True:
if receiver.file_received >= 1:
break
time.sleep(1)
receiver.stop()
print(f"Done. Files received: {receiver.file_received}")
| NVFlare-main | nvflare/fuel/f3/streaming/tools/file_receiver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import threading
import time
from nvflare.fuel.f3.cellnet.core_cell import CellAgent, CoreCell
from nvflare.fuel.f3.message import Message
from nvflare.fuel.f3.stream_cell import StreamCell
from nvflare.fuel.f3.streaming.tools.utils import RX_CELL, TEST_CHANNEL, TEST_TOPIC, TX_CELL, setup_log
class FileSender:
"""Utility to send a file to another cell"""
def __init__(self, url: str):
core_cell = CoreCell(TX_CELL, url, secure=False, credentials={})
self.stream_cell = StreamCell(core_cell)
core_cell.set_cell_connected_cb(self.cell_connected)
core_cell.start()
self.cell = core_cell
self.ready = threading.Event()
def stop(self):
self.cell.stop()
def wait(self):
self.ready.wait()
def send(self, file_to_send: str):
future = self.stream_cell.send_file(TEST_CHANNEL, TEST_TOPIC, RX_CELL, Message(None, file_to_send))
while True:
if future.done():
break
time.sleep(1)
progress = future.get_progress()
percent = progress * 100.0 / future.get_size()
print(f"Sent {progress} bytes {percent:.2f}% done")
size = future.result()
print(f"Total {size} bytes sent for file {file_to_send}")
def cell_connected(self, agent: CellAgent):
if agent.get_fqcn() == RX_CELL:
self.ready.set()
if __name__ == "__main__":
setup_log(logging.INFO)
if len(sys.argv) != 3:
print(f"Usage: {sys.argv[0]} connect_url file_name")
sys.exit(1)
connect_url = sys.argv[1]
file_name = sys.argv[2]
sender = FileSender(connect_url)
print("Waiting for receiver to be online ...")
sender.wait()
print(f"Sending file {file_name} ...")
start = time.time()
sender.send(file_name)
print(f"Time elapsed: {(time.time()-start):.3f} seconds")
sender.stop()
print("Done")
| NVFlare-main | nvflare/fuel/f3/streaming/tools/file_sender.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import logging
import pkgutil
from typing import Dict, List, Optional
from nvflare.security.logging import secure_format_exception
DEPRECATED_PACKAGES = ["nvflare.app_common.pt", "nvflare.app_common.homomorphic_encryption"]
def get_class(class_path):
module_name, class_name = class_path.rsplit(".", 1)
try:
module_ = importlib.import_module(module_name)
try:
class_ = getattr(module_, class_name)
except AttributeError:
raise ValueError("Class {} does not exist".format(class_path))
    except ModuleNotFoundError:
        raise ValueError("Module {} does not exist".format(class_path))
return class_
def instantiate_class(class_path, init_params):
"""Method for creating an instance for the class.
Args:
class_path: full path of the class
init_params: A dictionary that contains the name of the transform and constructor input
arguments. The transform name will be appended to `medical.common.transforms` to make a
full name of the transform to be built.
"""
c = get_class(class_path)
try:
if init_params:
instance = c(**init_params)
else:
instance = c()
except TypeError as e:
raise ValueError(f"Class {class_path} has parameters error: {secure_format_exception(e)}.")
return instance
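# Illustrative usage sketch, not part of the original module: get_class resolves a
# dotted path to a class object and instantiate_class builds an instance from it.
# The stdlib path "collections.OrderedDict" and its args are arbitrary examples.
def _example_instantiate_by_path():
    cls = get_class("collections.OrderedDict")
    # init_params are passed as keyword arguments to the constructor.
    instance = instantiate_class("collections.OrderedDict", {"a": 1})
    return cls, instance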
class _ModuleScanResult:
"""Data class for ModuleScanner."""
def __init__(self, class_name: str, module_name: str):
self.class_name = class_name
self.module_name = module_name
def __str__(self):
return f"{self.class_name}:{self.module_name}"
class ModuleScanner:
def __init__(self, base_pkgs: List[str], module_names: List[str], exclude_libs=True):
"""Loads specified modules from base packages and then constructs a class to module name mapping.
Args:
base_pkgs: base packages to look for modules in
module_names: module names to load
exclude_libs: excludes modules containing .libs if True. Defaults to True.
"""
self.base_pkgs = base_pkgs
self.module_names = module_names
self.exclude_libs = exclude_libs
self._logger = logging.getLogger(self.__class__.__name__)
self._class_table: Dict[str, str] = {}
self._create_classes_table()
def _create_classes_table(self):
scan_result_table = {}
for base in self.base_pkgs:
package = importlib.import_module(base)
for module_info in pkgutil.walk_packages(path=package.__path__, prefix=package.__name__ + "."):
module_name = module_info.name
if any(module_name.startswith(deprecated_package) for deprecated_package in DEPRECATED_PACKAGES):
continue
if module_name.startswith(base):
if not self.exclude_libs or (".libs" not in module_name):
if any(module_name.startswith(base + "." + name + ".") for name in self.module_names):
try:
module = importlib.import_module(module_name)
for name, obj in inspect.getmembers(module):
if (
not name.startswith("_")
and inspect.isclass(obj)
and obj.__module__ == module_name
):
# same class name exists in multiple modules
if name in scan_result_table:
scan_result = scan_result_table[name]
if name in self._class_table:
self._class_table.pop(name)
self._class_table[f"{scan_result.module_name}.{name}"] = module_name
self._class_table[f"{module_name}.{name}"] = module_name
else:
scan_result = _ModuleScanResult(class_name=name, module_name=module_name)
scan_result_table[name] = scan_result
self._class_table[name] = module_name
except (ModuleNotFoundError, RuntimeError) as e:
self._logger.debug(
f"Try to import module {module_name}, but failed: {secure_format_exception(e)}. "
f"Can't use name in config to refer to classes in module: {module_name}."
)
pass
def get_module_name(self, class_name) -> Optional[str]:
"""Gets the name of the module that contains this class.
Args:
class_name: The name of the class
Returns:
The module name if found.
"""
return self._class_table.get(class_name, None)
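# Illustrative usage sketch, not part of the original module: a ModuleScanner is
# typically built over NVFlare packages so that short class names used in job configs
# can be resolved to their defining modules. The package and class names below are
# assumptions for demonstration only.
def _example_module_scanner():
    scanner = ModuleScanner(base_pkgs=["nvflare"], module_names=["app_common"])
    # Returns the module path that defines the class, or None if the name is unknown.
    return scanner.get_module_name("FullModelShareableGenerator")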
| NVFlare-main | nvflare/fuel/utils/class_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from .validation_utils import check_object_type
def update(d, u):
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
def update_configs_with_envs(configs, env):
for k, v in configs.items():
if isinstance(v, list):
length = len(v)
for i in range(length):
if isinstance(v[i], dict):
configs[k][i] = update_configs_with_envs(v[i], env)
elif isinstance(v, dict):
configs[k] = update_configs_with_envs(v, env)
elif isinstance(v, str):
configs[k] = v.format(**env)
return configs
def merge_dict(dict1, dict2):
return {**dict1, **dict2}
def extract_first_level_primitive(d):
result = {}
for k, v in d.items():
if type(v) in (int, float, bool, str):
result[k] = v
return result
def augment(to_dict: dict, from_dict: dict, from_override_to=False, append_list="components") -> str:
"""Augments the to_dict with the content from the from_dict.
- Items in from_dict but not in to_dict are added to the to_dict
- Items in both from_dict and to_dict must be ether dicts or list of dicts,
and augment will be done on these items recursively
- Non-dict/list items in both from_dict and to_dict are considered conflicts.
Args:
to_dict: the dict to be augmented
from_dict: content to augment the to_dict
from_override_to: content in from_dict overrides content in to_dict when conflict happens
append_list: str or list of str: item keys for list to be appended
Returns:
An error message if any; empty str if success.
.. note::
The content of the to_dict is updated
"""
check_object_type("to_dict", to_dict, dict)
check_object_type("from_dict", from_dict, dict)
if isinstance(append_list, str):
append_list = [append_list]
elif not isinstance(append_list, list):
return f"append_list must be str or list but got {type(append_list)}"
for k, fv in from_dict.items():
if k not in to_dict:
to_dict[k] = fv
continue
tv = to_dict[k]
if isinstance(fv, dict):
if not isinstance(tv, dict):
return f"type conflict in element '{k}': dict in from_dict but {type(tv)} in to_dict"
err = augment(tv, fv)
if err:
return err
continue
if isinstance(fv, list):
if not isinstance(tv, list):
return f"type conflict in element '{k}': list in from_dict but {type(tv)} in to_dict"
if k in append_list:
# items in "from_dict" are appended to "to_dict"
tv.extend(fv)
continue
if len(fv) != len(tv):
return f"list length conflict in element '{k}': {len(fv)} in from_dict but {len(tv)} in to_dict"
for i in range(len(fv)):
# we only support list of dicts!
fvi = fv[i]
tvi = tv[i]
if not isinstance(fvi, dict):
return f"invalid list item {i} in element '{k}' in from_dict: must be dict but got {type(fvi)}"
if not isinstance(tvi, dict):
return f"invalid list item {i} in element '{k}' in to_dict: must be dict but got {type(tvi)}"
err = augment(tv[i], fv[i])
if err:
return err
continue
if type(fv) != type(tv):
return f"type conflict in element '{k}': {type(fv)} in from_dict but {type(tv)} in to_dict"
if from_override_to:
to_dict[k] = fv
return ""
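# Illustrative usage sketch, not part of the original module: augment() adds items
# that exist only in from_dict, merges nested dicts recursively, and appends lists
# whose keys are named in append_list.
def _example_augment():
    to_dict = {"format_version": 2, "components": [{"id": "a"}]}
    from_dict = {"components": [{"id": "b"}], "workflows": [{"id": "wf"}]}
    err = augment(to_dict, from_dict, append_list="components")
    # err is "" on success; to_dict now contains components "a" and "b" plus the
    # "workflows" entry copied from from_dict.
    return err, to_dict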
def _update_component_dict(comp_list: list, target: dict) -> str:
for c in comp_list:
check_object_type("element in comp_list", c, dict)
cid = c.get("id", None)
if not cid:
return "missing 'id' from a component"
target[cid] = c
return ""
def update_components(target_dict: dict, from_dict: dict) -> str:
"""update components in target_dict with components from the from_dict.
If a component with the same ID exists in both target_dict and from_dict, the component in from_dict
will replace the one in target_dict.
If a component only exists in from_dict, it will be added to the component list of target_dict.
Args:
target_dict: the dict to be updated
from_dict: the dict that will be used to update the target_dict
Returns:
"""
key_components = "components"
from_comp_list = from_dict.get(key_components, None)
if not from_comp_list:
# no components to update
return ""
check_object_type("from_comp_list", from_comp_list, list)
target_comp_list = target_dict.get(key_components, None)
if not target_comp_list:
target_dict[key_components] = from_comp_list
return ""
check_object_type("target_comp_list", target_comp_list, list)
from_comp_dict = {}
err = _update_component_dict(from_comp_list, from_comp_dict)
if err:
return f"error in from_dict: {err}"
target_comp_dict = {}
err = _update_component_dict(target_comp_list, target_comp_dict)
if err:
return f"error in target_dict: {err}"
# determine components in both
dups = []
for cid in target_comp_dict.keys():
if cid in from_comp_dict:
dups.append(cid)
for cid in dups:
# remove from target_comp_dict
target_comp_dict.pop(cid)
new_target_comp_list = list(target_comp_dict.values())
new_target_comp_list.extend(from_comp_list)
target_dict[key_components] = new_target_comp_list
return ""
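# Illustrative usage sketch, not part of the original module: update_components()
# replaces components that share an "id" and appends components that are new.
def _example_update_components():
    target = {"components": [{"id": "persistor", "path": "old.Persistor"}]}
    source = {"components": [{"id": "persistor", "path": "new.Persistor"}, {"id": "metrics"}]}
    err = update_components(target, source)
    # err is ""; target["components"] now holds the "new.Persistor" entry plus "metrics".
    return err, target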
| NVFlare-main | nvflare/fuel/utils/dict_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
_true_set = {"yes", "true", "t", "y", "1"}
_false_set = {"no", "false", "f", "n", "0"}
def str2bool(value, raise_exc=False):
if isinstance(value, bool):
return value
if isinstance(value, str):
value = value.lower()
if value in _true_set:
return True
if value in _false_set:
return False
if isinstance(value, int):
return value != 0
if raise_exc:
raise ValueError('Expected "%s"' % '", "'.join(_true_set | _false_set))
return None
def parse_var(s):
"""Parse string variable into key-value tuple.
Returns (key, value) tuple from string with equals sign with the portion before the first equals sign as the key
and the rest as the value.
Args:
s: string to parse
Returns: Tuple of key and value
"""
items = s.split("=")
key = items[0].strip() # we remove blanks around keys, as is logical
value = ""
if len(items) > 1:
# rejoin the rest:
value = "=".join(items[1:])
return key, value
def parse_vars(items):
"""Converts a list of key value pairs into a dictionary.
Args:
items: list like ['a=1', 'b=2', 'c=3']
Returns: dictionary like {'a': '1', 'b': '2', 'c': '3'}
"""
d = {}
if items:
for item in items:
key, value = parse_var(item)
# d[key] = value
try:
d[key] = int(value)
except ValueError:
try:
d[key] = float(value)
except ValueError:
try:
d[key] = bool(str2bool(str(value), True))
except ValueError:
d[key] = value
return d
class SafeArgumentParser(argparse.ArgumentParser):
"""Safe version of ArgumentParser which doesn't exit on error"""
def __init__(self, **kwargs):
kwargs["add_help"] = False
super().__init__(**kwargs)
def error(self, message):
writer = io.StringIO()
self.print_help(writer)
raise ValueError(message + "\n" + writer.getvalue())
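# Illustrative usage sketch, not part of the original module: SafeArgumentParser
# raises ValueError instead of calling sys.exit() on bad input, so it can be used
# safely inside long-running services.
def _example_safe_parser():
    parser = SafeArgumentParser(prog="demo")
    parser.add_argument("--port", type=int, required=True)
    try:
        parser.parse_args(["--port", "not-a-number"])
    except ValueError as e:
        # The error message includes the generated help text.
        return str(e)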
| NVFlare-main | nvflare/fuel/utils/argument_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from collections import OrderedDict
from enum import Enum
from typing import Any, Dict, List, Optional
class ConfigFormat(Enum):
JSON = "JSON"
PYHOCON = "PYHOCON"
OMEGACONF = "OMEGACONF"
@classmethod
def config_ext_formats(cls):
return OrderedDict(
{
".json": ConfigFormat.JSON,
".conf": ConfigFormat.PYHOCON,
".yml": ConfigFormat.OMEGACONF,
".json.default": ConfigFormat.JSON,
".conf.default": ConfigFormat.PYHOCON,
".yml.default": ConfigFormat.OMEGACONF,
}
)
@classmethod
def extensions(cls, target_fmt=None) -> List[str]:
if target_fmt is None:
return [ext for ext, fmt in cls.config_ext_formats().items()]
else:
return [ext for ext, fmt in cls.config_ext_formats().items() if fmt == target_fmt]
class Config(ABC):
def __init__(self, conf: Any, fmt: ConfigFormat, file_path: Optional[str] = None):
self.format = fmt
self.conf = conf
self.file_path = file_path
    def get_format(self) -> ConfigFormat:
        """Returns the current config object's ConfigFormat.
Returns:
return ConfigFormat
"""
return self.format
def get_exts(self) -> List[str]:
return ConfigFormat.extensions(self.format)
    def get_native_conf(self):
        """Return the original underlying config object if you prefer to use it directly.
            Pyhocon → ConfigTree
            JSON → Dict
            OMEGACONF → DictConfig
        Returns: Any,
            the native config object
"""
return self.conf
def get_location(self) -> Optional[str]:
"""return the file path where this configuration is loaded from
Returns:
return None if the config is not from file else return file path
"""
return self.file_path
@abstractmethod
    def to_dict(self, resolve: Optional[bool] = True) -> Dict:
        """Convert the underlying config object to a dictionary.
        Args:
            resolve: optional argument to indicate if variables need to be resolved when converting to a dictionary.
                Not all underlying configuration formats support this; if not supported, it is treated as the
                default value True.
Returns:
Returns: converted configuration as dict
"""
@abstractmethod
    def to_str(self, element: Optional[Dict] = None) -> str:
        """Convert the dict element to the string representation of the underlying configuration, if element is not None.
            For example, for JSON format the method returns a JSON string;
            for Pyhocon format, a pyhocon string;
            for OmegaConf format, a YAML string representation.
        If the element is None, return the string representation of the underlying config itself.
Args:
element: Optional[Dict]. default to None. dictionary representation of config
Returns:
string representation of the configuration in given format for the element or config
"""
class ConfigLoader(ABC):
def __init__(self, fmt: ConfigFormat):
self.format = fmt
def get_format(self) -> ConfigFormat:
"""returns the current configLoader's ConfigFormat
Returns:
return ConfigFormat
"""
return self.format
@abstractmethod
def load_config(self, file_path: str) -> Config:
"""load configuration from config file: file_path
Args:
file_path: file path for configuration to be loaded
Returns:
return Config
"""
    def load_config_from_str(self, config_str: str) -> Config:
        """Load configuration from the string representation of the underlying configuration,
        for example, a JSON string for JSON format, a pyhocon string, or a YAML string.
Args:
config_str:
Returns:
return Config
"""
raise NotImplementedError
    def load_config_from_dict(self, config_dict: dict) -> Config:
        """Load configuration from the given config dict.
Args:
config_dict:
Returns:
Config
"""
raise NotImplementedError
| NVFlare-main | nvflare/fuel/utils/config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class Mode(str, Enum):
ACTIVE = "ACTIVE"
PASSIVE = "PASSIVE"
| NVFlare-main | nvflare/fuel/utils/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
import warnings
def deprecated(reason):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
def decorator(func):
fmt = "Call to deprecated {kind} {name}{reason}."
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning)
warnings.warn(
fmt.format(
kind="class" if inspect.isclass(func) else "function",
name=func.__name__,
reason=f" ({reason})" if reason else "",
),
category=DeprecationWarning,
stacklevel=2,
)
warnings.simplefilter("default", DeprecationWarning)
return func(*args, **kwargs)
return new_func
if inspect.isclass(reason) or inspect.isfunction(reason):
# The @deprecated is used without any 'reason'.
return decorator(reason)
elif isinstance(reason, str):
# The @deprecated is used with a 'reason'.
return decorator
else:
raise TypeError(repr(type(reason)))
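# Illustrative usage sketch, not part of the original module: @deprecated may be used
# bare or with a reason string; calling the wrapped function emits a DeprecationWarning.
@deprecated("use new_api() instead")
def _example_old_api():
    return "still works, but warns when called"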
| NVFlare-main | nvflare/fuel/utils/deprecated.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/utils/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
def time_to_string(t) -> str:
"""Convert time into a formatted string.
Args:
t: input time string in seconds since the Epoch
Returns:
formatted time string
"""
if t is None:
return "N/A"
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
| NVFlare-main | nvflare/fuel/utils/time_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SYMBOL_ALL = "@all"
SYMBOL_NONE = "@none"
class DefaultValuePolicy:
"""
Defines policy for how to determine default value
"""
DISALLOW = "disallow"
ANY = "any"
EMPTY = "empty"
ALL = "all"
@classmethod
def valid_policy(cls, p: str):
return p in [cls.DISALLOW, cls.ANY, cls.EMPTY, cls.ALL]
def check_positive_int(name, value):
if not isinstance(value, int):
raise TypeError(f"{name} must be an int, but got {type(value)}.")
if value <= 0:
raise ValueError(f"{name} must > 0, but got {value}")
def check_non_negative_int(name, value):
if not isinstance(value, int):
raise TypeError(f"{name} must be an int, but got {type(value)}.")
if value < 0:
raise ValueError(f"{name} must >= 0, but got {value}")
def check_positive_number(name, value):
if not isinstance(value, (int, float)):
raise TypeError(f"{name} must be a number, but got {type(value)}.")
if value <= 0:
raise ValueError(f"{name} must > 0, but got {value}")
def check_number_range(name, value, min_value=None, max_value=None):
if not isinstance(value, (int, float)):
raise TypeError(f"{name} must be a number, but got {type(value)}.")
if min_value is not None:
if not isinstance(min_value, (int, float)):
raise TypeError(f"{name}: min_value must be a number but got {type(min_value)}.")
if value < min_value:
raise ValueError(f"{name} must be >= {min_value} but got {value}")
if max_value is not None:
if not isinstance(max_value, (int, float)):
raise TypeError(f"{name}: max_value must be a number but got {type(max_value)}.")
if value > max_value:
raise ValueError(f"{name} must be <= {max_value} but got {value}")
def check_non_negative_number(name, value):
if not isinstance(value, (int, float)):
raise TypeError(f"{name} must be a number, but got {type(value)}.")
if value < 0:
raise ValueError(f"{name} must >= 0, but got {value}")
def check_str(name, value):
check_object_type(name, value, str)
def check_non_empty_str(name, value):
check_object_type(name, value, str)
v = value.strip()
if not v:
raise ValueError(f"{name} must not be empty")
def check_object_type(name, value, obj_type):
if not isinstance(value, obj_type):
raise TypeError(f"{name} must be {obj_type}, but got {type(value)}.")
def check_callable(name, value):
if not callable(value):
raise ValueError(f"{name} must be callable, but got {type(value)}.")
def _determine_candidates_value(var_name: str, candidates, base: list):
if not isinstance(base, list):
raise TypeError(f"base must be list but got {type(base)}")
if candidates is None:
return None # empty
if isinstance(candidates, str):
nc = candidates.strip()
if not nc:
return []
c = nc.lower()
if c == SYMBOL_ALL:
return base
elif c == SYMBOL_NONE:
return None
elif nc in base:
return [nc]
else:
raise ValueError(f"value of '{var_name}' ({candidates}) is invalid")
if not isinstance(candidates, list):
raise ValueError(f"invalid '{var_name}': expect str or list of str but got {type(candidates)}")
validated = []
for c in candidates:
if not isinstance(c, str):
raise ValueError(f"invalid value in '{var_name}': must be str but got {type(c)}")
n = c.strip()
if n not in base:
raise ValueError(f"invalid value '{n}' in '{var_name}'")
if n not in validated:
validated.append(n)
return validated
def validate_candidates(var_name: str, candidates, base: list, default_policy: str, allow_none: bool):
"""Validate specified candidates against the items in the "base" list, based on specified policy
and returns determined value for the candidates.
The value of candidates could have the following cases:
1. Not explicitly specified (Python object None or empty list [])
In this case, the default_policy decides the final result:
- ANY: returns a list that contains a single item from the base
- EMPTY: returns an empty list
- ALL: returns the base list
- DISALLOW: raise exception - candidates must be explicitly specified
2. A list of string items
In this case, each item in the candidates list must be in the "base". Duplicates are removed.
3. A string with special value "@all" to mean "all items from the base"
Returns the base list.
4. A string with special value "@none" to mean "no items"
If allow_none is True, then returns an empty list; otherwise raise exception.
5. A string that is not a special value
If it is in the "base", return a list that contains this item; otherwise raise exception.
Args:
var_name: the name of the "candidates" var from the caller
candidates: the candidates to be validated
base: the base list that contains valid items
default_policy: policy for how to handle default value when "candidates" is not explicitly specified.
allow_none: whether "none" is allowed for candidates.
Returns:
"""
if not DefaultValuePolicy.valid_policy(default_policy):
raise ValueError(f"invalid default policy {default_policy}")
c = _determine_candidates_value(var_name, candidates, base)
if c is None:
if not allow_none:
raise ValueError(f"{var_name} must not be none")
else:
return [] # empty
if not c:
# empty
if default_policy == DefaultValuePolicy.EMPTY:
return []
elif default_policy == DefaultValuePolicy.ALL:
return base
elif default_policy == DefaultValuePolicy.DISALLOW:
raise ValueError(f"invalid value '{candidates}' in '{var_name}': it must be subset of {base}")
else:
# any
return [base[0]]
return c
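# Illustrative usage sketch, not part of the original module: validating user-supplied
# site names against the sites known to the system. The site names are made up.
def _example_validate_candidates():
    known_sites = ["site-1", "site-2", "site-3"]
    # "@all" expands to every known site; an explicit list is checked item by item.
    everything = validate_candidates("sites", "@all", known_sites, DefaultValuePolicy.ALL, allow_none=True)
    some = validate_candidates("sites", ["site-2"], known_sites, DefaultValuePolicy.ANY, allow_none=False)
    return everything, some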
def _determine_candidate_value(var_name: str, candidate, base: list):
if candidate is None:
return None
if not isinstance(candidate, str):
raise ValueError(f"invalid '{var_name}': must be str but got {type(candidate)}")
n = candidate.strip()
if n in base:
return n
c = n.lower()
if c == SYMBOL_NONE:
return None
elif not c:
return ""
else:
raise ValueError(f"invalid value '{candidate}' in '{var_name}'")
def validate_candidate(var_name: str, candidate, base: list, default_policy: str, allow_none: bool):
"""Validate specified candidate against the items in the "base" list, based on specified policy
and returns determined value for the candidate.
The value of candidate could have the following cases:
1. Not explicitly specified (Python object None or empty string)
In this case, the default_policy decides the final result:
- ANY: returns the first item from the base
- EMPTY: returns an empty str
- ALL or DISALLOW: raise exception - candidate must be explicitly specified
2. A string with special value "@none" to mean "nothing"
If allow_none is True, then returns an empty str; otherwise raise exception.
3. A string that is not a special value
If it is in the "base", return it; otherwise raise exception.
All other cases, raise exception.
NOTE: the final value is normalized (leading and trailing white spaces are removed).
Args:
var_name: the name of the "candidate" var from the caller
candidate: the candidate to be validated
base: the base list that contains valid items
default_policy: policy for how to handle default value when "candidates" is not explicitly specified.
allow_none: whether "none" is allowed for candidates.
Returns:
"""
if not DefaultValuePolicy.valid_policy(default_policy):
raise ValueError(f"invalid default policy {default_policy}")
if default_policy == DefaultValuePolicy.ALL:
raise ValueError(f"the policy '{default_policy}' is not applicable to validate_candidate")
c = _determine_candidate_value(var_name, candidate, base)
if c is None:
if not allow_none:
raise ValueError(f"{var_name} must be specified")
else:
return ""
if not c:
if default_policy == DefaultValuePolicy.EMPTY:
return ""
elif default_policy == DefaultValuePolicy.ANY:
return base[0]
else:
raise ValueError(f"invalid value '{candidate}' in '{var_name}': it must be one of {base}")
else:
return c
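# Illustrative usage sketch, not part of the original module: validate_candidate()
# resolves a single name; an unspecified value falls back according to the policy.
def _example_validate_candidate():
    known_sites = ["site-1", "site-2"]
    # Empty input with the ANY policy falls back to the first item ("site-1").
    return validate_candidate("site", "", known_sites, DefaultValuePolicy.ANY, allow_none=False)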
def normalize_config_arg(value):
if value is False:
return None # specified to be "empty"
if isinstance(value, str):
if value.strip().lower() == SYMBOL_NONE:
return None
if not value:
return "" # meaning to take default
return value
| NVFlare-main | nvflare/fuel/utils/validation_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from typing import Dict, Optional
from nvflare.fuel.utils.config import Config, ConfigFormat, ConfigLoader
from nvflare.security.logging import secure_format_exception
class JsonConfig(Config):
def __init__(self, conf: Dict, file_path: Optional[str] = None):
super(JsonConfig, self).__init__(conf, ConfigFormat.JSON, file_path)
def to_dict(self, resolve: Optional[bool] = True) -> Dict:
return self.conf
def to_str(self, element: Optional[Dict] = None) -> str:
if element is None:
return json.dumps(self.conf)
else:
return json.dumps(element)
class JsonConfigLoader(ConfigLoader):
def __init__(self):
super(JsonConfigLoader, self).__init__(fmt=ConfigFormat.JSON)
self.logger = logging.getLogger(self.__class__.__name__)
def load_config(self, file_path: str) -> Config:
conf_dict = self._from_file(file_path)
return JsonConfig(conf_dict, file_path)
def load_config_from_str(self, config_str: str) -> Config:
try:
conf = json.loads(config_str)
return JsonConfig(conf)
except Exception as e:
self.logger.error("Error loading config {}: {}".format(config_str, secure_format_exception(e)))
raise e
def load_config_from_dict(self, config_dict: dict) -> Config:
return JsonConfig(config_dict)
def _from_file(self, path) -> Dict:
with open(path, "r") as file:
try:
return json.load(file)
except Exception as e:
self.logger.error("Error loading config file {}: {}".format(path, secure_format_exception(e)))
raise e
| NVFlare-main | nvflare/fuel/utils/json_config_loader.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.utils.class_utils import instantiate_class
from nvflare.security.logging import secure_format_exception
class ConfigType:
COMPONENT = "component"
DICT = "dict"
class ComponentBuilder:
@abstractmethod
def get_module_scanner(self):
"""Provide the package module scanner.
Returns: module_scanner
"""
pass
def is_class_config(self, config_dict: dict) -> bool:
def has_valid_class_path():
try:
_ = self.get_class_path(config_dict)
# we have valid class path
return True
except ConfigError:
# this is not a valid class path
return False
# use config_type to distinguish between components and regular dictionaries
config_type = config_dict.get("config_type", ConfigType.COMPONENT)
if config_type != ConfigType.COMPONENT:
return False
# regardless it has args or not. if path/name and valid class path, very likely we have
# class config.
if ("path" in config_dict or "name" in config_dict) and has_valid_class_path():
return True
else:
return False
def build_component(self, config_dict):
if not config_dict:
return None
if not isinstance(config_dict, dict):
raise ConfigError("component config must be dict but got {}.".format(type(config_dict)))
if config_dict.get("disabled") is True:
return None
class_args = config_dict.get("args", dict())
for k, v in class_args.items():
if isinstance(v, dict) and self.is_class_config(v):
# try to replace the arg with a component
try:
t = self.build_component(v)
class_args[k] = t
except Exception as e:
raise ValueError(f"failed to instantiate class: {secure_format_exception(e)} ")
class_path = self.get_class_path(config_dict)
# Handle the special case, if config pass in the class_attributes, use the user defined class attributes
# parameters directly.
if "class_attributes" in class_args:
class_args = class_args["class_attributes"]
return instantiate_class(class_path, class_args)
def get_class_path(self, config_dict):
if "path" in config_dict.keys():
path_spec = config_dict["path"]
if not isinstance(path_spec, str):
raise ConfigError("path spec must be str but got {}.".format(type(path_spec)))
if len(path_spec) <= 0:
raise ConfigError("path spec must not be empty")
class_path = format(path_spec)
parts = class_path.split(".")
if len(parts) < 2:
raise ConfigError("invalid class path '{}': missing module name".format(class_path))
else:
if "name" not in config_dict:
raise ConfigError("class name or path must be specified")
class_name = config_dict["name"]
if not isinstance(class_name, str):
raise ConfigError("class name must be str but got {}.".format(type(class_name)))
if len(class_name) <= 0:
raise ConfigError("class name must not be empty")
module_name = self.get_module_scanner().get_module_name(class_name)
if module_name is None:
raise ConfigError('Cannot find component class "{}"'.format(class_name))
class_path = module_name + ".{}".format(class_name)
return class_path
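# Illustrative usage sketch, not part of the original module: a minimal concrete
# ComponentBuilder. The module scanner is only consulted when a component is given by
# short "name"; components given by full "path", as below, are resolved directly.
def _example_component_builder():
    class _DemoBuilder(ComponentBuilder):
        def get_module_scanner(self):
            # A real builder would return a ModuleScanner over the nvflare packages.
            return None
    builder = _DemoBuilder()
    return builder.build_component({"path": "collections.OrderedDict", "args": {"a": 1}})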
| NVFlare-main | nvflare/fuel/utils/component_builder.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from typing import List
def has_nvidia_smi() -> bool:
from shutil import which
return which("nvidia-smi") is not None
def use_nvidia_smi(query: str, report_format: str = "csv"):
if has_nvidia_smi():
result = subprocess.run(
["nvidia-smi", f"--query-gpu={query}", f"--format={report_format}"],
capture_output=True,
text=True,
)
rc = result.returncode
if rc > 0:
raise Exception(f"Failed to call nvidia-smi with query {query}", result.stderr)
else:
return result.stdout.splitlines()
return None
def _parse_gpu_mem(result: str = None, unit: str = "MiB") -> List:
gpu_memory = []
if result:
for i in result[1:]:
mem, mem_unit = i.split(" ")
if mem_unit != unit:
raise RuntimeError("Memory unit does not match.")
gpu_memory.append(int(mem))
return gpu_memory
def get_host_gpu_memory_total(unit="MiB") -> List:
result = use_nvidia_smi("memory.total")
return _parse_gpu_mem(result, unit)
def get_host_gpu_memory_free(unit="MiB") -> List:
result = use_nvidia_smi("memory.free")
return _parse_gpu_mem(result, unit)
def get_host_gpu_ids() -> List:
"""Gets GPU IDs.
Note:
Only supports nvidia-smi now.
"""
result = use_nvidia_smi("index")
gpu_ids = []
if result:
for i in result[1:]:
gpu_ids.append(int(i))
return gpu_ids
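# Illustrative usage sketch, not part of the original module: the helpers return empty
# lists when nvidia-smi is unavailable, so callers can probe GPUs without special-casing.
def _example_report_gpus():
    if not has_nvidia_smi():
        return "no nvidia-smi on this host"
    ids = get_host_gpu_ids()
    free = get_host_gpu_memory_free(unit="MiB")
    return {gpu_id: f"{mem} MiB free" for gpu_id, mem in zip(ids, free)}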
| NVFlare-main | nvflare/fuel/utils/gpu_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pathlib
from typing import List, Optional, Tuple
from nvflare.fuel.utils.config import Config, ConfigFormat, ConfigLoader
from nvflare.fuel.utils.import_utils import optional_import
from nvflare.fuel.utils.json_config_loader import JsonConfigLoader
class ConfigFactory:
logger = logging.getLogger(__qualname__)
OmegaConfLoader, omega_import_ok = optional_import(
module="nvflare.fuel_opt.utils.omegaconf_loader", name="OmegaConfLoader"
)
PyhoconLoader, pyhocon_import_ok = optional_import(
module="nvflare.fuel_opt.utils.pyhocon_loader", name="PyhoconLoader"
)
_fmt2Loader = {
ConfigFormat.JSON: JsonConfigLoader(),
}
if omega_import_ok:
_fmt2Loader.update({ConfigFormat.OMEGACONF: OmegaConfLoader()})
if pyhocon_import_ok:
_fmt2Loader.update({ConfigFormat.PYHOCON: PyhoconLoader()})
@staticmethod
def search_config_format(
init_file_path: str, search_dirs: Optional[List[str]] = None, target_fmt: Optional[ConfigFormat] = None
) -> Tuple[Optional[ConfigFormat], Optional[str]]:
"""find the configuration format and the location (file_path) for given initial init_file_path and search directories.
for example, the initial config file path given is config_client.json
the search function will ignore the .json extension and search "config_client.xxx" in the given directory in
specified extension search order. The first found file_path will be used as configuration.
the ".xxx" is one of the extensions defined in the configuration format.
Args:
init_file_path: initial file_path for the configuration
search_dirs: search directory. If none, the parent directory of init_file_path will be used as search dir
target_fmt: (ConfigFormat) if specified, only this format searched, ignore all other formats.
Returns:
Tuple of None,None or ConfigFormat and real configuration file path
"""
logger = ConfigFactory.logger
if not search_dirs: # empty or None
parent_dir = pathlib.Path(init_file_path).parent
search_dirs = [str(parent_dir)]
target_exts = None
if target_fmt:
target_exts = ConfigFormat.extensions(target_fmt)
# we ignore the original extension
file_basename = ConfigFactory.get_file_basename(init_file_path)
ext2fmt_map = ConfigFormat.config_ext_formats()
extensions = target_exts if target_fmt else ext2fmt_map.keys()
for search_dir in search_dirs:
logger.debug(f"search file basename:'{file_basename}', search dirs = {search_dirs}")
for ext in extensions:
fmt = ext2fmt_map[ext]
filename = f"{file_basename}{ext}"
for root, dirs, files in os.walk(search_dir):
if filename in files:
config_file = os.path.join(root, filename)
return fmt, config_file
return None, None
@staticmethod
def get_file_basename(init_file_path):
base_path = os.path.basename(init_file_path)
        index = base_path.find(".")
        # Guard against file names without an extension (find() returns -1).
        file_basename = base_path[:index] if index >= 0 else base_path
return file_basename
@staticmethod
def load_config(
file_path: str, search_dirs: Optional[List[str]] = None, target_fmt: Optional[ConfigFormat] = None
) -> Optional[Config]:
"""Find the configuration for given initial init_file_path and search directories.
for example, the initial config file path given is config_client.json
the search function will ignore the .json extension and search "config_client.xxx" in the given directory in
specified extension search order. The first found file_path will be used as configuration.
the ".xxx" is one of the extensions defined in the configuration format.
Args:
file_path: initial file path
search_dirs: search directory. If none, the parent directory of init_file_path will be used as search dir
target_fmt: (ConfigFormat) if specified, only this format searched, ignore all other formats.
Returns:
None if not found, or Config
"""
config_format, real_config_file_path = ConfigFactory.search_config_format(file_path, search_dirs, target_fmt)
if config_format is not None and real_config_file_path is not None:
config_loader = ConfigFactory.get_config_loader(config_format)
if config_loader:
conf = config_loader.load_config(file_path=real_config_file_path)
return conf
else:
return None
return None
@staticmethod
def get_config_loader(config_format: ConfigFormat) -> Optional[ConfigLoader]:
"""return ConfigLoader for given config_format
Args:
config_format: ConfigFormat
Returns:
the matching ConfigLoader for the given format
"""
if config_format is None:
return None
return ConfigFactory._fmt2Loader.get(config_format)
@staticmethod
def match_config(parent, init_file_path, match_fn) -> bool:
# we ignore the original extension
basename = os.path.splitext(pathlib.Path(init_file_path).name)[0]
ext2fmt_map = ConfigFormat.config_ext_formats()
for ext in ext2fmt_map:
if match_fn(parent, f"{basename}{ext}"):
return True
return False
@staticmethod
def has_config(init_file_path: str, search_dirs: Optional[List[str]] = None) -> bool:
fmt, real_file_path = ConfigFactory.search_config_format(init_file_path, search_dirs)
return real_file_path is not None
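# Illustrative usage sketch, not part of the original module: the factory searches the
# given directories for "config_fed_client.<ext>" across all supported extensions.
# The directory below is an assumption for the example.
def _example_load_client_config():
    search_dirs = ["/tmp/app/config"]
    fmt, path = ConfigFactory.search_config_format("config_fed_client.json", search_dirs)
    if fmt is None:
        return None
    config = ConfigFactory.load_config("config_fed_client.json", search_dirs)
    return fmt, path, config.to_dict() if config else None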
| NVFlare-main | nvflare/fuel/utils/config_factory.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
def get_size(obj, seen=None):
"""Recursively finds size of objects"""
size = sys.getsizeof(obj)
if seen is None:
seen = set()
obj_id = id(obj)
if obj_id in seen:
return 0
# Important mark as seen *before* entering recursion to gracefully handle
# self-referential objects
seen.add(obj_id)
if isinstance(obj, dict):
size += sum([get_size(v, seen) for v in obj.values()])
size += sum([get_size(k, seen) for k in obj.keys()])
elif hasattr(obj, "__dict__"):
size += get_size(obj.__dict__, seen)
elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)):
size += sum([get_size(i, seen) for i in obj])
return size
def get_logger(obj):
return logging.getLogger(f"{obj.__module__}.{obj.__class__.__qualname__}")
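# Illustrative usage sketch, not part of the original module: get_size() walks nested
# containers and object __dict__s to estimate the deep size of a payload in bytes.
def _example_payload_size():
    payload = {"weights": [0.1] * 1000, "meta": {"round": 3}}
    return get_size(payload)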
| NVFlare-main | nvflare/fuel/utils/obj_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
from typing import Dict, List, Optional, Union
from nvflare.fuel.utils.config import Config, ConfigFormat
from nvflare.fuel.utils.config_factory import ConfigFactory
ENV_VAR_PREFIX = "NVFLARE_"
def find_file_in_dir(file_basename, path) -> Union[None, str]:
"""
Find a file from a directory and return the full path of the file, if found
Args:
file_basename: base name of the file to be found
path: the directory from where the file is to be found
Returns: the full path of the file, if found; None if not found
"""
for root, dirs, files in os.walk(path):
if file_basename in files:
return os.path.join(root, file_basename)
return None
def search_file(file_basename: str, dirs: List[str]) -> Union[None, str]:
"""
Find a file by searching a list of dirs and return the one in the last dir.
Args:
file_basename: base name of the file to be found
dirs: list of directories to search
Returns: the full path of the file, if found; None if not found
"""
for d in dirs:
f = find_file_in_dir(file_basename, d)
if f:
return f
return None
class ConfigService:
"""
The ConfigService provides a global configuration service that can be used by any component at any layer.
The ConfigService manages config information and makes it available to any component, in two ways:
1. Config info is preloaded into predefined sections. Callers can get the config data by a section name.
2. Manages config path (a list of directories) and loads file from the path.
    JSON is always supported; Pyhocon (.conf) and YAML (OmegaConf) formats are supported when
    their optional dependencies are installed.
"""
logger = logging.getLogger(__name__)
_sections = {}
_config_path = []
_cmd_args = None
_var_dict = None
_var_values = {}
@classmethod
def initialize(cls, section_files: Dict[str, str], config_path: List[str], parsed_args=None, var_dict=None):
"""
Initialize the ConfigService.
Configuration is divided into sections, and each section must have a JSON config file.
Only specify the base name of the config file.
Config path is provided to locate config files. Files are searched in the order of provided
config_dirs. If multiple directories contain the same file name, then the first one is used.
Args:
section_files: dict: section name => config file
config_path: list of config directories
parsed_args: command args for starting the program
var_dict: dict for additional vars
Returns:
"""
if not isinstance(section_files, dict):
raise TypeError(f"section_files must be dict but got {type(section_files)}")
if not isinstance(config_path, list):
raise TypeError(f"config_dirs must be list but got {type(config_path)}")
if not config_path:
raise ValueError("config_dirs is empty")
if var_dict and not isinstance(var_dict, dict):
raise ValueError(f"var_dict must dict but got {type(var_dict)}")
for d in config_path:
if not isinstance(d, str):
raise ValueError(f"config_dirs must contain str but got {type(d)}")
if not os.path.exists(d):
raise ValueError(f"'directory {d}' does not exist")
if not os.path.isdir(d):
raise ValueError(f"'{d}' is not a valid directory")
cls._config_path = config_path
for section, file_basename in section_files.items():
cls._sections[section] = cls.load_config_dict(file_basename, cls._config_path)
cls._var_dict = var_dict
if parsed_args:
if not isinstance(parsed_args, argparse.Namespace):
raise ValueError(f"parsed_args must be argparse.Namespace but got {type(parsed_args)}")
cls._cmd_args = dict(parsed_args.__dict__)
@classmethod
def get_section(cls, name: str):
return cls._sections.get(name)
@classmethod
def add_section(cls, section_name: str, data: dict, overwrite_existing: bool = True):
"""
Add a section to the config data.
Args:
section_name: name of the section to be added
data: data of the section
overwrite_existing: if section already exists, whether to overwrite
Returns:
"""
if not isinstance(section_name, str):
raise TypeError(f"section name must be str but got {type(section_name)}")
if not isinstance(data, dict):
raise TypeError(f"config data must be dict but got {type(data)}")
if overwrite_existing or section_name not in cls._sections:
cls._sections[section_name] = data
@classmethod
def load_configuration(cls, file_basename: str) -> Optional[Config]:
return ConfigFactory.load_config(file_basename, cls._config_path)
@classmethod
def load_config_dict(
cls, file_basename: str, search_dirs: Optional[List] = None, raise_exception: bool = True
) -> Optional[Dict]:
"""
Load a specified config file ( ignore extension)
Args:
raise_exception: if True raise exception when error occurs
file_basename: base name of the config file to be loaded.
for example: file_basename = config_fed_server.json
what the function does is to search for config file that matches
config_fed_server.[json|json.default|conf|conf.default|yml|yml.default]
in given search directories: cls._config_path
                If json or json.default is not found,
                then switch to Pyhocon [.conf] or the corresponding default file; if still not found, switch
                to YAML files. OmegaConf is used to load YAML.
search_dirs: which directories to search.
Returns: Dictionary from the configuration
if not found, exception will be raised.
"""
conf = ConfigFactory.load_config(file_basename, search_dirs)
if conf:
return conf.to_dict()
else:
if raise_exception:
raise FileNotFoundError(cls.config_not_found_msg(file_basename, search_dirs))
return None
@classmethod
def config_not_found_msg(cls, file_basename, search_dirs):
basename = os.path.splitext(file_basename)[0]
conf_exts = "|".join(ConfigFormat.config_ext_formats().keys())
msg = f"cannot find file '{basename}[{conf_exts}]'"
msg = f"{msg} from search paths: '{search_dirs}'" if search_dirs else msg
return msg
@classmethod
def find_file(cls, file_basename: str) -> Union[None, str]:
"""
Find specified file from the config path.
Caller is responsible for loading/processing the file. This is useful for non-JSON files.
Args:
file_basename: base name of the file to be found
Returns: full name of the file if found; None if not.
"""
if not isinstance(file_basename, str):
raise TypeError(f"file_basename must be str but got {type(file_basename)}")
return search_file(file_basename, cls._config_path)
@classmethod
def _get_var(cls, name: str, conf):
if not isinstance(name, str):
raise ValueError(f"var name must be str but got {type(name)}")
# see whether command args have it
if cls._cmd_args and name in cls._cmd_args:
return cls._cmd_args.get(name)
if cls._var_dict and name in cls._var_dict:
return cls._var_dict.get(name)
# check OS env vars
if not name.startswith(ENV_VAR_PREFIX):
env_var_name = ENV_VAR_PREFIX + name
else:
env_var_name = name
env_var_name = env_var_name.upper()
if env_var_name in os.environ:
return os.environ.get(env_var_name)
if isinstance(conf, dict):
return conf.get(name)
@classmethod
def _int_var(cls, name: str, conf=None, default=None):
v = cls._get_var(name, conf)
if v is None:
return default
try:
return int(v)
except Exception as e:
raise ValueError(f"var {name}'s value '{v}' cannot be converted to int: {e}")
@classmethod
def _any_var(cls, func, name, conf, default):
if name in cls._var_values:
return cls._var_values.get(name)
v = func(name, conf, default)
if v is not None:
cls._var_values[name] = v
return v
@classmethod
def get_int_var(cls, name: str, conf=None, default=None):
return cls._any_var(cls._int_var, name, conf, default)
@classmethod
def _float_var(cls, name: str, conf=None, default=None):
v = cls._get_var(name, conf)
if v is None:
return default
try:
return float(v)
except:
raise ValueError(f"var {name}'s value '{v}' cannot be converted to float")
@classmethod
def get_float_var(cls, name: str, conf=None, default=None):
return cls._any_var(cls._float_var, name, conf, default)
@classmethod
def _bool_var(cls, name: str, conf=None, default=None):
v = cls._get_var(name, conf)
if v is None:
return default
if isinstance(v, bool):
return v
if isinstance(v, int):
return v != 0
if isinstance(v, str):
v = v.lower()
return v in ["true", "t", "yes", "y", "1"]
raise ValueError(f"var {name}'s value '{v}' cannot be converted to bool")
@classmethod
def get_bool_var(cls, name: str, conf=None, default=None):
return cls._any_var(cls._bool_var, name, conf, default)
@classmethod
def _str_var(cls, name: str, conf=None, default=None):
v = cls._get_var(name, conf)
if v is None:
return default
try:
return str(v)
except:
raise ValueError(f"var {name}'s value '{v}' cannot be converted to str")
@classmethod
def get_str_var(cls, name: str, conf=None, default=None):
return cls._any_var(cls._str_var, name, conf, default)
@classmethod
def get_var_values(cls):
return cls._var_values
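# Illustrative usage sketch, not part of the original module: typical bootstrap of the
# ConfigService. The caller supplies an existing config directory; the var_dict value
# below is an assumption for demonstration only.
def _example_config_service_bootstrap(config_dir: str):
    ConfigService.initialize(
        section_files={},  # no preloaded sections in this minimal sketch
        config_path=[config_dir],
        var_dict={"max_message_size": 1000},
    )
    # Variables are resolved from command args, var_dict, NVFLARE_* environment
    # variables, then the optional conf dict, in that order.
    return ConfigService.get_int_var("max_message_size", default=1000)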
| NVFlare-main | nvflare/fuel/utils/config_service.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Part of the code is adapted from https://github.com/Project-MONAI/MONAI/blob/dev/monai/utils/module.py#L282
# which has the following license
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Part of the code is adapted from https://github.com/Project-MONAI/MONAI/blob/dev/monai/utils/module.py#L282
"""
from importlib import import_module
from typing import Any, Tuple
from nvflare.security.logging import secure_format_exception
OPTIONAL_IMPORT_MSG_FMT = "{}"
OPS = ["==", ">=", ">", "<", "<="]
def get_module_version(this_pkg):
return this_pkg.__version__.split(".")[:2]
def get_module_version_str(the_module):
if the_module:
module_version = ".".join(get_module_version(the_module))
else:
module_version = ""
return module_version
def check_version(that_pkg, version: str = "", op: str = "==") -> bool:
"""
compare module version with provided version
"""
if not version or not hasattr(that_pkg, "__version__"):
return True # always valid version
mod_version = tuple(int(x) for x in get_module_version(that_pkg))
required = tuple(int(x) for x in version.split("."))
result = True
if op == "==":
result = mod_version == required
elif op == ">=":
result = mod_version >= required
elif op == ">":
result = mod_version > required
elif op == "<":
result = mod_version < required
elif op == "<=":
result = mod_version <= required
return result
class LazyImportError(ImportError):
"""
Could not import APIs from an optional dependency.
"""
def optional_import(
module: str,
op: str = "==",
version: str = "",
name: str = "",
descriptor: str = OPTIONAL_IMPORT_MSG_FMT,
allow_namespace_pkg: bool = False,
) -> Tuple[Any, bool]:
"""
Imports an optional module specified by `module` string.
Any importing related exceptions will be stored, and exceptions raise lazily
when attempting to use the failed-to-import module.
Args:
module: name of the module to be imported.
        op: version comparison operator; must be one of: ==, <=, <, >, >=
        version: version string of the module. If specified, it is used to check that the module version
            satisfies the condition: <module>.__version__ <op> <version>.
name: a non-module attribute (such as method/class) to import from the imported module.
descriptor: a format string for the final error message when using a not imported module.
allow_namespace_pkg: whether importing a namespace package is allowed. Defaults to False.
Returns:
The imported module and a boolean flag indicating whether the import is successful.
Examples::
>>> torch, flag = optional_import('torch')
>>> print(torch, flag)
<module 'torch' from '/..../lib/python3.8/site-packages/torch/__init__.py'> True
>>> torch, flag = optional_import('torch', '1.1')
>>> print(torch, flag)
<module 'torch' from 'python/lib/python3.6/site-packages/torch/__init__.py'> True
>>> the_module, flag = optional_import('unknown_module')
>>> print(flag)
False
>>> the_module.method # trying to access a module which is not imported
OptionalImportError: import unknown_module (No module named 'unknown_module').
>>> torch, flag = optional_import('torch', '42')
>>> torch.nn # trying to access a module for which there isn't a proper version imported
OptionalImportError: import torch (requires version=='42').
>>> conv, flag = optional_import('torch.nn.functional', ">=", '1.0', name='conv1d')
>>> print(conv)
<built-in method conv1d of type object at 0x11a49eac0>
>>> conv, flag = optional_import('torch.nn.functional', ">=", '42', name='conv1d')
>>> conv() # trying to use a function from the not successfully imported module (due to unmatched version)
OptionalImportError: from torch.nn.functional import conv1d (requires version>='42').
"""
tb = None
exception_str = ""
if name:
actual_cmd = f"from {module} import {name}"
else:
actual_cmd = f"import {module}"
pkg = None
try:
if op not in OPS:
raise ValueError(f"invalid op {op}, must be one of {OPS}")
pkg = __import__(module) # top level module
the_module = import_module(module)
if not allow_namespace_pkg:
is_namespace = getattr(the_module, "__file__", None) is None and hasattr(the_module, "__path__")
if is_namespace:
raise AssertionError
if name: # user specified to load class/function/... from the module
the_module = getattr(the_module, name)
except Exception as import_exception: # any exceptions during import
tb = import_exception.__traceback__
exception_str = secure_format_exception(import_exception)
else: # found the module
if check_version(pkg, version, op):
return the_module, True
# preparing lazy error message
msg = descriptor.format(actual_cmd)
if version and tb is None: # a pure version issue
msg += f": requires '{module}{op}{version}'"
if pkg:
module_version = get_module_version_str(pkg)
msg += f", current '{module}=={module_version}' "
if exception_str:
msg += f" ({exception_str})"
class _LazyRaise:
def __init__(self, attr_name, *_args, **_kwargs):
self.attr_name = attr_name
_default_msg = f"{msg}."
if tb is None:
self._exception = LazyImportError(_default_msg)
else:
self._exception = LazyImportError(_default_msg).with_traceback(tb)
def __getattr__(self, attr_name):
"""
Raises:
                LazyImportError: when any attribute of the failed import is accessed.
"""
raise self._exception
def __call__(self, *_args, **_kwargs):
"""
Raises:
                LazyImportError: when the failed import is called.
"""
raise self._exception
return _LazyRaise(name), False
| NVFlare-main | nvflare/fuel/utils/import_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
from pathlib import Path
from zipfile import ZipFile
def normpath_for_zip(path):
"""Normalizes the path for zip file.
Args:
path (str): the path to be normalized
"""
path = os.path.normpath(path)
path = os.path.splitdrive(path)[1]
# ZIP spec requires forward slashes
return path.replace("\\", "/")
def remove_leading_dotdot(path: str) -> str:
path = str(Path(path))
while path.startswith(f"..{os.path.sep}"):
path = path[3:]
return path
def split_path(path: str) -> (str, str):
"""Splits a path into a pair of head and tail.
It removes trailing `os.path.sep` and call `os.path.split`
Args:
path: Path to split
Returns:
A tuple of `(head, tail)`
"""
path = str(Path(path))
if path.endswith(os.path.sep):
full_path = path[:-1]
else:
full_path = path
return os.path.split(full_path)
def get_all_file_paths(directory):
"""Gets all file paths in the directory.
Args:
directory: directory to get all paths for
Returns:
A list of paths of all the files in the provided directory
"""
file_paths = []
# crawling through directory and subdirectories
for root, directories, files in os.walk(directory):
for filename in files:
file_paths.append(normpath_for_zip(os.path.join(root, filename)))
for dir_name in directories:
file_paths.append(normpath_for_zip(os.path.join(root, dir_name)))
return file_paths
def _zip_directory(root_dir: str, folder_name: str, output_file):
"""Creates a zip archive file for the specified directory.
Args:
root_dir: root path that contains the folder to be zipped
folder_name: path to the folder to be zipped, relative to root_dir
output_file: file to write to
"""
dir_name = normpath_for_zip(os.path.join(root_dir, folder_name))
if not os.path.exists(dir_name):
raise FileNotFoundError(f'source directory "{dir_name}" does not exist')
if not os.path.isdir(dir_name):
raise NotADirectoryError(f'"{dir_name}" is not a valid directory')
file_paths = get_all_file_paths(dir_name)
if folder_name:
prefix_len = len(split_path(dir_name)[0]) + 1
else:
prefix_len = len(dir_name) + 1
# writing files to a zipfile
with ZipFile(output_file, "w") as z:
# writing each file one by one
for full_path in file_paths:
rel_path = full_path[prefix_len:]
z.write(full_path, arcname=rel_path)
def zip_directory_to_bytes(root_dir: str, folder_name: str) -> bytes:
    """Compresses a directory and returns its content as bytes.
Args:
root_dir: root path that contains the folder to be zipped
folder_name: path to the folder to be zipped, relative to root_dir
"""
bio = io.BytesIO()
_zip_directory(root_dir, folder_name, bio)
return bio.getvalue()
def zip_directory_to_file(root_dir: str, folder_name: str, output_file):
    """Compresses a directory and writes the result to the specified output file.
Args:
root_dir: root path that contains the folder to be zipped
folder_name: path to the folder to be zipped, relative to root_dir
output_file: path of the output file
"""
_zip_directory(root_dir, folder_name, output_file)
def ls_zip_from_bytes(zip_data: bytes):
"""Returns info of a zip.
Args:
zip_data: the input zip data
"""
with ZipFile(io.BytesIO(zip_data), "r") as z:
return z.infolist()
def unzip_single_file_from_bytes(zip_data: bytes, output_dir_name: str, file_path: str):
"""Decompresses a zip and extracts single specified file to the specified output directory.
Args:
zip_data: the input zip data
output_dir_name: the output directory for extracted content
file_path: file path to file to unzip
"""
path_to_file, _ = split_path(file_path)
output_dir_name = os.path.join(output_dir_name, path_to_file)
    os.makedirs(output_dir_name, exist_ok=True)
if not os.path.exists(output_dir_name):
raise FileNotFoundError(f'output directory "{output_dir_name}" does not exist')
if not os.path.isdir(output_dir_name):
raise NotADirectoryError(f'"{output_dir_name}" is not a valid directory')
with ZipFile(io.BytesIO(zip_data), "r") as z:
z.extract(file_path, path=output_dir_name)
def unzip_all_from_bytes(zip_data: bytes, output_dir_name: str):
"""Decompresses a zip and extracts all files to the specified output directory.
Args:
zip_data: the input zip data
output_dir_name: the output directory for extracted content
"""
if not os.path.exists(output_dir_name):
raise FileNotFoundError(f'output directory "{output_dir_name}" does not exist')
if not os.path.isdir(output_dir_name):
raise NotADirectoryError(f'"{output_dir_name}" is not a valid directory')
with ZipFile(io.BytesIO(zip_data), "r") as z:
z.extractall(output_dir_name)
def unzip_all_from_file(zip_file_path: str, output_dir_name: str):
if not os.path.exists(output_dir_name):
raise FileNotFoundError(f'output directory "{output_dir_name}" does not exist')
if not os.path.isdir(output_dir_name):
raise NotADirectoryError(f'"{output_dir_name}" is not a valid directory')
if not os.path.exists(zip_file_path):
raise FileNotFoundError(f'zip file "{zip_file_path}" does not exist')
if not os.path.isfile(zip_file_path):
raise ValueError(f'zip file "{zip_file_path}" is not a valid file')
with ZipFile(zip_file_path, "r") as z:
z.extractall(output_dir_name)
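# Usage sketch (illustrative; the paths below are hypothetical):
#
#     data = zip_directory_to_bytes("/tmp/workspace", "my_app")   # zips /tmp/workspace/my_app
#     for info in ls_zip_from_bytes(data):
#         print(info.filename)
#     unzip_all_from_bytes(data, "/tmp/extracted")                # target dir must already exist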
| NVFlare-main | nvflare/fuel/utils/zip_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.security.logging import secure_format_exception
class State(object):
def __init__(self, name: str):
if not isinstance(name, str):
raise TypeError(f"name must be str but got {type(name)}")
name = name.strip()
if len(name) <= 0:
raise ValueError("name must not be empty")
self.name = name
self.fsm = None
def execute(self, **kwargs):
pass
def leave(self):
pass
def enter(self):
pass
class FSM(object):
STATE_NAME_EXIT = "__exit__"
def __init__(self, name: str):
self.name = name
self.props = {}
self.states = {} # state name => State
self.current_state = None
self.error = None
def set_prop(self, name, value):
self.props[name] = value
def get_prop(self, name, default=None):
        return self.props.get(name, default)
def add_state(self, state: State):
if not isinstance(state, State):
raise TypeError(f"state must be State but got {type(state)}")
if state.name in self.states:
raise RuntimeError(f"can't add duplicate state '{state.name}'")
state.fsm = self
self.states[state.name] = state
def set_current_state(self, name: str):
s = self.states.get(name)
if s is None:
raise RuntimeError(f'FSM has no such state "{name}"')
self.current_state = s
def get_current_state(self):
return self.current_state
def execute(self, **kwargs) -> State:
try:
self.current_state = self._try_execute(**kwargs)
except Exception as e:
self.error = f"exception occurred in state execution: {secure_format_exception(e)}"
self.current_state = None
return self.current_state
def _try_execute(self, **kwargs) -> State:
if self.current_state is None:
raise RuntimeError("FSM has no current state")
next_state_name = self.current_state.execute(**kwargs)
if next_state_name:
if next_state_name == FSM.STATE_NAME_EXIT:
# go to the end
return None
# enter next state
next_state = self.states.get(next_state_name, None)
if next_state is None:
raise RuntimeError(f'FSM has no such state "{next_state_name}"')
# leave current state
self.current_state.leave()
# enter the next state
next_state.enter()
# change to the new state
return next_state
else:
# stay in current state!
return self.current_state
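# Usage sketch (illustrative; the Hello state class and its name are hypothetical):
#
#     class Hello(State):
#         def execute(self, **kwargs):
#             print("hello")
#             return FSM.STATE_NAME_EXIT   # or the name of another registered state
#
#     fsm = FSM("demo")
#     fsm.add_state(Hello("hello"))
#     fsm.set_current_state("hello")
#     while fsm.get_current_state() is not None:
#         fsm.execute()                    # returns None on exit or on error (see fsm.error)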
| NVFlare-main | nvflare/fuel/utils/fsm.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import re
from typing import List
from nvflare.fuel.common.excepts import ConfigError
from nvflare.security.logging import secure_format_exception
from .class_utils import ModuleScanner, get_class, instantiate_class
from .dict_utils import extract_first_level_primitive, merge_dict
from .json_scanner import JsonObjectProcessor, JsonScanner, Node
class ConfigContext(object):
def __init__(self):
"""Object containing configuration context."""
self.app_root = ""
self.vars = None
self.config_json = None
self.pass_num = 0
class _EnvUpdater(JsonObjectProcessor):
def __init__(self, vs, element_filter=None):
JsonObjectProcessor.__init__(self)
self.vars = vs
if element_filter is not None and not callable(element_filter):
raise ValueError("element_filter must be a callable function but got {}.".format(type(element_filter)))
self.element_filter = element_filter
def process_element(self, node: Node):
element = node.element
if isinstance(element, str):
if self.element_filter is not None and not self.element_filter(element):
return
element = self.substitute(element)
parent_element = node.parent_element()
if node.position > 0:
# parent is a list
parent_element[node.position - 1] = element
else:
# parent is a dict
parent_element[node.key] = element
def substitute(self, element: str):
a = re.split("{|}", element)
if len(a) == 3 and a[0] == "" and a[2] == "":
element = self.vars.get(a[1], None)
else:
element = element.format(**self.vars)
return element
class Configurator(JsonObjectProcessor):
def __init__(
self,
app_root: str,
cmd_vars: dict,
env_config: dict,
wf_config_file_name: str,
base_pkgs: List[str],
module_names: List[str],
exclude_libs=True,
default_vars=None,
num_passes=1,
element_filter=None,
var_processor=None,
):
"""Base class of Configurator to parse JSON configuration.
Args:
app_root: app root
cmd_vars: command vars
env_config: environment configuration
wf_config_file_name: config file name
base_pkgs: base packages
module_names: module names
exclude_libs: whether to exclude libs
default_vars: default vars
num_passes: number of passes
element_filter: element filter
var_processor: variable processor
"""
JsonObjectProcessor.__init__(self)
assert isinstance(app_root, str), "app_root must be str but got {}.".format(type(app_root))
assert isinstance(num_passes, int), "num_passes must be int but got {}.".format(type(num_passes))
assert num_passes > 0, "num_passes must > 0"
if cmd_vars:
assert isinstance(cmd_vars, dict), "cmd_vars must be dict but got {}.".format(type(cmd_vars))
if env_config:
assert isinstance(env_config, dict), "env_config must be dict but got {}.".format(type(env_config))
assert isinstance(wf_config_file_name, str), "wf_config_file_name must be str but got {}.".format(
type(wf_config_file_name)
)
assert os.path.isfile(wf_config_file_name), "wf_config_file_name {} is not a valid file".format(
wf_config_file_name
)
assert os.path.exists(wf_config_file_name), "wf_config_file_name {} does not exist".format(wf_config_file_name)
if default_vars is not None:
assert isinstance(default_vars, dict), "default_vars must be dict but got {}.".format(type(default_vars))
else:
default_vars = {}
self.cmd_vars = cmd_vars
self.default_vars = default_vars
self.app_root = app_root
self.env_config = env_config
self.wf_config_file_name = wf_config_file_name
self.num_passes = num_passes
self.element_filter = element_filter
self.module_scanner = ModuleScanner(base_pkgs, module_names, exclude_libs)
self.all_vars = None
self.vars_from_cmd = None
self.vars_from_env_config = None
self.vars_from_wf_config = None
self.config_ctx = None
self.var_processor = var_processor
with open(wf_config_file_name) as file:
self.wf_config_data = json.load(file)
self.json_scanner = JsonScanner(self.wf_config_data, wf_config_file_name)
def _do_configure(self):
vars_from_cmd = {}
if self.cmd_vars:
vars_from_cmd = copy.copy(self.cmd_vars)
for key, value in vars_from_cmd.items():
if key.startswith("APP_") and value != "":
vars_from_cmd[key] = os.path.join(self.app_root, value)
vars_from_env_config = {}
if self.env_config:
vars_from_env_config = copy.copy(self.env_config)
for key, value in vars_from_env_config.items():
if key.startswith("APP_") and value != "":
vars_from_env_config[key] = os.path.join(self.app_root, value)
vars_from_wf_conf = extract_first_level_primitive(self.wf_config_data)
if "determinism" in self.wf_config_data:
vars_from_wf_conf["determinism"] = self.wf_config_data["determinism"]
# precedence of vars (high to low):
# vars_from_cmd, vars_from_config, vars_from_wf_conf
# func merge_dict(d1, d2) gives d2 higher precedence for the same key
all_vars = merge_dict(self.default_vars, vars_from_wf_conf)
all_vars = merge_dict(all_vars, vars_from_env_config)
all_vars = merge_dict(all_vars, vars_from_cmd)
# update the wf_config with vars
self.all_vars = all_vars
self.vars_from_cmd = vars_from_cmd
self.vars_from_env_config = vars_from_env_config
self.vars_from_wf_config = vars_from_wf_conf
if self.var_processor:
self.var_processor.process(self.all_vars, app_root=self.app_root)
self.json_scanner.scan(_EnvUpdater(all_vars, self.element_filter))
config_ctx = ConfigContext()
config_ctx.vars = self.all_vars
config_ctx.app_root = self.app_root
config_ctx.config_json = self.wf_config_data
self.config_ctx = config_ctx
self.start_config(self.config_ctx)
# scan the wf_config again to create components
for i in range(self.num_passes):
self.config_ctx.pass_num = i + 1
self.json_scanner.scan(self)
# finalize configuration
self.finalize_config(self.config_ctx)
def configure(self):
try:
self._do_configure()
except ConfigError as e:
raise ConfigError("Config error in {}: {}".format(self.wf_config_file_name, secure_format_exception(e)))
except Exception as e:
print("Error processing config {}: {}".format(self.wf_config_file_name, secure_format_exception(e)))
raise e
def process_element(self, node: Node):
self.process_config_element(self.config_ctx, node)
def process_args(self, args: dict):
return args
def build_component(self, config_dict):
if not config_dict:
return None
if not isinstance(config_dict, dict):
raise ConfigError("component config must be dict but got {}.".format(type(config_dict)))
if config_dict.get("disabled") is True:
return None
class_args = config_dict.get("args", dict())
class_args = self.process_args(class_args)
class_path = self.get_class_path(config_dict)
# Handle the special case, if config pass in the class_attributes, use the user defined class attributes
# parameters directly.
if "class_attributes" in class_args:
class_args = class_args["class_attributes"]
return instantiate_class(class_path, class_args)
def get_class_path(self, config_dict):
if "path" in config_dict.keys():
path_spec = config_dict["path"]
if not isinstance(path_spec, str):
raise ConfigError("path spec must be str but got {}.".format(type(path_spec)))
if len(path_spec) <= 0:
raise ConfigError("path spec must not be empty")
class_path = format(path_spec)
parts = class_path.split(".")
if len(parts) < 2:
raise ConfigError("invalid class path '{}': missing module name".format(class_path))
else:
if "name" not in config_dict:
raise ConfigError("class name or path must be specified")
class_name = config_dict["name"]
if not isinstance(class_name, str):
raise ConfigError("class name must be str")
if len(class_name) <= 0:
raise ConfigError("class name must not be empty")
module_name = self.module_scanner.get_module_name(class_name)
if module_name is None:
raise ConfigError('Cannot find component class "{}"'.format(class_name))
class_path = module_name + ".{}".format(class_name)
return class_path
def is_configured_subclass(self, config_dict, base_class):
return issubclass(get_class(self.get_class_path(config_dict)), base_class)
def start_config(self, config_ctx: ConfigContext):
pass
def process_config_element(self, config_ctx: ConfigContext, node: Node):
pass
def finalize_config(self, config_ctx: ConfigContext):
pass
def get_component_refs(component):
"""Get component reference.
Args:
component: string for component
Returns: list of component and reference
"""
if "name" in component:
name = component["name"]
key = "name"
elif "path" in component:
name = component["path"]
key = "path"
else:
        raise ConfigError('component has no "name" or "path"')
parts = name.split("#")
component[key] = parts[0]
return parts
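# Usage sketch (illustrative; the component spec below is hypothetical):
#
#     component = {"path": "my_pkg.my_module.MyComponent#ref1", "args": {"x": 1}}
#     parts = get_component_refs(component)
#     # parts == ["my_pkg.my_module.MyComponent", "ref1"]; component["path"] is reset to parts[0]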
| NVFlare-main | nvflare/fuel/utils/wfconf.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
from abc import ABC, abstractmethod
from nvflare.fuel.common.excepts import ComponentNotAuthorized, ConfigError
from nvflare.fuel.utils.config_factory import ConfigFactory
from nvflare.security.logging import secure_format_exception, secure_log_traceback
class Node(object):
def __init__(self, element):
"""A JSON element with additional data.
Args:
element: element to create Node object for
"""
self.parent = None
self.element = element
self.level = 0
self.key = ""
self.position = 0
self.paths = []
self.processor = None
self.exit_cb = None # node_exit_cb_signature(node: Node)
self.props = {}
def path(self):
if len(self.paths) <= 0:
return ""
return ".".join(self.paths)
def parent_element(self):
if self.parent:
return self.parent.element
else:
return None
def _child_node(node: Node, key, pos, element) -> Node:
child = Node(element)
child.processor = node.processor
child.level = node.level + 1
child.position = pos
child.parent = node
child.paths = copy.copy(node.paths)
child.key = key
if pos > 0:
child.key = "#{}".format(pos)
child.paths.append(child.key)
return child
class JsonObjectProcessor(ABC):
"""JsonObjectProcessor is used to process JSON elements by the scan_json() function."""
@abstractmethod
def process_element(self, node: Node):
"""This method is called by the scan() function for each JSON element scanned.
Args:
node: the node representing the JSON element
"""
pass
class JsonScanner(object):
def __init__(self, json_data: dict, location=None):
"""Scanner for processing JSON data.
Args:
json_data: dictionary containing json data to scan
location: location to provide in error messages
"""
if not isinstance(json_data, dict):
raise ValueError("json_data must be dict")
self.location = location
self.data = json_data
self.logger = logging.getLogger("JsonScanner")
def _do_scan(self, node: Node):
try:
node.processor.process_element(node)
except ComponentNotAuthorized as e:
secure_log_traceback(self.logger)
if self.location:
raise ComponentNotAuthorized(
"Error processing {} in JSON element {}: path: {}, exception: {}".format(
self.location, node.element, node.path(), secure_format_exception(e)
)
)
else:
raise ComponentNotAuthorized(
"Error in JSON element: {}, path: {}, exception: {}".format(
node.element, node.path(), secure_format_exception(e)
)
)
except Exception as e:
secure_log_traceback(self.logger)
config = ConfigFactory.load_config(self.location[0])
elmt_str = config.to_str(node.element)
location = config.get_location()
raise ConfigError(self.get_process_err_msg(e, elmt_str, location, node))
element = node.element
if isinstance(element, dict):
# need to make a copy of the element dict in case the processor modifies the dict
iter_dict = copy.copy(element)
for k, v in iter_dict.items():
self._do_scan(_child_node(node, k, 0, v))
elif isinstance(element, list):
for i in range(len(element)):
self._do_scan(_child_node(node, node.key, i + 1, element[i]))
if node.exit_cb is not None:
try:
node.exit_cb(node)
except Exception as e:
                raise ConfigError(self.get_post_process_err_msg(e, node))
def get_process_err_msg(self, e, elmt, location, node):
        location_msg = f" processing '{location}'" if location else ""
        msg = "Error{} in element '{}': path: '{}', exception: '{}'".format(
location_msg, elmt, node.path(), secure_format_exception(e)
)
return msg
    def get_post_process_err_msg(self, e, node):
        location = f" {self.location} in" if self.location else ""
        msg = "Error post-processing{} JSON element: {}, exception: {}".format(
location, node.path(), secure_format_exception(e)
)
return msg
def scan(self, processor: JsonObjectProcessor):
if not isinstance(processor, JsonObjectProcessor):
raise ValueError(f"processor must be JsonObjectProcessor, but got type {type(processor)}")
node = Node(self.data)
node.processor = processor
self._do_scan(node)
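# Usage sketch (illustrative; the PrintKeys processor below is hypothetical):
#
#     class PrintKeys(JsonObjectProcessor):
#         def process_element(self, node: Node):
#             print(node.path(), type(node.element).__name__)
#
#     scanner = JsonScanner({"a": {"b": [1, 2]}}, location="example.json")
#     scanner.scan(PrintKeys())    # visits the root dict, "a", "b", and each list item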
| NVFlare-main | nvflare/fuel/utils/json_scanner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
def get_open_ports(number) -> list:
"""Gets the number of open ports from the system.
Args:
number: number of ports
Returns:
A list of open_ports
"""
ports = []
for i in range(number):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
if port > 0:
ports.append(port)
if len(ports) != number:
raise RuntimeError(
"Could not get enough open ports from the system. Needed {} but got {}.".format(number, len(ports))
)
return ports
| NVFlare-main | nvflare/fuel/utils/network_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.utils.pipe.pipe import Message
def message_to_file_name(msg: Message) -> str:
"""Produce the file name that encodes the meta info of the message
Args:
msg: message for which the file name is to be produced
    Returns: the encoded file name
"""
if msg.msg_type == Message.REQUEST:
return f"{msg.msg_type}.{msg.topic}.{msg.msg_id}"
elif msg.msg_type == Message.REPLY:
return f"{msg.msg_type}.{msg.topic}.{msg.req_id}.{msg.msg_id}"
else:
raise ValueError(f"invalid message type '{msg.msg_type}'")
def file_name_to_message(file_name: str) -> Message:
"""Decode the file name to produce the meta info of the message.
Args:
file_name: the file name to be decoded.
Returns: a Message object that contains meta info.
"""
parts = file_name.split(".")
num_parts = len(parts)
if num_parts < 3 or num_parts > 4:
raise ValueError(f"bad file name: {file_name} - wrong number of parts {num_parts}")
msg_type = parts[0]
topic = parts[1]
msg_id = parts[-1]
data = None
if msg_type == Message.REQUEST:
if num_parts != 3:
raise ValueError(f"bad file name for {msg_type}: {file_name} - must be 3 parts but got {num_parts}")
return Message.new_request(topic, data, msg_id)
elif msg_type == Message.REPLY:
if num_parts != 4:
raise ValueError(f"bad file name for {msg_type}: {file_name} - must be 4 parts but got {num_parts}")
req_id = parts[2]
return Message.new_reply(topic, data, req_id, msg_id)
else:
raise ValueError(f"bad file name: {file_name} - invalid msg type '{msg_type}'")
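# Example of the encoding (illustrative message IDs):
#
#     message_to_file_name(Message.new_request("train", None, msg_id="m1"))      -> "REQ.train.m1"
#     message_to_file_name(Message.new_reply("train", None, "m1", msg_id="m2"))  -> "REP.train.m1.m2"
#     file_name_to_message("REQ.train.m1").topic                                 -> "train"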
| NVFlare-main | nvflare/fuel/utils/pipe/file_name_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from nvflare.fuel.utils import fobs
from nvflare.fuel.utils.pipe.file_accessor import FileAccessor
class FobsFileAccessor(FileAccessor):
def read(self, file_path: str) -> Any:
"""Read the file as a binary file and decode it with FOBS.
Args:
file_path: path to the file to be read
        Returns: the data object decoded from the file
"""
with open(file_path, mode="rb") as file: # b is important -> binary
data = file.read()
return fobs.loads(data)
def write(self, data: Any, file_path) -> None:
"""Write the data as binary file.
Args:
data: data to be written
file_path: path of the file
Returns:
"""
data = fobs.dumps(data)
with open(file_path, "wb") as f:
f.write(data)
| NVFlare-main | nvflare/fuel/utils/pipe/fobs_file_accessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from queue import Empty, Full, Queue
from typing import Union
from nvflare.fuel.utils.constants import Mode
from nvflare.fuel.utils.pipe.pipe import Message, Pipe
class MemoryPipe(Pipe):
def __init__(self, x_queue: Queue, y_queue: Queue, mode: Mode = Mode.ACTIVE):
super().__init__(mode)
if mode == Mode.ACTIVE:
self.put_queue = x_queue
self.get_queue = y_queue
else:
self.put_queue = y_queue
self.get_queue = x_queue
def open(self, name: str):
pass
def clear(self):
pass
def close(self):
pass
def send(self, msg: Message, timeout=None) -> bool:
try:
            # queue.Queue ignores `timeout` when block=False, so block only when a timeout is given
            self.put_queue.put(msg, block=timeout is not None, timeout=timeout)
return True
except Full:
return False
def receive(self, timeout=None) -> Union[Message, None]:
try:
            # block (up to `timeout` seconds) only when a timeout is given
            return self.get_queue.get(block=timeout is not None, timeout=timeout)
except Empty:
return None
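# Usage sketch (illustrative): the two endpoints share the same pair of queues; the ACTIVE
# side writes to x_queue and reads from y_queue, and the PASSIVE side does the opposite.
#
#     x_q, y_q = Queue(), Queue()
#     active = MemoryPipe(x_q, y_q, Mode.ACTIVE)
#     passive = MemoryPipe(x_q, y_q, Mode.PASSIVE)
#     active.send(Message.new_request("hello", "hi"))
#     msg = passive.receive()    # -> the "hello" request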
| NVFlare-main | nvflare/fuel/utils/pipe/memory_pipe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/utils/pipe/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from typing import Optional
from nvflare.fuel.utils.pipe.pipe import Message, Pipe
from nvflare.fuel.utils.validation_utils import (
check_callable,
check_non_negative_number,
check_object_type,
check_positive_number,
)
class Topic(object):
ABORT = "_ABORT_"
END = "_END_"
HEARTBEAT = "_HEARTBEAT_"
PEER_GONE = "_PEER_GONE_"
class PipeHandler(object):
"""
PipeHandler monitors a pipe for messages from the peer. It reads the pipe periodically and puts received data
in a message queue in the order the data is received.
    If the received data indicates a peer status change (END, ABORT, GONE), the data is added to the message queue if the
    status_cb is not registered. If the status_cb is registered, the data is NOT added to the message queue. Instead,
the status_cb is called with the status data.
The PipeHandler should be used as follows:
- The app creates a pipe and then creates the PipeHandler object for the pipe;
- The app starts the PipeHandler. This step must be performed, or data in the pipe won't be read.
- The app should call handler.get_next() periodically to process the message in the queue. This method may return
None if there is no message in the queue. The app also must handle the status change event from the peer if it
does not set the status_cb. The status change event has the special topic value of Topic.END or Topic.ABORT.
- Optionally, the app can set a status_cb and handle the peer's status change immediately.
- Stop the handler when the app is finished.
NOTE: the handler uses a heartbeat mechanism to detect that the peer may be disconnected (gone). It sends
a heartbeat message to the peer based on configured interval. It also expects heartbeats from the peer. If peer's
heartbeat is not received for configured time, it will be treated as disconnected, and a GONE status is generated
for the app to handle.
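    A minimal usage sketch (illustrative only; `pipe` stands for any opened Pipe implementation):
        handler = PipeHandler(pipe)
        handler.start()
        while True:
            msg = handler.get_next()
            if msg is None:
                time.sleep(0.5)              # no message yet
            elif msg.topic in (Topic.END, Topic.ABORT, Topic.PEER_GONE):
                break                        # peer status change
            else:
                ...                          # process the request/reply
        handler.stop()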
"""
def __init__(self, pipe: Pipe, read_interval=0.1, heartbeat_interval=5.0, heartbeat_timeout=30.0):
"""
Constructor of the PipeHandler.
Args:
pipe: the pipe to be monitored
read_interval: how often to read from the pipe
heartbeat_interval: how often to send a heartbeat to the peer
heartbeat_timeout: how long to wait for a heartbeat from the peer before treating the peer as gone,
0 means DO NOT check for heartbeat.
"""
check_positive_number("read_interval", read_interval)
check_positive_number("heartbeat_interval", heartbeat_interval)
check_non_negative_number("heartbeat_timeout", heartbeat_timeout)
check_object_type("pipe", pipe, Pipe)
if 0 < heartbeat_timeout <= heartbeat_interval:
            raise ValueError(f"heartbeat_interval {heartbeat_interval} must be less than heartbeat_timeout {heartbeat_timeout}")
self.pipe = pipe
self.read_interval = read_interval
self.heartbeat_interval = heartbeat_interval
self.heartbeat_timeout = heartbeat_timeout
self.messages = []
self.reader = threading.Thread(target=self._read)
self.reader.daemon = True
self.asked_to_stop = False
self.lock = threading.Lock()
self.status_cb = None
self.cb_args = None
self.cb_kwargs = None
def set_status_cb(self, cb, *args, **kwargs):
"""Set CB for status handling. When the peer status is changed (ABORT, END, GONE), this CB is called.
If status CB is not set, the handler simply adds the status change event (topic) to the message queue.
The status_cb must conform to this signature:
cb(topic, data, *args, **kwargs)
        where the *args and **kwargs are the ones passed to this call.
The status_cb is called from the thread that reads from the pipe, hence it should be short-lived.
Do not put heavy processing logic in the status_cb.
Args:
cb:
*args:
**kwargs:
Returns: None
"""
check_callable("cb", cb)
self.status_cb = cb
self.cb_args = args
self.cb_kwargs = kwargs
def _send_to_pipe(self, msg: Message, timeout=None):
return self.pipe.send(msg, timeout)
def _receive_from_pipe(self):
return self.pipe.receive()
def start(self):
"""Starts the PipeHandler."""
if not self.reader.is_alive():
self.reader.start()
def stop(self, close_pipe=True):
"""Stops the handler and optionally close the monitored pipe.
Args:
close_pipe: whether to close the monitored pipe.
"""
self.asked_to_stop = True
pipe = self.pipe
self.pipe = None
if pipe and close_pipe:
pipe.close()
@staticmethod
def _make_event_message(topic: str, data):
return Message.new_request(topic, data)
def send_to_peer(self, msg: Message, timeout=None) -> bool:
"""Sends a message to peer.
Args:
msg: message to be sent
timeout: how long to wait for the peer to read the data.
If not specified, return False immediately.
Returns:
Whether the peer has read the data.
"""
if timeout is not None:
check_positive_number("timeout", timeout)
try:
return self._send_to_pipe(msg, timeout)
except BrokenPipeError:
self._add_message(self._make_event_message(Topic.PEER_GONE, "send failed"))
return False
def notify_end(self, data):
"""Notifies the peer that the communication is ended normally."""
self.send_to_peer(self._make_event_message(Topic.END, data))
def notify_abort(self, data):
"""Notifies the peer that the communication is aborted."""
self.send_to_peer(self._make_event_message(Topic.ABORT, data))
def _add_message(self, msg: Message):
if msg.topic in [Topic.END, Topic.ABORT, Topic.PEER_GONE]:
if self.status_cb is not None:
self.status_cb(msg, *self.cb_args, **self.cb_kwargs)
return
with self.lock:
self.messages.append(msg)
def _read(self):
try:
self._try_read()
except Exception as e:
self._add_message(self._make_event_message(Topic.PEER_GONE, f"error: {e}"))
def _try_read(self):
last_heartbeat_received_time = time.time()
last_heartbeat_sent_time = 0.0
while not self.asked_to_stop:
now = time.time()
msg = self._receive_from_pipe()
if msg:
last_heartbeat_received_time = now
if msg.topic != Topic.HEARTBEAT:
self._add_message(msg)
if msg.topic in [Topic.END, Topic.ABORT]:
break
else:
# is peer gone?
if self.heartbeat_timeout and now - last_heartbeat_received_time > self.heartbeat_timeout:
self._add_message(self._make_event_message(Topic.PEER_GONE, "missing heartbeat"))
break
# send heartbeat to the peer
if now - last_heartbeat_sent_time > self.heartbeat_interval:
self.send_to_peer(self._make_event_message(Topic.HEARTBEAT, ""))
last_heartbeat_sent_time = now
time.sleep(self.read_interval)
self.reader = None
def get_next(self) -> Optional[Message]:
"""Gets the next message from the message queue.
Returns:
A Message at the top of the message queue.
If the queue is empty, returns None.
"""
with self.lock:
if len(self.messages) > 0:
return self.messages.pop(0)
else:
return None
| NVFlare-main | nvflare/fuel/utils/pipe/pipe_handler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any
class FileAccessor(ABC):
"""Abstract base class for file accessors.
This class provides an interface for accessing and manipulating files. Subclasses
should implement the `write()` and `read()` methods to provide concrete
implementations for writing and reading data from files.
"""
@abstractmethod
def write(self, data: Any, file_path: str) -> None:
"""Writes the specified data to file(s) in the specified path.
Args:
data: The data to be written.
file_path: The path where the data is to be written.
"""
pass
@abstractmethod
def read(self, file_path: str) -> Any:
"""Reads the data located at the specified file_path.
Args:
file_path: location of the data to be read
Returns:
The data object read from the file.
"""
pass
| NVFlare-main | nvflare/fuel/utils/pipe/file_accessor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import time
from nvflare.fuel.utils.constants import Mode
from nvflare.fuel.utils.pipe.file_accessor import FileAccessor
from nvflare.fuel.utils.pipe.file_name_utils import file_name_to_message, message_to_file_name
from nvflare.fuel.utils.pipe.fobs_file_accessor import FobsFileAccessor
from nvflare.fuel.utils.pipe.pipe import Message, Pipe
from nvflare.fuel.utils.validation_utils import check_object_type, check_positive_number, check_str
class FilePipe(Pipe):
def __init__(self, mode: Mode, root_path: str, file_check_interval=0.1):
"""Implementation of communication through the file system.
Args:
root_path: root path
"""
super().__init__(mode=mode)
check_str("root_path", root_path)
check_positive_number("file_check_interval", file_check_interval)
if not os.path.exists(root_path):
# create the root path
os.makedirs(root_path)
self.root_path = root_path
self.file_check_interval = file_check_interval
self.pipe_path = None
self.x_path = None
self.y_path = None
self.t_path = None
if self.mode == Mode.ACTIVE:
self.get_f = self.x_get
self.put_f = self.x_put
elif self.mode == Mode.PASSIVE:
self.get_f = self.y_get
self.put_f = self.y_put
self.accessor = FobsFileAccessor() # default
def set_file_accessor(self, accessor: FileAccessor):
"""Sets the file accessor to be used by the pipe.
The default file accessor is FobsFileAccessor.
Args:
accessor: the accessor to be used.
"""
check_object_type("accessor", accessor, FileAccessor)
self.accessor = accessor
@staticmethod
def _make_dir(path):
try:
os.mkdir(path)
except FileExistsError:
# this is okay
pass
def open(self, name: str):
if not self.accessor:
raise RuntimeError("File accessor is not set. Make sure to set a FileAccessor before opening the pipe")
pipe_path = os.path.join(self.root_path, name)
if not os.path.exists(pipe_path):
self._make_dir(pipe_path)
x_path = os.path.join(pipe_path, "x")
if not os.path.exists(x_path):
self._make_dir(x_path)
y_path = os.path.join(pipe_path, "y")
if not os.path.exists(y_path):
self._make_dir(y_path)
t_path = os.path.join(pipe_path, "t")
if not os.path.exists(t_path):
self._make_dir(t_path)
self.pipe_path = pipe_path
self.x_path = x_path
self.y_path = y_path
self.t_path = t_path
@staticmethod
def _clear_dir(p: str):
file_list = os.listdir(p)
if file_list:
for f in file_list:
try:
os.remove(os.path.join(p, f))
except FileNotFoundError:
pass
def _create_file(self, to_dir: str, msg: Message) -> str:
file_name = message_to_file_name(msg)
file_path = os.path.join(to_dir, file_name)
tmp_path = os.path.join(self.t_path, file_name)
if not self.pipe_path:
raise BrokenPipeError("pipe broken")
try:
self.accessor.write(msg.data, tmp_path)
os.rename(tmp_path, file_path)
except FileNotFoundError:
raise BrokenPipeError("pipe closed")
return file_path
def clear(self):
self._clear_dir(self.x_path)
self._clear_dir(self.y_path)
self._clear_dir(self.t_path)
def _monitor_file(self, file_path: str, timeout) -> bool:
"""Monitors the file until it's read-and-removed by peer, or timed out.
If timeout, remove the file.
Args:
file_path: the path to be monitored
timeout: how long to wait for timeout
Returns:
whether the file has been read and removed
"""
if not timeout:
return False
start = time.time()
while True:
if not self.pipe_path:
raise BrokenPipeError("pipe broken")
if not os.path.exists(file_path):
return True
if time.time() - start > timeout:
# timed out - try to delete the file
try:
os.remove(file_path)
except FileNotFoundError:
# the file is read by the peer!
return True
return False
time.sleep(self.file_check_interval)
def x_put(self, msg: Message, timeout) -> bool:
"""
Args:
msg:
timeout:
Returns: whether file is read by the peer
"""
# put it in Y's queue
file_path = self._create_file(self.y_path, msg)
return self._monitor_file(file_path, timeout)
def _read_file(self, file_path: str):
# since reading file may take time and another process may try to delete the file
# we move the file to a temp name before reading it
file_name = os.path.basename(file_path)
msg = file_name_to_message(file_name)
tmp_path = os.path.join(self.t_path, file_name)
try:
create_time = os.path.getctime(file_path)
os.rename(file_path, tmp_path)
data = self.accessor.read(tmp_path)
if os.path.isfile(tmp_path):
os.remove(tmp_path) # remove this file
elif os.path.isdir(tmp_path):
shutil.rmtree(tmp_path)
else:
                raise RuntimeError(f"cannot remove unsupported path: '{tmp_path}'")
msg.data = data
msg.sent_time = create_time
msg.received_time = time.time()
return msg
except FileNotFoundError:
raise BrokenPipeError("pipe closed")
def _get_next(self, from_dir: str):
try:
files = os.listdir(from_dir)
except Exception:
raise BrokenPipeError(f"error reading from {from_dir}")
if files:
files = [os.path.join(from_dir, f) for f in files]
files.sort(key=os.path.getmtime, reverse=False)
file_path = files[0]
return self._read_file(file_path)
else:
return None
def _get_from_dir(self, from_dir: str, timeout=None):
if not timeout or timeout <= 0:
return self._get_next(from_dir)
start = time.time()
while True:
msg = self._get_next(from_dir)
if msg:
return msg
if time.time() - start >= timeout:
break
time.sleep(self.file_check_interval)
return None
def x_get(self, timeout=None):
# read from X's queue
return self._get_from_dir(self.x_path, timeout)
def y_put(self, msg: Message, timeout) -> bool:
# put it in X's queue
file_path = self._create_file(self.x_path, msg)
return self._monitor_file(file_path, timeout)
def y_get(self, timeout=None):
# read from Y's queue
return self._get_from_dir(self.y_path, timeout)
def send(self, msg: Message, timeout=None) -> bool:
"""
Args:
msg:
timeout:
Returns: whether the message is read by peer (if timeout is specified)
"""
if not self.pipe_path:
raise BrokenPipeError("pipe is not open")
return self.put_f(msg, timeout)
def receive(self, timeout=None):
if not self.pipe_path:
raise BrokenPipeError("pipe is not open")
return self.get_f(timeout)
def close(self):
pipe_path = self.pipe_path
self.pipe_path = None
if pipe_path and os.path.exists(pipe_path):
try:
shutil.rmtree(pipe_path)
except Exception:
pass
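# Usage sketch (illustrative; the root path and pipe name are hypothetical). Both endpoints
# must use the same root_path and open() the same pipe name; messages are exchanged as files
# under <root>/<name>/x and <root>/<name>/y, with <root>/<name>/t used for temporary files.
#
#     pipe = FilePipe(Mode.ACTIVE, root_path="/tmp/file_pipe_demo")
#     pipe.open("task_pipe")
#     pipe.send(Message.new_request("status", {"step": 1}))
#     reply = pipe.receive(timeout=5.0)   # None unless a PASSIVE peer has replied
#     pipe.close()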
| NVFlare-main | nvflare/fuel/utils/pipe/file_pipe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import uuid
from abc import ABC, abstractmethod
from typing import Any, Union
from nvflare.fuel.utils.constants import Mode
from nvflare.fuel.utils.validation_utils import check_str
class Message:
REQUEST = "REQ"
REPLY = "REP"
def __init__(self, msg_type: str, topic: str, data: Any, msg_id=None, req_id=None):
check_str("msg_type", msg_type)
if msg_type not in [Message.REPLY, Message.REQUEST]:
            raise ValueError(f"invalid msg_type '{msg_type}': must be one of {[Message.REPLY, Message.REQUEST]}")
self.msg_type = msg_type
check_str("topic", topic)
if not topic:
raise ValueError("topic must not be empty")
if not re.match("[a-zA-Z0-9_]+$", topic):
raise ValueError("topic contains invalid char - only alphanumeric and underscore are allowed")
self.topic = topic
if not msg_id:
msg_id = str(uuid.uuid4())
self.data = data
self.msg_id = msg_id
self.req_id = req_id
self.sent_time = None
self.received_time = None
@staticmethod
def new_request(topic: str, data: Any, msg_id=None):
return Message(Message.REQUEST, topic, data, msg_id)
@staticmethod
def new_reply(topic: str, data: Any, req_msg_id, msg_id=None):
return Message(Message.REPLY, topic, data, msg_id, req_id=req_msg_id)
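# Example (illustrative): a reply is correlated to its request via req_id.
#
#     req = Message.new_request("train", {"round": 1})
#     rep = Message.new_reply("train", {"accuracy": 0.9}, req_msg_id=req.msg_id)
#     assert rep.req_id == req.msg_id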
class Pipe(ABC):
def __init__(self, mode: Mode):
"""Creates the pipe.
Args:
mode (Mode): Mode of the endpoint. A pipe has two endpoints.
An endpoint can be either the one that initiates communication or the one listening.
"""
if mode != Mode.ACTIVE and mode != Mode.PASSIVE:
raise ValueError(f"mode must be '{Mode.ACTIVE}' or '{Mode.PASSIVE}' but got {mode}")
self.mode = mode
@abstractmethod
def open(self, name: str):
"""Open the pipe
Args:
name: name of the pipe
"""
pass
@abstractmethod
def clear(self):
"""Clear the pipe"""
pass
@abstractmethod
def send(self, msg: Message, timeout=None) -> bool:
"""Send the specified message to the peer.
Args:
msg: the message to be sent
timeout: if specified, number of secs to wait for the peer to read the message.
Returns: whether the message is read by the peer.
If timeout is not specified, always return False.
"""
pass
@abstractmethod
def receive(self, timeout=None) -> Union[None, Message]:
"""Try to receive message from peer.
Args:
timeout: how long (number of seconds) to try
Returns: the message received; or None if no message
"""
pass
@abstractmethod
def close(self):
"""Close the pipe
Returns: None
"""
pass
| NVFlare-main | nvflare/fuel/utils/pipe/pipe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from enum import Enum
from typing import Any, Dict, Union
TEN_MEGA = 10 * 1024 * 1024
class DatumType(Enum):
BLOB = 1
FILE = 2
class Datum:
"""Datum is a class that holds information for externalized data"""
def __init__(self, datum_type: DatumType, value: Any):
self.datum_id = str(uuid.uuid4())
self.datum_type = datum_type
self.value = value
@staticmethod
def blob_datum(blob: Union[bytes, bytearray, memoryview]):
"""Factory method to create a BLOB datum"""
return Datum(DatumType.BLOB, blob)
@staticmethod
    def file_datum(path: str):
        """Factory method to create a file datum"""
return Datum(DatumType.FILE, path)
class DatumRef:
"""A reference to externalized datum. If unwrap is true, the reference will be removed and replaced with the
content of the datum"""
def __init__(self, datum_id: str, unwrap=False):
self.datum_id = datum_id
self.unwrap = unwrap
class DatumManager:
def __init__(self, threshold=TEN_MEGA):
self.threshold = threshold
self.datums: Dict[str, Datum] = {}
def get_datums(self):
return self.datums
def externalize(self, data: Any):
if not isinstance(data, (bytes, bytearray, memoryview)):
return data
if len(data) >= self.threshold:
# turn it to Datum
d = Datum.blob_datum(data)
self.datums[d.datum_id] = d
return DatumRef(d.datum_id, True)
else:
return data
def internalize(self, data: Any) -> Any:
if not isinstance(data, DatumRef):
return data
d = self.datums.get(data.datum_id)
if not d:
raise ValueError(f"can't find datum for {data.datum_id}")
if data.unwrap:
return d.value
else:
return d
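# Usage sketch (illustrative): payloads at or above the threshold are replaced by a DatumRef
# and carried separately as Datum objects; smaller payloads pass through unchanged.
#
#     mgr = DatumManager(threshold=1024)
#     ref = mgr.externalize(b"x" * 4096)       # -> DatumRef (payload >= threshold)
#     small = mgr.externalize(b"x")            # -> returned as-is
#     restored = mgr.internalize(ref)          # -> original bytes (ref.unwrap is True)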
| NVFlare-main | nvflare/fuel/utils/fobs/datum.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvflare.fuel.utils.fobs.decomposer import Decomposer
from nvflare.fuel.utils.fobs.fobs import (
auto_register_enum_types,
deserialize,
deserialize_stream,
num_decomposers,
register,
register_data_classes,
register_enum_types,
register_folder,
reset,
serialize,
serialize_stream,
)
# aliases for compatibility with pickle/json
load = deserialize_stream
loads = deserialize
dump = serialize_stream
dumps = serialize
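# Usage sketch (illustrative): the aliases mirror the pickle/json style API.
#
#     buf = dumps({"a": 1})        # same as serialize(...)
#     obj = loads(buf)             # same as deserialize(...)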
| NVFlare-main | nvflare/fuel/utils/fobs/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import logging
import os
from enum import Enum
from os.path import dirname, join
from typing import Any, BinaryIO, Dict, Type, TypeVar, Union
import msgpack
from nvflare.fuel.utils.fobs.datum import DatumManager
from nvflare.fuel.utils.fobs.decomposer import DataClassDecomposer, Decomposer, EnumTypeDecomposer
__all__ = [
"register",
"register_data_classes",
"register_enum_types",
"auto_register_enum_types",
"register_folder",
"num_decomposers",
"serialize",
"serialize_stream",
"deserialize",
"deserialize_stream",
"reset",
]
from nvflare.security.logging import secure_format_exception
FOBS_TYPE = "__fobs_type__"
FOBS_DATA = "__fobs_data__"
MAX_CONTENT_LEN = 128
MSGPACK_TYPES = (None, bool, int, float, str, bytes, bytearray, memoryview, list, dict)
T = TypeVar("T")
log = logging.getLogger(__name__)
_decomposers: Dict[str, Decomposer] = {}
_decomposers_registered = False
_enum_auto_register = True
class Packer:
def __init__(self, manager: DatumManager):
self.manager = manager
def pack(self, obj: Any) -> dict:
if type(obj) in MSGPACK_TYPES:
return obj
type_name = _get_type_name(obj.__class__)
if type_name not in _decomposers:
if _enum_auto_register and isinstance(obj, Enum):
register_enum_types(type(obj))
else:
return obj
decomposed = _decomposers[type_name].decompose(obj, self.manager)
return {FOBS_TYPE: type_name, FOBS_DATA: decomposed}
def unpack(self, obj: Any) -> Any:
if type(obj) is not dict or FOBS_TYPE not in obj:
return obj
type_name = obj[FOBS_TYPE]
if type_name not in _decomposers:
error = True
if _enum_auto_register:
cls = self._load_class(type_name)
if issubclass(cls, Enum):
register_enum_types(cls)
error = False
if error:
raise TypeError(f"Unknown type {type_name}, caused by mismatching decomposers")
decomposer = _decomposers[type_name]
return decomposer.recompose(obj[FOBS_DATA], self.manager)
@staticmethod
def _load_class(type_name: str):
parts = type_name.split(".")
if len(parts) == 1:
parts = ["builtins", type_name]
mod = __import__(parts[0])
for comp in parts[1:]:
mod = getattr(mod, comp)
return mod
def _get_type_name(cls: Type) -> str:
module = cls.__module__
if module == "builtins":
return cls.__qualname__
return module + "." + cls.__qualname__
def register(decomposer: Union[Decomposer, Type[Decomposer]]) -> None:
"""Register a decomposer. It does nothing if decomposer is already registered for the type
Args:
decomposer: The decomposer type or instance
"""
global _decomposers
if inspect.isclass(decomposer):
instance = decomposer()
else:
instance = decomposer
name = _get_type_name(instance.supported_type())
if name in _decomposers:
return
if not isinstance(instance, Decomposer):
log.error(f"Class {instance.__class__} is not a decomposer")
return
_decomposers[name] = instance
def register_data_classes(*data_classes: Type[T]) -> None:
"""Register generic decomposers for data classes
Args:
data_classes: The classes to be registered
"""
for data_class in data_classes:
decomposer = DataClassDecomposer(data_class)
register(decomposer)
def register_enum_types(*enum_types: Type[Enum]) -> None:
"""Register generic decomposers for enum classes
Args:
enum_types: The enum classes to be registered
"""
for enum_type in enum_types:
if not issubclass(enum_type, Enum):
raise TypeError(f"Can't register class {enum_type}, which is not a subclass of Enum")
decomposer = EnumTypeDecomposer(enum_type)
register(decomposer)
def auto_register_enum_types(enabled=True) -> None:
"""Enable or disable auto registering of enum classes
Args:
enabled: Auto-registering of enum classes is enabled if True
"""
global _enum_auto_register
_enum_auto_register = enabled
def register_folder(folder: str, package: str):
"""Scan the folder and register all decomposers found.
Args:
folder: The folder to scan
package: The package to import the decomposers from
"""
for module in os.listdir(folder):
if module != "__init__.py" and module[-3:] == ".py":
decomposers = package + "." + module[:-3]
imported = importlib.import_module(decomposers, __package__)
for _, cls_obj in inspect.getmembers(imported, inspect.isclass):
spec = inspect.getfullargspec(cls_obj.__init__)
                # classes that are abstract or take extra args in __init__ can't be auto-registered
if issubclass(cls_obj, Decomposer) and not inspect.isabstract(cls_obj) and len(spec.args) == 1:
register(cls_obj)
def _register_decomposers():
global _decomposers_registered
if _decomposers_registered:
return
register_folder(join(dirname(__file__), "decomposers"), ".decomposers")
_decomposers_registered = True
def num_decomposers() -> int:
"""Returns the number of decomposers registered.
Returns:
The number of decomposers
"""
return len(_decomposers)
def serialize(obj: Any, manager: DatumManager = None, **kwargs) -> bytes:
"""Serialize object into bytes.
Args:
obj: Object to be serialized
manager: Datum manager used to externalize datum
kwargs: Arguments passed to msgpack.packb
Returns:
Serialized data
"""
_register_decomposers()
packer = Packer(manager)
try:
return msgpack.packb(obj, default=packer.pack, strict_types=True, **kwargs)
except ValueError as ex:
content = str(obj)
if len(content) > MAX_CONTENT_LEN:
content = content[:MAX_CONTENT_LEN] + " ..."
raise ValueError(f"Object {type(obj)} is not serializable: {secure_format_exception(ex)}: {content}")
def serialize_stream(obj: Any, stream: BinaryIO, manager: DatumManager = None, **kwargs):
"""Serialize object and write the data to a stream.
Args:
obj: Object to be serialized
stream: Stream to write the result to
manager: Datum manager to externalize datum
kwargs: Arguments passed to msgpack.packb
"""
data = serialize(obj, manager, **kwargs)
stream.write(data)
def deserialize(data: bytes, manager: DatumManager = None, **kwargs) -> Any:
"""Deserialize bytes into an object.
Args:
data: Serialized data
manager: Datum manager to internalize datum
kwargs: Arguments passed to msgpack.unpackb
Returns:
Deserialized object
"""
_register_decomposers()
packer = Packer(manager)
return msgpack.unpackb(data, strict_map_key=False, object_hook=packer.unpack, **kwargs)
def deserialize_stream(stream: BinaryIO, manager: DatumManager = None, **kwargs) -> Any:
"""Deserialize bytes from stream into an object.
Args:
stream: Stream to write serialized data to
manager: Datum manager to internalize datum
kwargs: Arguments passed to msgpack.unpackb
Returns:
Deserialized object
"""
data = stream.read()
return deserialize(data, manager, **kwargs)
def reset():
"""Reset FOBS to initial state. Used for unit test"""
global _decomposers, _decomposers_registered
_decomposers.clear()
_decomposers_registered = False
| NVFlare-main | nvflare/fuel/utils/fobs/fobs.py |
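A hedged sketch of registering decomposers for user-defined types and round-tripping them through serialize/deserialize; the Point and Color classes below are illustrative, not part of NVFlare.

from enum import Enum
from nvflare.fuel.utils import fobs

class Color(Enum):
    RED = 1
    BLUE = 2

class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

fobs.register_data_classes(Point)   # generic DataClassDecomposer for Point
fobs.register_enum_types(Color)     # explicit registration (enums are also auto-registered by default)

blob = fobs.serialize({"p": Point(1, 2), "c": Color.RED})
restored = fobs.deserialize(blob)
assert restored["p"].x == 1 and restored["c"] is Color.RED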
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Type, TypeVar
from nvflare.fuel.utils.fobs.datum import DatumManager
# Generic type supported by the decomposer.
T = TypeVar("T")
class Decomposer(ABC):
"""Abstract base class for decomposers.
Every class to be serialized by FOBS must register a decomposer which is
a concrete subclass of this class.
"""
@abstractmethod
def supported_type(self) -> Type[T]:
"""Returns the type/class supported by this decomposer.
Returns:
The class (not instance) of supported type
"""
pass
@abstractmethod
def decompose(self, target: T, manager: DatumManager = None) -> Any:
"""Decompose the target into types supported by msgpack or classes with decomposers registered.
Msgpack supports primitives, bytes, memoryview, lists, dicts.
Args:
target: The instance to be serialized
manager: Datum manager to store externalized datum
Returns:
The decomposed serializable objects
"""
pass
@abstractmethod
def recompose(self, data: Any, manager: DatumManager = None) -> T:
"""Reconstruct the object from decomposed components.
Args:
data: The decomposed component
manager: Datum manager to internalize datum
Returns:
The reconstructed object
"""
pass
class DictDecomposer(Decomposer):
"""Generic decomposer for subclasses of dict like Shareable"""
def __init__(self, dict_type: Type[dict]):
self.dict_type = dict_type
def supported_type(self):
return self.dict_type
def decompose(self, target: dict, manager: DatumManager = None) -> Any:
result = target.copy()
if manager:
result = self._externalize(result, manager)
return result
def recompose(self, data: dict, manager: DatumManager = None) -> dict:
if manager:
data = self._internalize(data, manager)
obj = self.dict_type()
for k, v in data.items():
obj[k] = v
return obj
def _externalize(self, target: dict, manager: DatumManager) -> Any:
if not manager:
return target
if isinstance(target, dict):
for k, v in target.items():
target[k] = self._externalize(v, manager)
elif isinstance(target, list):
for i, v in enumerate(target):
target[i] = self._externalize(v, manager)
else:
# leaf node
target = manager.externalize(target)
return target
def _internalize(self, target, manager: DatumManager) -> Any:
if not manager:
return target
if isinstance(target, dict):
for k, v in target.items():
target[k] = self._internalize(v, manager)
elif isinstance(target, list):
for i, v in enumerate(target):
target[i] = self._internalize(v, manager)
else:
target = manager.internalize(target)
return target
class DataClassDecomposer(Decomposer):
"""Generic decomposers for data classes, which must meet following requirements:
1. All class members must be serializable. The type of member must be one of the
types supported by MessagePack or a decomposer is registered for the type.
2. The __new__ method only takes one argument which is the class type.
3. The __init__ method has no side effects. It can only change the states of the
object. The side effects include creating files, initializing loggers, modifying
global variables.
"""
def __init__(self, data_type: Type[T]):
self.data_type = data_type
def supported_type(self) -> Type[T]:
return self.data_type
def decompose(self, target: T, manager: DatumManager = None) -> Any:
return vars(target)
def recompose(self, data: dict, manager: DatumManager = None) -> T:
instance = self.data_type.__new__(self.data_type)
instance.__dict__.update(data)
return instance
class EnumTypeDecomposer(Decomposer):
"""Generic decomposers for enum types."""
def __init__(self, data_type: Type[Enum]):
if not issubclass(data_type, Enum):
raise TypeError(f"{data_type} is not an enum")
self.data_type = data_type
def supported_type(self) -> Type[Enum]:
return self.data_type
def decompose(self, target: Enum, manager: DatumManager = None) -> Any:
return target.name
def recompose(self, data: Any, manager: DatumManager = None) -> Enum:
return self.data_type[data]
| NVFlare-main | nvflare/fuel/utils/fobs/decomposer.py |
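A minimal custom Decomposer sketch (ComplexDecomposer is an illustrative name, not an NVFlare class): it breaks a complex number into a [real, imag] list and rebuilds it on recompose.

from typing import Any, Type
from nvflare.fuel.utils import fobs
from nvflare.fuel.utils.fobs.datum import DatumManager
from nvflare.fuel.utils.fobs.decomposer import Decomposer

class ComplexDecomposer(Decomposer):
    def supported_type(self) -> Type[complex]:
        return complex

    def decompose(self, target: complex, manager: DatumManager = None) -> Any:
        return [target.real, target.imag]

    def recompose(self, data: Any, manager: DatumManager = None) -> complex:
        return complex(data[0], data[1])

fobs.register(ComplexDecomposer)                 # a class or an instance are both accepted
assert fobs.loads(fobs.dumps(3 + 4j)) == 3 + 4j  # round-trip through the new decomposer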
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decomposers for Python builtin objects."""
from collections import OrderedDict
from datetime import datetime
from typing import Any
from nvflare.fuel.utils.fobs.datum import DatumManager
from nvflare.fuel.utils.fobs.decomposer import Decomposer
class TupleDecomposer(Decomposer):
def supported_type(self):
return tuple
def decompose(self, target: tuple, manager: DatumManager = None) -> Any:
return list(target)
def recompose(self, data: Any, manager: DatumManager = None) -> tuple:
return tuple(data)
class SetDecomposer(Decomposer):
def supported_type(self):
return set
def decompose(self, target: set, manager: DatumManager = None) -> Any:
return list(target)
def recompose(self, data: Any, manager: DatumManager = None) -> set:
return set(data)
class OrderedDictDecomposer(Decomposer):
def supported_type(self):
return OrderedDict
def decompose(self, target: OrderedDict, manager: DatumManager = None) -> Any:
return list(target.items())
def recompose(self, data: Any, manager: DatumManager = None) -> OrderedDict:
return OrderedDict(data)
class DatetimeDecomposer(Decomposer):
def supported_type(self):
return datetime
def decompose(self, target: datetime, manager: DatumManager = None) -> Any:
return target.isoformat()
def recompose(self, data: Any, manager: DatumManager = None) -> datetime:
return datetime.fromisoformat(data)
| NVFlare-main | nvflare/fuel/utils/fobs/decomposers/core_decomposers.py |
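Sketch of the effect of the builtin-type decomposers above: they are auto-registered on first use, so tuples, sets, OrderedDicts and datetimes round-trip through fobs without extra setup.

from collections import OrderedDict
from datetime import datetime
from nvflare.fuel.utils import fobs

original = {
    "pair": (1, 2),
    "tags": {"a", "b"},
    "ordered": OrderedDict([("x", 1), ("y", 2)]),
    "when": datetime(2023, 1, 2, 3, 4, 5),
}
restored = fobs.loads(fobs.dumps(original))
assert restored["pair"] == (1, 2) and restored["when"].year == 2023
assert restored["tags"] == {"a", "b"}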
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/utils/fobs/decomposers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ProcessExitCode:
EXCEPTION = 101
UNSAFE_COMPONENT = 102
CONFIG_ERROR = 103
PROCESS_EXIT_REASON = {
ProcessExitCode.UNSAFE_COMPONENT: "unsafe component",
ProcessExitCode.CONFIG_ERROR: "config error",
ProcessExitCode.EXCEPTION: "exception",
}
| NVFlare-main | nvflare/fuel/common/exit_codes.py |
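A small, hedged sketch of translating a child-process return code into a readable reason using the table above; describe_exit is an illustrative helper, not an NVFlare API.

from nvflare.fuel.common.exit_codes import PROCESS_EXIT_REASON, ProcessExitCode

def describe_exit(returncode: int) -> str:
    # fall back to "unknown" for codes not listed in PROCESS_EXIT_REASON
    reason = PROCESS_EXIT_REASON.get(returncode, "unknown")
    return f"process exited with code {returncode} ({reason})"

print(describe_exit(ProcessExitCode.CONFIG_ERROR))  # process exited with code 103 (config error)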
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/common/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommunicationMetaData(object):
COMMAND = "command"
TASK_NAME = "task_name"
FL_CTX = "fl_ctx"
EVENT_TYPE = "event_type"
HANDLE_CONN = "handle_conn"
EXE_CONN = "exe_conn"
COMPONENTS = "MPExecutor_components"
HANDLERS = "MPExecutor_handlers"
LOCAL_EXECUTOR = "local_executor"
RANK_NUMBER = "rank_number"
SHAREABLE = "shareable"
RELAYER = "relayer"
RANK_PROCESS_STARTED = "rank_process_started"
PARENT_PASSWORD = "parent process secret password"
CHILD_PASSWORD = "client process secret password"
class CommunicateData(object):
EXECUTE = "execute"
HANDLE_EVENT = "handle_event"
CLOSE = "close"
SUB_WORKER_PROCESS = "sub_worker_process"
MULTI_PROCESS_EXECUTOR = "multi_process_executor"
class MultiProcessCommandNames:
INITIALIZE = "initialize"
TASK_EXECUTION = "task_execution"
FIRE_EVENT = "fire_event"
EXECUTE_RESULT = "execute_result"
CLOSE = "close"
| NVFlare-main | nvflare/fuel/common/multi_process_executor_constants.py |
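Purely illustrative sketch of how the constants above might be used to label an inter-process message; the actual wire format used by MultiProcessExecutor is not shown in this file, so the dict shape below is an assumption.

from nvflare.fuel.common.multi_process_executor_constants import (
    CommunicationMetaData,
    MultiProcessCommandNames,
)

# hypothetical request payload keyed by the shared constants
request = {
    CommunicationMetaData.COMMAND: MultiProcessCommandNames.TASK_EXECUTION,
    CommunicationMetaData.TASK_NAME: "train",
    CommunicationMetaData.RANK_NUMBER: 0,
}
print(request)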
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
class SimpleContext(object):
def __init__(self):
"""A simple context containing a props dictionary of key value pairs and convenience methods."""
self.props = {}
def set_prop(self, key, value):
self.props[key] = value
def set_props(self, props: dict):
if props:
self.props.update(props)
def len(self):
return len(self.props)
def get_prop(self, key, default=None):
return self.props.get(key, default)
def clear_props(self):
self.props = {}
class BaseContext(SimpleContext):
def __init__(self):
"""A SimpleContext with threading locks.
This context class enables thread-safe set/get on top of SimpleContext."""
SimpleContext.__init__(self)
self._update_lock = threading.Lock()
def set_prop(self, key, value):
with self._update_lock:
SimpleContext.set_prop(self, key, value)
def set_props(self, props: dict):
if not props:
return
with self._update_lock:
SimpleContext.set_props(self, props)
def len(self):
with self._update_lock:
return SimpleContext.len(self)
def get_prop(self, key, default=None):
with self._update_lock:
return SimpleContext.get_prop(self, key, default)
def clear_props(self):
with self._update_lock:
SimpleContext.clear_props(self)
| NVFlare-main | nvflare/fuel/common/ctx.py |
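Sketch showing why BaseContext exists: its set/get methods take an internal lock, so multiple threads can update the same context without extra synchronization.

import threading
from nvflare.fuel.common.ctx import BaseContext

ctx = BaseContext()

def worker(i: int):
    ctx.set_prop(f"worker_{i}", i * i)  # thread-safe set

threads = [threading.Thread(target=worker, args=(i,)) for i in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert ctx.len() == 4 and ctx.get_prop("worker_2") == 4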
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ConfigError(Exception):
"""Raised when configuration parsing error happens."""
pass
class ComponentNotAuthorized(Exception):
"""Raised when component building is not authorized"""
pass
| NVFlare-main | nvflare/fuel/common/excepts.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/fuel/flare_api/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import time
from abc import ABC, abstractmethod
from typing import List, Optional
class MonitorReturnCode(int, enum.Enum):
JOB_FINISHED = 0
TIMEOUT = 1
ENDED_BY_CB = 2
class NoConnection(Exception):
pass
class SessionClosed(Exception):
pass
class InvalidArgumentError(Exception):
pass
class InvalidJobDefinition(Exception):
pass
class JobNotFound(Exception):
pass
class JobNotRunning(Exception):
pass
class JobNotDone(Exception):
pass
class InternalError(Exception):
pass
class AuthenticationError(Exception):
pass
class AuthorizationError(Exception):
pass
class NoClientsAvailable(Exception):
pass
class ClientsStillRunning(Exception):
pass
class InvalidTarget(Exception):
pass
class TargetType:
ALL = "all"
SERVER = "server"
CLIENT = "client"
class ServerInfo:
def __init__(self, status, start_time):
self.status = status
self.start_time = start_time
def __str__(self) -> str:
return f"status: {self.status}, start_time: {time.asctime(time.localtime(self.start_time))}"
class ClientInfo:
def __init__(self, name: str, last_connect_time):
self.name = name
self.last_connect_time = last_connect_time
def __str__(self) -> str:
return f"{self.name}(last_connect_time: {time.asctime(time.localtime(self.last_connect_time))})"
class JobInfo:
def __init__(self, job_id: str, app_name: str):
self.job_id = job_id
self.app_name = app_name
def __str__(self) -> str:
return f"JobInfo:\n job_id: {self.job_id}\n app_name: {self.app_name}"
class SystemInfo:
def __init__(self, server_info: ServerInfo, client_info: List[ClientInfo], job_info: List[JobInfo]):
self.server_info = server_info
self.client_info = client_info
self.job_info = job_info
def __str__(self) -> str:
client_info_str = "\n".join(map(str, self.client_info))
job_info_str = "\n".join(map(str, self.job_info))
return (
f"SystemInfo\nserver_info:\n{self.server_info}\nclient_info:\n{client_info_str}\njob_info:\n{job_info_str}"
)
class SessionSpec(ABC):
@abstractmethod
def submit_job(self, job_definition_path: str) -> str:
"""Submit a predefined job to the NVFLARE system
Args:
job_definition_path: path to the folder that defines a NVFLARE job
Returns: the job id if accepted by the system
If the submission fails, exception will be raised:
"""
pass
@abstractmethod
def clone_job(self, job_id: str) -> str:
"""Create a new job by cloning a specified job
Args:
job_id: job to be cloned
Returns: ID of the new job
"""
pass
@abstractmethod
def get_job_meta(self, job_id: str) -> dict:
"""Get the meta info of the specified job
Args:
job_id: ID of the job
Returns: a dict of job metadata
"""
pass
@abstractmethod
def list_jobs(self, detailed: bool = False, all: bool = False) -> List[dict]:
"""Get the job info from the server
Args:
detailed: True to get the detailed information for each job, False by default
all: True to get jobs submitted by all users (default is to only list jobs submitted by the same user)
Returns: a list of job metadata
"""
pass
@abstractmethod
def download_job_result(self, job_id: str) -> str:
"""
Download result of the job
Args:
job_id: ID of the job
Returns: folder path to the location of the job result
If the job size is smaller than the maximum size set on the server, the job will download to the download_dir
set in Session through the admin config, and the path to the downloaded result will be returned. If the size
of the job is larger than the maximum size, the location to download the job will be returned.
"""
pass
@abstractmethod
def abort_job(self, job_id: str):
"""Abort the specified job
Args:
job_id: job to be aborted
Returns: None
If the job is already done, no effect;
If job is not started yet, it will be cancelled and won't be scheduled.
If the job is being executed, it will be aborted
"""
pass
@abstractmethod
def delete_job(self, job_id: str):
"""Delete the specified job completely from the system
Args:
job_id: job to be deleted
Returns: None
If the job is being executed, the job will be stopped first.
Everything of the job will be deleted from the job store, as well as workspaces on
the FL server and clients.
"""
pass
@abstractmethod
def get_system_info(self) -> SystemInfo:
"""Get general info of the FLARE system"""
pass
@abstractmethod
def get_client_job_status(self, client_names: List[str] = None) -> List[dict]:
"""Get job status info of specified FL clients
Args:
client_names: names of the clients to get status info
Returns: A list of jobs running on the clients. Each job is described by a dict of: id, app name and status.
If there are multiple jobs running on one client, the list contains one entry for each job for that client.
If no FL clients are connected or the server failed to communicate to them, this method returns None.
"""
pass
@abstractmethod
def restart(self, target_type: str, client_names: Optional[List[str]] = None) -> dict:
"""
Restart specified system target(s)
Args:
target_type: what system target (server, client, or all) to restart
client_names: clients to be restarted if target_type is client. If not specified, all clients.
Returns: a dict that contains detailed info about the restart request:
status - the overall status of the result.
server_status - whether the server is restarted successfully - only if target_type is "all" or "server".
client_status - a dict (keyed on client name) that specifies status of each client - only if target_type
is "all" or "client".
"""
pass
@abstractmethod
def shutdown(self, target_type: TargetType, client_names: Optional[List[str]] = None):
"""Shut down specified system target(s)
Args:
target_type: what system target (server, client, or all) to shut down
client_names: clients to be shut down if target_type is client. If not specified, all clients.
Returns: None
"""
pass
@abstractmethod
def set_timeout(self, value: float):
"""
Set a session-specific command timeout. This is the amount of time the server will wait for responses
after sending commands to FL clients.
Note that this value is only effective for the current API session.
Args:
value: a positive float number
Returns: None
"""
pass
@abstractmethod
def unset_timeout(self):
"""
Unset the session-specific command timeout. Once unset, the FL Admin Server's default will be used.
Returns: None
"""
pass
@abstractmethod
def list_sp(self) -> dict:
"""List available service providers
Returns: a dict that contains information about the primary SP and others
"""
pass
@abstractmethod
def get_active_sp(self) -> dict:
"""Get the current active service provider (SP).
Returns: a dict that describes the current active SP. If no SP is available currently, the 'name' attribute of
the result is empty.
"""
pass
@abstractmethod
def promote_sp(self, sp_end_point: str):
"""Promote the specified endpoint to become the active SP.
Args:
sp_end_point: the endpoint of the SP. It's string in this format: <url>:<server_port>:<admin_port>
Returns: None
"""
pass
@abstractmethod
def get_available_apps_to_upload(self):
"""Get defined FLARE app folders from the upload folder on the machine the FLARE API is running
Returns: a list of app folders
"""
pass
@abstractmethod
def shutdown_system(self):
"""Shut down the whole NVFLARE system including the overseer, FL server(s), and all FL clients.
Returns: None
        Note: the user must be a Project Admin to use this method; otherwise the NOT_AUTHORIZED exception will be raised.
"""
pass
@abstractmethod
def ls_target(self, target: str, options: str = None, path: str = None) -> str:
"""Run the "ls" command on the specified target and return result
Args:
target: the target (server or a client name) the command will run
options: options of the "ls" command
path: the optional file path
Returns: result of "ls" command
"""
pass
@abstractmethod
def cat_target(self, target: str, options: str = None, file: str = None) -> str:
"""Run the "cat" command on the specified target and return result
Args:
target: the target (server or a client name) the command will run
options: options of the "cat" command
file: the file that the "cat" command will run against
Returns: result of "cat" command
"""
pass
@abstractmethod
def tail_target(self, target: str, options: str = None, file: str = None) -> str:
"""Run the "tail" command on the specified target and return result
Args:
target: the target (server or a client name) the command will run
options: options of the "tail" command
file: the file that the "tail" command will run against
Returns: result of "tail" command
"""
pass
@abstractmethod
def tail_target_log(self, target: str, options: str = None) -> str:
"""Run the "tail log.txt" command on the specified target and return result
Args:
target: the target (server or a client name) the command will run
options: options of the "tail" command
Returns: result of "tail" command
"""
pass
@abstractmethod
def head_target(self, target: str, options: str = None, file: str = None) -> str:
"""Run the "head" command on the specified target and return result
Args:
target: the target (server or a client name) the command will run
options: options of the "head" command
file: the file that the "head" command will run against
Returns: result of "head" command
"""
pass
@abstractmethod
def head_target_log(self, target: str, options: str = None) -> str:
"""Run the "head log.txt" command on the specified target and return result
Args:
target: the target (server or a client name) the command will run
options: options of the "head" command
Returns: result of "head" command
"""
pass
@abstractmethod
def grep_target(self, target: str, options: str = None, pattern: str = None, file: str = None) -> str:
"""Run the "grep" command on the specified target and return result
Args:
target: the target (server or a client name) the command will run
options: options of the "grep" command
pattern: the grep pattern
file: the file that the "grep" command will run against
Returns: result of "grep" command
"""
pass
@abstractmethod
def get_working_directory(self, target: str) -> str:
"""Get the working directory of the specified target
Args:
target: the target (server of a client name)
Returns: current working directory of the specified target
"""
pass
@abstractmethod
def show_stats(self, job_id: str, target_type: str, targets: Optional[List[str]] = None) -> dict:
"""Show processing stats of specified job on specified targets
Args:
job_id: ID of the job
target_type: type of target (server or client)
targets: list of client names if target type is "client". All clients if not specified.
Returns: a dict that contains job stats on specified targets. The key of the dict is target name. The value is
a dict of stats reported by different system components (ServerRunner or ClientRunner).
"""
pass
@abstractmethod
def show_errors(self, job_id: str, target_type: str, targets: Optional[List[str]] = None) -> dict:
"""Show processing errors of specified job on specified targets
Args:
job_id: ID of the job
target_type: type of target (server or client)
targets: list of client names if target type is "client". All clients if not specified.
Returns: a dict that contains job errors (if any) on specified targets. The key of the dict is target name.
The value is a dict of errors reported by different system components (ServerRunner or ClientRunner).
"""
pass
@abstractmethod
def reset_errors(self, job_id: str):
"""Clear errors for all system targets for the specified job
Args:
job_id: ID of the job
Returns: None
"""
pass
@abstractmethod
def get_connected_client_list(self) -> List[ClientInfo]:
"""Get the list of connected clients
Returns: a list of ClientInfo objects
"""
pass
@abstractmethod
def monitor_job(
self, job_id: str, timeout: int = 0, poll_interval: float = 2.0, cb=None, *cb_args, **cb_kwargs
) -> MonitorReturnCode:
"""Monitor the job progress until one of the conditions occurs:
- job is done
- timeout
- the status_cb returns False
Args:
job_id: the job to be monitored
timeout: how long to monitor. If 0, never time out.
poll_interval: how often to poll job status
cb: if provided, callback to be called after each poll
Returns: a MonitorReturnCode
Every time the cb is called, it must return a bool indicating whether the monitor
should continue. If False, this method ends.
"""
pass
@abstractmethod
def close(self):
"""Close the session
Returns:
"""
pass
def job_monitor_cb_signature(session: SessionSpec, job_id: str, job_meta: dict, *args, **kwargs) -> bool:
    """Signature of the job-monitoring callback used by SessionSpec.monitor_job.
    Args:
        session: the session
        job_id: ID of the job being monitored
        job_meta: meta info of the job
        *args:
        **kwargs:
    Returns: a bool indicating whether monitoring should continue (False stops the monitor)
"""
pass
| NVFlare-main | nvflare/fuel/flare_api/api_spec.py |
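A hedged sketch of a monitoring callback matching job_monitor_cb_signature: it reports progress from the job meta and returns True to keep monitoring. The "status" meta key is an assumption about the job meta contents, and the commented call requires a live FLARE session.

def print_status_cb(session, job_id: str, job_meta: dict, *args, **kwargs) -> bool:
    # the "status" key is assumed to be present in the job meta returned by the server
    print(f"job {job_id}: {job_meta.get('status')}")
    return True  # keep monitoring; return False to stop early

# typical use (not runnable without a connected session):
# rc = session.monitor_job(job_id, timeout=600, poll_interval=5.0, cb=print_status_cb)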
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
from typing import List, Optional
from nvflare.apis.fl_constant import AdminCommandNames
from nvflare.apis.job_def import JobMetaKey
from nvflare.apis.workspace import Workspace
from nvflare.fuel.common.excepts import ConfigError
from nvflare.fuel.hci.client.api import AdminAPI, APIStatus, ResultKey
from nvflare.fuel.hci.client.config import FLAdminClientStarterConfigurator
from nvflare.fuel.hci.client.overseer_service_finder import ServiceFinderByOverseer
from nvflare.fuel.hci.cmd_arg_utils import (
process_targets_into_str,
validate_file_string,
validate_options_string,
validate_path_string,
validate_required_target_string,
validate_sp_string,
)
from nvflare.fuel.hci.proto import MetaKey, MetaStatusValue, ProtoKey
from .api_spec import (
AuthenticationError,
AuthorizationError,
ClientInfo,
ClientsStillRunning,
InternalError,
InvalidArgumentError,
InvalidJobDefinition,
InvalidTarget,
JobInfo,
JobNotDone,
JobNotFound,
JobNotRunning,
MonitorReturnCode,
NoClientsAvailable,
NoConnection,
ServerInfo,
SessionClosed,
SessionSpec,
SystemInfo,
TargetType,
)
_VALID_TARGET_TYPES = [TargetType.ALL, TargetType.SERVER, TargetType.CLIENT]
class Session(SessionSpec):
def __init__(self, username: str = None, startup_path: str = None, secure_mode: bool = True, debug: bool = False):
"""Initializes a session with the NVFLARE system.
Args:
username (str): string of username to log in with
startup_path (str): path to the provisioned startup kit, which contains endpoint of the system
secure_mode (bool): whether to run in secure mode or not
"""
assert isinstance(username, str), "username must be str"
self.username = username
assert isinstance(startup_path, str), "startup_path must be str"
self.secure_mode = secure_mode
assert os.path.isdir(startup_path), f"startup kit does not exist at {startup_path}"
workspace = Workspace(root_dir=startup_path)
conf = FLAdminClientStarterConfigurator(workspace)
conf.configure()
admin_config = conf.config_data.get("admin", None)
if not admin_config:
raise ConfigError("Missing admin section in fed_admin configuration.")
ca_cert = admin_config.get("ca_cert", "")
client_cert = admin_config.get("client_cert", "")
client_key = admin_config.get("client_key", "")
if admin_config.get("with_ssl"):
if len(ca_cert) <= 0:
raise ConfigError("missing CA Cert file name field ca_cert in fed_admin configuration")
if len(client_cert) <= 0:
raise ConfigError("missing Client Cert file name field client_cert in fed_admin configuration")
if len(client_key) <= 0:
raise ConfigError("missing Client Key file name field client_key in fed_admin configuration")
else:
ca_cert = None
client_key = None
client_cert = None
upload_dir = admin_config.get("upload_dir")
download_dir = admin_config.get("download_dir")
if not os.path.isdir(download_dir):
os.makedirs(download_dir)
if self.secure_mode:
if not os.path.isfile(ca_cert):
raise ConfigError(f"rootCA.pem does not exist at {ca_cert}")
if not os.path.isfile(client_cert):
raise ConfigError(f"client.crt does not exist at {client_cert}")
if not os.path.isfile(client_key):
raise ConfigError(f"client.key does not exist at {client_key}")
service_finder = ServiceFinderByOverseer(conf.overseer_agent)
self.api = AdminAPI(
ca_cert=ca_cert,
client_cert=client_cert,
client_key=client_key,
upload_dir=upload_dir,
download_dir=download_dir,
service_finder=service_finder,
user_name=username,
insecure=(not self.secure_mode),
debug=debug,
event_handlers=conf.handlers,
)
self.upload_dir = upload_dir
self.download_dir = download_dir
self.overseer_agent = conf.overseer_agent
def close(self):
"""Close the session."""
self.api.close()
def try_connect(self, timeout):
if self.api.closed:
raise SessionClosed("session closed")
start_time = time.time()
while not self.api.is_ready():
if time.time() - start_time > timeout:
self.api.close()
raise NoConnection(f"cannot connect to FLARE in {timeout} seconds")
time.sleep(0.5)
def _do_command(self, command: str, enforce_meta=True):
if self.api.closed:
raise SessionClosed("session closed")
result = self.api.do_command(command)
if not isinstance(result, dict):
raise InternalError(f"result from server must be dict but got {type(result)}")
# check meta status first
meta = result.get(ResultKey.META, None)
if enforce_meta and not meta:
raise InternalError("missing meta from result")
if meta:
if not isinstance(meta, dict):
raise InternalError(f"meta must be dict but got {type(meta)}")
cmd_status = meta.get(MetaKey.STATUS, MetaStatusValue.OK)
info = meta.get(MetaKey.INFO, "")
if cmd_status == MetaStatusValue.INVALID_JOB_DEFINITION:
raise InvalidJobDefinition(f"invalid job definition: {info}")
elif cmd_status == MetaStatusValue.NOT_AUTHORIZED:
raise AuthorizationError(f"user not authorized for the action '{command}: {info}'")
elif cmd_status == MetaStatusValue.NOT_AUTHENTICATED:
raise AuthenticationError(f"user not authenticated: {info}")
elif cmd_status == MetaStatusValue.SYNTAX_ERROR:
raise InternalError(f"syntax error: {info}")
elif cmd_status == MetaStatusValue.INVALID_JOB_ID:
raise JobNotFound(f"no such job: {info}")
elif cmd_status == MetaStatusValue.JOB_RUNNING:
raise JobNotDone(f"job {info} is still running")
elif cmd_status == MetaStatusValue.JOB_NOT_RUNNING:
raise JobNotRunning(f"job {info} is not running")
elif cmd_status == MetaStatusValue.CLIENTS_RUNNING:
raise ClientsStillRunning("one or more clients are still running")
elif cmd_status == MetaStatusValue.NO_CLIENTS:
raise NoClientsAvailable("no clients available")
elif cmd_status == MetaStatusValue.INTERNAL_ERROR:
raise InternalError(f"server internal error: {info}")
elif cmd_status == MetaStatusValue.INVALID_TARGET:
raise InvalidTarget(info)
elif cmd_status != MetaStatusValue.OK:
raise InternalError(f"{cmd_status}: {info}")
status = result.get(ResultKey.STATUS, None)
if not status:
raise InternalError("missing status in result")
if status in [APIStatus.ERROR_CERT, APIStatus.ERROR_AUTHENTICATION]:
raise AuthenticationError(f"user not authenticated: {status}")
elif status == APIStatus.ERROR_AUTHORIZATION:
raise AuthorizationError(f"user not authorized for the action '{command}'")
elif status == APIStatus.ERROR_INACTIVE_SESSION:
raise SessionClosed("the session is closed on server")
elif status in [APIStatus.ERROR_PROTOCOL, APIStatus.ERROR_SYNTAX]:
raise InternalError(f"protocol error: {status}")
elif status in [APIStatus.ERROR_SERVER_CONNECTION]:
raise ConnectionError(f"cannot connect to server: {status}")
elif status != APIStatus.SUCCESS:
details = result.get(ResultKey.DETAILS, "")
raise RuntimeError(f"runtime error encountered: {status}: {details}")
return result
@staticmethod
def _validate_job_id(job_id: str):
if not job_id:
raise JobNotFound("job_id is required but not specified.")
if not isinstance(job_id, str):
raise JobNotFound(f"invalid job_id {job_id}")
def clone_job(self, job_id: str) -> str:
"""Create a new job by cloning a specified job.
Args:
job_id: job to be cloned
Returns: ID of the new job
"""
self._validate_job_id(job_id)
result = self._do_command(AdminCommandNames.CLONE_JOB + " " + job_id)
meta = result[ResultKey.META]
job_id = meta.get(MetaKey.JOB_ID, None)
info = meta.get(MetaKey.INFO, "")
if not job_id:
raise InternalError(f"server failed to return job id: {info}")
return job_id
def submit_job(self, job_definition_path: str) -> str:
"""Submit a predefined job to the NVFLARE system.
Args:
job_definition_path: path to the folder that defines a NVFLARE job
Returns: the job id if accepted by the system
If the submission fails, an exception will be raised.
"""
if not job_definition_path:
raise InvalidJobDefinition("job_definition_path is required but not specified.")
if not isinstance(job_definition_path, str):
raise InvalidJobDefinition(f"job_definition_path must be str but got {type(job_definition_path)}.")
if not os.path.isdir(job_definition_path):
if os.path.isdir(os.path.join(self.upload_dir, job_definition_path)):
job_definition_path = os.path.join(self.upload_dir, job_definition_path)
else:
raise InvalidJobDefinition(f"job_definition_path '{job_definition_path}' is not a valid folder")
result = self._do_command(AdminCommandNames.SUBMIT_JOB + " " + job_definition_path)
meta = result[ResultKey.META]
job_id = meta.get(MetaKey.JOB_ID, None)
if not job_id:
raise InternalError("server failed to return job id")
return job_id
def get_job_meta(self, job_id: str) -> dict:
"""Get the meta info of the specified job.
Args:
job_id: ID of the job
Returns: a dict of job metadata
"""
self._validate_job_id(job_id)
result = self._do_command(AdminCommandNames.GET_JOB_META + " " + job_id)
meta = result[ResultKey.META]
job_meta = meta.get(MetaKey.JOB_META, None)
if not job_meta:
raise InternalError("server failed to return job meta")
return job_meta
def list_jobs(
self,
detailed: bool = False,
limit: Optional[int] = None,
id_prefix: str = None,
name_prefix: str = None,
reverse: bool = False,
) -> List[dict]:
"""Get the job info from the server.
Args:
detailed (bool): True to get the detailed information for each job, False by default
limit (int, optional): maximum number of jobs to show, with 0 or None to show all (defaults to None to show all)
id_prefix (str): if included, only return jobs with the beginning of the job ID matching the id_prefix
name_prefix (str): if included, only return jobs with the beginning of the job name matching the name_prefix
reverse (bool): if specified, list jobs in the reverse order of submission times
Returns: a list of job metadata
"""
if not isinstance(detailed, bool):
raise ValueError(f"detailed must be bool but got {type(detailed)}")
if not isinstance(reverse, bool):
raise ValueError(f"reverse must be bool but got {type(reverse)}")
if limit is not None and not isinstance(limit, int):
raise ValueError(f"limit must be None or int but got {type(limit)}")
if id_prefix is not None and not isinstance(id_prefix, str):
raise ValueError(f"id_prefix must be None or str but got {type(id_prefix)}")
if name_prefix is not None and not isinstance(name_prefix, str):
raise ValueError(f"name_prefix must be None or str but got {type(name_prefix)}")
command = AdminCommandNames.LIST_JOBS
if detailed:
command = command + " -d"
if reverse:
command = command + " -r"
if limit:
if not isinstance(limit, int):
raise InvalidArgumentError(f"limit must be int but got {type(limit)}")
command = command + " -m " + str(limit)
if name_prefix:
if not isinstance(name_prefix, str):
raise InvalidArgumentError("name_prefix must be str but got {}.".format(type(name_prefix)))
else:
command = command + " -n " + name_prefix
if id_prefix:
if not isinstance(id_prefix, str):
raise InvalidArgumentError("id_prefix must be str but got {}.".format(type(id_prefix)))
else:
command = command + " " + id_prefix
result = self._do_command(command)
meta = result[ResultKey.META]
jobs_list = meta.get(MetaKey.JOBS, [])
return jobs_list
def download_job_result(self, job_id: str) -> str:
"""Download result of the job.
Args:
job_id (str): ID of the job
Returns: folder path to the location of the job result
If the job size is smaller than the maximum size set on the server, the job will download to the download_dir
set in Session through the admin config, and the path to the downloaded result will be returned. If the size
of the job is larger than the maximum size, the location to download the job will be returned.
"""
self._validate_job_id(job_id)
result = self._do_command(AdminCommandNames.DOWNLOAD_JOB + " " + job_id)
meta = result[ResultKey.META]
download_job_id = meta.get(MetaKey.JOB_ID, None)
job_download_url = meta.get(MetaKey.JOB_DOWNLOAD_URL, None)
if not job_download_url:
return os.path.join(self.download_dir, download_job_id)
else:
return job_download_url
def abort_job(self, job_id: str):
"""Abort the specified job.
Args:
job_id (str): job to be aborted
        Returns: None
If the job is already done, no effect;
If job is not started yet, it will be cancelled and won't be scheduled.
If the job is being executed, it will be aborted.
"""
self._validate_job_id(job_id)
# result = self._do_command(AdminCommandNames.ABORT_JOB + " " + job_id)
# return result.get(ResultKey.META, None)
self._do_command(AdminCommandNames.ABORT_JOB + " " + job_id)
def delete_job(self, job_id: str):
"""Delete the specified job completely from the system.
Args:
job_id (str): job to be deleted
Returns: None
The job will be deleted from the job store if the job is not currently running.
"""
self._validate_job_id(job_id)
self._do_command(AdminCommandNames.DELETE_JOB + " " + job_id)
def get_system_info(self):
"""Get general system information.
Returns: a SystemInfo object
"""
return self._do_get_system_info(AdminCommandNames.CHECK_STATUS)
def _do_get_system_info(self, cmd: str):
result = self._do_command(f"{cmd} {TargetType.SERVER}")
meta = result[ResultKey.META]
server_info = ServerInfo(status=meta.get(MetaKey.SERVER_STATUS), start_time=meta.get(MetaKey.SERVER_START_TIME))
clients = []
client_meta_list = meta.get(MetaKey.CLIENTS, None)
if client_meta_list:
for c in client_meta_list:
client_info = ClientInfo(
name=c.get(MetaKey.CLIENT_NAME), last_connect_time=c.get(MetaKey.CLIENT_LAST_CONNECT_TIME)
)
clients.append(client_info)
jobs = []
job_meta_list = meta.get(MetaKey.JOBS, None)
if job_meta_list:
for j in job_meta_list:
job_info = JobInfo(app_name=j.get(MetaKey.APP_NAME), job_id=j.get(MetaKey.JOB_ID))
jobs.append(job_info)
return SystemInfo(server_info=server_info, client_info=clients, job_info=jobs)
def get_client_job_status(self, client_names: List[str] = None) -> List[dict]:
"""Get job status info of specified FL clients.
Args:
client_names (List[str]): names of the clients to get status info
Returns: A list of jobs running on the clients. Each job is described by a dict of: id, app name and status.
If there are multiple jobs running on one client, the list contains one entry for each job for that client.
If no FL clients are connected or the server failed to communicate to them, this method returns None.
"""
parts = [AdminCommandNames.CHECK_STATUS, TargetType.CLIENT]
if client_names:
processed_targets_str = process_targets_into_str(client_names)
parts.append(processed_targets_str)
command = " ".join(parts)
result = self._do_command(command)
meta = result[ResultKey.META]
return meta.get(MetaKey.CLIENT_STATUS, None)
def restart(self, target_type: str, client_names: Optional[List[str]] = None) -> dict:
"""Restart specified system target(s).
Args:
target_type (str): what system target (server, client, or all) to restart
client_names (List[str]): clients to be restarted if target_type is client. If not specified, all clients.
Returns: a dict that contains detailed info about the restart request:
status - the overall status of the result.
server_status - whether the server is restarted successfully - only if target_type is "all" or "server".
client_status - a dict (keyed on client name) that specifies status of each client - only if target_type
is "all" or "client".
"""
if target_type not in _VALID_TARGET_TYPES:
raise ValueError(f"invalid target_type {target_type} - must be in {_VALID_TARGET_TYPES}")
parts = [AdminCommandNames.RESTART, target_type]
if target_type == TargetType.CLIENT and client_names:
processed_targets_str = process_targets_into_str(client_names)
parts.append(processed_targets_str)
command = " ".join(parts)
result = self._do_command(command)
return result[ResultKey.META]
def shutdown(self, target_type: TargetType, client_names: Optional[List[str]] = None):
"""Shut down specified system target(s).
Args:
target_type: what system target (server, client, or all) to shut down
client_names: clients to be shut down if target_type is client. If not specified, all clients.
Returns: None
"""
if target_type not in _VALID_TARGET_TYPES:
raise ValueError(f"invalid target_type {target_type} - must be in {_VALID_TARGET_TYPES}")
parts = [AdminCommandNames.SHUTDOWN, target_type]
if target_type == TargetType.CLIENT and client_names:
processed_targets_str = process_targets_into_str(client_names)
parts.append(processed_targets_str)
command = " ".join(parts)
self._do_command(command)
def set_timeout(self, value: float):
"""Set a session-specific command timeout.
This is the amount of time the server will wait for responses after sending commands to FL clients.
Note that this value is only effective for the current API session.
Args:
value (float): a positive float number for the timeout in seconds
Returns: None
"""
self.api.set_command_timeout(value)
def unset_timeout(self):
"""Unset the session-specific command timeout.
Once unset, the FL Admin Server's default timeout will be used.
Returns: None
"""
self.api.unset_command_timeout()
def list_sp(self) -> dict:
"""List available service providers.
Returns: a dict that contains information about the primary SP and others
"""
reply = self._do_command("list_sp", enforce_meta=False)
return reply.get(ResultKey.DETAILS)
def get_active_sp(self) -> dict:
"""Get the current active service provider (SP).
Returns: a dict that describes the current active SP. If no SP is available currently, the 'name' attribute of
the result is empty.
"""
reply = self._do_command("get_active_sp", enforce_meta=False)
return reply.get(ResultKey.META)
def promote_sp(self, sp_end_point: str):
"""Promote the specified endpoint to become the active SP.
Args:
sp_end_point: the endpoint of the SP. It's string in this format: <url>:<server_port>:<admin_port>
Returns: None
"""
sp_end_point = validate_sp_string(sp_end_point)
self._do_command("promote_sp " + sp_end_point)
def get_available_apps_to_upload(self):
"""Get defined FLARE app folders from the upload folder on the machine the FLARE API is running.
Returns: a list of app folders
"""
dir_list = []
for item in os.listdir(self.upload_dir):
if os.path.isdir(os.path.join(self.upload_dir, item)):
dir_list.append(item)
return dir_list
def shutdown_system(self):
"""Shutdown the whole NVFLARE system including the overseer, FL server(s), and all FL clients.
Returns: None
Note: the user must be a Project Admin to use this method; otherwise the NOT_AUTHORIZED exception will be raised.
"""
sys_info = self._do_get_system_info(AdminCommandNames.ADMIN_CHECK_STATUS)
if sys_info.server_info.status != "stopped":
raise JobNotDone("there are still running jobs")
resp = self.overseer_agent.set_state("shutdown")
err = json.loads(resp.text).get("Error")
if err:
raise RuntimeError(err)
def ls_target(self, target: str, options: str = None, path: str = None) -> str:
"""Run the "ls" command on the specified target and return the result.
Args:
target: the target (server or a client name) the command will be run on
options: options of the "ls" command
path: the optional file path
Returns: result of "ls" command
"""
return self._shell_command_on_target("ls", target, options, path)
def cat_target(self, target: str, options: str = None, file: str = None) -> str:
"""Run the "cat" command on the specified target and return the result.
Args:
target: the target (server or a client name) the command will be run on
options: options of the "cat" command
file: the file that the "cat" command will run against
Returns: result of "cat" command
"""
return self._shell_command_on_target("cat", target, options, file, fp_required=True, fp_type="file")
def tail_target(self, target: str, options: str = None, file: str = None) -> str:
"""Run the "tail" command on the specified target and return the result.
Args:
target: the target (server or a client name) the command will be run on
options: options of the "tail" command
file: the file that the "tail" command will run against
Returns: result of "tail" command
"""
return self._shell_command_on_target("tail", target, options, file, fp_required=True, fp_type="file")
def tail_target_log(self, target: str, options: str = None) -> str:
"""Run the "tail log.txt" command on the specified target and return the result.
Args:
target: the target (server or a client name) the command will be run on
options: options of the "tail" command
Returns: result of "tail" command
"""
return self.tail_target(target, options, file="log.txt")
def head_target(self, target: str, options: str = None, file: str = None) -> str:
"""Run the "head" command on the specified target and return the result.
Args:
target: the target (server or a client name) the command will be run on
options: options of the "head" command
file: the file that the "head" command will run against
Returns: result of "head" command
"""
return self._shell_command_on_target("head", target, options, file, fp_required=True, fp_type="file")
def head_target_log(self, target: str, options: str = None) -> str:
"""Run the "head log.txt" command on the specified target and return the result.
Args:
target: the target (server or a client name) the command will be run on
options: options of the "head" command
Returns: result of "head" command
"""
return self.head_target(target, options, file="log.txt")
def grep_target(self, target: str, options: str = None, pattern: str = None, file: str = None) -> str:
"""Run the "grep" command on the specified target and return the result.
Args:
target: the target (server or a client name) the command will be run on
options: options of the "grep" command
pattern: the grep pattern
file: the file that the "grep" command will run against
Returns: result of "grep" command
"""
return self._shell_command_on_target(
"grep", target, options, file, pattern=pattern, pattern_required=True, fp_required=True, fp_type="file"
)
def get_working_directory(self, target: str) -> str:
"""Get the working directory of the specified target.
Args:
target (str): the target (server of a client name)
Returns: current working directory of the specified target
"""
return self._shell_command_on_target("pwd", target, options=None, fp=None)
def _shell_command_on_target(
self,
cmd: str,
target: str,
options,
fp,
pattern=None,
pattern_required=False,
fp_required=False,
fp_type="path",
) -> str:
target = validate_required_target_string(target)
parts = [cmd, target]
if options:
options = validate_options_string(options)
parts.append(options)
if pattern_required:
if not pattern:
raise SyntaxError("pattern is required but not specified.")
if not isinstance(pattern, str):
raise ValueError("pattern is not str.")
parts.append('"' + pattern + '"')
if fp_required and not fp:
raise SyntaxError(f"{fp_type} is required but not specified.")
if fp:
if fp_type == "path":
validate_path_string(fp)
else:
validate_file_string(fp)
parts.append(fp)
command = " ".join(parts)
reply = self._do_command(command, enforce_meta=False)
return self._get_string_data(reply)
@staticmethod
def _get_string_data(reply: dict) -> str:
result = ""
data_items = reply.get(ProtoKey.DATA, [])
for it in data_items:
if isinstance(it, dict):
if it.get(ProtoKey.TYPE) == ProtoKey.STRING:
result += it.get(ProtoKey.DATA, "")
return result
@staticmethod
def _get_dict_data(reply: dict) -> dict:
result = {}
data_items = reply.get(ProtoKey.DATA, [])
for it in data_items:
if isinstance(it, dict):
if it.get(ProtoKey.TYPE) == ProtoKey.DICT:
return it.get(ProtoKey.DATA, {})
return result
def show_stats(self, job_id: str, target_type: str, targets: Optional[List[str]] = None) -> dict:
"""Show processing stats of specified job on specified targets.
Args:
job_id (str): ID of the job
target_type (str): type of target (server or client)
targets: list of client names if target type is "client". All clients if not specified.
Returns: a dict that contains job stats on specified targets. The key of the dict is target name. The value is
a dict of stats reported by different system components (ServerRunner or ClientRunner).
"""
return self._collect_info(AdminCommandNames.SHOW_STATS, job_id, target_type, targets)
def show_errors(self, job_id: str, target_type: str, targets: Optional[List[str]] = None) -> dict:
"""Show processing errors of specified job on specified targets.
Args:
job_id (str): ID of the job
target_type (str): type of target (server or client)
targets: list of client names if target type is "client". All clients if not specified.
Returns: a dict that contains job errors (if any) on specified targets. The key of the dict is target name.
The value is a dict of errors reported by different system components (ServerRunner or ClientRunner).
"""
return self._collect_info(AdminCommandNames.SHOW_ERRORS, job_id, target_type, targets)
def reset_errors(self, job_id: str):
"""Clear errors for all system targets for the specified job.
Args:
job_id (str): ID of the job
Returns: None
"""
self._collect_info(AdminCommandNames.RESET_ERRORS, job_id, TargetType.ALL)
def _collect_info(self, cmd: str, job_id: str, target_type: str, targets=None) -> dict:
if not job_id:
raise ValueError("job_id is required but not specified.")
if not isinstance(job_id, str):
raise TypeError("job_id must be str but got {}.".format(type(job_id)))
if target_type not in _VALID_TARGET_TYPES:
raise ValueError(f"invalid target_type {target_type}: must be one of {_VALID_TARGET_TYPES}")
parts = [cmd, job_id, target_type]
if target_type == TargetType.CLIENT and targets:
processed_targets_str = process_targets_into_str(targets)
parts.append(processed_targets_str)
command = " ".join(parts)
reply = self._do_command(command, enforce_meta=False)
return self._get_dict_data(reply)
def get_connected_client_list(self) -> List[ClientInfo]:
"""Get the list of connected clients.
Returns: a list of ClientInfo objects
"""
sys_info = self.get_system_info()
return sys_info.client_info
def monitor_job(
self, job_id: str, timeout: float = 0.0, poll_interval: float = 2.0, cb=None, *cb_args, **cb_kwargs
) -> MonitorReturnCode:
"""Monitor the job progress.
Monitors until one of the conditions occurs:
- job is done
- timeout
- the status_cb returns False
Args:
job_id (str): the job to be monitored
timeout (float): how long to monitor. If 0, never time out.
poll_interval (float): how often to poll job status
cb: if provided, callback to be called after each status poll
Returns: a MonitorReturnCode
Every time the cb is called, it must return a bool indicating whether the monitor
should continue. If False, this method ends.
"""
start_time = time.time()
while True:
if 0 < timeout < time.time() - start_time:
return MonitorReturnCode.TIMEOUT
job_meta = self.get_job_meta(job_id)
if cb is not None:
should_continue = cb(self, job_id, job_meta, *cb_args, **cb_kwargs)
if not should_continue:
return MonitorReturnCode.ENDED_BY_CB
# check whether the job is finished
job_status = job_meta.get(JobMetaKey.STATUS.value, None)
if not job_status:
raise InternalError(f"missing status in job {job_id}")
if job_status.startswith("FINISHED"):
return MonitorReturnCode.JOB_FINISHED
time.sleep(poll_interval)
def basic_cb_with_print(session: Session, job_id: str, job_meta, *cb_args, **cb_kwargs) -> bool:
"""This is a sample callback to use with monitor_job.
This demonstrates how a custom callback can be used.
"""
if job_meta["status"] == "RUNNING":
if cb_kwargs["cb_run_counter"]["count"] < 3:
print(job_meta)
else:
print(".", end="")
else:
print("\n" + str(job_meta))
cb_kwargs["cb_run_counter"]["count"] += 1
return True
def new_secure_session(username: str, startup_kit_location: str, debug: bool = False, timeout: float = 10.0) -> Session:
"""Create a new secure FLARE API session with the NVFLARE system.
Args:
username (str): username assigned to the user
startup_kit_location (str): path to the root admin directory of the provisioned startup kit (the directory that contains the "startup" folder)
debug (bool): enable debug mode
timeout (float): how long to try to establish the session, in seconds
Returns: a Session object
"""
session = Session(username=username, startup_path=startup_kit_location, secure_mode=True, debug=debug)
session.try_connect(timeout)
return session
def new_insecure_session(startup_kit_location: str, debug: bool = False, timeout: float = 10.0) -> Session:
"""Create a new insecure FLARE API session with the NVFLARE system.
Args:
startup_kit_location (str): path to the provisioned startup folder
debug (bool): enable debug mode
timeout (float): how long to try to establish the session, in seconds
Returns: a Session object
The username for an insecure session is always "admin".
"""
session = Session(username="admin", startup_path=startup_kit_location, secure_mode=False, debug=debug)
session.try_connect(timeout)
return session
| NVFlare-main | nvflare/fuel/flare_api/flare_api.py |
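A hedged usage sketch of the FLARE API above: it assumes a provisioned secure admin startup kit and an already-submitted job, so the username, path, and job ID below are placeholders, and Session.close() is assumed to be defined earlier in this module.
from nvflare.fuel.flare_api.flare_api import basic_cb_with_print, new_secure_session
# Placeholders: replace with your provisioning output and a job you have submitted.
username = "admin@nvidia.com"                      # hypothetical admin user
admin_dir = "/path/to/provisioned/admin"           # hypothetical admin startup-kit root
job_id = "0a1b2c3d-0000-0000-0000-000000000000"    # hypothetical job ID
sess = new_secure_session(username, admin_dir)
try:
    print(sess.get_connected_client_list())
    # Poll the job every 5 seconds until it finishes, printing status via the sample callback.
    rc = sess.monitor_job(job_id, poll_interval=5.0, cb=basic_cb_with_print, cb_run_counter={"count": 0})
    print("monitor returned:", rc)
    # Inspect results: per-target job stats on the server and the tail of the server log.
    print(sess.show_stats(job_id, "server"))
    print(sess.tail_target_log("server", options="-n 50"))
finally:
    sess.close()  # assumed to be defined on Session earlier in this module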
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/statistics/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NVFlare-main | nvflare/app_opt/statistics/visualization/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from nvflare.fuel.utils.import_utils import optional_import
class Visualization:
def import_modules(self):
display, import_flag = optional_import(module="IPython.display", name="display")
if not import_flag:
print(display.failure)
pd, import_flag = optional_import(module="pandas")
if not import_flag:
print(pd.failure)
return display, pd
def show_stats(self, data, white_list_features=[]):
display, pd = self.import_modules()
all_features = [k for k in data]
target_features = self._get_target_features(all_features, white_list_features)
for feature in target_features:
print(f"\n{feature}\n")
feature_metrics = data[feature]
df = pd.DataFrame.from_dict(feature_metrics)
display(df)
def show_histograms(self, data, display_format="sample_count", white_list_features=[], plot_type="both"):
feature_dfs = self.get_histogram_dataframes(data, display_format, white_list_features)
self.show_dataframe_plots(feature_dfs, plot_type)
def show_dataframe_plots(self, feature_dfs, plot_type="both"):
for feature in feature_dfs:
df = feature_dfs[feature]
if plot_type == "both":
axes = df.plot.line(rot=40, title=feature)
axes = df.plot.line(rot=40, subplots=True, title=feature)
elif plot_type == "main":
axes = df.plot.line(rot=40, title=feature)
elif plot_type == "subplot":
axes = df.plot.line(rot=40, subplots=True, title=feature)
else:
print(f"not supported plot type: '{plot_type}'")
def get_histogram_dataframes(self, data, display_format="sample_count", white_list_features=[]) -> Dict:
display, pd = self.import_modules()
(hists, edges) = self._prepare_histogram_data(data, display_format, white_list_features)
all_features = [k for k in edges]
target_features = self._get_target_features(all_features, white_list_features)
feature_dfs = {}
for feature in target_features:
hist_data = hists[feature]
index = edges[feature]
df = pd.DataFrame(hist_data, index=index)
feature_dfs[feature] = df
return feature_dfs
def _prepare_histogram_data(self, data, display_format="sample_count", white_list_features=[]):
all_features = [k for k in data]
target_features = self._get_target_features(all_features, white_list_features)
feature_hists = {}
feature_edges = {}
for feature in target_features:
xs = data[feature]["histogram"]
hists = {}
feature_edges[feature] = []
for i, ds in enumerate(xs):
ds_hist = xs[ds]
ds_bucket_counts = []
for bucket in ds_hist:
if i == 0:
feature_edges[feature].append(bucket[0])
if display_format == "percent":
sum_value = self.sum_counts_in_histogram(ds_hist)
ds_bucket_counts.append(bucket[2] / sum_value)
else:
ds_bucket_counts.append(bucket[2])
hists[ds] = ds_bucket_counts
feature_hists[feature] = hists
return feature_hists, feature_edges
def sum_counts_in_histogram(self, hist):
sum_value = 0
for bucket in hist:
sum_value += bucket[2]
return sum_value
def _get_target_features(self, all_features, white_list_features=[]):
target_features = white_list_features
if not white_list_features:
target_features = all_features
return target_features
| NVFlare-main | nvflare/app_opt/statistics/visualization/statistics_visualization.py |
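A hedged sketch of driving the Visualization helper above from a Jupyter notebook (IPython, pandas, and matplotlib available). The shape of the stats dict — feature -> metric -> per-site values, with a "histogram" entry holding (low_edge, high_edge, count) buckets per site — is inferred from the code above, and the numbers are made up.
from nvflare.app_opt.statistics.visualization.statistics_visualization import Visualization
# Made-up global statistics for one feature across two sites.
stats = {
    "Age": {
        "count": {"site-1": 120, "site-2": 95},
        "mean": {"site-1": 42.3, "site-2": 39.8},
        "histogram": {
            "site-1": [(0, 20, 10), (20, 40, 50), (40, 60, 60)],
            "site-2": [(0, 20, 8), (20, 40, 47), (40, 60, 40)],
        },
    },
}
vis = Visualization()
vis.show_stats(stats)                                 # one DataFrame table per feature
vis.show_histograms(stats, display_format="percent")  # line plots; counts normalized per site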
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .api import FLCallback as FLCallback
from .api import patch as patch
| NVFlare-main | nvflare/app_opt/lightning/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import pytorch_lightning as pl
from pytorch_lightning.callbacks import Callback
from torch import Tensor
from nvflare.app_common.abstract.fl_model import FLModel
from nvflare.client.api import clear, get_config, init, receive, send
from nvflare.client.config import ConfigKey
def patch(trainer: pl.Trainer):
fl_callback = FLCallback()
callbacks = trainer.callbacks
if isinstance(callbacks, list):
callbacks.append(fl_callback)
elif isinstance(callbacks, Callback):
callbacks = [callbacks, fl_callback]
else:
callbacks = [fl_callback]
trainer.callbacks = callbacks
class FLCallback(Callback):
def __init__(self):
super(FLCallback, self).__init__()
init()
self.has_global_eval = get_config().get(ConfigKey.GLOBAL_EVAL, False)
self.has_training = get_config().get(ConfigKey.TRAINING, False)
self.input_fl_model = None
self._receive_model()
self.metrics = None
def reset_state(self):
# If the next round of federated training reuses the same callback instance,
# reset_state() must be called first.
self.input_fl_model = None
self.metrics = None
clear()
def on_train_start(self, trainer, pl_module):
# receive the global model and update the local model with global model
if self.has_training:
self._receive_and_update_model(pl_module)
def on_train_end(self, trainer, pl_module):
if self.has_training:
self._send_model(FLModel(params=pl_module.cpu().state_dict()))
self.reset_state()
def on_validation_start(self, trainer, pl_module):
# Receive the global model and update the local model with it the first time
# validate() or train() is called. The expectation is that the user validates
# the global model first (i.e. calls validate()); once that is done, self.metrics
# is set. Subsequent validate() calls will not trigger a model update, so they
# validate the local model.
if pl_module and self.has_global_eval and self.metrics is None:
self._receive_and_update_model(pl_module)
def on_validation_end(self, trainer, pl_module):
if pl_module and self.has_global_eval and self.metrics is None:
self.metrics = _extract_metrics(trainer.callback_metrics)
self._send_model(FLModel(metrics=self.metrics))
def _receive_and_update_model(self, pl_module):
self._receive_model()
if self.input_fl_model and self.input_fl_model.params:
pl_module.load_state_dict(self.input_fl_model.params)
def _receive_model(self) -> FLModel:
"""Receives model from NVFlare."""
model = receive()
if model:
self.input_fl_model = model
return model
def _send_model(self, output_model: FLModel):
try:
send(output_model, clear_registry=False)
except Exception as e:
raise RuntimeError("failed to send FL model", e)
def __del__(self):
clear()
def _extract_metrics(metrics: Dict[str, Tensor]):
result_metrics = {}
for key, t in metrics.items():
result_metrics[key] = t.item()
return result_metrics
| NVFlare-main | nvflare/app_opt/lightning/api.py |
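A hedged sketch of using patch() above with a toy LightningModule. TinyNet and the random tensors are made up for illustration, and the script is assumed to run as the training code of an NVFlare client job, since FLCallback's init()/receive()/send() need the NVFlare client runtime.
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, TensorDataset
from nvflare.app_opt.lightning import patch
class TinyNet(pl.LightningModule):
    # Minimal model, just enough to drive fit()/validate().
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)
    def training_step(self, batch, batch_idx):
        x, y = batch
        return torch.nn.functional.mse_loss(self.layer(x), y)
    def validation_step(self, batch, batch_idx):
        x, y = batch
        self.log("val_loss", torch.nn.functional.mse_loss(self.layer(x), y))
    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)
ds = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
train_loader = DataLoader(ds, batch_size=8)
val_loader = DataLoader(ds, batch_size=8)
model = TinyNet()
trainer = pl.Trainer(max_epochs=1, enable_checkpointing=False, logger=False)
patch(trainer)  # appends FLCallback so fit()/validate() exchange FLModels with NVFlare
trainer.validate(model, dataloaders=val_loader)  # evaluate the received global model first
trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)  # local training; weights sent at train end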
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decomposers for HE related classes"""
from typing import Any
import tenseal as ts
from nvflare.fuel.utils import fobs
from nvflare.fuel.utils.fobs.datum import DatumManager
class CKKSVectorDecomposer(fobs.Decomposer):
def supported_type(self):
return ts.CKKSVector
def decompose(self, target: ts.CKKSVector, manager: DatumManager = None) -> Any:
return target.serialize(), target.context().serialize()
def recompose(self, data: Any, manager: DatumManager = None) -> ts.CKKSVector:
vec_data, ctx_data = data
context = ts.context_from(ctx_data)
return ts.ckks_vector_from(context, vec_data)
def register():
if register.registered:
return
fobs.register(CKKSVectorDecomposer)
register.registered = True
register.registered = False
| NVFlare-main | nvflare/app_opt/he/decomposers.py |
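A hedged sketch of round-tripping a TenSEAL CKKSVector through FOBS with the decomposer above. The CKKS context parameters are illustrative, fobs.serialize()/fobs.deserialize() are assumed to be the module-level FOBS entry points, and link_context() is used only so the example can decrypt locally.
import tenseal as ts
from nvflare.app_opt.he.decomposers import register
from nvflare.fuel.utils import fobs
register()  # safe to call repeatedly; the decomposer is registered only once
# Illustrative CKKS context; real deployments tune these parameters.
context = ts.context(
    ts.SCHEME_TYPE.CKKS, poly_modulus_degree=8192, coeff_mod_bit_sizes=[60, 40, 40, 60]
)
context.global_scale = 2**40
context.generate_galois_keys()
vec = ts.ckks_vector(context, [1.0, 2.0, 3.0])
blob = fobs.serialize(vec)          # decompose(): serialized vector + serialized context
restored = fobs.deserialize(blob)   # recompose(): CKKSVector rebuilt on the restored context
restored.link_context(context)      # re-attach the private context so decrypt() works here
print(restored.decrypt())           # approximately [1.0, 2.0, 3.0]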