Dataset columns:
  code: string (lengths 1.36k to 41.2k)
  apis: list
  extract_api: string (lengths 328 to 40.6k)
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import grpc
import requests
from grpc._cython import cygrpc

from fate_arch.common.base_utils import json_dumps, json_loads
from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.settings import FATEFLOW_SERVICE_NAME, HEADERS, DEFAULT_REMOTE_REQUEST_TIMEOUT
from fate_flow.settings import IP, GRPC_PORT, stat_logger
from fate_flow.utils.proto_compatibility import basic_meta_pb2
from fate_flow.utils.proto_compatibility import proxy_pb2
from fate_flow.utils.proto_compatibility import proxy_pb2_grpc
import time
import sys
from fate_flow.tests.grpc.xthread import ThreadPoolExecutor


def wrap_grpc_packet(json_body, http_method, url, src_party_id, dst_party_id, job_id=None,
                     overall_timeout=DEFAULT_REMOTE_REQUEST_TIMEOUT):
    _src_end_point = basic_meta_pb2.Endpoint(ip=IP, port=GRPC_PORT)
    _src = proxy_pb2.Topic(name=job_id, partyId="{}".format(src_party_id), role=FATEFLOW_SERVICE_NAME, callback=_src_end_point)
    _dst = proxy_pb2.Topic(name=job_id, partyId="{}".format(dst_party_id), role=FATEFLOW_SERVICE_NAME, callback=None)
    _task = proxy_pb2.Task(taskId=job_id)
    _command = proxy_pb2.Command(name=FATEFLOW_SERVICE_NAME)
    _conf = proxy_pb2.Conf(overallTimeout=overall_timeout)
    _meta = proxy_pb2.Metadata(src=_src, dst=_dst, task=_task, command=_command, operator=http_method, conf=_conf)
    _data = proxy_pb2.Data(key=url, value=bytes(json_dumps(json_body), 'utf-8'))
    return proxy_pb2.Packet(header=_meta, body=_data)


def get_url(_suffix):
    return "http://{}:{}/{}".format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT, _suffix.lstrip('/'))


class UnaryService(proxy_pb2_grpc.DataTransferServiceServicer):
    def unaryCall(self, _request, context):
        packet = _request
        header = packet.header
        _suffix = packet.body.key
        param_bytes = packet.body.value
        param = bytes.decode(param_bytes)
        job_id = header.task.taskId
        src = header.src
        dst = header.dst
        method = header.operator
        param_dict = json_loads(param)
        param_dict['src_party_id'] = str(src.partyId)
        source_routing_header = []
        for key, value in context.invocation_metadata():
            source_routing_header.append((key, value))
        stat_logger.info(f"grpc request routing header: {source_routing_header}")
        param = bytes.decode(bytes(json_dumps(param_dict), 'utf-8'))
        action = getattr(requests, method.lower(), None)
        if action:
            print(_suffix)
            #resp = action(url=get_url(_suffix), data=param, headers=HEADERS)
        else:
            pass
        #resp_json = resp.json()
        resp_json = {"status": "test"}
        import time
        print("sleep")
        time.sleep(60)
        return wrap_grpc_packet(resp_json, method, _suffix, dst.partyId, src.partyId, job_id)


thread_pool_executor = ThreadPoolExecutor(max_workers=5)
print(f"start grpc server pool on {thread_pool_executor._max_workers} max workers")
server = grpc.server(thread_pool_executor,
                     options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
                              (cygrpc.ChannelArgKey.max_receive_message_length, -1)])
proxy_pb2_grpc.add_DataTransferServiceServicer_to_server(UnaryService(), server)
server.add_insecure_port("{}:{}".format("127.0.0.1", 7777))
server.start()
try:
    while True:
        time.sleep(60 * 60 * 24)
except KeyboardInterrupt:
    server.stop(0)
    sys.exit(0)
[ "fate_flow.utils.proto_compatibility.basic_meta_pb2.Endpoint", "fate_flow.utils.proto_compatibility.proxy_pb2.Conf", "fate_flow.tests.grpc.xthread.ThreadPoolExecutor", "fate_flow.utils.proto_compatibility.proxy_pb2.Command", "fate_flow.settings.stat_logger.info", "fate_flow.utils.proto_compatibility.proxy_pb2.Packet", "fate_flow.utils.proto_compatibility.proxy_pb2.Metadata", "fate_flow.utils.proto_compatibility.proxy_pb2.Task" ]
[((3486, 3519), 'fate_flow.tests.grpc.xthread.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(5)'}), '(max_workers=5)\n', (3504, 3519), False, 'from fate_flow.tests.grpc.xthread import ThreadPoolExecutor\n'), ((3613, 3773), 'grpc.server', 'grpc.server', (['thread_pool_executor'], {'options': '[(cygrpc.ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.\n max_receive_message_length, -1)]'}), '(thread_pool_executor, options=[(cygrpc.ChannelArgKey.\n max_send_message_length, -1), (cygrpc.ChannelArgKey.\n max_receive_message_length, -1)])\n', (3624, 3773), False, 'import grpc\n'), ((1381, 1427), 'fate_flow.utils.proto_compatibility.basic_meta_pb2.Endpoint', 'basic_meta_pb2.Endpoint', ([], {'ip': 'IP', 'port': 'GRPC_PORT'}), '(ip=IP, port=GRPC_PORT)\n', (1404, 1427), False, 'from fate_flow.utils.proto_compatibility import basic_meta_pb2\n'), ((1686, 1715), 'fate_flow.utils.proto_compatibility.proxy_pb2.Task', 'proxy_pb2.Task', ([], {'taskId': 'job_id'}), '(taskId=job_id)\n', (1700, 1715), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((1731, 1776), 'fate_flow.utils.proto_compatibility.proxy_pb2.Command', 'proxy_pb2.Command', ([], {'name': 'FATEFLOW_SERVICE_NAME'}), '(name=FATEFLOW_SERVICE_NAME)\n', (1748, 1776), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((1789, 1835), 'fate_flow.utils.proto_compatibility.proxy_pb2.Conf', 'proxy_pb2.Conf', ([], {'overallTimeout': 'overall_timeout'}), '(overallTimeout=overall_timeout)\n', (1803, 1835), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((1848, 1954), 'fate_flow.utils.proto_compatibility.proxy_pb2.Metadata', 'proxy_pb2.Metadata', ([], {'src': '_src', 'dst': '_dst', 'task': '_task', 'command': '_command', 'operator': 'http_method', 'conf': '_conf'}), '(src=_src, dst=_dst, task=_task, command=_command,\n operator=http_method, conf=_conf)\n', (1866, 1954), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((2043, 2085), 'fate_flow.utils.proto_compatibility.proxy_pb2.Packet', 'proxy_pb2.Packet', ([], {'header': '_meta', 'body': '_data'}), '(header=_meta, body=_data)\n', (2059, 2085), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((2646, 2663), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['param'], {}), '(param)\n', (2656, 2663), False, 'from fate_arch.common.base_utils import json_dumps, json_loads\n'), ((2873, 2946), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""grpc request routing header: {source_routing_header}"""'], {}), "(f'grpc request routing header: {source_routing_header}')\n", (2889, 2946), False, 'from fate_flow.settings import IP, GRPC_PORT, stat_logger\n'), ((3353, 3367), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (3363, 3367), False, 'import time\n'), ((4002, 4026), 'time.sleep', 'time.sleep', (['(60 * 60 * 24)'], {}), '(60 * 60 * 24)\n', (4012, 4026), False, 'import time\n'), ((4076, 4087), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4084, 4087), False, 'import sys\n'), ((1999, 2020), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['json_body'], {}), '(json_body)\n', (2009, 2020), False, 'from fate_arch.common.base_utils import json_dumps, json_loads\n'), ((2983, 3005), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['param_dict'], {}), '(param_dict)\n', (2993, 3005), False, 'from fate_arch.common.base_utils import json_dumps, json_loads\n')]
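The extract_api field above is a Python-literal list of call records: each entry opens with a (start, end) character span into the original source file and the fully qualified API name, and ends with the import statement that provides that name. A minimal sketch (assuming that layout) for recovering the records and checking the spans against an intact copy of the code:

import ast

def parse_extract_api(extract_api_field: str) -> list:
    # The field contains only literals (tuples, lists, dicts, strings,
    # numbers, booleans), so ast.literal_eval can rebuild it safely.
    return ast.literal_eval(extract_api_field)

def show_calls(code: str, extract_api_field: str) -> None:
    # The offsets refer to the original multi-line source, so they only
    # line up when `code` is the unmodified file content.
    for (start, end), qualified_name, *rest in parse_extract_api(extract_api_field):
        print(qualified_name, "->", code[start:end])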
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from flask import Flask, request

from fate_arch.common import conf_utils
from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.settings import stat_logger
from fate_flow.utils.api_utils import get_json_result
from fate_flow.utils.service_utils import ServiceUtils

manager = Flask(__name__)


@manager.errorhandler(500)
def internal_server_error(e):
    stat_logger.exception(e)
    return get_json_result(retcode=100, retmsg=str(e))


@manager.route('/get', methods=['POST'])
def get_fate_version_info():
    version = RuntimeConfig.get_env(request.json.get('module', 'FATE'))
    return get_json_result(data={request.json.get('module'): version})


@manager.route('/registry', methods=['POST'])
def service_registry():
    update_server = ServiceUtils.register_service(request.json)
    return get_json_result(data={"update_server": update_server})


@manager.route('/query', methods=['POST'])
def service_query():
    service_info = ServiceUtils.get(request.json.get("service_name"))
    return get_json_result(data={"service_info": service_info})
[ "fate_flow.utils.service_utils.ServiceUtils.register_service", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.api_utils.get_json_result" ]
[((911, 926), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (916, 926), False, 'from flask import Flask, request\n'), ((990, 1014), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1011, 1014), False, 'from fate_flow.settings import stat_logger\n'), ((1377, 1420), 'fate_flow.utils.service_utils.ServiceUtils.register_service', 'ServiceUtils.register_service', (['request.json'], {}), '(request.json)\n', (1406, 1420), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((1432, 1486), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'update_server': update_server}"}), "(data={'update_server': update_server})\n", (1447, 1486), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1634, 1686), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'service_info': service_info}"}), "(data={'service_info': service_info})\n", (1649, 1686), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1178, 1212), 'flask.request.json.get', 'request.json.get', (['"""module"""', '"""FATE"""'], {}), "('module', 'FATE')\n", (1194, 1212), False, 'from flask import Flask, request\n'), ((1589, 1621), 'flask.request.json.get', 'request.json.get', (['"""service_name"""'], {}), "('service_name')\n", (1605, 1621), False, 'from flask import Flask, request\n'), ((1247, 1273), 'flask.request.json.get', 'request.json.get', (['"""module"""'], {}), "('module')\n", (1263, 1273), False, 'from flask import Flask, request\n')]
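The record above defines three POST endpoints (/get, /registry, /query) on a Flask app. A hypothetical client sketch; the host, port, and any URL prefix under which this blueprint is mounted are assumptions, since they are not part of the record:

import requests

FATE_FLOW_BASE = "http://127.0.0.1:9380"  # assumed address and prefix

def get_fate_version(module: str = "FATE") -> dict:
    # Mirrors get_fate_version_info(): POST a JSON body with a "module" key.
    return requests.post(f"{FATE_FLOW_BASE}/get", json={"module": module}).json()

def query_service(service_name: str) -> dict:
    # Mirrors service_query(): POST a JSON body with a "service_name" key.
    return requests.post(f"{FATE_FLOW_BASE}/query", json={"service_name": service_name}).json()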
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil import time from fate_arch.common import log, file_utils, EngineType from fate_arch.storage import StorageEngine, EggRollStorageType from fate_flow.entity.metric import Metric, MetricMeta from fate_flow.utils import job_utils, data_utils from fate_flow.scheduling_apps.client import ControllerClient from fate_arch import storage LOGGER = log.getLogger() class Upload(object): def __init__(self): self.taskid = '' self.tracker = None self.MAX_PARTITIONS = 1024 self.MAX_BYTES = 1024*1024*8 self.parameters = {} self.table = None def run(self, component_parameters=None, args=None): self.parameters = component_parameters["UploadParam"] LOGGER.info(self.parameters) LOGGER.info(args) self.parameters["role"] = component_parameters["role"] self.parameters["local"] = component_parameters["local"] storage_engine = self.parameters["storage_engine"] storage_address = self.parameters["storage_address"] # if not set storage, use job storage as default if not storage_engine: storage_engine = args["job_parameters"].storage_engine if not storage_address: storage_address = args["job_parameters"].engines_address[EngineType.STORAGE] job_id = self.taskid.split("_")[0] if not os.path.isabs(self.parameters.get("file", "")): self.parameters["file"] = os.path.join(file_utils.get_project_base_directory(), self.parameters["file"]) if not os.path.exists(self.parameters["file"]): raise Exception("%s is not exist, please check the configure" % (self.parameters["file"])) if not os.path.getsize(self.parameters["file"]): raise Exception("%s is an empty file" % (self.parameters["file"])) name, namespace = self.parameters.get("name"), self.parameters.get("namespace") _namespace, _table_name = self.generate_table_name(self.parameters["file"]) if namespace is None: namespace = _namespace if name is None: name = _table_name read_head = self.parameters['head'] if read_head == 0: head = False elif read_head == 1: head = True else: raise Exception("'head' in conf.json should be 0 or 1") partitions = self.parameters["partition"] if partitions <= 0 or partitions >= self.MAX_PARTITIONS: raise Exception("Error number of partition, it should between %d and %d" % (0, self.MAX_PARTITIONS)) with storage.Session.build(session_id=job_utils.generate_session_id(self.tracker.task_id, self.tracker.task_version, self.tracker.role, self.tracker.party_id, suffix="storage", random_end=True), namespace=namespace, name=name) as storage_session: if self.parameters.get("destroy", False): table = storage_session.get_table() if table: LOGGER.info(f"destroy table name: {name} namespace: {namespace} engine: {table.get_engine()}") table.destroy() else: LOGGER.info(f"can not found table name: {name} namespace: {namespace}, pass destroy") address_dict = storage_address.copy() with storage.Session.build(session_id=job_utils.generate_session_id(self.tracker.task_id, self.tracker.task_version, self.tracker.role, self.tracker.party_id, 
suffix="storage", random_end=True), storage_engine=storage_engine, options=self.parameters.get("options")) as storage_session: if storage_engine in {StorageEngine.EGGROLL, StorageEngine.STANDALONE}: upload_address = {"name": name, "namespace": namespace, "storage_type": EggRollStorageType.ROLLPAIR_LMDB} elif storage_engine in {StorageEngine.MYSQL}: upload_address = {"db": namespace, "name": name} elif storage_engine in {StorageEngine.HDFS}: upload_address = {"path": data_utils.default_input_fs_path(name=name, namespace=namespace, prefix=address_dict.get("path_prefix"))} else: raise RuntimeError(f"can not support this storage engine: {storage_engine}") address_dict.update(upload_address) LOGGER.info(f"upload to {storage_engine} storage, address: {address_dict}") address = storage.StorageTableMeta.create_address(storage_engine=storage_engine, address_dict=address_dict) self.parameters["partitions"] = partitions self.parameters["name"] = name self.table = storage_session.create_table(address=address, **self.parameters) data_table_count = self.save_data_table(job_id, name, namespace, head) self.table.get_meta().update_metas(in_serialized=True) LOGGER.info("------------load data finish!-----------------") # rm tmp file try: if '{}/fate_upload_tmp'.format(job_id) in self.parameters['file']: LOGGER.info("remove tmp upload file") shutil.rmtree(os.path.join(self.parameters["file"].split('tmp')[0], 'tmp')) except: LOGGER.info("remove tmp file failed") LOGGER.info("file: {}".format(self.parameters["file"])) LOGGER.info("total data_count: {}".format(data_table_count)) LOGGER.info("table name: {}, table namespace: {}".format(name, namespace)) def set_taskid(self, taskid): self.taskid = taskid def set_tracker(self, tracker): self.tracker = tracker def save_data_table(self, job_id, dst_table_name, dst_table_namespace, head=True): input_file = self.parameters["file"] input_feature_count = self.get_count(input_file) with open(input_file, 'r') as fin: lines_count = 0 if head is True: data_head = fin.readline() input_feature_count -= 1 self.table.get_meta().update_metas(schema=data_utils.get_header_schema(header_line=data_head, id_delimiter=self.parameters["id_delimiter"])) n = 0 while True: data = list() lines = fin.readlines(self.MAX_BYTES) if lines: for line in lines: values = line.rstrip().split(self.parameters["id_delimiter"]) data.append((values[0], data_utils.list_to_str(values[1:], id_delimiter=self.parameters["id_delimiter"]))) lines_count += len(data) save_progress = lines_count/input_feature_count*100//1 job_info = {'progress': save_progress, "job_id": job_id, "role": self.parameters["local"]['role'], "party_id": self.parameters["local"]['party_id']} ControllerClient.update_job(job_info=job_info) self.table.put_all(data) if n == 0: self.table.get_meta().update_metas(part_of_data=data) else: table_count = self.table.count() self.table.get_meta().update_metas(count=table_count, partitions=self.parameters["partition"]) self.tracker.log_output_data_info(data_name='upload', table_namespace=dst_table_namespace, table_name=dst_table_name) self.tracker.log_metric_data(metric_namespace="upload", metric_name="data_access", metrics=[Metric("count", table_count)]) self.tracker.set_metric_meta(metric_namespace="upload", metric_name="data_access", metric_meta=MetricMeta(name='upload', metric_type='UPLOAD')) return table_count n += 1 def get_count(self, input_file): with open(input_file, 'r', encoding='utf-8') as fp: count = 0 for line in fp: count += 1 return count def generate_table_name(self, 
input_file_path): str_time = time.strftime("%Y%m%d%H%M%S", time.localtime()) file_name = input_file_path.split(".")[0] file_name = file_name.split("/")[-1] return file_name, str_time def save_data(self): return None def export_model(self): return None
[ "fate_flow.utils.data_utils.get_header_schema", "fate_flow.utils.data_utils.list_to_str", "fate_flow.entity.metric.MetricMeta", "fate_flow.utils.job_utils.generate_session_id", "fate_flow.entity.metric.Metric", "fate_flow.scheduling_apps.client.ControllerClient.update_job" ]
[((981, 996), 'fate_arch.common.log.getLogger', 'log.getLogger', ([], {}), '()\n', (994, 996), False, 'from fate_arch.common import log, file_utils, EngineType\n'), ((2170, 2209), 'os.path.exists', 'os.path.exists', (["self.parameters['file']"], {}), "(self.parameters['file'])\n", (2184, 2209), False, 'import os\n'), ((2329, 2369), 'os.path.getsize', 'os.path.getsize', (["self.parameters['file']"], {}), "(self.parameters['file'])\n", (2344, 2369), False, 'import os\n'), ((5046, 5147), 'fate_arch.storage.StorageTableMeta.create_address', 'storage.StorageTableMeta.create_address', ([], {'storage_engine': 'storage_engine', 'address_dict': 'address_dict'}), '(storage_engine=storage_engine,\n address_dict=address_dict)\n', (5085, 5147), False, 'from fate_arch import storage\n'), ((8975, 8991), 'time.localtime', 'time.localtime', ([], {}), '()\n', (8989, 8991), False, 'import time\n'), ((2089, 2128), 'fate_arch.common.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (2126, 2128), False, 'from fate_arch.common import log, file_utils, EngineType\n'), ((3248, 3413), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', (['self.tracker.task_id', 'self.tracker.task_version', 'self.tracker.role', 'self.tracker.party_id'], {'suffix': '"""storage"""', 'random_end': '(True)'}), "(self.tracker.task_id, self.tracker.\n task_version, self.tracker.role, self.tracker.party_id, suffix=\n 'storage', random_end=True)\n", (3277, 3413), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((3960, 4125), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', (['self.tracker.task_id', 'self.tracker.task_version', 'self.tracker.role', 'self.tracker.party_id'], {'suffix': '"""storage"""', 'random_end': '(True)'}), "(self.tracker.task_id, self.tracker.\n task_version, self.tracker.role, self.tracker.party_id, suffix=\n 'storage', random_end=True)\n", (3989, 4125), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((7474, 7520), 'fate_flow.scheduling_apps.client.ControllerClient.update_job', 'ControllerClient.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (7501, 7520), False, 'from fate_flow.scheduling_apps.client import ControllerClient\n'), ((6658, 6760), 'fate_flow.utils.data_utils.get_header_schema', 'data_utils.get_header_schema', ([], {'header_line': 'data_head', 'id_delimiter': "self.parameters['id_delimiter']"}), "(header_line=data_head, id_delimiter=self.\n parameters['id_delimiter'])\n", (6686, 6760), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((8566, 8613), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""upload"""', 'metric_type': '"""UPLOAD"""'}), "(name='upload', metric_type='UPLOAD')\n", (8576, 8613), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n'), ((7082, 7167), 'fate_flow.utils.data_utils.list_to_str', 'data_utils.list_to_str', (['values[1:]'], {'id_delimiter': "self.parameters['id_delimiter']"}), "(values[1:], id_delimiter=self.parameters['id_delimiter']\n )\n", (7104, 7167), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((8322, 8350), 'fate_flow.entity.metric.Metric', 'Metric', (['"""count"""', 'table_count'], {}), "('count', table_count)\n", (8328, 8350), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n')]
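Upload.run() in the record above reads its configuration from component_parameters. A hypothetical shape of that dictionary, inferred only from the keys the code accesses; all values are illustrative:

component_parameters = {
    "role": {"local": [0]},                     # roles section passed through by the framework
    "local": {"role": "local", "party_id": 0},  # used for progress reporting
    "UploadParam": {
        "file": "examples/data/upload_demo.csv",  # illustrative path; made absolute against the project base if relative
        "head": 1,                 # 1: first line is a header, 0: no header
        "partition": 4,            # must lie between 0 and MAX_PARTITIONS
        "id_delimiter": ",",
        "name": "upload_demo",
        "namespace": "experiment",
        "storage_engine": "",      # empty: fall back to the job's storage engine
        "storage_address": None,   # empty: fall back to the job's storage address
        "destroy": False,          # drop an existing table with the same name/namespace first
    },
}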
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil from ruamel import yaml from datetime import datetime from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel from fate_flow.pipelined_model import pipelined_model from fate_arch.common.base_utils import json_loads, json_dumps from fate_arch.common.file_utils import get_project_base_directory from fate_flow.settings import stat_logger from fate_flow.utils import model_utils from federatedml.protobuf.model_migrate.model_migrate import model_migration from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter def gen_model_file_path(model_id, model_version): return os.path.join(get_project_base_directory(), "model_local_cache", model_id, model_version) def compare_roles(request_conf_roles: dict, run_time_conf_roles: dict): if request_conf_roles.keys() == run_time_conf_roles.keys(): verify_format = True verify_equality = True for key in request_conf_roles.keys(): verify_format = verify_format and (len(request_conf_roles[key]) == len(run_time_conf_roles[key])) and (isinstance(request_conf_roles[key], list)) request_conf_roles_set = set(str(item) for item in request_conf_roles[key]) run_time_conf_roles_set = set(str(item) for item in run_time_conf_roles[key]) verify_equality = verify_equality and (request_conf_roles_set == run_time_conf_roles_set) if not verify_format: raise Exception("The structure of roles data of local configuration is different from " "model runtime configuration's. Migration aborting.") else: return verify_equality raise Exception("The structure of roles data of local configuration is different from " "model runtime configuration's. Migration aborting.") def import_from_files(config: dict): model = pipelined_model.PipelinedModel(model_id=config["model_id"], model_version=config["model_version"]) if config['force']: model.force = True model.unpack_model(config["file"]) def import_from_db(config: dict): model_path = gen_model_file_path(config["model_id"], config["model_version"]) if config['force']: os.rename(model_path, model_path + '_backup_{}'.format(datetime.now().strftime('%Y%m%d%H%M'))) def migration(config_data: dict): try: party_model_id = model_utils.gen_party_model_id(model_id=config_data["model_id"], role=config_data["local"]["role"], party_id=config_data["local"]["party_id"]) model = pipelined_model.PipelinedModel(model_id=party_model_id, model_version=config_data["model_version"]) if not model.exists(): raise Exception("Can not found {} {} model local cache".format(config_data["model_id"], config_data["model_version"])) with DB.connection_context(): if MLModel.get_or_none(MLModel.f_model_version == config_data["unify_model_version"]): raise Exception("Unify model version {} has been occupied in database. 
" "Please choose another unify model version and try again.".format( config_data["unify_model_version"])) model_data = model.collect_models(in_bytes=True) if "pipeline.pipeline:Pipeline" not in model_data: raise Exception("Can not found pipeline file in model.") migrate_model = pipelined_model.PipelinedModel(model_id=model_utils.gen_party_model_id(model_id=model_utils.gen_model_id(config_data["migrate_role"]), role=config_data["local"]["role"], party_id=config_data["local"]["migrate_party_id"]), model_version=config_data["unify_model_version"]) # migrate_model.create_pipelined_model() shutil.copytree(src=model.model_path, dst=migrate_model.model_path) pipeline = migrate_model.read_component_model('pipeline', 'pipeline')['Pipeline'] # Utilize Pipeline_model collect model data. And modify related inner information of model train_runtime_conf = json_loads(pipeline.train_runtime_conf) train_runtime_conf["role"] = config_data["migrate_role"] train_runtime_conf["initiator"] = config_data["migrate_initiator"] adapter = JobRuntimeConfigAdapter(train_runtime_conf) train_runtime_conf = adapter.update_model_id_version(model_id=model_utils.gen_model_id(train_runtime_conf["role"]), model_version=migrate_model.model_version) # update pipeline.pb file pipeline.train_runtime_conf = json_dumps(train_runtime_conf, byte=True) pipeline.model_id = bytes(adapter.get_common_parameters().to_dict().get("model_id"), "utf-8") pipeline.model_version = bytes(adapter.get_common_parameters().to_dict().get("model_version"), "utf-8") if model_utils.compare_version(pipeline.fate_version, '1.5.0') == 'gt': pipeline.initiator_role = config_data["migrate_initiator"]['role'] pipeline.initiator_party_id = config_data["migrate_initiator"]['party_id'] # save updated pipeline.pb file migrate_model.save_pipeline(pipeline) shutil.copyfile(os.path.join(migrate_model.model_path, "pipeline.pb"), os.path.join(migrate_model.model_path, "variables", "data", "pipeline", "pipeline", "Pipeline")) # modify proto with open(os.path.join(migrate_model.model_path, 'define', 'define_meta.yaml'), 'r') as fin: define_yaml = yaml.safe_load(fin) for key, value in define_yaml['model_proto'].items(): if key == 'pipeline': continue for v in value.keys(): buffer_obj = migrate_model.read_component_model(key, v) module_name = define_yaml['component_define'].get(key, {}).get('module_name') modified_buffer = model_migration(model_contents=buffer_obj, module_name=module_name, old_guest_list=config_data['role']['guest'], new_guest_list=config_data['migrate_role']['guest'], old_host_list=config_data['role']['host'], new_host_list=config_data['migrate_role']['host'], old_arbiter_list=config_data.get('role', {}).get('arbiter', None), new_arbiter_list=config_data.get('migrate_role', {}).get('arbiter', None)) migrate_model.save_component_model(component_name=key, component_module_name=module_name, model_alias=v, model_buffers=modified_buffer) archive_path = migrate_model.packaging_model() shutil.rmtree(os.path.abspath(migrate_model.model_path)) return (0, f"Migrating model successfully. " \ "The configuration of model has been modified automatically. " \ "New model id is: {}, model version is: {}. " \ "Model files can be found at '{}'.".format(adapter.get_common_parameters().to_dict().get("model_id"), migrate_model.model_version, os.path.abspath(archive_path)), {"model_id": migrate_model.model_id, "model_version": migrate_model.model_version, "path": os.path.abspath(archive_path)}) except Exception as e: stat_logger.exception(e) return 100, str(e), {}
[ "fate_flow.db.db_models.MachineLearningModelInfo.get_or_none", "fate_flow.utils.config_adapter.JobRuntimeConfigAdapter", "fate_flow.utils.model_utils.compare_version", "fate_flow.pipelined_model.pipelined_model.PipelinedModel", "fate_flow.db.db_models.DB.connection_context", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.model_utils.gen_party_model_id", "fate_flow.utils.model_utils.gen_model_id" ]
[((2493, 2596), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': "config['model_id']", 'model_version': "config['model_version']"}), "(model_id=config['model_id'], model_version=\n config['model_version'])\n", (2523, 2596), False, 'from fate_flow.pipelined_model import pipelined_model\n'), ((1257, 1285), 'fate_arch.common.file_utils.get_project_base_directory', 'get_project_base_directory', ([], {}), '()\n', (1283, 1285), False, 'from fate_arch.common.file_utils import get_project_base_directory\n'), ((3040, 3187), 'fate_flow.utils.model_utils.gen_party_model_id', 'model_utils.gen_party_model_id', ([], {'model_id': "config_data['model_id']", 'role': "config_data['local']['role']", 'party_id': "config_data['local']['party_id']"}), "(model_id=config_data['model_id'], role=\n config_data['local']['role'], party_id=config_data['local']['party_id'])\n", (3070, 3187), False, 'from fate_flow.utils import model_utils\n'), ((3311, 3415), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': 'party_model_id', 'model_version': "config_data['model_version']"}), "(model_id=party_model_id, model_version=\n config_data['model_version'])\n", (3341, 3415), False, 'from fate_flow.pipelined_model import pipelined_model\n'), ((4863, 4930), 'shutil.copytree', 'shutil.copytree', ([], {'src': 'model.model_path', 'dst': 'migrate_model.model_path'}), '(src=model.model_path, dst=migrate_model.model_path)\n', (4878, 4930), False, 'import shutil\n'), ((5151, 5190), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.train_runtime_conf'], {}), '(pipeline.train_runtime_conf)\n', (5161, 5190), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((5350, 5393), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['train_runtime_conf'], {}), '(train_runtime_conf)\n', (5373, 5393), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((5695, 5736), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['train_runtime_conf'], {'byte': '(True)'}), '(train_runtime_conf, byte=True)\n', (5705, 5736), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((3708, 3731), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (3729, 3731), False, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((3748, 3835), 'fate_flow.db.db_models.MachineLearningModelInfo.get_or_none', 'MLModel.get_or_none', (["(MLModel.f_model_version == config_data['unify_model_version'])"], {}), "(MLModel.f_model_version == config_data[\n 'unify_model_version'])\n", (3767, 3835), True, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((5963, 6022), 'fate_flow.utils.model_utils.compare_version', 'model_utils.compare_version', (['pipeline.fate_version', '"""1.5.0"""'], {}), "(pipeline.fate_version, '1.5.0')\n", (5990, 6022), False, 'from fate_flow.utils import model_utils\n'), ((6309, 6362), 'os.path.join', 'os.path.join', (['migrate_model.model_path', '"""pipeline.pb"""'], {}), "(migrate_model.model_path, 'pipeline.pb')\n", (6321, 6362), False, 'import os\n'), ((6388, 6487), 'os.path.join', 'os.path.join', (['migrate_model.model_path', '"""variables"""', '"""data"""', '"""pipeline"""', '"""pipeline"""', '"""Pipeline"""'], {}), "(migrate_model.model_path, 'variables', 'data', 'pipeline',\n 'pipeline', 'Pipeline')\n", 
(6400, 6487), False, 'import os\n'), ((6636, 6655), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['fin'], {}), '(fin)\n', (6650, 6655), False, 'from ruamel import yaml\n'), ((8046, 8087), 'os.path.abspath', 'os.path.abspath', (['migrate_model.model_path'], {}), '(migrate_model.model_path)\n', (8061, 8087), False, 'import os\n'), ((8806, 8830), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (8827, 8830), False, 'from fate_flow.settings import stat_logger\n'), ((5464, 5516), 'fate_flow.utils.model_utils.gen_model_id', 'model_utils.gen_model_id', (["train_runtime_conf['role']"], {}), "(train_runtime_conf['role'])\n", (5488, 5516), False, 'from fate_flow.utils import model_utils\n'), ((6527, 6595), 'os.path.join', 'os.path.join', (['migrate_model.model_path', '"""define"""', '"""define_meta.yaml"""'], {}), "(migrate_model.model_path, 'define', 'define_meta.yaml')\n", (6539, 6595), False, 'import os\n'), ((8565, 8594), 'os.path.abspath', 'os.path.abspath', (['archive_path'], {}), '(archive_path)\n', (8580, 8594), False, 'import os\n'), ((8738, 8767), 'os.path.abspath', 'os.path.abspath', (['archive_path'], {}), '(archive_path)\n', (8753, 8767), False, 'import os\n'), ((4368, 4421), 'fate_flow.utils.model_utils.gen_model_id', 'model_utils.gen_model_id', (["config_data['migrate_role']"], {}), "(config_data['migrate_role'])\n", (4392, 4421), False, 'from fate_flow.utils import model_utils\n'), ((2930, 2944), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2942, 2944), False, 'from datetime import datetime\n')]
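migration() in the record above drives the whole model migration from a single config_data dictionary. A hypothetical example, again inferred from the keys the code reads; the ids, versions, and party ids are placeholders:

config_data = {
    "model_id": "arbiter-10000#guest-9999#host-10000#model",   # placeholder
    "model_version": "202101010000000000000",                   # placeholder
    "unify_model_version": "202101010000000000001",             # new version for the migrated model
    "local": {"role": "guest", "party_id": 9999, "migrate_party_id": 99},
    "role": {"guest": [9999], "host": [10000], "arbiter": [10000]},
    "migrate_role": {"guest": [99], "host": [100], "arbiter": [100]},
    "migrate_initiator": {"role": "guest", "party_id": 99},
}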
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import os

from flask import Flask, request

from arch.api.utils import file_utils
from fate_flow.settings import JOB_MODULE_CONF
from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT
from fate_flow.utils.api_utils import get_json_result, request_execute_server
from fate_flow.utils.job_utils import generate_job_id, get_job_directory, new_runtime_conf, run_subprocess
from fate_flow.utils import detect_utils
from fate_flow.entity.constant_config import WorkMode
from fate_flow.entity.runtime_config import RuntimeConfig

manager = Flask(__name__)


@manager.errorhandler(500)
def internal_server_error(e):
    stat_logger.exception(e)
    return get_json_result(retcode=100, retmsg=str(e))


@manager.route('/<data_func>', methods=['post'])
def download_upload(data_func):
    request_config = request.json
    _job_id = generate_job_id()
    stat_logger.info('generated job_id {}, body {}'.format(_job_id, request_config))
    _job_dir = get_job_directory(_job_id)
    os.makedirs(_job_dir, exist_ok=True)
    module = data_func
    required_arguments = ['work_mode', 'namespace', 'table_name']
    if module == 'upload':
        required_arguments.extend(['file', 'head', 'partition'])
    elif module == 'download':
        required_arguments.extend(['output_path'])
    else:
        raise Exception('can not support this operating: {}'.format(module))
    detect_utils.check_config(request_config, required_arguments=required_arguments)
    job_work_mode = request_config['work_mode']
    # todo: The current code here is redundant with job_app/submit_job, the next version of this function will be implemented by job_app/submit_job
    if job_work_mode != RuntimeConfig.WORK_MODE:
        if RuntimeConfig.WORK_MODE == WorkMode.CLUSTER and job_work_mode == WorkMode.STANDALONE:
            # use cluster standalone job server to execute standalone job
            return request_execute_server(request=request, execute_host='{}:{}'.format(request.remote_addr, CLUSTER_STANDALONE_JOB_SERVER_PORT))
        else:
            raise Exception('server run on standalone can not support cluster mode job')
    if module == "upload":
        if not os.path.isabs(request_config['file']):
            request_config["file"] = os.path.join(file_utils.get_project_base_directory(), request_config["file"])
    try:
        conf_file_path = new_runtime_conf(job_dir=_job_dir, method=data_func, module=module,
                                          role=request_config.get('local', {}).get("role"),
                                          party_id=request_config.get('local', {}).get("party_id", ''))
        file_utils.dump_json_conf(request_config, conf_file_path)
        progs = ["python3",
                 os.path.join(file_utils.get_project_base_directory(), JOB_MODULE_CONF[module]["module_path"]),
                 "-j", _job_id,
                 "-c", conf_file_path
                 ]
        try:
            p = run_subprocess(config_dir=_job_dir, process_cmd=progs)
        except Exception as e:
            stat_logger.exception(e)
            p = None
        return get_json_result(retcode=(0 if p else 101), job_id=_job_id,
                               data={'table_name': request_config['table_name'],
                                     'namespace': request_config['namespace'],
                                     'pid': p.pid if p else ''})
    except Exception as e:
        stat_logger.exception(e)
        return get_json_result(retcode=-104, retmsg="failed", job_id=_job_id)
[ "fate_flow.utils.job_utils.run_subprocess", "fate_flow.utils.detect_utils.check_config", "fate_flow.utils.job_utils.generate_job_id", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.api_utils.get_json_result", "fate_flow.utils.job_utils.get_job_directory" ]
[((1174, 1189), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1179, 1189), False, 'from flask import Flask, request\n'), ((1253, 1277), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1274, 1277), False, 'from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT\n'), ((1464, 1481), 'fate_flow.utils.job_utils.generate_job_id', 'generate_job_id', ([], {}), '()\n', (1479, 1481), False, 'from fate_flow.utils.job_utils import generate_job_id, get_job_directory, new_runtime_conf, run_subprocess\n'), ((1582, 1608), 'fate_flow.utils.job_utils.get_job_directory', 'get_job_directory', (['_job_id'], {}), '(_job_id)\n', (1599, 1608), False, 'from fate_flow.utils.job_utils import generate_job_id, get_job_directory, new_runtime_conf, run_subprocess\n'), ((1613, 1649), 'os.makedirs', 'os.makedirs', (['_job_dir'], {'exist_ok': '(True)'}), '(_job_dir, exist_ok=True)\n', (1624, 1649), False, 'import os\n'), ((2004, 2089), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['request_config'], {'required_arguments': 'required_arguments'}), '(request_config, required_arguments=required_arguments\n )\n', (2029, 2089), False, 'from fate_flow.utils import detect_utils\n'), ((3252, 3309), 'arch.api.utils.file_utils.dump_json_conf', 'file_utils.dump_json_conf', (['request_config', 'conf_file_path'], {}), '(request_config, conf_file_path)\n', (3277, 3309), False, 'from arch.api.utils import file_utils\n'), ((3727, 3911), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0 if p else 101)', 'job_id': '_job_id', 'data': "{'table_name': request_config['table_name'], 'namespace': request_config[\n 'namespace'], 'pid': p.pid if p else ''}"}), "(retcode=0 if p else 101, job_id=_job_id, data={'table_name':\n request_config['table_name'], 'namespace': request_config['namespace'],\n 'pid': p.pid if p else ''})\n", (3742, 3911), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((2792, 2829), 'os.path.isabs', 'os.path.isabs', (["request_config['file']"], {}), "(request_config['file'])\n", (2805, 2829), False, 'import os\n'), ((3568, 3622), 'fate_flow.utils.job_utils.run_subprocess', 'run_subprocess', ([], {'config_dir': '_job_dir', 'process_cmd': 'progs'}), '(config_dir=_job_dir, process_cmd=progs)\n', (3582, 3622), False, 'from fate_flow.utils.job_utils import generate_job_id, get_job_directory, new_runtime_conf, run_subprocess\n'), ((4009, 4033), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (4030, 4033), False, 'from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT\n'), ((4049, 4111), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(-104)', 'retmsg': '"""failed"""', 'job_id': '_job_id'}), "(retcode=-104, retmsg='failed', job_id=_job_id)\n", (4064, 4111), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((2881, 2920), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (2918, 2920), False, 'from arch.api.utils import file_utils\n'), ((3368, 3407), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (3405, 3407), False, 'from arch.api.utils import file_utils\n'), ((3666, 3690), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (3687, 3690), 
False, 'from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT\n')]
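download_upload() in the record above validates its JSON body against required_arguments before spawning the worker process. A hypothetical upload request; the host, port, and any URL prefix are assumptions, while the payload keys come from the handler itself:

import requests

upload_conf = {
    "work_mode": 0,            # must match the server's work mode unless it can proxy to a standalone job server
    "namespace": "experiment",
    "table_name": "upload_demo",
    "file": "examples/data/upload_demo.csv",  # illustrative path; made absolute by the handler if relative
    "head": 1,
    "partition": 4,
}
resp = requests.post("http://127.0.0.1:9380/upload", json=upload_conf)  # assumed address
print(resp.json())  # contains table_name, namespace and the worker pid on success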
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_arch.common import EngineType from fate_arch.common import engine_utils from fate_arch.common.base_utils import json_dumps, current_timestamp from fate_arch.computing import ComputingEngine from fate_flow.controller.task_controller import TaskController from fate_flow.db.job_default_config import JobDefaultConfig from fate_flow.db.runtime_config import RuntimeConfig from fate_flow.entity import RunParameters from fate_flow.entity.run_status import JobStatus, EndStatus from fate_flow.entity.types import InputSearchType, WorkerName from fate_flow.manager.provider_manager import ProviderManager from fate_flow.manager.resource_manager import ResourceManager from fate_flow.manager.worker_manager import WorkerManager from fate_flow.operation.job_saver import JobSaver from fate_flow.operation.job_tracker import Tracker from fate_flow.protobuf.python import pipeline_pb2 from fate_flow.settings import USE_AUTHENTICATION, USE_DATA_AUTHENTICATION, ENGINES from fate_flow.utils import job_utils, schedule_utils, data_utils from fate_flow.utils.authentication_utils import authentication_check from fate_flow.utils.authentication_utils import data_authentication_check from fate_flow.utils.log_utils import schedule_logger class JobController(object): @classmethod def create_job(cls, job_id, role, party_id, job_info): # parse job configuration dsl = job_info['dsl'] runtime_conf = job_info['runtime_conf'] train_runtime_conf = job_info['train_runtime_conf'] if USE_AUTHENTICATION: authentication_check(src_role=job_info.get('src_role', None), src_party_id=job_info.get('src_party_id', None), dsl=dsl, runtime_conf=runtime_conf, role=role, party_id=party_id) dsl_parser = schedule_utils.get_job_dsl_parser(dsl=dsl, runtime_conf=runtime_conf, train_runtime_conf=train_runtime_conf) job_parameters = dsl_parser.get_job_parameters(runtime_conf) schedule_logger(job_id).info('job parameters:{}'.format(job_parameters)) dest_user = job_parameters.get(role, {}).get(party_id, {}).get('user', '') user = {} src_party_id = int(job_info['src_party_id']) if job_info.get('src_party_id') else 0 src_role = job_info.get('src_role', '') src_user = job_parameters.get(src_role, {}).get(src_party_id, {}).get('user', '') if src_role else '' for _role, party_id_item in job_parameters.items(): user[_role] = {} for _party_id, _parameters in party_id_item.items(): user[_role][_party_id] = _parameters.get("user", "") schedule_logger(job_id).info('job user:{}'.format(user)) if USE_DATA_AUTHENTICATION: job_args = dsl_parser.get_args_input() schedule_logger(job_id).info('job args:{}'.format(job_args)) dataset_dict = cls.get_dataset(False, role, party_id, runtime_conf.get("role"), job_args) dataset_list = [] if dataset_dict.get(role, {}).get(party_id): for k, v in dataset_dict[role][party_id].items(): dataset_list.append({"namespace": v.split('.')[0], "table_name": v.split('.')[1]}) data_authentication_check(src_role=job_info.get('src_role'), 
src_party_id=job_info.get('src_party_id'), src_user=src_user, dest_user=dest_user, dataset_list=dataset_list) job_parameters = RunParameters(**job_parameters.get(role, {}).get(party_id, {})) # save new job into db if role == job_info["initiator_role"] and party_id == job_info["initiator_party_id"]: is_initiator = True else: is_initiator = False job_info["status"] = JobStatus.READY job_info["user_id"] = dest_user job_info["src_user"] = src_user job_info["user"] = user # this party configuration job_info["role"] = role job_info["party_id"] = party_id job_info["is_initiator"] = is_initiator job_info["progress"] = 0 cls.create_job_parameters_on_party(role=role, party_id=party_id, job_parameters=job_parameters) # update job parameters on party job_info["runtime_conf_on_party"]["job_parameters"] = job_parameters.to_dict() JobSaver.create_job(job_info=job_info) initialized_result, provider_group = cls.initialize_tasks(job_id=job_id, role=role, party_id=party_id, run_on_this_party=True, initiator_role=job_info["initiator_role"], initiator_party_id=job_info["initiator_party_id"], job_parameters=job_parameters, dsl_parser=dsl_parser) for provider_key, group_info in provider_group.items(): for cpn in group_info["components"]: dsl["components"][cpn]["provider"] = provider_key roles = job_info['roles'] cls.initialize_job_tracker(job_id=job_id, role=role, party_id=party_id, job_parameters=job_parameters, roles=roles, is_initiator=is_initiator, dsl_parser=dsl_parser) job_utils.save_job_conf(job_id=job_id, role=role, party_id=party_id, dsl=dsl, runtime_conf=runtime_conf, runtime_conf_on_party=job_info["runtime_conf_on_party"], train_runtime_conf=train_runtime_conf, pipeline_dsl=None) return {"components": initialized_result} @classmethod def set_federated_mode(cls, job_parameters: RunParameters): if not job_parameters.federated_mode: job_parameters.federated_mode = ENGINES["federated_mode"] @classmethod def set_engines(cls, job_parameters: RunParameters, engine_type=None): engines = engine_utils.get_engines() if not engine_type: engine_type = {EngineType.COMPUTING, EngineType.FEDERATION, EngineType.STORAGE} for k in engine_type: setattr(job_parameters, f"{k}_engine", engines[k]) @classmethod def create_common_job_parameters(cls, job_id, initiator_role, common_job_parameters: RunParameters): JobController.set_federated_mode(job_parameters=common_job_parameters) JobController.set_engines(job_parameters=common_job_parameters, engine_type={EngineType.COMPUTING}) JobController.fill_default_job_parameters(job_id=job_id, job_parameters=common_job_parameters) JobController.adapt_job_parameters(role=initiator_role, job_parameters=common_job_parameters, create_initiator_baseline=True) @classmethod def create_job_parameters_on_party(cls, role, party_id, job_parameters: RunParameters): JobController.set_engines(job_parameters=job_parameters) cls.fill_party_specific_parameters(role=role, party_id=party_id, job_parameters=job_parameters) @classmethod def fill_party_specific_parameters(cls, role, party_id, job_parameters: RunParameters): cls.adapt_job_parameters(role=role, job_parameters=job_parameters) engines_info = cls.get_job_engines_address(job_parameters=job_parameters) cls.check_parameters(job_parameters=job_parameters, role=role, party_id=party_id, engines_info=engines_info) @classmethod def fill_default_job_parameters(cls, job_id, job_parameters: RunParameters): keys = {"task_parallelism", "auto_retries", "auto_retry_delay", "federated_status_collect_type"} for key in keys: if hasattr(job_parameters, key) and 
getattr(job_parameters, key) is None: if hasattr(JobDefaultConfig, key): setattr(job_parameters, key, getattr(JobDefaultConfig, key)) else: schedule_logger(job_id).warning(f"can not found {key} job parameter default value from job_default_settings") @classmethod def adapt_job_parameters(cls, role, job_parameters: RunParameters, create_initiator_baseline=False): ResourceManager.adapt_engine_parameters( role=role, job_parameters=job_parameters, create_initiator_baseline=create_initiator_baseline) if create_initiator_baseline: if job_parameters.task_parallelism is None: job_parameters.task_parallelism = JobDefaultConfig.task_parallelism if job_parameters.federated_status_collect_type is None: job_parameters.federated_status_collect_type = JobDefaultConfig.federated_status_collect_type if create_initiator_baseline and not job_parameters.computing_partitions: job_parameters.computing_partitions = job_parameters.adaptation_parameters[ "task_cores_per_node"] * job_parameters.adaptation_parameters["task_nodes"] @classmethod def get_job_engines_address(cls, job_parameters: RunParameters): engines_info = {} engine_list = [ (EngineType.COMPUTING, job_parameters.computing_engine), (EngineType.FEDERATION, job_parameters.federation_engine), (EngineType.STORAGE, job_parameters.storage_engine) ] for engine_type, engine_name in engine_list: engine_info = ResourceManager.get_engine_registration_info( engine_type=engine_type, engine_name=engine_name) job_parameters.engines_address[engine_type] = engine_info.f_engine_config if engine_info else {} engines_info[engine_type] = engine_info return engines_info @classmethod def check_parameters(cls, job_parameters: RunParameters, role, party_id, engines_info): status, cores_submit, max_cores_per_job = ResourceManager.check_resource_apply( job_parameters=job_parameters, role=role, party_id=party_id, engines_info=engines_info) if not status: msg = "" msg2 = "default value is fate_flow/settings.py#DEFAULT_TASK_CORES_PER_NODE, refer fate_flow/examples/simple/simple_job_conf.json" if job_parameters.computing_engine in {ComputingEngine.EGGROLL, ComputingEngine.STANDALONE}: msg = "please use task_cores job parameters to set request task cores or you can customize it with eggroll_run job parameters" elif job_parameters.computing_engine in {ComputingEngine.SPARK}: msg = "please use task_cores job parameters to set request task cores or you can customize it with spark_run job parameters" raise RuntimeError( f"max cores per job is {max_cores_per_job} base on (fate_flow/settings#MAX_CORES_PERCENT_PER_JOB * conf/service_conf.yaml#nodes * conf/service_conf.yaml#cores_per_node), expect {cores_submit} cores, {msg}, {msg2}") @classmethod def gen_updated_parameters(cls, job_id, initiator_role, initiator_party_id, input_job_parameters, input_component_parameters): # todo: check can not update job parameters job_configuration = job_utils.get_job_configuration(job_id=job_id, role=initiator_role, party_id=initiator_party_id) updated_job_parameters = job_configuration.runtime_conf["job_parameters"] updated_component_parameters = job_configuration.runtime_conf["component_parameters"] if input_job_parameters: if input_job_parameters.get("common"): common_job_parameters = RunParameters(**input_job_parameters["common"]) cls.create_common_job_parameters(job_id=job_id, initiator_role=initiator_role, common_job_parameters=common_job_parameters) for attr in {"model_id", "model_version"}: setattr(common_job_parameters, attr, updated_job_parameters["common"].get(attr)) 
updated_job_parameters["common"] = common_job_parameters.to_dict() # not support role updated_components = set() if input_component_parameters: cls.merge_update(input_component_parameters, updated_component_parameters) return updated_job_parameters, updated_component_parameters, list(updated_components) @classmethod def merge_update(cls, inputs: dict, results: dict): if not isinstance(inputs, dict) or not isinstance(results, dict): raise ValueError(f"must both dict, but {type(inputs)} inputs and {type(results)} results") for k, v in inputs.items(): if k not in results: results[k] = v elif isinstance(v, dict): cls.merge_update(v, results[k]) else: results[k] = v @classmethod def update_parameter(cls, job_id, role, party_id, updated_parameters: dict): job_configuration = job_utils.get_job_configuration(job_id=job_id, role=role, party_id=party_id) job_parameters = updated_parameters.get("job_parameters") component_parameters = updated_parameters.get("component_parameters") if job_parameters: job_configuration.runtime_conf["job_parameters"] = job_parameters job_parameters = RunParameters(**job_parameters["common"]) cls.create_job_parameters_on_party(role=role, party_id=party_id, job_parameters=job_parameters) job_configuration.runtime_conf_on_party["job_parameters"] = job_parameters.to_dict() if component_parameters: job_configuration.runtime_conf["component_parameters"] = component_parameters job_configuration.runtime_conf_on_party["component_parameters"] = component_parameters job_info = {} job_info["job_id"] = job_id job_info["role"] = role job_info["party_id"] = party_id job_info["runtime_conf"] = job_configuration.runtime_conf job_info["runtime_conf_on_party"] = job_configuration.runtime_conf_on_party JobSaver.update_job(job_info) @classmethod def initialize_task(cls, role, party_id, task_info: dict): task_info["role"] = role task_info["party_id"] = party_id initialized_result, provider_group = cls.initialize_tasks(components=[task_info["component_name"]], **task_info) return initialized_result @classmethod def initialize_tasks(cls, job_id, role, party_id, run_on_this_party, initiator_role, initiator_party_id, job_parameters: RunParameters = None, dsl_parser=None, components: list = None, **kwargs): common_task_info = {} common_task_info["job_id"] = job_id common_task_info["initiator_role"] = initiator_role common_task_info["initiator_party_id"] = initiator_party_id common_task_info["role"] = role common_task_info["party_id"] = party_id common_task_info["run_on_this_party"] = run_on_this_party common_task_info["federated_mode"] = kwargs.get("federated_mode", job_parameters.federated_mode if job_parameters else None) common_task_info["federated_status_collect_type"] = kwargs.get("federated_status_collect_type", job_parameters.federated_status_collect_type if job_parameters else None) common_task_info["auto_retries"] = kwargs.get("auto_retries", job_parameters.auto_retries if job_parameters else None) common_task_info["auto_retry_delay"] = kwargs.get("auto_retry_delay", job_parameters.auto_retry_delay if job_parameters else None) common_task_info["task_version"] = kwargs.get("task_version") if dsl_parser is None: dsl_parser = schedule_utils.get_job_dsl_parser_by_job_id(job_id) provider_group = ProviderManager.get_job_provider_group(dsl_parser=dsl_parser, components=components) initialized_result = {} for group_key, group_info in provider_group.items(): initialized_config = {} initialized_config.update(group_info) initialized_config["common_task_info"] = common_task_info if 
run_on_this_party: code, _result = WorkerManager.start_general_worker(worker_name=WorkerName.TASK_INITIALIZER, job_id=job_id, role=role, party_id=party_id, initialized_config=initialized_config, run_in_subprocess=False if initialized_config["if_default_provider"] else True) initialized_result.update(_result) else: cls.initialize_task_holder_for_scheduling(role=role, party_id=party_id, components=initialized_config["components"], common_task_info=common_task_info, provider_info=initialized_config["provider"]) return initialized_result, provider_group @classmethod def initialize_task_holder_for_scheduling(cls, role, party_id, components, common_task_info, provider_info): for component_name in components: task_info = {} task_info.update(common_task_info) task_info["component_name"] = component_name task_info["component_module"] = "" task_info["provider_info"] = provider_info task_info["component_parameters"] = {} TaskController.create_task(role=role, party_id=party_id, run_on_this_party=common_task_info["run_on_this_party"], task_info=task_info) @classmethod def initialize_job_tracker(cls, job_id, role, party_id, job_parameters: RunParameters, roles, is_initiator, dsl_parser): tracker = Tracker(job_id=job_id, role=role, party_id=party_id, model_id=job_parameters.model_id, model_version=job_parameters.model_version, job_parameters=job_parameters) if job_parameters.job_type != "predict": tracker.init_pipeline_model() partner = {} show_role = {} for _role, _role_party in roles.items(): if is_initiator or _role == role: show_role[_role] = show_role.get(_role, []) for _party_id in _role_party: if is_initiator or _party_id == party_id: show_role[_role].append(_party_id) if _role != role: partner[_role] = partner.get(_role, []) partner[_role].extend(_role_party) else: for _party_id in _role_party: if _party_id != party_id: partner[_role] = partner.get(_role, []) partner[_role].append(_party_id) job_args = dsl_parser.get_args_input() dataset = cls.get_dataset( is_initiator, role, party_id, roles, job_args) tracker.log_job_view( {'partner': partner, 'dataset': dataset, 'roles': show_role}) @classmethod def get_dataset(cls, is_initiator, role, party_id, roles, job_args): dataset = {} dsl_version = 1 if job_args.get('dsl_version'): if job_args.get('dsl_version') == 2: dsl_version = 2 for _role, _role_party_args in job_args.items(): if _role == "dsl_version": continue if is_initiator or _role == role: for _party_index in range(len(_role_party_args)): _party_id = roles[_role][_party_index] if is_initiator or _party_id == party_id: dataset[_role] = dataset.get(_role, {}) dataset[_role][_party_id] = dataset[_role].get( _party_id, {}) if dsl_version == 1: for _data_type, _data_location in _role_party_args[_party_index]['args']['data'].items(): dataset[_role][_party_id][_data_type] = '{}.{}'.format( _data_location['namespace'], _data_location['name']) else: for key in _role_party_args[_party_index].keys(): for _data_type, _data_location in _role_party_args[_party_index][key].items(): search_type = data_utils.get_input_search_type(parameters=_data_location) if search_type is InputSearchType.TABLE_INFO: dataset[_role][_party_id][key] = '{}.{}'.format(_data_location['namespace'], _data_location['name']) elif search_type is InputSearchType.JOB_COMPONENT_OUTPUT: dataset[_role][_party_id][key] = '{}.{}.{}'.format(_data_location['job_id'], _data_location['component_name'], _data_location['data_name']) else: dataset[_role][_party_id][key] = "unknown" return dataset @classmethod def query_job_input_args(cls, 
input_data, role, party_id): min_partition = data_utils.get_input_data_min_partitions( input_data, role, party_id) return {'min_input_data_partition': min_partition} @classmethod def start_job(cls, job_id, role, party_id, extra_info=None): schedule_logger(job_id).info( f"try to start job on {role} {party_id}") job_info = { "job_id": job_id, "role": role, "party_id": party_id, "status": JobStatus.RUNNING, "start_time": current_timestamp() } if extra_info: schedule_logger(job_id).info(f"extra info: {extra_info}") job_info.update(extra_info) cls.update_job_status(job_info=job_info) cls.update_job(job_info=job_info) schedule_logger(job_id).info( f"start job on {role} {party_id} successfully") @classmethod def update_job(cls, job_info): """ Save to local database :param job_info: :return: """ return JobSaver.update_job(job_info=job_info) @classmethod def update_job_status(cls, job_info): update_status = JobSaver.update_job_status(job_info=job_info) if update_status and EndStatus.contains(job_info.get("status")): ResourceManager.return_job_resource( job_id=job_info["job_id"], role=job_info["role"], party_id=job_info["party_id"]) return update_status @classmethod def stop_jobs(cls, job_id, stop_status, role=None, party_id=None): if role and party_id: jobs = JobSaver.query_job( job_id=job_id, role=role, party_id=party_id) else: jobs = JobSaver.query_job(job_id=job_id) kill_status = True kill_details = {} for job in jobs: kill_job_status, kill_job_details = cls.stop_job( job=job, stop_status=stop_status) kill_status = kill_status & kill_job_status kill_details[job_id] = kill_job_details return kill_status, kill_details @classmethod def stop_job(cls, job, stop_status): tasks = JobSaver.query_task( job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id, reverse=True) kill_status = True kill_details = {} for task in tasks: kill_task_status = TaskController.stop_task( task=task, stop_status=stop_status) kill_status = kill_status & kill_task_status kill_details[task.f_task_id] = 'success' if kill_task_status else 'failed' if kill_status: job_info = job.to_human_model_dict(only_primary_with=["status"]) job_info["status"] = stop_status JobController.update_job_status(job_info) return kill_status, kill_details # Job status depends on the final operation result and initiator calculate @classmethod def save_pipelined_model(cls, job_id, role, party_id): schedule_logger(job_id).info(f"start to save pipeline model on {role} {party_id}") job_configuration = job_utils.get_job_configuration(job_id=job_id, role=role, party_id=party_id) runtime_conf_on_party = job_configuration.runtime_conf_on_party job_parameters = runtime_conf_on_party.get('job_parameters', {}) if role in job_parameters.get("assistant_role", []): return model_id = job_parameters['model_id'] model_version = job_parameters['model_version'] job_type = job_parameters.get('job_type', '') roles = runtime_conf_on_party['role'] initiator_role = runtime_conf_on_party['initiator']['role'] initiator_party_id = runtime_conf_on_party['initiator']['party_id'] if job_type == 'predict': return dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job_configuration.dsl, runtime_conf=job_configuration.runtime_conf, train_runtime_conf=job_configuration.train_runtime_conf) components_parameters = {} tasks = JobSaver.query_task(job_id=job_id, role=role, party_id=party_id, only_latest=True) for task in tasks: components_parameters[task.f_component_name] = task.f_component_parameters predict_dsl = schedule_utils.fill_inference_dsl(dsl_parser, 
origin_inference_dsl=job_configuration.dsl, components_parameters=components_parameters) pipeline = pipeline_pb2.Pipeline() pipeline.inference_dsl = json_dumps(predict_dsl, byte=True) pipeline.train_dsl = json_dumps(job_configuration.dsl, byte=True) pipeline.train_runtime_conf = json_dumps(job_configuration.runtime_conf, byte=True) pipeline.fate_version = RuntimeConfig.get_env("FATE") pipeline.model_id = model_id pipeline.model_version = model_version pipeline.parent = True pipeline.loaded_times = 0 pipeline.roles = json_dumps(roles, byte=True) pipeline.initiator_role = initiator_role pipeline.initiator_party_id = initiator_party_id pipeline.runtime_conf_on_party = json_dumps( runtime_conf_on_party, byte=True) pipeline.parent_info = json_dumps({}, byte=True) tracker = Tracker(job_id=job_id, role=role, party_id=party_id, model_id=model_id, model_version=model_version, job_parameters=RunParameters(**job_parameters)) tracker.save_pipeline_model(pipeline_buffer_object=pipeline) if role != 'local': tracker.save_machine_learning_model_info() schedule_logger(job_id).info(f"save pipeline on {role} {party_id} successfully") @classmethod def clean_job(cls, job_id, role, party_id, roles): schedule_logger(job_id).info(f"start to clean job on {role} {party_id}") # todo schedule_logger(job_id).info(f"job on {role} {party_id} clean done")
[ "fate_flow.operation.job_saver.JobSaver.update_job_status", "fate_flow.utils.data_utils.get_input_data_min_partitions", "fate_flow.manager.resource_manager.ResourceManager.adapt_engine_parameters", "fate_flow.manager.resource_manager.ResourceManager.get_engine_registration_info", "fate_flow.utils.schedule_utils.get_job_dsl_parser_by_job_id", "fate_flow.manager.resource_manager.ResourceManager.return_job_resource", "fate_flow.controller.task_controller.TaskController.create_task", "fate_flow.manager.provider_manager.ProviderManager.get_job_provider_group", "fate_flow.utils.data_utils.get_input_search_type", "fate_flow.protobuf.python.pipeline_pb2.Pipeline", "fate_flow.utils.schedule_utils.fill_inference_dsl", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.operation.job_tracker.Tracker", "fate_flow.utils.job_utils.save_job_conf", "fate_flow.entity.RunParameters", "fate_flow.controller.task_controller.TaskController.stop_task", "fate_flow.operation.job_saver.JobSaver.create_job", "fate_flow.operation.job_saver.JobSaver.update_job", "fate_flow.utils.job_utils.get_job_configuration", "fate_flow.operation.job_saver.JobSaver.query_task", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.utils.log_utils.schedule_logger", "fate_flow.manager.resource_manager.ResourceManager.check_resource_apply", "fate_flow.db.runtime_config.RuntimeConfig.get_env", "fate_flow.manager.worker_manager.WorkerManager.start_general_worker" ]
[((2404, 2516), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'dsl', 'runtime_conf': 'runtime_conf', 'train_runtime_conf': 'train_runtime_conf'}), '(dsl=dsl, runtime_conf=runtime_conf,\n train_runtime_conf=train_runtime_conf)\n', (2437, 2516), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((5030, 5068), 'fate_flow.operation.job_saver.JobSaver.create_job', 'JobSaver.create_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (5049, 5068), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((5850, 6083), 'fate_flow.utils.job_utils.save_job_conf', 'job_utils.save_job_conf', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'dsl': 'dsl', 'runtime_conf': 'runtime_conf', 'runtime_conf_on_party': "job_info['runtime_conf_on_party']", 'train_runtime_conf': 'train_runtime_conf', 'pipeline_dsl': 'None'}), "(job_id=job_id, role=role, party_id=party_id, dsl=\n dsl, runtime_conf=runtime_conf, runtime_conf_on_party=job_info[\n 'runtime_conf_on_party'], train_runtime_conf=train_runtime_conf,\n pipeline_dsl=None)\n", (5873, 6083), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((6653, 6679), 'fate_arch.common.engine_utils.get_engines', 'engine_utils.get_engines', ([], {}), '()\n', (6677, 6679), False, 'from fate_arch.common import engine_utils\n'), ((8948, 9087), 'fate_flow.manager.resource_manager.ResourceManager.adapt_engine_parameters', 'ResourceManager.adapt_engine_parameters', ([], {'role': 'role', 'job_parameters': 'job_parameters', 'create_initiator_baseline': 'create_initiator_baseline'}), '(role=role, job_parameters=\n job_parameters, create_initiator_baseline=create_initiator_baseline)\n', (8987, 9087), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((10606, 10735), 'fate_flow.manager.resource_manager.ResourceManager.check_resource_apply', 'ResourceManager.check_resource_apply', ([], {'job_parameters': 'job_parameters', 'role': 'role', 'party_id': 'party_id', 'engines_info': 'engines_info'}), '(job_parameters=job_parameters, role=\n role, party_id=party_id, engines_info=engines_info)\n', (10642, 10735), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((11888, 11988), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role,\n party_id=initiator_party_id)\n', (11919, 11988), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((13735, 13811), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (13766, 13811), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((15062, 15091), 'fate_flow.operation.job_saver.JobSaver.update_job', 'JobSaver.update_job', (['job_info'], {}), '(job_info)\n', (15081, 15091), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((16756, 16845), 'fate_flow.manager.provider_manager.ProviderManager.get_job_provider_group', 'ProviderManager.get_job_provider_group', ([], {'dsl_parser': 'dsl_parser', 'components': 'components'}), '(dsl_parser=dsl_parser, components=\n components)\n', (16794, 16845), False, 'from fate_flow.manager.provider_manager import ProviderManager\n'), ((19203, 19373), 
'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'model_id': 'job_parameters.model_id', 'model_version': 'job_parameters.model_version', 'job_parameters': 'job_parameters'}), '(job_id=job_id, role=role, party_id=party_id, model_id=\n job_parameters.model_id, model_version=job_parameters.model_version,\n job_parameters=job_parameters)\n', (19210, 19373), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((22724, 22792), 'fate_flow.utils.data_utils.get_input_data_min_partitions', 'data_utils.get_input_data_min_partitions', (['input_data', 'role', 'party_id'], {}), '(input_data, role, party_id)\n', (22764, 22792), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((23735, 23773), 'fate_flow.operation.job_saver.JobSaver.update_job', 'JobSaver.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (23754, 23773), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((23858, 23903), 'fate_flow.operation.job_saver.JobSaver.update_job_status', 'JobSaver.update_job_status', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (23884, 23903), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((24852, 24953), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id', 'reverse': '(True)'}), '(job_id=job.f_job_id, role=job.f_role, party_id=job.\n f_party_id, reverse=True)\n', (24871, 24953), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((25815, 25891), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (25846, 25891), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((26597, 26769), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job_configuration.dsl', 'runtime_conf': 'job_configuration.runtime_conf', 'train_runtime_conf': 'job_configuration.train_runtime_conf'}), '(dsl=job_configuration.dsl, runtime_conf=\n job_configuration.runtime_conf, train_runtime_conf=job_configuration.\n train_runtime_conf)\n', (26630, 26769), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((26922, 27008), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'only_latest': '(True)'}), '(job_id=job_id, role=role, party_id=party_id,\n only_latest=True)\n', (26941, 27008), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((27141, 27280), 'fate_flow.utils.schedule_utils.fill_inference_dsl', 'schedule_utils.fill_inference_dsl', (['dsl_parser'], {'origin_inference_dsl': 'job_configuration.dsl', 'components_parameters': 'components_parameters'}), '(dsl_parser, origin_inference_dsl=\n job_configuration.dsl, components_parameters=components_parameters)\n', (27174, 27280), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((27296, 27319), 'fate_flow.protobuf.python.pipeline_pb2.Pipeline', 'pipeline_pb2.Pipeline', ([], {}), '()\n', (27317, 27319), False, 'from fate_flow.protobuf.python import pipeline_pb2\n'), ((27353, 27387), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['predict_dsl'], {'byte': '(True)'}), '(predict_dsl, byte=True)\n', (27363, 27387), 
False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((27417, 27461), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['job_configuration.dsl'], {'byte': '(True)'}), '(job_configuration.dsl, byte=True)\n', (27427, 27461), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((27500, 27553), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['job_configuration.runtime_conf'], {'byte': '(True)'}), '(job_configuration.runtime_conf, byte=True)\n', (27510, 27553), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((27586, 27615), 'fate_flow.db.runtime_config.RuntimeConfig.get_env', 'RuntimeConfig.get_env', (['"""FATE"""'], {}), "('FATE')\n", (27607, 27615), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((27791, 27819), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['roles'], {'byte': '(True)'}), '(roles, byte=True)\n', (27801, 27819), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((27967, 28011), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['runtime_conf_on_party'], {'byte': '(True)'}), '(runtime_conf_on_party, byte=True)\n', (27977, 28011), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((28056, 28081), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['{}'], {'byte': '(True)'}), '({}, byte=True)\n', (28066, 28081), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((10145, 10243), 'fate_flow.manager.resource_manager.ResourceManager.get_engine_registration_info', 'ResourceManager.get_engine_registration_info', ([], {'engine_type': 'engine_type', 'engine_name': 'engine_name'}), '(engine_type=engine_type,\n engine_name=engine_name)\n', (10189, 10243), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((14210, 14251), 'fate_flow.entity.RunParameters', 'RunParameters', ([], {}), "(**job_parameters['common'])\n", (14223, 14251), False, 'from fate_flow.entity import RunParameters\n'), ((16679, 16730), 'fate_flow.utils.schedule_utils.get_job_dsl_parser_by_job_id', 'schedule_utils.get_job_dsl_parser_by_job_id', (['job_id'], {}), '(job_id)\n', (16722, 16730), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((18829, 18968), 'fate_flow.controller.task_controller.TaskController.create_task', 'TaskController.create_task', ([], {'role': 'role', 'party_id': 'party_id', 'run_on_this_party': "common_task_info['run_on_this_party']", 'task_info': 'task_info'}), "(role=role, party_id=party_id, run_on_this_party=\n common_task_info['run_on_this_party'], task_info=task_info)\n", (18855, 18968), False, 'from fate_flow.controller.task_controller import TaskController\n'), ((23218, 23237), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (23235, 23237), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((23989, 24110), 'fate_flow.manager.resource_manager.ResourceManager.return_job_resource', 'ResourceManager.return_job_resource', ([], {'job_id': "job_info['job_id']", 'role': "job_info['role']", 'party_id': "job_info['party_id']"}), "(job_id=job_info['job_id'], role=\n job_info['role'], party_id=job_info['party_id'])\n", (24024, 24110), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((24290, 24353), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 
'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (24308, 24353), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((24404, 24437), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (24422, 24437), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((25073, 25133), 'fate_flow.controller.task_controller.TaskController.stop_task', 'TaskController.stop_task', ([], {'task': 'task', 'stop_status': 'stop_status'}), '(task=task, stop_status=stop_status)\n', (25097, 25133), False, 'from fate_flow.controller.task_controller import TaskController\n'), ((2700, 2723), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2715, 2723), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3355, 3378), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (3370, 3378), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((12405, 12452), 'fate_flow.entity.RunParameters', 'RunParameters', ([], {}), "(**input_job_parameters['common'])\n", (12418, 12452), False, 'from fate_flow.entity import RunParameters\n'), ((17220, 17473), 'fate_flow.manager.worker_manager.WorkerManager.start_general_worker', 'WorkerManager.start_general_worker', ([], {'worker_name': 'WorkerName.TASK_INITIALIZER', 'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'initialized_config': 'initialized_config', 'run_in_subprocess': "(False if initialized_config['if_default_provider'] else True)"}), "(worker_name=WorkerName.TASK_INITIALIZER,\n job_id=job_id, role=role, party_id=party_id, initialized_config=\n initialized_config, run_in_subprocess=False if initialized_config[\n 'if_default_provider'] else True)\n", (17254, 17473), False, 'from fate_flow.manager.worker_manager import WorkerManager\n'), ((22956, 22979), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (22971, 22979), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((23480, 23503), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (23495, 23503), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((25704, 25727), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (25719, 25727), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((28243, 28274), 'fate_flow.entity.RunParameters', 'RunParameters', ([], {}), '(**job_parameters)\n', (28256, 28274), False, 'from fate_flow.entity import RunParameters\n'), ((28436, 28459), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (28451, 28459), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((28598, 28621), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (28613, 28621), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((28694, 28717), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (28709, 28717), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3511, 3534), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (3526, 3534), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((23283, 23306), 'fate_flow.utils.log_utils.schedule_logger', 
'schedule_logger', (['job_id'], {}), '(job_id)\n', (23298, 23306), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((8707, 8730), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (8722, 8730), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((21914, 21973), 'fate_flow.utils.data_utils.get_input_search_type', 'data_utils.get_input_search_type', ([], {'parameters': '_data_location'}), '(parameters=_data_location)\n', (21946, 21973), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n')]
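The record above includes a JobController.merge_update helper that recursively folds user-supplied parameter overrides into an existing configuration dict. A minimal standalone sketch of the same merge strategy (the names and the extra type guard here are illustrative, not part of FATE Flow) could look like this:

def merge_update(inputs, results):
    """Recursively copy entries from `inputs` into `results`.

    Missing keys and scalar values overwrite entries in `results`; nested
    dicts are merged key by key instead of being replaced wholesale.
    """
    if not isinstance(inputs, dict) or not isinstance(results, dict):
        raise ValueError("must both be dict, got {} and {}".format(type(inputs), type(results)))
    for k, v in inputs.items():
        if k in results and isinstance(v, dict) and isinstance(results[k], dict):
            merge_update(v, results[k])
        else:
            results[k] = v
    return results


base = {"common": {"task_cores": 4, "timeout": 3600}}
override = {"common": {"task_cores": 8}}
merge_update(override, base)
# base is now {"common": {"task_cores": 8, "timeout": 3600}}

Mutating `results` in place mirrors how the record applies overrides directly onto the stored job configuration rather than building a new dict.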
# Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime import errno import os import sys import threading import typing from fate_arch.common import file_utils, FederatedMode from fate_arch.common.base_utils import json_dumps, fate_uuid, current_timestamp from fate_flow.utils.log_utils import schedule_logger from fate_flow.db.db_models import DB, Job, Task from fate_flow.entity import JobConfiguration from fate_flow.entity.run_status import JobStatus, TaskStatus from fate_flow.entity import RunParameters from fate_flow.db.job_default_config import JobDefaultConfig from fate_flow.settings import FATE_BOARD_DASHBOARD_ENDPOINT from fate_flow.db.service_registry import ServiceRegistry from fate_flow.utils import detect_utils, process_utils from fate_flow.utils import session_utils from fate_flow.utils.base_utils import get_fate_flow_directory class JobIdGenerator(object): _lock = threading.RLock() def __init__(self, initial_value=0): self._value = initial_value self._pre_timestamp = None self._max = 99999 def next_id(self): ''' generate next job id with locking ''' #todo: there is duplication in the case of multiple instances deployment now = datetime.datetime.now() with JobIdGenerator._lock: if self._pre_timestamp == now: if self._value < self._max: self._value += 1 else: now += datetime.timedelta(microseconds=1) self._pre_timestamp = now self._value = 0 else: self._pre_timestamp = now self._value = 0 return "{}{}".format(now.strftime("%Y%m%d%H%M%S%f"), self._value) job_id_generator = JobIdGenerator() def generate_job_id(): return job_id_generator.next_id() def generate_task_id(job_id, component_name): return '{}_{}'.format(job_id, component_name) def generate_task_version_id(task_id, task_version): return "{}_{}".format(task_id, task_version) def generate_session_id(task_id, task_version, role, party_id, suffix=None, random_end=False): items = [task_id, str(task_version), role, str(party_id)] if suffix: items.append(suffix) if random_end: items.append(fate_uuid()) return "_".join(items) def generate_task_input_data_namespace(task_id, task_version, role, party_id): return "input_data_{}".format(generate_session_id(task_id=task_id, task_version=task_version, role=role, party_id=party_id)) def get_job_directory(job_id, *args): return os.path.join(get_fate_flow_directory(), 'jobs', job_id, *args) def get_job_log_directory(job_id, *args): return os.path.join(get_fate_flow_directory(), 'logs', job_id, *args) def get_task_directory(job_id, role, party_id, component_name, task_id, task_version, **kwargs): return get_job_directory(job_id, role, party_id, component_name, task_id, task_version) def get_general_worker_directory(worker_name, worker_id, *args): return os.path.join(get_fate_flow_directory(), worker_name, worker_id, *args) def get_general_worker_log_directory(worker_name, worker_id, *args): return os.path.join(get_fate_flow_directory(), 'logs', worker_name, worker_id, *args) def check_config(config: typing.Dict, required_parameters: 
typing.List): for parameter in required_parameters: if parameter not in config: return False, 'configuration no {} parameter'.format(parameter) else: return True, 'ok' def check_job_runtime_conf(runtime_conf: typing.Dict): detect_utils.check_config(runtime_conf, ['initiator', 'role']) detect_utils.check_config(runtime_conf['initiator'], ['role', 'party_id']) # deal party id runtime_conf['initiator']['party_id'] = int(runtime_conf['initiator']['party_id']) for r in runtime_conf['role'].keys(): for i in range(len(runtime_conf['role'][r])): runtime_conf['role'][r][i] = int(runtime_conf['role'][r][i]) def runtime_conf_basic(if_local=False): job_runtime_conf = { "dsl_version": 2, "initiator": {}, "job_parameters": { "common": { "federated_mode": FederatedMode.SINGLE }, }, "role": {}, "component_parameters": {} } if if_local: job_runtime_conf["initiator"]["role"] = "local" job_runtime_conf["initiator"]["party_id"] = 0 job_runtime_conf["role"]["local"] = [0] return job_runtime_conf def new_runtime_conf(job_dir, method, module, role, party_id): if role: conf_path_dir = os.path.join(job_dir, method, module, role, str(party_id)) else: conf_path_dir = os.path.join(job_dir, method, module, str(party_id)) os.makedirs(conf_path_dir, exist_ok=True) return os.path.join(conf_path_dir, 'runtime_conf.json') def save_job_conf(job_id, role, party_id, dsl, runtime_conf, runtime_conf_on_party, train_runtime_conf, pipeline_dsl=None): path_dict = get_job_conf_path(job_id=job_id, role=role, party_id=party_id) dump_job_conf(path_dict=path_dict, dsl=dsl, runtime_conf=runtime_conf, runtime_conf_on_party=runtime_conf_on_party, train_runtime_conf=train_runtime_conf, pipeline_dsl=pipeline_dsl) return path_dict def save_task_using_job_conf(task: Task): task_dir = get_task_directory(job_id=task.f_job_id, role=task.f_role, party_id=task.f_party_id, component_name=task.f_component_name, task_id=task.f_task_id, task_version=str(task.f_task_version)) return save_using_job_conf(task.f_job_id, task.f_role, task.f_party_id, config_dir=task_dir) def save_using_job_conf(job_id, role, party_id, config_dir): path_dict = get_job_conf_path(job_id=job_id, role=role, party_id=party_id, specified_dir=config_dir) job_configuration = get_job_configuration(job_id=job_id, role=role, party_id=party_id) dump_job_conf(path_dict=path_dict, dsl=job_configuration.dsl, runtime_conf=job_configuration.runtime_conf, runtime_conf_on_party=job_configuration.runtime_conf_on_party, train_runtime_conf=job_configuration.train_runtime_conf, pipeline_dsl=None) return path_dict def dump_job_conf(path_dict, dsl, runtime_conf, runtime_conf_on_party, train_runtime_conf, pipeline_dsl=None): os.makedirs(os.path.dirname(path_dict.get('dsl_path')), exist_ok=True) os.makedirs(os.path.dirname(path_dict.get('runtime_conf_on_party_path')), exist_ok=True) for data, conf_path in [(dsl, path_dict['dsl_path']), (runtime_conf, path_dict['runtime_conf_path']), (runtime_conf_on_party, path_dict['runtime_conf_on_party_path']), (train_runtime_conf, path_dict['train_runtime_conf_path']), (pipeline_dsl, path_dict['pipeline_dsl_path'])]: with open(conf_path, 'w+') as f: f.truncate() if not data: data = {} f.write(json_dumps(data, indent=4)) f.flush() return path_dict @DB.connection_context() def get_job_configuration(job_id, role, party_id) -> JobConfiguration: jobs = Job.select(Job.f_dsl, Job.f_runtime_conf, Job.f_train_runtime_conf, Job.f_runtime_conf_on_party).where(Job.f_job_id == job_id, Job.f_role == role, Job.f_party_id == party_id) if jobs: job = jobs[0] return 
JobConfiguration(**job.to_human_model_dict()) def get_task_using_job_conf(task_info: dict): task_dir = get_task_directory(**task_info) return read_job_conf(task_info["job_id"], task_info["role"], task_info["party_id"], task_dir) def read_job_conf(job_id, role, party_id, specified_dir=None): path_dict = get_job_conf_path(job_id=job_id, role=role, party_id=party_id, specified_dir=specified_dir) conf_dict = {} for key, path in path_dict.items(): config = file_utils.load_json_conf(path) conf_dict[key.rstrip("_path")] = config return JobConfiguration(**conf_dict) def get_job_conf_path(job_id, role, party_id, specified_dir=None): conf_dir = get_job_directory(job_id) if not specified_dir else specified_dir job_dsl_path = os.path.join(conf_dir, 'job_dsl.json') job_runtime_conf_path = os.path.join(conf_dir, 'job_runtime_conf.json') if not specified_dir: job_runtime_conf_on_party_path = os.path.join(conf_dir, role, str(party_id), 'job_runtime_on_party_conf.json') else: job_runtime_conf_on_party_path = os.path.join(conf_dir, 'job_runtime_on_party_conf.json') train_runtime_conf_path = os.path.join(conf_dir, 'train_runtime_conf.json') pipeline_dsl_path = os.path.join(conf_dir, 'pipeline_dsl.json') return {'dsl_path': job_dsl_path, 'runtime_conf_path': job_runtime_conf_path, 'runtime_conf_on_party_path': job_runtime_conf_on_party_path, 'train_runtime_conf_path': train_runtime_conf_path, 'pipeline_dsl_path': pipeline_dsl_path} @DB.connection_context() def get_upload_job_configuration_summary(upload_tasks: typing.List[Task]): jobs_run_conf = {} for task in upload_tasks: jobs = Job.select(Job.f_job_id, Job.f_runtime_conf_on_party, Job.f_description).where(Job.f_job_id == task.f_job_id) job = jobs[0] jobs_run_conf[job.f_job_id] = job.f_runtime_conf_on_party["component_parameters"]["role"]["local"]["0"]["upload_0"] jobs_run_conf[job.f_job_id]["notes"] = job.f_description return jobs_run_conf @DB.connection_context() def get_job_parameters(job_id, role, party_id): jobs = Job.select(Job.f_runtime_conf_on_party).where(Job.f_job_id == job_id, Job.f_role == role, Job.f_party_id == party_id) if jobs: job = jobs[0] return job.f_runtime_conf_on_party.get("job_parameters") else: return {} @DB.connection_context() def get_job_dsl(job_id, role, party_id): jobs = Job.select(Job.f_dsl).where(Job.f_job_id == job_id, Job.f_role == role, Job.f_party_id == party_id) if jobs: job = jobs[0] return job.f_dsl else: return {} def job_pipeline_component_name(): return "pipeline" def job_pipeline_component_module_name(): return "Pipeline" @DB.connection_context() def list_job(limit): if limit > 0: jobs = Job.select().order_by(Job.f_create_time.desc()).limit(limit) else: jobs = Job.select().order_by(Job.f_create_time.desc()) return [job for job in jobs] @DB.connection_context() def list_task(limit): if limit > 0: tasks = Task.select().order_by(Task.f_create_time.desc()).limit(limit) else: tasks = Task.select().order_by(Task.f_create_time.desc()) return [task for task in tasks] def check_job_process(pid): if pid < 0: return False if pid == 0: raise ValueError('invalid PID 0') try: os.kill(pid, 0) except OSError as err: if err.errno == errno.ESRCH: # ESRCH == No such process return False elif err.errno == errno.EPERM: # EPERM clearly means there's a process to deny access to return True else: # According to "man 2 kill" possible error values are # (EINVAL, EPERM, ESRCH) raise else: return True def check_job_is_timeout(job: Job): job_parameters = job.f_runtime_conf_on_party["job_parameters"] timeout = job_parameters.get("timeout", 
JobDefaultConfig.job_timeout) now_time = current_timestamp() running_time = (now_time - job.f_create_time)/1000 if running_time > timeout: schedule_logger(job.f_job_id).info(f'run time {running_time}s timeout') return True else: return False def start_session_stop(task): job_parameters = RunParameters(**get_job_parameters(job_id=task.f_job_id, role=task.f_role, party_id=task.f_party_id)) session_manager_id = generate_session_id(task.f_task_id, task.f_task_version, task.f_role, task.f_party_id) if task.f_status != TaskStatus.WAITING: schedule_logger(task.f_job_id).info(f'start run subprocess to stop task sessions {session_manager_id}') else: schedule_logger(task.f_job_id).info(f'task is waiting, pass stop sessions {session_manager_id}') return task_dir = os.path.join(get_job_directory(job_id=task.f_job_id), task.f_role, task.f_party_id, task.f_component_name, 'session_stop') os.makedirs(task_dir, exist_ok=True) process_cmd = [ sys.executable or 'python3', sys.modules[session_utils.SessionStop.__module__].__file__, '--session', session_manager_id, '--computing', job_parameters.computing_engine, '--federation', job_parameters.federation_engine, '--storage', job_parameters.storage_engine, '-c', 'stop' if task.f_status == JobStatus.SUCCESS else 'kill' ] p = process_utils.run_subprocess(job_id=task.f_job_id, config_dir=task_dir, process_cmd=process_cmd) p.wait() p.poll() def get_timeout(job_id, timeout, runtime_conf, dsl): try: if timeout > 0: schedule_logger(job_id).info(f'setting job timeout {timeout}') return timeout else: default_timeout = job_default_timeout(runtime_conf, dsl) schedule_logger(job_id).info(f'setting job timeout {timeout} not a positive number, using the default timeout {default_timeout}') return default_timeout except: default_timeout = job_default_timeout(runtime_conf, dsl) schedule_logger(job_id).info(f'setting job timeout {timeout} is incorrect, using the default timeout {default_timeout}') return default_timeout def job_default_timeout(runtime_conf, dsl): # future versions will improve timeout = JobDefaultConfig.job_timeout return timeout def get_board_url(job_id, role, party_id): board_url = "http://{}:{}{}".format( ServiceRegistry.FATEBOARD.get("host"), ServiceRegistry.FATEBOARD.get("port"), FATE_BOARD_DASHBOARD_ENDPOINT).format(job_id, role, party_id) return board_url
[ "fate_flow.utils.base_utils.get_fate_flow_directory", "fate_flow.db.db_models.DB.connection_context", "fate_flow.db.db_models.Job.select", "fate_flow.db.db_models.Job.f_create_time.desc", "fate_flow.db.db_models.Task.select", "fate_flow.entity.JobConfiguration", "fate_flow.db.service_registry.ServiceRegistry.FATEBOARD.get", "fate_flow.utils.log_utils.schedule_logger", "fate_flow.db.db_models.Task.f_create_time.desc", "fate_flow.utils.detect_utils.check_config", "fate_flow.utils.process_utils.run_subprocess" ]
[((8246, 8269), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (8267, 8269), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((10386, 10409), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (10407, 10409), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((10902, 10925), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (10923, 10925), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11348, 11371), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (11369, 11371), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11818, 11841), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (11839, 11841), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12066, 12089), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (12087, 12089), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((1470, 1487), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1485, 1487), False, 'import threading\n'), ((4348, 4410), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['runtime_conf', "['initiator', 'role']"], {}), "(runtime_conf, ['initiator', 'role'])\n", (4373, 4410), False, 'from fate_flow.utils import detect_utils, process_utils\n'), ((4415, 4489), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (["runtime_conf['initiator']", "['role', 'party_id']"], {}), "(runtime_conf['initiator'], ['role', 'party_id'])\n", (4440, 4489), False, 'from fate_flow.utils import detect_utils, process_utils\n'), ((5533, 5574), 'os.makedirs', 'os.makedirs', (['conf_path_dir'], {'exist_ok': '(True)'}), '(conf_path_dir, exist_ok=True)\n', (5544, 5574), False, 'import os\n'), ((5586, 5634), 'os.path.join', 'os.path.join', (['conf_path_dir', '"""runtime_conf.json"""'], {}), "(conf_path_dir, 'runtime_conf.json')\n", (5598, 5634), False, 'import os\n'), ((9384, 9413), 'fate_flow.entity.JobConfiguration', 'JobConfiguration', ([], {}), '(**conf_dict)\n', (9400, 9413), False, 'from fate_flow.entity import JobConfiguration\n'), ((9583, 9621), 'os.path.join', 'os.path.join', (['conf_dir', '"""job_dsl.json"""'], {}), "(conf_dir, 'job_dsl.json')\n", (9595, 9621), False, 'import os\n'), ((9650, 9697), 'os.path.join', 'os.path.join', (['conf_dir', '"""job_runtime_conf.json"""'], {}), "(conf_dir, 'job_runtime_conf.json')\n", (9662, 9697), False, 'import os\n'), ((9981, 10030), 'os.path.join', 'os.path.join', (['conf_dir', '"""train_runtime_conf.json"""'], {}), "(conf_dir, 'train_runtime_conf.json')\n", (9993, 10030), False, 'import os\n'), ((10055, 10098), 'os.path.join', 'os.path.join', (['conf_dir', '"""pipeline_dsl.json"""'], {}), "(conf_dir, 'pipeline_dsl.json')\n", (10067, 10098), False, 'import os\n'), ((13100, 13119), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (13117, 13119), False, 'from fate_arch.common.base_utils import json_dumps, fate_uuid, current_timestamp\n'), ((14060, 14096), 'os.makedirs', 'os.makedirs', (['task_dir'], {'exist_ok': '(True)'}), '(task_dir, exist_ok=True)\n', (14071, 14096), False, 'import os\n'), ((14514, 14614), 'fate_flow.utils.process_utils.run_subprocess', 'process_utils.run_subprocess', ([], {'job_id': 'task.f_job_id', 'config_dir': 'task_dir', 'process_cmd': 'process_cmd'}), 
'(job_id=task.f_job_id, config_dir=task_dir,\n process_cmd=process_cmd)\n', (14542, 14614), False, 'from fate_flow.utils import detect_utils, process_utils\n'), ((1812, 1835), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1833, 1835), False, 'import datetime\n'), ((3353, 3378), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3376, 3378), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((3471, 3496), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3494, 3496), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((3803, 3828), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3826, 3828), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((3956, 3981), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3979, 3981), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((9293, 9324), 'fate_arch.common.file_utils.load_json_conf', 'file_utils.load_json_conf', (['path'], {}), '(path)\n', (9318, 9324), False, 'from fate_arch.common import file_utils, FederatedMode\n'), ((9894, 9950), 'os.path.join', 'os.path.join', (['conf_dir', '"""job_runtime_on_party_conf.json"""'], {}), "(conf_dir, 'job_runtime_on_party_conf.json')\n", (9906, 9950), False, 'import os\n'), ((12464, 12479), 'os.kill', 'os.kill', (['pid', '(0)'], {}), '(pid, 0)\n', (12471, 12479), False, 'import os\n'), ((2877, 2888), 'fate_arch.common.base_utils.fate_uuid', 'fate_uuid', ([], {}), '()\n', (2886, 2888), False, 'from fate_arch.common.base_utils import json_dumps, fate_uuid, current_timestamp\n'), ((8352, 8453), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_dsl', 'Job.f_runtime_conf', 'Job.f_train_runtime_conf', 'Job.f_runtime_conf_on_party'], {}), '(Job.f_dsl, Job.f_runtime_conf, Job.f_train_runtime_conf, Job.\n f_runtime_conf_on_party)\n', (8362, 8453), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((10985, 11024), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_runtime_conf_on_party'], {}), '(Job.f_runtime_conf_on_party)\n', (10995, 11024), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11424, 11445), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_dsl'], {}), '(Job.f_dsl)\n', (11434, 11445), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12004, 12028), 'fate_flow.db.db_models.Job.f_create_time.desc', 'Job.f_create_time.desc', ([], {}), '()\n', (12026, 12028), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12258, 12283), 'fate_flow.db.db_models.Task.f_create_time.desc', 'Task.f_create_time.desc', ([], {}), '()\n', (12281, 12283), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((8172, 8198), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (8182, 8198), False, 'from fate_arch.common.base_utils import json_dumps, fate_uuid, current_timestamp\n'), ((10553, 10625), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_job_id', 'Job.f_runtime_conf_on_party', 'Job.f_description'], {}), '(Job.f_job_id, Job.f_runtime_conf_on_party, Job.f_description)\n', (10563, 10625), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11982, 11994), 'fate_flow.db.db_models.Job.select', 'Job.select', ([], {}), '()\n', (11992, 11994), 
False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12235, 12248), 'fate_flow.db.db_models.Task.select', 'Task.select', ([], {}), '()\n', (12246, 12248), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((13214, 13243), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (13229, 13243), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((13656, 13686), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (13671, 13686), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((13778, 13808), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (13793, 13808), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((15561, 15598), 'fate_flow.db.service_registry.ServiceRegistry.FATEBOARD.get', 'ServiceRegistry.FATEBOARD.get', (['"""host"""'], {}), "('host')\n", (15590, 15598), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((15608, 15645), 'fate_flow.db.service_registry.ServiceRegistry.FATEBOARD.get', 'ServiceRegistry.FATEBOARD.get', (['"""port"""'], {}), "('port')\n", (15637, 15645), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((2044, 2078), 'datetime.timedelta', 'datetime.timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (2062, 2078), False, 'import datetime\n'), ((11918, 11942), 'fate_flow.db.db_models.Job.f_create_time.desc', 'Job.f_create_time.desc', ([], {}), '()\n', (11940, 11942), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12169, 12194), 'fate_flow.db.db_models.Task.f_create_time.desc', 'Task.f_create_time.desc', ([], {}), '()\n', (12192, 12194), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((14737, 14760), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14752, 14760), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((14922, 14945), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14937, 14945), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((15172, 15195), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15187, 15195), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((11896, 11908), 'fate_flow.db.db_models.Job.select', 'Job.select', ([], {}), '()\n', (11906, 11908), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12146, 12159), 'fate_flow.db.db_models.Task.select', 'Task.select', ([], {}), '()\n', (12157, 12159), False, 'from fate_flow.db.db_models import DB, Job, Task\n')]
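The record above probes whether a job subprocess is still alive by sending signal 0 with os.kill and interpreting the resulting errno. A self-contained sketch of that liveness check, independent of FATE Flow's own helper, might read:

import errno
import os


def process_alive(pid):
    """Return True if a process with `pid` appears to exist."""
    if pid <= 0:
        # 0 and negative values address process groups, not a single process.
        return False
    try:
        os.kill(pid, 0)  # signal 0 performs only the existence/permission check
    except OSError as err:
        if err.errno == errno.ESRCH:   # no such process
            return False
        if err.errno == errno.EPERM:   # process exists but belongs to another user
            return True
        raise
    return True


print(process_alive(os.getpid()))   # expected: True

Signal 0 never gets delivered; the kernel only performs the lookup and permission checks, which is why EPERM is treated as "alive".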
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json import requests from arch.api.utils import file_utils from flask import jsonify from flask import Response from arch.api.utils.log_utils import audit_logger from fate_flow.entity.constant_config import WorkMode from fate_flow.settings import DEFAULT_GRPC_OVERALL_TIMEOUT, CHECK_NODES_IDENTITY,\ FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, SERVER_CONF_PATH, SERVERS from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet from fate_flow.utils.service_utils import ServiceUtils from fate_flow.entity.runtime_config import RuntimeConfig def get_json_result(retcode=0, retmsg='success', data=None, job_id=None, meta=None): result_dict = {"retcode": retcode, "retmsg": retmsg, "data": data, "jobId": job_id, "meta": meta} response = {} for key, value in result_dict.items(): if not value and key != "retcode": continue else: response[key] = value return jsonify(response) def error_response(response_code, retmsg): return Response(json.dumps({'retmsg': retmsg, 'retcode': response_code}), status=response_code, mimetype='application/json') def federated_api(job_id, method, endpoint, src_party_id, dest_party_id, src_role, json_body, work_mode, overall_timeout=DEFAULT_GRPC_OVERALL_TIMEOUT): if int(dest_party_id) == 0: return local_api(job_id=job_id, method=method, endpoint=endpoint, json_body=json_body) if work_mode == WorkMode.STANDALONE: return local_api(job_id=job_id, method=method, endpoint=endpoint, json_body=json_body) elif work_mode == WorkMode.CLUSTER: return remote_api(job_id=job_id, method=method, endpoint=endpoint, src_party_id=src_party_id, src_role=src_role, dest_party_id=dest_party_id, json_body=json_body, overall_timeout=overall_timeout) else: raise Exception('{} work mode is not supported'.format(work_mode)) def remote_api(job_id, method, endpoint, src_party_id, dest_party_id, src_role, json_body, overall_timeout=DEFAULT_GRPC_OVERALL_TIMEOUT): json_body['src_role'] = src_role if CHECK_NODES_IDENTITY: get_node_identity(json_body, src_party_id) _packet = wrap_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id, overall_timeout=overall_timeout) try: channel, stub = get_proxy_data_channel() _return = stub.unaryCall(_packet) audit_logger(job_id).info("grpc api response: {}".format(_return)) channel.close() json_body = json.loads(_return.body.value) return json_body except Exception as e: tips = '' if 'Error received from peer' in str(e): tips = 'Please check if the fate flow server of the other party is started. ' if 'failed to connect to all addresses' in str(e): tips = 'Please check whether the rollsite service(port: 9370) is started. 
' raise Exception('{}rpc request error: {}'.format(tips,e)) def local_api(method, endpoint, json_body, job_id=None): try: url = "http://{}{}".format(RuntimeConfig.JOB_SERVER_HOST, endpoint) audit_logger(job_id).info('local api request: {}'.format(url)) action = getattr(requests, method.lower(), None) response = action(url=url, json=json_body, headers=HEADERS) audit_logger(job_id).info(response.text) response_json_body = response.json() audit_logger(job_id).info('local api response: {} {}'.format(endpoint, response_json_body)) return response_json_body except Exception as e: raise Exception('local request error: {}'.format(e)) def request_execute_server(request, execute_host): try: endpoint = request.base_url.replace(request.host_url, '') method = request.method url = "http://{}/{}".format(execute_host, endpoint) audit_logger().info('sub request: {}'.format(url)) action = getattr(requests, method.lower(), None) response = action(url=url, json=request.json, headers=HEADERS) return jsonify(response.json()) except requests.exceptions.ConnectionError as e: return get_json_result(retcode=999, retmsg='please start fate flow server: {}'.format(execute_host)) except Exception as e: raise Exception('local request error: {}'.format(e)) def get_node_identity(json_body, src_party_id): params = { 'partyId': int(src_party_id), 'federatedId': file_utils.load_json_conf_real_time(SERVER_CONF_PATH).get(SERVERS).get('fatemanager', {}).get('federatedId') } try: response = requests.post(url="http://{}:{}{}".format( ServiceUtils.get_item("fatemanager", "host"), ServiceUtils.get_item("fatemanager", "port"), FATE_MANAGER_GET_NODE_INFO_ENDPOINT), json=params) json_body['appKey'] = response.json().get('data').get('appKey') json_body['appSecret'] = response.json().get('data').get('appSecret') json_body['_src_role'] = response.json().get('data').get('role') except Exception as e: raise Exception('get appkey and secret failed: {}'.format(str(e))) def forward_api(job_id, method, endpoint, src_party_id, dest_party_id, json_body, role, overall_timeout=DEFAULT_GRPC_OVERALL_TIMEOUT): _packet = forward_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id=job_id, role=role, overall_timeout=overall_timeout) channel, stub = get_proxy_data_channel() _return = stub.unaryCall(_packet) channel.close() json_body = json.loads(_return.body.value) return json_body
[ "fate_flow.utils.grpc_utils.forward_grpc_packet", "fate_flow.utils.grpc_utils.get_proxy_data_channel", "fate_flow.utils.service_utils.ServiceUtils.get_item", "fate_flow.utils.grpc_utils.wrap_grpc_packet" ]
[((1588, 1605), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (1595, 1605), False, 'from flask import jsonify\n'), ((2856, 2975), 'fate_flow.utils.grpc_utils.wrap_grpc_packet', 'wrap_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id', 'job_id'], {'overall_timeout': 'overall_timeout'}), '(json_body, method, endpoint, src_party_id, dest_party_id,\n job_id, overall_timeout=overall_timeout)\n', (2872, 2975), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet\n'), ((6011, 6151), 'fate_flow.utils.grpc_utils.forward_grpc_packet', 'forward_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id'], {'job_id': 'job_id', 'role': 'role', 'overall_timeout': 'overall_timeout'}), '(json_body, method, endpoint, src_party_id,\n dest_party_id, job_id=job_id, role=role, overall_timeout=overall_timeout)\n', (6030, 6151), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet\n'), ((6203, 6227), 'fate_flow.utils.grpc_utils.get_proxy_data_channel', 'get_proxy_data_channel', ([], {}), '()\n', (6225, 6227), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet\n'), ((6302, 6332), 'json.loads', 'json.loads', (['_return.body.value'], {}), '(_return.body.value)\n', (6312, 6332), False, 'import json\n'), ((1671, 1727), 'json.dumps', 'json.dumps', (["{'retmsg': retmsg, 'retcode': response_code}"], {}), "({'retmsg': retmsg, 'retcode': response_code})\n", (1681, 1727), False, 'import json\n'), ((3036, 3060), 'fate_flow.utils.grpc_utils.get_proxy_data_channel', 'get_proxy_data_channel', ([], {}), '()\n', (3058, 3060), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet\n'), ((3222, 3252), 'json.loads', 'json.loads', (['_return.body.value'], {}), '(_return.body.value)\n', (3232, 3252), False, 'import json\n'), ((3111, 3131), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (3123, 3131), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((3827, 3847), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (3839, 3847), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((4023, 4043), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (4035, 4043), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((4117, 4137), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (4129, 4137), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((4559, 4573), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', ([], {}), '()\n', (4571, 4573), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((5352, 5396), 'fate_flow.utils.service_utils.ServiceUtils.get_item', 'ServiceUtils.get_item', (['"""fatemanager"""', '"""host"""'], {}), "('fatemanager', 'host')\n", (5373, 5396), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((5410, 5454), 'fate_flow.utils.service_utils.ServiceUtils.get_item', 'ServiceUtils.get_item', (['"""fatemanager"""', '"""port"""'], {}), "('fatemanager', 'port')\n", (5431, 5454), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((5154, 5207), 'arch.api.utils.file_utils.load_json_conf_real_time', 'file_utils.load_json_conf_real_time', (['SERVER_CONF_PATH'], {}), 
'(SERVER_CONF_PATH)\n', (5189, 5207), False, 'from arch.api.utils import file_utils\n')]
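Each record pairs a source string with the fully qualified API calls extracted from it. As a rough idea of how such call sites can be located with the standard ast module — a simplified stand-in, not the tooling that actually produced the extract_api column — consider:

import ast


def collect_call_names(source):
    """Return the dotted name of every call expression found in `source`."""
    names = []

    def dotted(node):
        # Rebuild "a.b.c" from nested Attribute/Name nodes; skip anything more exotic.
        if isinstance(node, ast.Name):
            return node.id
        if isinstance(node, ast.Attribute):
            base = dotted(node.value)
            return "{}.{}".format(base, node.attr) if base else None
        return None

    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            name = dotted(node.func)
            if name:
                names.append(name)
    return names


sample = "from fate_flow.operation.job_saver import JobSaver\nJobSaver.update_job(job_info={})\n"
print(collect_call_names(sample))   # ['JobSaver.update_job']

Resolving the local name (JobSaver.update_job) to the fully qualified module path recorded in the apis column would additionally require inspecting the import statements, which the real extraction evidently does.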
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64 from fate_flow.utils.log_utils import schedule_logger from fate_flow.db import db_utils from fate_flow.db.db_models import (DB, TrackingMetric) from fate_flow.entity import Metric from fate_flow.utils import job_utils class MetricManager: def __init__(self, job_id: str, role: str, party_id: int, component_name: str, task_id: str = None, task_version: int = None): self.job_id = job_id self.role = role self.party_id = party_id self.component_name = component_name self.task_id = task_id self.task_version = task_version @DB.connection_context() def read_metric_data(self, metric_namespace: str, metric_name: str, job_level=False): metrics = [] for k, v in self.read_metrics_from_db(metric_namespace, metric_name, 1, job_level): metrics.append(Metric(key=k, value=v)) return metrics @DB.connection_context() def insert_metrics_into_db(self, metric_namespace: str, metric_name: str, data_type: int, kv, job_level=False): try: model_class = self.get_model_class() tracking_metric = model_class() tracking_metric.f_job_id = self.job_id tracking_metric.f_component_name = ( self.component_name if not job_level else job_utils.job_pipeline_component_name()) tracking_metric.f_task_id = self.task_id tracking_metric.f_task_version = self.task_version tracking_metric.f_role = self.role tracking_metric.f_party_id = self.party_id tracking_metric.f_metric_namespace = metric_namespace tracking_metric.f_metric_name = metric_name tracking_metric.f_type = data_type default_db_source = tracking_metric.to_dict() tracking_metric_data_source = [] for k, v in kv: db_source = default_db_source.copy() db_source['f_key'] = serialize_b64(k) db_source['f_value'] = serialize_b64(v) db_source['f_create_time'] = current_timestamp() tracking_metric_data_source.append(db_source) db_utils.bulk_insert_into_db(model_class, tracking_metric_data_source, schedule_logger(self.job_id)) except Exception as e: schedule_logger(self.job_id).exception( "An exception where inserted metric {} of metric namespace: {} to database:\n{}".format( metric_name, metric_namespace, e )) @DB.connection_context() def read_metrics_from_db(self, metric_namespace: str, metric_name: str, data_type, job_level=False): metrics = [] try: tracking_metric_model = self.get_model_class() tracking_metrics = tracking_metric_model.select(tracking_metric_model.f_key, tracking_metric_model.f_value).where( tracking_metric_model.f_job_id == self.job_id, tracking_metric_model.f_component_name == ( self.component_name if not job_level else job_utils.job_pipeline_component_name()), tracking_metric_model.f_role == self.role, tracking_metric_model.f_party_id == self.party_id, tracking_metric_model.f_metric_namespace == metric_namespace, tracking_metric_model.f_metric_name == metric_name, tracking_metric_model.f_type == data_type ) for tracking_metric in tracking_metrics: yield 
deserialize_b64(tracking_metric.f_key), deserialize_b64(tracking_metric.f_value) except Exception as e: schedule_logger(self.job_id).exception(e) raise e return metrics @DB.connection_context() def clean_metrics(self): tracking_metric_model = self.get_model_class() operate = tracking_metric_model.delete().where( tracking_metric_model.f_task_id == self.task_id, tracking_metric_model.f_task_version == self.task_version, tracking_metric_model.f_role == self.role, tracking_metric_model.f_party_id == self.party_id ) return operate.execute() > 0 @DB.connection_context() def get_metric_list(self, job_level: bool = False): metrics = {} tracking_metric_model = self.get_model_class() if tracking_metric_model.table_exists(): tracking_metrics = tracking_metric_model.select( tracking_metric_model.f_metric_namespace, tracking_metric_model.f_metric_name ).where( tracking_metric_model.f_job_id == self.job_id, tracking_metric_model.f_component_name == (self.component_name if not job_level else 'dag'), tracking_metric_model.f_role == self.role, tracking_metric_model.f_party_id == self.party_id ).distinct() for tracking_metric in tracking_metrics: metrics[tracking_metric.f_metric_namespace] = metrics.get(tracking_metric.f_metric_namespace, []) metrics[tracking_metric.f_metric_namespace].append(tracking_metric.f_metric_name) return metrics @DB.connection_context() def read_component_metrics(self): try: tracking_metric_model = self.get_model_class() tracking_metrics = tracking_metric_model.select().where( tracking_metric_model.f_job_id == self.job_id, tracking_metric_model.f_component_name == self.component_name, tracking_metric_model.f_role == self.role, tracking_metric_model.f_party_id == self.party_id, tracking_metric_model.f_task_version == self.task_version ) return [tracking_metric for tracking_metric in tracking_metrics] except Exception as e: schedule_logger(self.job_id).exception(e) raise e @DB.connection_context() def reload_metric(self, source_metric_manager): component_metrics = source_metric_manager.read_component_metrics() for component_metric in component_metrics: model_class = self.get_model_class() tracking_metric = model_class() tracking_metric.f_job_id = self.job_id tracking_metric.f_component_name = self.component_name tracking_metric.f_task_id = self.task_id tracking_metric.f_task_version = self.task_version tracking_metric.f_role = self.role tracking_metric.f_party_id = self.party_id tracking_metric.f_metric_namespace = component_metric.f_metric_namespace tracking_metric.f_metric_name = component_metric.f_metric_name tracking_metric.f_type = component_metric.f_type tracking_metric.f_key = component_metric.f_key tracking_metric.f_value = component_metric.f_value tracking_metric.save() def get_model_class(self): return db_utils.get_dynamic_db_model(TrackingMetric, self.job_id)
[ "fate_flow.utils.job_utils.job_pipeline_component_name", "fate_flow.db.db_models.DB.connection_context", "fate_flow.db.db_utils.get_dynamic_db_model", "fate_flow.utils.log_utils.schedule_logger", "fate_flow.entity.Metric" ]
[((1340, 1363), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (1361, 1363), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((1647, 1670), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (1668, 1670), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((3319, 3342), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (3340, 3342), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((4589, 4612), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (4610, 4612), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((5055, 5078), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (5076, 5078), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((6071, 6094), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (6092, 6094), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((6818, 6841), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (6839, 6841), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((7874, 7932), 'fate_flow.db.db_utils.get_dynamic_db_model', 'db_utils.get_dynamic_db_model', (['TrackingMetric', 'self.job_id'], {}), '(TrackingMetric, self.job_id)\n', (7903, 7932), False, 'from fate_flow.db import db_utils\n'), ((1594, 1616), 'fate_flow.entity.Metric', 'Metric', ([], {'key': 'k', 'value': 'v'}), '(key=k, value=v)\n', (1600, 1616), False, 'from fate_flow.entity import Metric\n'), ((2051, 2090), 'fate_flow.utils.job_utils.job_pipeline_component_name', 'job_utils.job_pipeline_component_name', ([], {}), '()\n', (2088, 2090), False, 'from fate_flow.utils import job_utils\n'), ((2700, 2716), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (['k'], {}), '(k)\n', (2713, 2716), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((2756, 2772), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (['v'], {}), '(v)\n', (2769, 2772), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((2818, 2837), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (2835, 2837), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((2983, 3011), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (2998, 3011), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3056, 3084), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (3071, 3084), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3913, 3952), 'fate_flow.utils.job_utils.job_pipeline_component_name', 'job_utils.job_pipeline_component_name', ([], {}), '()\n', (3950, 3952), False, 'from fate_flow.utils import job_utils\n'), ((4374, 4412), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (['tracking_metric.f_key'], {}), '(tracking_metric.f_key)\n', (4389, 4412), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((4414, 4454), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (['tracking_metric.f_value'], {}), 
'(tracking_metric.f_value)\n', (4429, 4454), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((4498, 4526), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (4513, 4526), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((6750, 6778), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (6765, 6778), False, 'from fate_flow.utils.log_utils import schedule_logger\n')]
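The MetricManager record above persists each metric key/value pair to a tracking table as a base64-encoded blob and decodes it again on read. Below is a minimal, framework-free sketch of that serialize-on-write / deserialize-on-read round trip using only stdlib `pickle` and `base64`; the helper names `to_b64`/`from_b64` are illustrative stand-ins, not the FATE `serialize_b64`/`deserialize_b64` implementations.

```python
import base64
import pickle


def to_b64(obj) -> str:
    """Serialize an arbitrary Python object to a base64 string (illustrative stand-in)."""
    return base64.b64encode(pickle.dumps(obj)).decode("utf-8")


def from_b64(blob: str):
    """Inverse of to_b64: decode the base64 string and unpickle it."""
    return pickle.loads(base64.b64decode(blob))


# Round-trip one metric point the way the tracking table stores it.
row = {"f_key": to_b64(0), "f_value": to_b64(0.85)}
assert from_b64(row["f_key"]) == 0
assert from_b64(row["f_value"]) == 0.85
```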
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ################################################################################ # # ################################################################################ # ============================================================================= # HeteroSecureBoostingGuest # ============================================================================= import functools from operator import itemgetter import numpy as np from federatedml.tree.tree_core.predict_cache import PredictDataCache from federatedml.util.io_check import assert_io_num_rows_equal from numpy import random from arch.api.utils import log_utils from fate_flow.entity.metric import Metric from fate_flow.entity.metric import MetricMeta from federatedml.feature.binning.quantile_binning import QuantileBinning from federatedml.feature.fate_element_type import NoneType from federatedml.loss import FairLoss from federatedml.loss import HuberLoss from federatedml.loss import LeastAbsoluteErrorLoss from federatedml.loss import LeastSquaredErrorLoss from federatedml.loss import LogCoshLoss from federatedml.loss import SigmoidBinaryCrossEntropyLoss from federatedml.loss import SoftmaxCrossEntropyLoss from federatedml.loss import TweedieLoss from federatedml.optim.convergence import converge_func_factory from federatedml.param.evaluation_param import EvaluateParam from federatedml.param.feature_binning_param import FeatureBinningParam from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo from federatedml.secureprotol import IterativeAffineEncrypt from federatedml.secureprotol import PaillierEncrypt from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator from federatedml.statistic import data_overview from federatedml.transfer_variable.transfer_class.hetero_secure_boost_transfer_variable import \ HeteroSecureBoostingTreeTransferVariable from federatedml.tree import BoostingTree from federatedml.tree import HeteroDecisionTreeGuest from federatedml.util import consts from federatedml.util.classify_label_checker import ClassifyLabelChecker from federatedml.util.classify_label_checker import RegressionLabelChecker LOGGER = log_utils.getLogger() class HeteroSecureBoostingTreeGuest(BoostingTree): def __init__(self): super(HeteroSecureBoostingTreeGuest, self).__init__() self.convegence = None self.y = None self.F = None self.predict_F = None self.data_bin = None self.loss = None self.init_score = None self.classes_dict = {} self.classes_ = [] self.num_classes = 0 self.classify_target = "binary" 
self.feature_num = None self.encrypter = None self.grad_and_hess = None self.tree_dim = 1 self.tree_meta = None self.trees_ = [] self.history_loss = [] self.bin_split_points = None self.bin_sparse_points = None self.encrypted_mode_calculator = None self.predict_data_cache = PredictDataCache() self.feature_importances_ = {} self.role = consts.GUEST self.transfer_variable = HeteroSecureBoostingTreeTransferVariable() self.data_alignment_map = {} def set_loss(self, objective_param): loss_type = objective_param.objective params = objective_param.params LOGGER.info("set objective, objective is {}".format(loss_type)) if self.task_type == consts.CLASSIFICATION: if loss_type == "cross_entropy": if self.num_classes == 2: self.loss = SigmoidBinaryCrossEntropyLoss() else: self.loss = SoftmaxCrossEntropyLoss() else: raise NotImplementedError("objective %s not supported yet" % (loss_type)) elif self.task_type == consts.REGRESSION: if loss_type == "lse": self.loss = LeastSquaredErrorLoss() elif loss_type == "lae": self.loss = LeastAbsoluteErrorLoss() elif loss_type == "huber": self.loss = HuberLoss(params[0]) elif loss_type == "fair": self.loss = FairLoss(params[0]) elif loss_type == "tweedie": self.loss = TweedieLoss(params[0]) elif loss_type == "log_cosh": self.loss = LogCoshLoss() else: raise NotImplementedError("objective %s not supported yet" % (loss_type)) else: raise NotImplementedError("objective %s not supported yet" % (loss_type)) def convert_feature_to_bin(self, data_instance): LOGGER.info("convert feature to bins") param_obj = FeatureBinningParam(bin_num=self.bin_num) if self.use_missing: binning_obj = QuantileBinning(param_obj, abnormal_list=[NoneType()]) else: binning_obj = QuantileBinning(param_obj) binning_obj.fit_split_points(data_instance) self.data_bin, self.bin_split_points, self.bin_sparse_points = binning_obj.convert_feature_to_bin(data_instance) LOGGER.info("convert feature to bins over") def set_y(self): LOGGER.info("set label from data and check label") self.y = self.data_bin.mapValues(lambda instance: instance.label) self.check_label() def generate_flowid(self, round_num, tree_num): LOGGER.info("generate flowid, flowid {}".format(self.flowid)) return ".".join(map(str, [self.flowid, round_num, tree_num])) def check_label(self): LOGGER.info("check label") if self.task_type == consts.CLASSIFICATION: self.num_classes, self.classes_ = ClassifyLabelChecker.validate_label(self.data_bin) if self.num_classes > 2: self.classify_target = "multinomial" self.tree_dim = self.num_classes range_from_zero = True for _class in self.classes_: try: if _class >= 0 and _class < self.num_classes and isinstance(_class, int): continue else: range_from_zero = False break except: range_from_zero = False self.classes_ = sorted(self.classes_) if not range_from_zero: class_mapping = dict(zip(self.classes_, range(self.num_classes))) self.y = self.y.mapValues(lambda _class: class_mapping[_class]) else: RegressionLabelChecker.validate_label(self.data_bin) self.set_loss(self.objective_param) def generate_encrypter(self): LOGGER.info("generate encrypter") if self.encrypt_param.method.lower() == consts.PAILLIER.lower(): self.encrypter = PaillierEncrypt() self.encrypter.generate_key(self.encrypt_param.key_length) elif self.encrypt_param.method.lower() == consts.ITERATIVEAFFINE.lower(): self.encrypter = IterativeAffineEncrypt() self.encrypter.generate_key(self.encrypt_param.key_length) else: raise NotImplementedError("encrypt method not supported yes!!!") self.encrypted_calculator = 
EncryptModeCalculator(self.encrypter, self.calculated_mode, self.re_encrypted_rate) @staticmethod def accumulate_f(f_val, new_f_val, lr=0.1, idx=0): f_val[idx] += lr * new_f_val return f_val def update_feature_importance(self, tree_feature_importance): for fid in tree_feature_importance: if fid not in self.feature_importances_: self.feature_importances_[fid] = 0 self.feature_importances_[fid] += tree_feature_importance[fid] def update_f_value(self, new_f=None, tidx=-1, mode="train"): LOGGER.info("update tree f value, tree idx is {}".format(tidx)) if mode == "train" and self.F is None: if self.tree_dim > 1: self.F, self.init_score = self.loss.initialize(self.y, self.tree_dim) else: self.F, self.init_score = self.loss.initialize(self.y) else: accumulate_f = functools.partial(self.accumulate_f, lr=self.learning_rate, idx=tidx) if mode == "train": self.F = self.F.join(new_f, accumulate_f) else: self.predict_F = self.predict_F.join(new_f, accumulate_f) def compute_grad_and_hess(self): LOGGER.info("compute grad and hess") loss_method = self.loss if self.task_type == consts.CLASSIFICATION: self.grad_and_hess = self.y.join(self.F, lambda y, f_val: \ (loss_method.compute_grad(y, loss_method.predict(f_val)), \ loss_method.compute_hess(y, loss_method.predict(f_val)))) else: self.grad_and_hess = self.y.join(self.F, lambda y, f_val: (loss_method.compute_grad(y, f_val), loss_method.compute_hess(y, f_val))) def compute_loss(self): LOGGER.info("compute loss") if self.task_type == consts.CLASSIFICATION: loss_method = self.loss y_predict = self.F.mapValues(lambda val: loss_method.predict(val)) loss = loss_method.compute_loss(self.y, y_predict) elif self.task_type == consts.REGRESSION: if self.objective_param.objective in ["lse", "lae", "logcosh", "tweedie", "log_cosh", "huber"]: loss_method = self.loss loss = loss_method.compute_loss(self.y, self.F) else: loss_method = self.loss y_predict = self.F.mapValues(lambda val: loss_method.predict(val)) loss = loss_method.compute_loss(self.y, y_predict) return float(loss) def get_grad_and_hess(self, tree_idx): LOGGER.info("get grad and hess of tree {}".format(tree_idx)) grad_and_hess_subtree = self.grad_and_hess.mapValues( lambda grad_and_hess: (grad_and_hess[0][tree_idx], grad_and_hess[1][tree_idx])) return grad_and_hess_subtree def check_convergence(self, loss): LOGGER.info("check convergence") if self.convegence is None: self.convegence = converge_func_factory("diff", self.tol) return self.convegence.is_converge(loss) def sample_valid_features(self): LOGGER.info("sample valid features") if self.feature_num is None: self.feature_num = self.bin_split_points.shape[0] choose_feature = random.choice(range(0, self.feature_num), \ max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False) valid_features = [False for i in range(self.feature_num)] for fid in choose_feature: valid_features[fid] = True return valid_features def sync_tree_dim(self): LOGGER.info("sync tree dim to host") self.transfer_variable.tree_dim.remote(self.tree_dim, role=consts.HOST, idx=-1) def sync_stop_flag(self, stop_flag, num_round): LOGGER.info("sync stop flag to host, boost round is {}".format(num_round)) self.transfer_variable.stop_flag.remote(stop_flag, role=consts.HOST, idx=-1, suffix=(num_round,)) def sync_predict_start_round(self, num_round): LOGGER.info("sync predict start round {}".format(num_round)) self.transfer_variable.predict_start_round.remote(num_round, role=consts.HOST, idx=-1) def fit(self, data_inst, validate_data=None): LOGGER.info("begin to train 
secureboosting guest model") self.gen_feature_fid_mapping(data_inst.schema) self.validation_strategy = self.init_validation_strategy(data_inst, validate_data) data_inst = self.data_alignment(data_inst) self.convert_feature_to_bin(data_inst) self.set_y() self.update_f_value() self.generate_encrypter() self.sync_tree_dim() self.callback_meta("loss", "train", MetricMeta(name="train", metric_type="LOSS", extra_metas={"unit_name": "iters"})) for i in range(self.num_trees): self.compute_grad_and_hess() for tidx in range(self.tree_dim): LOGGER.info("start to fit, boost round: {}, tree index: {}".format(i, tidx)) tree_inst = HeteroDecisionTreeGuest(self.tree_param) tree_inst.set_inputinfo(self.data_bin, self.get_grad_and_hess(tidx), self.bin_split_points, self.bin_sparse_points) valid_features = self.sample_valid_features() tree_inst.set_valid_features(valid_features) tree_inst.set_encrypter(self.encrypter) tree_inst.set_encrypted_mode_calculator(self.encrypted_calculator) tree_inst.set_flowid(self.generate_flowid(i, tidx)) tree_inst.set_host_party_idlist(self.component_properties.host_party_idlist) tree_inst.set_runtime_idx(self.component_properties.local_partyid) tree_inst.fit() tree_meta, tree_param = tree_inst.get_model() self.trees_.append(tree_param) if self.tree_meta is None: self.tree_meta = tree_meta self.update_f_value(new_f=tree_inst.predict_weights, tidx=tidx) self.update_feature_importance(tree_inst.get_feature_importance()) loss = self.compute_loss() self.history_loss.append(loss) LOGGER.debug("boost round {} loss is {}".format(i, loss)) self.callback_metric("loss", "train", [Metric(i, loss)]) if self.validation_strategy: self.validation_strategy.validate(self, i) if self.validation_strategy.need_stop(): LOGGER.debug('early stopping triggered') break if self.n_iter_no_change is True: if self.check_convergence(loss): self.sync_stop_flag(True, i) LOGGER.debug("check loss convergence on boost round {}".format(i)) break else: self.sync_stop_flag(False, i) LOGGER.debug("history loss is {}".format(self.history_loss)) self.callback_meta("loss", "train", MetricMeta(name="train", metric_type="LOSS", extra_metas={"Best": min(self.history_loss)})) if self.validation_strategy and self.validation_strategy.has_saved_best_model(): self.load_model(self.validation_strategy.cur_best_model) LOGGER.info("end to train secureboosting guest model") def predict_f_value(self, data_inst, cache_dataset_key): LOGGER.debug("predict tree f value, there are {} trees".format(len(self.trees_))) init_score = self.init_score last_round = self.predict_data_cache.predict_data_last_round(cache_dataset_key) rounds = len(self.trees_) // self.tree_dim if last_round == -1: self.predict_F = data_inst.mapValues(lambda v: init_score) else: LOGGER.debug("hit cache, cached round is {}".format(last_round)) if last_round >= rounds - 1: LOGGER.debug("predict data cached, rounds is {}, total cached round is {}".format(rounds, last_round)) self.predict_F = self.predict_data_cache.predict_data_at(cache_dataset_key, min(rounds - 1, last_round)) self.sync_predict_start_round(last_round + 1) for i in range(last_round + 1, rounds): for tidx in range(self.tree_dim): LOGGER.info("start to predict, boost round: {}, tree index: {}".format(i, tidx)) tree_inst = HeteroDecisionTreeGuest(self.tree_param) tree_inst.load_model(self.tree_meta, self.trees_[i * self.tree_dim + tidx]) # tree_inst.set_tree_model(self.trees_[i * self.tree_dim + tidx]) tree_inst.set_flowid(self.generate_flowid(i, tidx)) 
tree_inst.set_runtime_idx(self.component_properties.local_partyid) tree_inst.set_host_party_idlist(self.component_properties.host_party_idlist) predict_data = tree_inst.predict(data_inst) self.update_f_value(new_f=predict_data, tidx=tidx, mode="predict") self.predict_data_cache.add_data(cache_dataset_key, self.predict_F) @assert_io_num_rows_equal def predict(self, data_inst): LOGGER.info("start predict") cache_dataset_key = self.predict_data_cache.get_data_key(data_inst) if cache_dataset_key in self.data_alignment_map: data_inst = self.data_alignment_map[cache_dataset_key] else: data_inst = self.data_alignment(data_inst) header = [None] * len(self.feature_name_fid_mapping) for idx, col in self.feature_name_fid_mapping.items(): header[idx] = col data_inst = data_overview.header_alignment(data_inst, header) self.data_alignment_map[cache_dataset_key] = data_inst self.predict_f_value(data_inst, cache_dataset_key) if self.task_type == consts.CLASSIFICATION: loss_method = self.loss if self.num_classes == 2: predicts = self.predict_F.mapValues(lambda f: float(loss_method.predict(f))) else: predicts = self.predict_F.mapValues(lambda f: loss_method.predict(f).tolist()) elif self.task_type == consts.REGRESSION: if self.objective_param.objective in ["lse", "lae", "huber", "log_cosh", "fair", "tweedie"]: predicts = self.predict_F else: raise NotImplementedError("objective {} not supported yet".format(self.objective_param.objective)) if self.task_type == consts.CLASSIFICATION: classes_ = self.classes_ if self.num_classes == 2: threshold = self.predict_param.threshold predict_result = data_inst.join(predicts, lambda inst, pred: [inst.label, classes_[1] if pred > threshold else classes_[0], pred, {"0": 1 - pred, "1": pred}]) else: predict_label = predicts.mapValues(lambda preds: classes_[np.argmax(preds)]) predict_result = data_inst.join(predicts, lambda inst, preds: [inst.label, classes_[np.argmax(preds)], np.max(preds), dict(zip(map(str, classes_), preds))]) elif self.task_type == consts.REGRESSION: predict_result = data_inst.join(predicts, lambda inst, pred: [inst.label, float(pred), float(pred), {"label": float(pred)}]) else: raise NotImplementedError("task type {} not supported yet".format(self.task_type)) LOGGER.info("end predict") return predict_result def get_feature_importance(self): return self.feature_importances_ def get_model_meta(self): model_meta = BoostingTreeModelMeta() model_meta.tree_meta.CopyFrom(self.tree_meta) model_meta.learning_rate = self.learning_rate model_meta.num_trees = self.num_trees model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num)) model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective, param=self.objective_param.params)) model_meta.task_type = self.task_type # model_meta.tree_dim = self.tree_dim model_meta.n_iter_no_change = self.n_iter_no_change model_meta.tol = self.tol # model_meta.num_classes = self.num_classes # model_meta.classes_.extend(map(str, self.classes_)) # model_meta.need_run = self.need_run meta_name = "HeteroSecureBoostingTreeGuestMeta" return meta_name, model_meta def set_model_meta(self, model_meta): self.tree_meta = model_meta.tree_meta self.learning_rate = model_meta.learning_rate self.num_trees = model_meta.num_trees self.bin_num = model_meta.quantile_meta.bin_num self.objective_param.objective = model_meta.objective_meta.objective self.objective_param.params = list(model_meta.objective_meta.param) self.task_type = model_meta.task_type # self.tree_dim = model_meta.tree_dim # self.num_classes = 
model_meta.num_classes self.n_iter_no_change = model_meta.n_iter_no_change self.tol = model_meta.tol # self.classes_ = list(model_meta.classes_) # self.set_loss(self.objective_param) def get_model_param(self): model_param = BoostingTreeModelParam() model_param.tree_num = len(list(self.trees_)) model_param.tree_dim = self.tree_dim model_param.trees_.extend(self.trees_) model_param.init_score.extend(self.init_score) model_param.losses.extend(self.history_loss) model_param.classes_.extend(map(str, self.classes_)) model_param.num_classes = self.num_classes model_param.best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration feature_importances = list(self.get_feature_importance().items()) feature_importances = sorted(feature_importances, key=itemgetter(1), reverse=True) feature_importance_param = [] for (sitename, fid), _importance in feature_importances: feature_importance_param.append(FeatureImportanceInfo(sitename=sitename, fid=fid, importance=_importance)) model_param.feature_importances.extend(feature_importance_param) model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping) param_name = "HeteroSecureBoostingTreeGuestParam" return param_name, model_param def set_model_param(self, model_param): self.trees_ = list(model_param.trees_) self.init_score = np.array(list(model_param.init_score)) self.history_loss = list(model_param.losses) self.classes_ = list(map(int, model_param.classes_)) self.tree_dim = model_param.tree_dim self.num_classes = model_param.num_classes self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping) def get_metrics_param(self): if self.task_type == consts.CLASSIFICATION: if self.num_classes == 2: return EvaluateParam(eval_type="binary", pos_label=self.classes_[1], metrics=self.metrics) else: return EvaluateParam(eval_type="multi", metrics=self.metrics) else: return EvaluateParam(eval_type="regression", metrics=self.metrics) def export_model(self): if self.need_cv: return None meta_name, meta_protobuf = self.get_model_meta() param_name, param_protobuf = self.get_model_param() return {meta_name: meta_protobuf, param_name: param_protobuf} def load_model(self, model_dict): model_param = None model_meta = None for _, value in model_dict["model"].items(): for model in value: if model.endswith("Meta"): model_meta = value[model] if model.endswith("Param"): model_param = value[model] LOGGER.info("load model") self.set_model_meta(model_meta) self.set_model_param(model_param) self.set_loss(self.objective_param)
[ "fate_flow.entity.metric.MetricMeta", "fate_flow.entity.metric.Metric" ]
[((3203, 3224), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (3222, 3224), False, 'from arch.api.utils import log_utils\n'), ((4045, 4063), 'federatedml.tree.tree_core.predict_cache.PredictDataCache', 'PredictDataCache', ([], {}), '()\n', (4061, 4063), False, 'from federatedml.tree.tree_core.predict_cache import PredictDataCache\n'), ((4171, 4213), 'federatedml.transfer_variable.transfer_class.hetero_secure_boost_transfer_variable.HeteroSecureBoostingTreeTransferVariable', 'HeteroSecureBoostingTreeTransferVariable', ([], {}), '()\n', (4211, 4213), False, 'from federatedml.transfer_variable.transfer_class.hetero_secure_boost_transfer_variable import HeteroSecureBoostingTreeTransferVariable\n'), ((5748, 5789), 'federatedml.param.feature_binning_param.FeatureBinningParam', 'FeatureBinningParam', ([], {'bin_num': 'self.bin_num'}), '(bin_num=self.bin_num)\n', (5767, 5789), False, 'from federatedml.param.feature_binning_param import FeatureBinningParam\n'), ((8294, 8382), 'federatedml.secureprotol.encrypt_mode.EncryptModeCalculator', 'EncryptModeCalculator', (['self.encrypter', 'self.calculated_mode', 'self.re_encrypted_rate'], {}), '(self.encrypter, self.calculated_mode, self.\n re_encrypted_rate)\n', (8315, 8382), False, 'from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator\n'), ((21287, 21310), 'federatedml.protobuf.generated.boosting_tree_model_meta_pb2.BoostingTreeModelMeta', 'BoostingTreeModelMeta', ([], {}), '()\n', (21308, 21310), False, 'from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta\n'), ((22964, 22988), 'federatedml.protobuf.generated.boosting_tree_model_param_pb2.BoostingTreeModelParam', 'BoostingTreeModelParam', ([], {}), '()\n', (22986, 22988), False, 'from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam\n'), ((5941, 5967), 'federatedml.feature.binning.quantile_binning.QuantileBinning', 'QuantileBinning', (['param_obj'], {}), '(param_obj)\n', (5956, 5967), False, 'from federatedml.feature.binning.quantile_binning import QuantileBinning\n'), ((6730, 6780), 'federatedml.util.classify_label_checker.ClassifyLabelChecker.validate_label', 'ClassifyLabelChecker.validate_label', (['self.data_bin'], {}), '(self.data_bin)\n', (6765, 6780), False, 'from federatedml.util.classify_label_checker import ClassifyLabelChecker\n'), ((7593, 7645), 'federatedml.util.classify_label_checker.RegressionLabelChecker.validate_label', 'RegressionLabelChecker.validate_label', (['self.data_bin'], {}), '(self.data_bin)\n', (7630, 7645), False, 'from federatedml.util.classify_label_checker import RegressionLabelChecker\n'), ((7816, 7839), 'federatedml.util.consts.PAILLIER.lower', 'consts.PAILLIER.lower', ([], {}), '()\n', (7837, 7839), False, 'from federatedml.util import consts\n'), ((7870, 7887), 'federatedml.secureprotol.PaillierEncrypt', 'PaillierEncrypt', ([], {}), '()\n', (7885, 7887), False, 'from federatedml.secureprotol import PaillierEncrypt\n'), ((9236, 9305), 'functools.partial', 'functools.partial', (['self.accumulate_f'], {'lr': 'self.learning_rate', 'idx': 'tidx'}), '(self.accumulate_f, lr=self.learning_rate, idx=tidx)\n', (9253, 9305), False, 'import functools\n'), ((11396, 11435), 'federatedml.optim.convergence.converge_func_factory', 'converge_func_factory', (['"""diff"""', 'self.tol'], {}), "('diff', self.tol)\n", (11417, 11435), False, 'from federatedml.optim.convergence import converge_func_factory\n'), ((13575, 13660), 
'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""train"""', 'metric_type': '"""LOSS"""', 'extra_metas': "{'unit_name': 'iters'}"}), "(name='train', metric_type='LOSS', extra_metas={'unit_name': 'iters'}\n )\n", (13585, 13660), False, 'from fate_flow.entity.metric import MetricMeta\n'), ((18840, 18889), 'federatedml.statistic.data_overview.header_alignment', 'data_overview.header_alignment', (['data_inst', 'header'], {}), '(data_inst, header)\n', (18870, 18889), False, 'from federatedml.statistic import data_overview\n'), ((21507, 21541), 'federatedml.protobuf.generated.boosting_tree_model_meta_pb2.QuantileMeta', 'QuantileMeta', ([], {'bin_num': 'self.bin_num'}), '(bin_num=self.bin_num)\n', (21519, 21541), False, 'from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta\n'), ((21586, 21681), 'federatedml.protobuf.generated.boosting_tree_model_meta_pb2.ObjectiveMeta', 'ObjectiveMeta', ([], {'objective': 'self.objective_param.objective', 'param': 'self.objective_param.params'}), '(objective=self.objective_param.objective, param=self.\n objective_param.params)\n', (21599, 21681), False, 'from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta\n'), ((25100, 25159), 'federatedml.param.evaluation_param.EvaluateParam', 'EvaluateParam', ([], {'eval_type': '"""regression"""', 'metrics': 'self.metrics'}), "(eval_type='regression', metrics=self.metrics)\n", (25113, 25159), False, 'from federatedml.param.evaluation_param import EvaluateParam\n'), ((8009, 8039), 'federatedml.util.consts.ITERATIVEAFFINE.lower', 'consts.ITERATIVEAFFINE.lower', ([], {}), '()\n', (8037, 8039), False, 'from federatedml.util import consts\n'), ((8070, 8094), 'federatedml.secureprotol.IterativeAffineEncrypt', 'IterativeAffineEncrypt', ([], {}), '()\n', (8092, 8094), False, 'from federatedml.secureprotol import IterativeAffineEncrypt\n'), ((13982, 14022), 'federatedml.tree.HeteroDecisionTreeGuest', 'HeteroDecisionTreeGuest', (['self.tree_param'], {}), '(self.tree_param)\n', (14005, 14022), False, 'from federatedml.tree import HeteroDecisionTreeGuest\n'), ((17595, 17635), 'federatedml.tree.HeteroDecisionTreeGuest', 'HeteroDecisionTreeGuest', (['self.tree_param'], {}), '(self.tree_param)\n', (17618, 17635), False, 'from federatedml.tree import HeteroDecisionTreeGuest\n'), ((23614, 23627), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (23624, 23627), False, 'from operator import itemgetter\n'), ((23790, 23863), 'federatedml.protobuf.generated.boosting_tree_model_param_pb2.FeatureImportanceInfo', 'FeatureImportanceInfo', ([], {'sitename': 'sitename', 'fid': 'fid', 'importance': '_importance'}), '(sitename=sitename, fid=fid, importance=_importance)\n', (23811, 23863), False, 'from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo\n'), ((24850, 24938), 'federatedml.param.evaluation_param.EvaluateParam', 'EvaluateParam', ([], {'eval_type': '"""binary"""', 'pos_label': 'self.classes_[1]', 'metrics': 'self.metrics'}), "(eval_type='binary', pos_label=self.classes_[1], metrics=self.\n metrics)\n", (24863, 24938), False, 'from federatedml.param.evaluation_param import EvaluateParam\n'), ((25012, 25066), 'federatedml.param.evaluation_param.EvaluateParam', 'EvaluateParam', ([], {'eval_type': '"""multi"""', 'metrics': 'self.metrics'}), "(eval_type='multi', metrics=self.metrics)\n", (25025, 25066), False, 'from federatedml.param.evaluation_param import EvaluateParam\n'), ((4622, 4653), 
'federatedml.loss.SigmoidBinaryCrossEntropyLoss', 'SigmoidBinaryCrossEntropyLoss', ([], {}), '()\n', (4651, 4653), False, 'from federatedml.loss import SigmoidBinaryCrossEntropyLoss\n'), ((4708, 4733), 'federatedml.loss.SoftmaxCrossEntropyLoss', 'SoftmaxCrossEntropyLoss', ([], {}), '()\n', (4731, 4733), False, 'from federatedml.loss import SoftmaxCrossEntropyLoss\n'), ((4955, 4978), 'federatedml.loss.LeastSquaredErrorLoss', 'LeastSquaredErrorLoss', ([], {}), '()\n', (4976, 4978), False, 'from federatedml.loss import LeastSquaredErrorLoss\n'), ((15370, 15385), 'fate_flow.entity.metric.Metric', 'Metric', (['i', 'loss'], {}), '(i, loss)\n', (15376, 15385), False, 'from fate_flow.entity.metric import Metric\n'), ((5044, 5068), 'federatedml.loss.LeastAbsoluteErrorLoss', 'LeastAbsoluteErrorLoss', ([], {}), '()\n', (5066, 5068), False, 'from federatedml.loss import LeastAbsoluteErrorLoss\n'), ((5888, 5898), 'federatedml.feature.fate_element_type.NoneType', 'NoneType', ([], {}), '()\n', (5896, 5898), False, 'from federatedml.feature.fate_element_type import NoneType\n'), ((5136, 5156), 'federatedml.loss.HuberLoss', 'HuberLoss', (['params[0]'], {}), '(params[0])\n', (5145, 5156), False, 'from federatedml.loss import HuberLoss\n'), ((20366, 20382), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (20375, 20382), True, 'import numpy as np\n'), ((20583, 20596), 'numpy.max', 'np.max', (['preds'], {}), '(preds)\n', (20589, 20596), True, 'import numpy as np\n'), ((5223, 5242), 'federatedml.loss.FairLoss', 'FairLoss', (['params[0]'], {}), '(params[0])\n', (5231, 5242), False, 'from federatedml.loss import FairLoss\n'), ((20485, 20501), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (20494, 20501), True, 'import numpy as np\n'), ((5312, 5334), 'federatedml.loss.TweedieLoss', 'TweedieLoss', (['params[0]'], {}), '(params[0])\n', (5323, 5334), False, 'from federatedml.loss import TweedieLoss\n'), ((5405, 5418), 'federatedml.loss.LogCoshLoss', 'LogCoshLoss', ([], {}), '()\n', (5416, 5418), False, 'from federatedml.loss import LogCoshLoss\n')]
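The boosting record above updates each sample's score additively across rounds (`F += learning_rate * tree_prediction`, see `accumulate_f`/`update_f_value`) and only maps the accumulated score to a probability at prediction time. Below is a toy, framework-free sketch of that accumulation for a single sample; the per-round tree outputs are made-up numbers used purely for illustration.

```python
import math

# Toy accumulation of boosting scores for one sample over several rounds.
learning_rate = 0.1
init_score = 0.0
tree_outputs = [0.8, 0.5, 0.3]  # hypothetical raw tree predictions, one per round

f_value = init_score
for round_idx, new_f in enumerate(tree_outputs):
    f_value += learning_rate * new_f  # same additive update as accumulate_f
    print(f"round {round_idx}: F = {f_value:.3f}")

# With a sigmoid link (binary classification), the accumulated score maps to a probability.
probability = 1.0 / (1.0 + math.exp(-f_value))
print(f"predicted probability: {probability:.3f}")
```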
import hashlib import json import os import random import threading import sys import time import uuid import functools import pandas as pd import numpy as np from fate_test._config import Config from fate_test._io import echo, LOGGER def import_fate(): from fate_arch import storage from fate_flow.utils import data_utils from fate_arch import session from fate_arch.storage import StorageEngine from fate_arch.common.conf_utils import get_base_config from fate_arch.storage import EggRollStoreType return storage, data_utils, session, StorageEngine, get_base_config, EggRollStoreType storage, data_utils, session, StorageEngine, get_base_config, EggRollStoreType = import_fate() sys.setrecursionlimit(1000000) class data_progress: def __init__(self, down_load, time_start): self.time_start = time_start self.down_load = down_load self.time_percent = 0 self.switch = True def set_switch(self, switch): self.switch = switch def get_switch(self): return self.switch def set_time_percent(self, time_percent): self.time_percent = time_percent def get_time_percent(self): return self.time_percent def progress(self, percent): if percent > 100: percent = 100 end = time.time() if percent != 100: print(f"\r{self.down_load} %.f%s [%s] running" % (percent, '%', self.timer(end - self.time_start)), flush=True, end='') else: print(f"\r{self.down_load} %.f%s [%s] success" % (percent, '%', self.timer(end - self.time_start)), flush=True, end='') @staticmethod def timer(times): hours, rem = divmod(times, 3600) minutes, seconds = divmod(rem, 60) return "{:0>2}:{:0>2}:{:0>2}".format(int(hours), int(minutes), int(seconds)) def remove_file(path): os.remove(path) def id_encryption(encryption_type, start_num, end_num): if encryption_type == 'md5': return [hashlib.md5(bytes(str(value), encoding='utf-8')).hexdigest() for value in range(start_num, end_num)] elif encryption_type == 'sha256': return [hashlib.sha256(bytes(str(value), encoding='utf-8')).hexdigest() for value in range(start_num, end_num)] else: return [str(value) for value in range(start_num, end_num)] def get_big_data(guest_data_size, host_data_size, guest_feature_num, host_feature_num, include_path, host_data_type, conf: Config, encryption_type, match_rate, sparsity, force, split_host, output_path, parallelize): global big_data_dir def list_tag_value(feature_nums, head): # data = '' # for f in range(feature_nums): # data += head[f] + ':' + str(round(np.random.randn(), 4)) + ";" # return data[:-1] return ";".join([head[k] + ':' + str(round(v, 4)) for k, v in enumerate(np.random.randn(feature_nums))]) def list_tag(feature_nums, data_list): data = '' for f in range(feature_nums): data += random.choice(data_list) + ";" return data[:-1] def _generate_tag_value_data(data_path, start_num, end_num, feature_nums, progress): data_num = end_num - start_num section_data_size = round(data_num / 100) iteration = round(data_num / section_data_size) head = ['x' + str(i) for i in range(feature_nums)] for batch in range(iteration + 1): progress.set_time_percent(batch) output_data = pd.DataFrame(columns=["id"]) if section_data_size * (batch + 1) <= data_num: output_data["id"] = id_encryption(encryption_type, section_data_size * batch + start_num, section_data_size * (batch + 1) + start_num) slicing_data_size = section_data_size elif section_data_size * batch < data_num: output_data['id'] = id_encryption(encryption_type, section_data_size * batch + start_num, end_num) slicing_data_size = data_num - section_data_size * batch else: break feature = [list_tag_value(feature_nums, head) for i 
in range(slicing_data_size)] output_data['feature'] = feature output_data.to_csv(data_path, mode='a+', index=False, header=False) def _generate_dens_data(data_path, start_num, end_num, feature_nums, label_flag, progress): if label_flag: head_1 = ['id', 'y'] else: head_1 = ['id'] data_num = end_num - start_num head_2 = ['x' + str(i) for i in range(feature_nums)] df_data_1 = pd.DataFrame(columns=head_1) head_data = pd.DataFrame(columns=head_1 + head_2) head_data.to_csv(data_path, mode='a+', index=False) section_data_size = round(data_num / 100) iteration = round(data_num / section_data_size) for batch in range(iteration + 1): progress.set_time_percent(batch) if section_data_size * (batch + 1) <= data_num: df_data_1["id"] = id_encryption(encryption_type, section_data_size * batch + start_num, section_data_size * (batch + 1) + start_num) slicing_data_size = section_data_size elif section_data_size * batch < data_num: df_data_1 = pd.DataFrame(columns=head_1) df_data_1["id"] = id_encryption(encryption_type, section_data_size * batch + start_num, end_num) slicing_data_size = data_num - section_data_size * batch else: break if label_flag: df_data_1["y"] = [round(np.random.random()) for x in range(slicing_data_size)] feature = np.random.randint(-10000, 10000, size=[slicing_data_size, feature_nums]) / 10000 df_data_2 = pd.DataFrame(feature, columns=head_2) output_data = pd.concat([df_data_1, df_data_2], axis=1) output_data.to_csv(data_path, mode='a+', index=False, header=False) def _generate_tag_data(data_path, start_num, end_num, feature_nums, sparsity, progress): data_num = end_num - start_num section_data_size = round(data_num / 100) iteration = round(data_num / section_data_size) valid_set = [x for x in range(2019120799, 2019120799 + round(feature_nums / sparsity))] data = list(map(str, valid_set)) for batch in range(iteration + 1): progress.set_time_percent(batch) output_data = pd.DataFrame(columns=["id"]) if section_data_size * (batch + 1) <= data_num: output_data["id"] = id_encryption(encryption_type, section_data_size * batch + start_num, section_data_size * (batch + 1) + start_num) slicing_data_size = section_data_size elif section_data_size * batch < data_num: output_data["id"] = id_encryption(encryption_type, section_data_size * batch + start_num, end_num) slicing_data_size = data_num - section_data_size * batch else: break feature = [list_tag(feature_nums, data_list=data) for i in range(slicing_data_size)] output_data['feature'] = feature output_data.to_csv(data_path, mode='a+', index=False, header=False) def _generate_parallelize_data(start_num, end_num, feature_nums, table_name, namespace, label_flag, data_type, partition, progress): def expand_id_range(k, v): if label_flag: return [(id_encryption(encryption_type, ids, ids + 1)[0], ",".join([str(round(np.random.random()))] + [str(round(i, 4)) for i in np.random.randn(v)])) for ids in range(int(k), min(step + int(k), end_num))] else: if data_type == 'tag': valid_set = [x for x in range(2019120799, 2019120799 + round(feature_nums / sparsity))] data = list(map(str, valid_set)) return [(id_encryption(encryption_type, ids, ids + 1)[0], ";".join([random.choice(data) for i in range(int(v))])) for ids in range(int(k), min(step + int(k), data_num))] elif data_type == 'tag_value': return [(id_encryption(encryption_type, ids, ids + 1)[0], ";".join([f"x{i}" + ':' + str(round(i, 4)) for i in np.random.randn(v)])) for ids in range(int(k), min(step + int(k), data_num))] elif data_type == 'dense': return [(id_encryption(encryption_type, ids, ids + 
1)[0], ",".join([str(round(i, 4)) for i in np.random.randn(v)])) for ids in range(int(k), min(step + int(k), data_num))] data_num = end_num - start_num step = 10000 if data_num > 10000 else int(data_num / 10) table_list = [(f"{i * step}", f"{feature_nums}") for i in range(int(data_num / step) + start_num)] table = sess.computing.parallelize(table_list, partition=partition, include_key=True) table = table.flatMap(functools.partial(expand_id_range)) if label_flag: schema = {"sid": "id", "header": ",".join(["y"] + [f"x{i}" for i in range(feature_nums)])} else: schema = {"sid": "id", "header": ",".join([f"x{i}" for i in range(feature_nums)])} if data_type != "dense": schema = None h_table = sess.get_table(name=table_name, namespace=namespace) if h_table: h_table.destroy() table_meta = sess.persistent(computing_table=table, name=table_name, namespace=namespace, schema=schema) storage_session = sess.storage() s_table = storage_session.get_table(namespace=table_meta.get_namespace(), name=table_meta.get_name()) if s_table.count() == data_num: progress.set_time_percent(100) from fate_flow.manager.data_manager import DataTableTracker DataTableTracker.create_table_tracker( table_name=table_name, table_namespace=namespace, entity_info={} ) def data_save(data_info, table_names, namespaces, partition_list): data_count = 0 for idx, data_name in enumerate(data_info.keys()): label_flag = True if 'guest' in data_info[data_name] else False data_type = 'dense' if 'guest' in data_info[data_name] else host_data_type if split_host and ('host' in data_info[data_name]): host_end_num = int(np.ceil(host_data_size / len(data_info))) * (data_count + 1) if np.ceil( host_data_size / len(data_info)) * (data_count + 1) <= host_data_size else host_data_size host_start_num = int(np.ceil(host_data_size / len(data_info))) * data_count data_count += 1 else: host_end_num = host_data_size host_start_num = 0 out_path = os.path.join(str(big_data_dir), data_name) if os.path.exists(out_path) and os.path.isfile(out_path) and not parallelize: if force: remove_file(out_path) else: echo.echo('{} Already exists'.format(out_path)) continue data_i = (idx + 1) / len(data_info) downLoad = f'dataget [{"#" * int(24 * data_i)}{"-" * (24 - int(24 * data_i))}] {idx + 1}/{len(data_info)}' start = time.time() progress = data_progress(downLoad, start) thread = threading.Thread(target=run, args=[progress]) thread.start() try: if 'guest' in data_info[data_name]: if not parallelize: _generate_dens_data(out_path, guest_start_num, guest_end_num, guest_feature_num, label_flag, progress) else: _generate_parallelize_data( guest_start_num, guest_end_num, guest_feature_num, table_names[idx], namespaces[idx], label_flag, data_type, partition_list[idx], progress) else: if data_type == 'tag' and not parallelize: _generate_tag_data(out_path, host_start_num, host_end_num, host_feature_num, sparsity, progress) elif data_type == 'tag_value' and not parallelize: _generate_tag_value_data(out_path, host_start_num, host_end_num, host_feature_num, progress) elif data_type == 'dense' and not parallelize: _generate_dens_data(out_path, host_start_num, host_end_num, host_feature_num, label_flag, progress) elif parallelize: _generate_parallelize_data( host_start_num, host_end_num, host_feature_num, table_names[idx], namespaces[idx], label_flag, data_type, partition_list[idx], progress) progress.set_switch(False) time.sleep(1) except Exception: exception_id = uuid.uuid1() echo.echo(f"exception_id={exception_id}") LOGGER.exception(f"exception id: {exception_id}") finally: 
progress.set_switch(False) echo.stdout_newline() def run(p): while p.get_switch(): time.sleep(1) p.progress(p.get_time_percent()) if not match_rate > 0 or not match_rate <= 1: raise Exception(f"The value is between (0-1), Please check match_rate:{match_rate}") guest_start_num = host_data_size - int(guest_data_size * match_rate) guest_end_num = guest_start_num + guest_data_size if os.path.isfile(include_path): with include_path.open("r") as f: testsuite_config = json.load(f) else: raise Exception(f'Input file error, please check{include_path}.') try: if output_path is not None: big_data_dir = os.path.abspath(output_path) else: big_data_dir = os.path.abspath(conf.cache_directory) except Exception: raise Exception('{}path does not exist'.format(big_data_dir)) date_set = {} table_name_list = [] table_namespace_list = [] partition_list = [] for upload_dict in testsuite_config.get('data'): date_set[os.path.basename(upload_dict.get('file'))] = upload_dict.get('role') table_name_list.append(upload_dict.get('table_name')) table_namespace_list.append(upload_dict.get('namespace')) partition_list.append(upload_dict.get('partition', 8)) if parallelize: with session.Session() as sess: session_id = str(uuid.uuid1()) sess.init_computing(session_id) data_save( data_info=date_set, table_names=table_name_list, namespaces=table_namespace_list, partition_list=partition_list) else: data_save( data_info=date_set, table_names=table_name_list, namespaces=table_namespace_list, partition_list=partition_list) echo.echo(f'Data storage address, please check{big_data_dir}')
[ "fate_flow.manager.data_manager.DataTableTracker.create_table_tracker" ]
[((716, 746), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(1000000)'], {}), '(1000000)\n', (737, 746), False, 'import sys\n'), ((1916, 1931), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (1925, 1931), False, 'import os\n'), ((14545, 14573), 'os.path.isfile', 'os.path.isfile', (['include_path'], {}), '(include_path)\n', (14559, 14573), False, 'import os\n'), ((1318, 1329), 'time.time', 'time.time', ([], {}), '()\n', (1327, 1329), False, 'import time\n'), ((4700, 4728), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'head_1'}), '(columns=head_1)\n', (4712, 4728), True, 'import pandas as pd\n'), ((4749, 4786), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '(head_1 + head_2)'}), '(columns=head_1 + head_2)\n', (4761, 4786), True, 'import pandas as pd\n'), ((10252, 10359), 'fate_flow.manager.data_manager.DataTableTracker.create_table_tracker', 'DataTableTracker.create_table_tracker', ([], {'table_name': 'table_name', 'table_namespace': 'namespace', 'entity_info': '{}'}), '(table_name=table_name,\n table_namespace=namespace, entity_info={})\n', (10289, 10359), False, 'from fate_flow.manager.data_manager import DataTableTracker\n'), ((15990, 16052), 'fate_test._io.echo.echo', 'echo.echo', (['f"""Data storage address, please check{big_data_dir}"""'], {}), "(f'Data storage address, please check{big_data_dir}')\n", (15999, 16052), False, 'from fate_test._io import echo, LOGGER\n'), ((3540, 3568), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['id']"}), "(columns=['id'])\n", (3552, 3568), True, 'import pandas as pd\n'), ((5939, 5976), 'pandas.DataFrame', 'pd.DataFrame', (['feature'], {'columns': 'head_2'}), '(feature, columns=head_2)\n', (5951, 5976), True, 'import pandas as pd\n'), ((6003, 6044), 'pandas.concat', 'pd.concat', (['[df_data_1, df_data_2]'], {'axis': '(1)'}), '([df_data_1, df_data_2], axis=1)\n', (6012, 6044), True, 'import pandas as pd\n'), ((6615, 6643), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['id']"}), "(columns=['id'])\n", (6627, 6643), True, 'import pandas as pd\n'), ((9375, 9409), 'functools.partial', 'functools.partial', (['expand_id_range'], {}), '(expand_id_range)\n', (9392, 9409), False, 'import functools\n'), ((11756, 11767), 'time.time', 'time.time', ([], {}), '()\n', (11765, 11767), False, 'import time\n'), ((11843, 11888), 'threading.Thread', 'threading.Thread', ([], {'target': 'run', 'args': '[progress]'}), '(target=run, args=[progress])\n', (11859, 11888), False, 'import threading\n'), ((14207, 14220), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (14217, 14220), False, 'import time\n'), ((14648, 14660), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14657, 14660), False, 'import json\n'), ((14817, 14845), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (14832, 14845), False, 'import os\n'), ((14887, 14924), 'os.path.abspath', 'os.path.abspath', (['conf.cache_directory'], {}), '(conf.cache_directory)\n', (14902, 14924), False, 'import os\n'), ((15478, 15495), 'fate_arch.session.Session', 'session.Session', ([], {}), '()\n', (15493, 15495), False, 'from fate_arch import session\n'), ((3076, 3100), 'random.choice', 'random.choice', (['data_list'], {}), '(data_list)\n', (3089, 3100), False, 'import random\n'), ((5834, 5906), 'numpy.random.randint', 'np.random.randint', (['(-10000)', '(10000)'], {'size': '[slicing_data_size, feature_nums]'}), '(-10000, 10000, size=[slicing_data_size, feature_nums])\n', (5851, 5906), True, 'import numpy as np\n'), ((11305, 11329), 'os.path.exists', 
'os.path.exists', (['out_path'], {}), '(out_path)\n', (11319, 11329), False, 'import os\n'), ((11334, 11358), 'os.path.isfile', 'os.path.isfile', (['out_path'], {}), '(out_path)\n', (11348, 11358), False, 'import os\n'), ((13834, 13847), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (13844, 13847), False, 'import time\n'), ((14126, 14147), 'fate_test._io.echo.stdout_newline', 'echo.stdout_newline', ([], {}), '()\n', (14145, 14147), False, 'from fate_test._io import echo, LOGGER\n'), ((15534, 15546), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (15544, 15546), False, 'import uuid\n'), ((5435, 5463), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'head_1'}), '(columns=head_1)\n', (5447, 5463), True, 'import pandas as pd\n'), ((13909, 13921), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (13919, 13921), False, 'import uuid\n'), ((13938, 13979), 'fate_test._io.echo.echo', 'echo.echo', (['f"""exception_id={exception_id}"""'], {}), "(f'exception_id={exception_id}')\n", (13947, 13979), False, 'from fate_test._io import echo, LOGGER\n'), ((13996, 14045), 'fate_test._io.LOGGER.exception', 'LOGGER.exception', (['f"""exception id: {exception_id}"""'], {}), "(f'exception id: {exception_id}')\n", (14012, 14045), False, 'from fate_test._io import echo, LOGGER\n'), ((2923, 2952), 'numpy.random.randn', 'np.random.randn', (['feature_nums'], {}), '(feature_nums)\n', (2938, 2952), True, 'import numpy as np\n'), ((5757, 5775), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5773, 5775), True, 'import numpy as np\n'), ((8305, 8324), 'random.choice', 'random.choice', (['data'], {}), '(data)\n', (8318, 8324), False, 'import random\n'), ((7869, 7887), 'numpy.random.randn', 'np.random.randn', (['v'], {}), '(v)\n', (7884, 7887), True, 'import numpy as np\n'), ((7818, 7836), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7834, 7836), True, 'import numpy as np\n'), ((8642, 8660), 'numpy.random.randn', 'np.random.randn', (['v'], {}), '(v)\n', (8657, 8660), True, 'import numpy as np\n'), ((8934, 8952), 'numpy.random.randn', 'np.random.randn', (['v'], {}), '(v)\n', (8949, 8952), True, 'import numpy as np\n')]
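The data-generation record above hashes sequential integer ids with MD5 or SHA-256 before writing them out (`id_encryption`). Below is a small stand-alone version of the same idea; `hash_ids` is a hypothetical name introduced here, not part of fate_test.

```python
import hashlib


def hash_ids(encryption_type: str, start: int, end: int):
    """Hash sequential integer ids; fall back to plain strings when no hashing is requested."""
    if encryption_type == "md5":
        return [hashlib.md5(str(i).encode("utf-8")).hexdigest() for i in range(start, end)]
    if encryption_type == "sha256":
        return [hashlib.sha256(str(i).encode("utf-8")).hexdigest() for i in range(start, end)]
    return [str(i) for i in range(start, end)]


print(hash_ids("md5", 0, 3))     # three 32-character hex digests
print(hash_ids("sha256", 0, 3))  # three 64-character hex digests
```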
import json
import os
import time
import unittest

import requests

from fate_flow.entity.run_status import EndStatus, JobStatus
from fate_arch.common.file_utils import load_json_conf
from fate_flow.utils.base_utils import get_fate_flow_python_directory
from fate_flow.settings import API_VERSION, HOST, HTTP_PORT, IS_STANDALONE

WORK_MODE = 1 if not IS_STANDALONE else 0


class TestTracking(unittest.TestCase):
    def setUp(self):
        self.sleep_time = 10
        self.success_job_dir = './jobs/'
        self.dsl_path = 'fate_flow/examples/test_hetero_lr_job_dsl.json'
        self.config_path = 'fate_flow/examples/test_hetero_lr_job_conf.json'
        self.test_component_name = 'hetero_feature_selection_0'
        self.server_url = "http://{}:{}/{}".format(HOST, HTTP_PORT, API_VERSION)
        self.party_info = load_json_conf(os.path.abspath(os.path.join('./jobs', 'party_info.json'))) if WORK_MODE else None
        self.guest_party_id = self.party_info['guest'] if WORK_MODE else 9999
        self.host_party_id = self.party_info['host'] if WORK_MODE else 10000

    def test_tracking(self):
        with open(os.path.join(get_fate_flow_python_directory(), self.dsl_path), 'r') as f:
            dsl_data = json.load(f)
        with open(os.path.join(get_fate_flow_python_directory(), self.config_path), 'r') as f:
            config_data = json.load(f)
        config_data[
            "initiator"]["party_id"] = self.guest_party_id
        config_data["role"] = {
            "guest": [self.guest_party_id],
            "host": [self.host_party_id],
            "arbiter": [self.host_party_id]
        }
        response = requests.post("/".join([self.server_url, 'job', 'submit']),
                                 json={'job_dsl': dsl_data, 'job_runtime_conf': config_data})
        self.assertTrue(response.status_code in [200, 201])
        self.assertTrue(int(response.json()['retcode']) == 0)
        job_id = response.json()['jobId']
        job_info = {'f_status': 'running'}
        for i in range(60):
            response = requests.post("/".join([self.server_url, 'job', 'query']), json={'job_id': job_id, 'role': 'guest'})
            self.assertTrue(response.status_code in [200, 201])
            job_info = response.json()['data'][0]
            if EndStatus.contains(job_info['f_status']):
                break
            time.sleep(self.sleep_time)
            print('waiting job run success, the job has been running for {}s'.format((i + 1) * self.sleep_time))
        self.assertTrue(job_info['f_status'] == JobStatus.SUCCESS)
        os.makedirs(self.success_job_dir, exist_ok=True)
        with open(os.path.join(self.success_job_dir, job_id), 'w') as fw:
            json.dump(job_info, fw)
        self.assertTrue(os.path.exists(os.path.join(self.success_job_dir, job_id)))
        # test_component_parameters
        test_component(self, 'component/parameters')
        # test_component_metric_all
        test_component(self, 'component/metric/all')
        # test_component_metric
        test_component(self, 'component/metrics')
        # test_component_output_model
        test_component(self, 'component/output/model')
        # test_component_output_data_download
        test_component(self, 'component/output/data')
        # test_component_output_data_download
        test_component(self, 'component/output/data/download')
        # test_job_data_view
        test_component(self, 'job/data_view')


def test_component(self, fun):
    job_id = os.listdir(os.path.abspath(os.path.join(self.success_job_dir)))[-1]
    job_info = load_json_conf(os.path.abspath(os.path.join(self.success_job_dir, job_id)))
    data = {'job_id': job_id, 'role': job_info['f_role'], 'party_id': job_info['f_party_id'],
            'component_name': self.test_component_name}
    if 'download' in fun:
        response = requests.get("/".join([self.server_url, "tracking", fun]), json=data, stream=True)
        self.assertTrue(response.status_code in [200, 201])
    else:
        response = requests.post("/".join([self.server_url, 'tracking', fun]), json=data)
        self.assertTrue(response.status_code in [200, 201])
        self.assertTrue(int(response.json()['retcode']) == 0)


if __name__ == '__main__':
    unittest.main()
[ "fate_flow.utils.base_utils.get_fate_flow_python_directory", "fate_flow.entity.run_status.EndStatus.contains" ]
[((4254, 4269), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4267, 4269), False, 'import unittest\n'), ((2581, 2629), 'os.makedirs', 'os.makedirs', (['self.success_job_dir'], {'exist_ok': '(True)'}), '(self.success_job_dir, exist_ok=True)\n', (2592, 2629), False, 'import os\n'), ((1221, 1233), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1230, 1233), False, 'import json\n'), ((1355, 1367), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1364, 1367), False, 'import json\n'), ((2293, 2333), 'fate_flow.entity.run_status.EndStatus.contains', 'EndStatus.contains', (["job_info['f_status']"], {}), "(job_info['f_status'])\n", (2311, 2333), False, 'from fate_flow.entity.run_status import EndStatus, JobStatus\n'), ((2369, 2396), 'time.sleep', 'time.sleep', (['self.sleep_time'], {}), '(self.sleep_time)\n', (2379, 2396), False, 'import time\n'), ((2716, 2739), 'json.dump', 'json.dump', (['job_info', 'fw'], {}), '(job_info, fw)\n', (2725, 2739), False, 'import json\n'), ((3628, 3670), 'os.path.join', 'os.path.join', (['self.success_job_dir', 'job_id'], {}), '(self.success_job_dir, job_id)\n', (3640, 3670), False, 'import os\n'), ((2648, 2690), 'os.path.join', 'os.path.join', (['self.success_job_dir', 'job_id'], {}), '(self.success_job_dir, job_id)\n', (2660, 2690), False, 'import os\n'), ((2779, 2821), 'os.path.join', 'os.path.join', (['self.success_job_dir', 'job_id'], {}), '(self.success_job_dir, job_id)\n', (2791, 2821), False, 'import os\n'), ((3541, 3575), 'os.path.join', 'os.path.join', (['self.success_job_dir'], {}), '(self.success_job_dir)\n', (3553, 3575), False, 'import os\n'), ((854, 895), 'os.path.join', 'os.path.join', (['"""./jobs"""', '"""party_info.json"""'], {}), "('./jobs', 'party_info.json')\n", (866, 895), False, 'import os\n'), ((1137, 1169), 'fate_flow.utils.base_utils.get_fate_flow_python_directory', 'get_fate_flow_python_directory', ([], {}), '()\n', (1167, 1169), False, 'from fate_flow.utils.base_utils import get_fate_flow_python_directory\n'), ((1265, 1297), 'fate_flow.utils.base_utils.get_fate_flow_python_directory', 'get_fate_flow_python_directory', ([], {}), '()\n', (1295, 1297), False, 'from fate_flow.utils.base_utils import get_fate_flow_python_directory\n')]
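The job-polling loop in TestTracking.test_tracking above can be reused outside of unittest. Below is a minimal sketch of the same pattern as a standalone helper; the helper name wait_for_job and its defaults are hypothetical, and a literal set of end statuses stands in for EndStatus.contains(), so it assumes only a reachable FATE Flow server at server_url.

import time
import requests


def wait_for_job(server_url, job_id, role='guest', max_tries=60, sleep_time=10):
    """Poll the FATE Flow job query endpoint until the job reaches an end status (sketch)."""
    job_info = {'f_status': 'running'}
    for i in range(max_tries):
        response = requests.post("/".join([server_url, 'job', 'query']),
                                 json={'job_id': job_id, 'role': role})
        response.raise_for_status()
        job_info = response.json()['data'][0]
        # assumed end statuses; the test itself uses EndStatus.contains()
        if job_info['f_status'] in {'success', 'failed', 'canceled'}:
            break
        time.sleep(sleep_time)
        print('job has been running for {}s'.format((i + 1) * sleep_time))
    return job_info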
# # Copyright 2021 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import hashlib from pathlib import Path from typing import Dict, Tuple from shutil import copytree, rmtree from base64 import b64encode from datetime import datetime from collections import deque, OrderedDict from ruamel import yaml from fate_arch.common.base_utils import json_dumps, json_loads from fate_flow.settings import stat_logger from fate_flow.entity import RunParameters from fate_flow.utils.model_utils import gen_party_model_id from fate_flow.utils.base_utils import get_fate_flow_directory from fate_flow.model import Locker class Checkpoint(Locker): def __init__(self, directory: Path, step_index: int, step_name: str, mkdir: bool = True): self.step_index = step_index self.step_name = step_name self.mkdir = mkdir self.create_time = None directory = directory / f'{step_index}#{step_name}' if self.mkdir: directory.mkdir(0o755, True, True) self.database = directory / 'database.yaml' super().__init__(directory) @property def available(self): return self.database.exists() def save(self, model_buffers: Dict[str, Tuple[str, bytes, dict]]): if not model_buffers: raise ValueError('model_buffers is empty.') self.create_time = datetime.utcnow() data = { 'step_index': self.step_index, 'step_name': self.step_name, 'create_time': self.create_time.isoformat(), 'models': {}, } model_data = {} for model_name, (pb_name, serialized_string, json_format_dict) in model_buffers.items(): model_data[model_name] = (serialized_string, json_format_dict) data['models'][model_name] = { 'sha1': hashlib.sha1(serialized_string).hexdigest(), 'buffer_name': pb_name, } with self.lock: for model_name, model in data['models'].items(): serialized_string, json_format_dict = model_data[model_name] (self.directory / f'{model_name}.pb').write_bytes(serialized_string) (self.directory / f'{model_name}.json').write_text(json_dumps(json_format_dict), 'utf8') self.database.write_text(yaml.dump(data, Dumper=yaml.RoundTripDumper), 'utf8') stat_logger.info(f'Checkpoint saved. path: {self.directory}') return self.directory def read_database(self): with self.lock: data = yaml.safe_load(self.database.read_text('utf8')) if data['step_index'] != self.step_index or data['step_name'] != self.step_name: raise ValueError('Checkpoint may be incorrect: step_index or step_name dose not match. 
' f'filepath: {self.database} ' f'expected step_index: {self.step_index} actual step_index: {data["step_index"]} ' f'expected step_name: {self.step_name} actual step_index: {data["step_name"]}') self.create_time = datetime.fromisoformat(data['create_time']) return data def read(self, parse_models: bool = True, include_database: bool = False): data = self.read_database() with self.lock: for model_name, model in data['models'].items(): model['filepath_pb'] = self.directory / f'{model_name}.pb' model['filepath_json'] = self.directory / f'{model_name}.json' if not model['filepath_pb'].exists() or not model['filepath_json'].exists(): raise FileNotFoundError( 'Checkpoint is incorrect: protobuf file or json file not found. ' f'protobuf filepath: {model["filepath_pb"]} json filepath: {model["filepath_json"]}' ) model_data = { model_name: ( model['filepath_pb'].read_bytes(), json_loads(model['filepath_json'].read_text('utf8')), ) for model_name, model in data['models'].items() } for model_name, model in data['models'].items(): serialized_string, json_format_dict = model_data[model_name] sha1 = hashlib.sha1(serialized_string).hexdigest() if sha1 != model['sha1']: raise ValueError('Checkpoint may be incorrect: hash dose not match. ' f'filepath: {model["filepath"]} expected: {model["sha1"]} actual: {sha1}') data['models'] = { model_name: ( model['buffer_name'], *model_data[model_name], ) if parse_models else b64encode(model_data[model_name][0]).decode('ascii') for model_name, model in data['models'].items() } return data if include_database else data['models'] def remove(self): self.create_time = None rmtree(self.directory) if self.mkdir: self.directory.mkdir(0o755) def to_dict(self, include_models: bool = False): if not include_models: return self.read_database() return self.read(False, True) class CheckpointManager: def __init__(self, job_id: str = None, role: str = None, party_id: int = None, model_id: str = None, model_version: str = None, component_name: str = None, component_module_name: str = None, task_id: str = None, task_version: int = None, job_parameters: RunParameters = None, max_to_keep: int = None, mkdir: bool = True, ): self.job_id = job_id self.role = role self.party_id = party_id self.model_id = model_id self.model_version = model_version self.party_model_id = gen_party_model_id(self.model_id, self.role, self.party_id) self.component_name = component_name if component_name else 'pipeline' self.module_name = component_module_name if component_module_name else 'Pipeline' self.task_id = task_id self.task_version = task_version self.job_parameters = job_parameters self.mkdir = mkdir self.directory = (Path(get_fate_flow_directory()) / 'model_local_cache' / self.party_model_id / model_version / 'checkpoint' / self.component_name) if self.mkdir: self.directory.mkdir(0o755, True, True) if isinstance(max_to_keep, int): if max_to_keep <= 0: raise ValueError('max_to_keep must be positive') elif max_to_keep is not None: raise TypeError('max_to_keep must be an integer') self.checkpoints = deque(maxlen=max_to_keep) def load_checkpoints_from_disk(self): checkpoints = [] for directory in self.directory.glob('*'): if not directory.is_dir() or '#' not in directory.name: continue step_index, step_name = directory.name.split('#', 1) checkpoint = Checkpoint(self.directory, int(step_index), step_name) if not checkpoint.available: continue checkpoints.append(checkpoint) self.checkpoints = deque(sorted(checkpoints, key=lambda i: i.step_index), self.max_checkpoints_number) @property def 
checkpoints_number(self): return len(self.checkpoints) @property def max_checkpoints_number(self): return self.checkpoints.maxlen @property def number_indexed_checkpoints(self): return OrderedDict((i.step_index, i) for i in self.checkpoints) @property def name_indexed_checkpoints(self): return OrderedDict((i.step_name, i) for i in self.checkpoints) def get_checkpoint_by_index(self, step_index: int): return self.number_indexed_checkpoints.get(step_index) def get_checkpoint_by_name(self, step_name: str): return self.name_indexed_checkpoints.get(step_name) @property def latest_checkpoint(self): if self.checkpoints: return self.checkpoints[-1] @property def latest_step_index(self): if self.latest_checkpoint is not None: return self.latest_checkpoint.step_index @property def latest_step_name(self): if self.latest_checkpoint is not None: return self.latest_checkpoint.step_name def new_checkpoint(self, step_index: int, step_name: str): popped_checkpoint = None if self.max_checkpoints_number and self.checkpoints_number >= self.max_checkpoints_number: popped_checkpoint = self.checkpoints[0] checkpoint = Checkpoint(self.directory, step_index, step_name) self.checkpoints.append(checkpoint) if popped_checkpoint is not None: popped_checkpoint.remove() return checkpoint def clean(self): self.checkpoints = deque(maxlen=self.max_checkpoints_number) rmtree(self.directory) if self.mkdir: self.directory.mkdir(0o755) def deploy(self, new_model_version: str, model_alias: str, step_index: int = None, step_name: str = None): if step_index is not None: checkpoint = self.get_checkpoint_by_index(step_index) elif step_name is not None: checkpoint = self.get_checkpoint_by_name(step_name) else: raise KeyError('step_index or step_name is required.') if checkpoint is None: raise TypeError('Checkpoint not found.') # check files hash checkpoint.read() directory = Path(get_fate_flow_directory()) / 'model_local_cache' / self.party_model_id / new_model_version target = directory / 'variables' / 'data' / self.component_name / model_alias locker = Locker(directory) with locker.lock: rmtree(target, True) copytree(checkpoint.directory, target, ignore=lambda src, names: {i for i in names if i.startswith('.')}) for f in target.glob('*.pb'): f.replace(f.with_suffix('')) def to_dict(self, include_models: bool = False): return [checkpoint.to_dict(include_models) for checkpoint in self.checkpoints]
[ "fate_flow.settings.stat_logger.info", "fate_flow.model.Locker", "fate_flow.utils.base_utils.get_fate_flow_directory", "fate_flow.utils.model_utils.gen_party_model_id" ]
[((1895, 1912), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1910, 1912), False, 'from datetime import datetime\n'), ((2925, 2986), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""Checkpoint saved. path: {self.directory}"""'], {}), "(f'Checkpoint saved. path: {self.directory}')\n", (2941, 2986), False, 'from fate_flow.settings import stat_logger\n'), ((3656, 3699), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["data['create_time']"], {}), "(data['create_time'])\n", (3678, 3699), False, 'from datetime import datetime\n'), ((5571, 5593), 'shutil.rmtree', 'rmtree', (['self.directory'], {}), '(self.directory)\n', (5577, 5593), False, 'from shutil import copytree, rmtree\n'), ((6471, 6530), 'fate_flow.utils.model_utils.gen_party_model_id', 'gen_party_model_id', (['self.model_id', 'self.role', 'self.party_id'], {}), '(self.model_id, self.role, self.party_id)\n', (6489, 6530), False, 'from fate_flow.utils.model_utils import gen_party_model_id\n'), ((7369, 7394), 'collections.deque', 'deque', ([], {'maxlen': 'max_to_keep'}), '(maxlen=max_to_keep)\n', (7374, 7394), False, 'from collections import deque, OrderedDict\n'), ((8225, 8281), 'collections.OrderedDict', 'OrderedDict', (['((i.step_index, i) for i in self.checkpoints)'], {}), '((i.step_index, i) for i in self.checkpoints)\n', (8236, 8281), False, 'from collections import deque, OrderedDict\n'), ((8352, 8407), 'collections.OrderedDict', 'OrderedDict', (['((i.step_name, i) for i in self.checkpoints)'], {}), '((i.step_name, i) for i in self.checkpoints)\n', (8363, 8407), False, 'from collections import deque, OrderedDict\n'), ((9576, 9617), 'collections.deque', 'deque', ([], {'maxlen': 'self.max_checkpoints_number'}), '(maxlen=self.max_checkpoints_number)\n', (9581, 9617), False, 'from collections import deque, OrderedDict\n'), ((9626, 9648), 'shutil.rmtree', 'rmtree', (['self.directory'], {}), '(self.directory)\n', (9632, 9648), False, 'from shutil import copytree, rmtree\n'), ((10464, 10481), 'fate_flow.model.Locker', 'Locker', (['directory'], {}), '(directory)\n', (10470, 10481), False, 'from fate_flow.model import Locker\n'), ((10521, 10541), 'shutil.rmtree', 'rmtree', (['target', '(True)'], {}), '(target, True)\n', (10527, 10541), False, 'from shutil import copytree, rmtree\n'), ((2862, 2906), 'ruamel.yaml.dump', 'yaml.dump', (['data'], {'Dumper': 'yaml.RoundTripDumper'}), '(data, Dumper=yaml.RoundTripDumper)\n', (2871, 2906), False, 'from ruamel import yaml\n'), ((2786, 2814), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['json_format_dict'], {}), '(json_format_dict)\n', (2796, 2814), False, 'from fate_arch.common.base_utils import json_dumps, json_loads\n'), ((4869, 4900), 'hashlib.sha1', 'hashlib.sha1', (['serialized_string'], {}), '(serialized_string)\n', (4881, 4900), False, 'import hashlib\n'), ((2372, 2403), 'hashlib.sha1', 'hashlib.sha1', (['serialized_string'], {}), '(serialized_string)\n', (2384, 2403), False, 'import hashlib\n'), ((5325, 5361), 'base64.b64encode', 'b64encode', (['model_data[model_name][0]'], {}), '(model_data[model_name][0])\n', (5334, 5361), False, 'from base64 import b64encode\n'), ((10270, 10295), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (10293, 10295), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((6876, 6901), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (6899, 6901), False, 'from 
fate_flow.utils.base_utils import get_fate_flow_directory\n')]
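A minimal usage sketch of the Checkpoint / CheckpointManager API defined above. The import path, job and model identifiers, and the serialized protobuf bytes are placeholders; the model-buffer layout (buffer name, serialized bytes, json-format dict) follows the save() signature, and the sketch assumes a writable FATE Flow model cache directory.

from fate_flow.model.checkpoint import CheckpointManager  # assumed module path

# Placeholder identifiers for illustration only.
manager = CheckpointManager(job_id='202101010000000000', role='guest', party_id=9999,
                            model_id='guest-9999#host-10000#model',
                            model_version='202101010000000000',
                            component_name='hetero_lr_0', max_to_keep=3)
manager.load_checkpoints_from_disk()

# Each model buffer is (pb_name, serialized_string, json_format_dict).
checkpoint = manager.new_checkpoint(step_index=manager.checkpoints_number, step_name='epoch_0')
checkpoint.save({'HeteroLRModelParam': ('LRModelParam', b'<serialized protobuf bytes>', {'iters': 0})})

# Read the latest checkpoint back: {model_name: (buffer_name, serialized_string, json_format_dict)}
print(manager.latest_checkpoint.read())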
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys from fate_arch.common import FederatedCommunicationType from fate_arch.common.log import schedule_logger from fate_flow.db.db_models import Task from fate_flow.operation.task_executor import TaskExecutor from fate_flow.scheduler.federated_scheduler import FederatedScheduler from fate_flow.entity.types import TaskStatus, EndStatus, KillProcessStatusCode from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.utils import job_utils import os from fate_flow.operation.job_saver import JobSaver from fate_arch.common.base_utils import json_dumps, current_timestamp from fate_arch.common import base_utils from fate_flow.entity.types import RunParameters from fate_flow.manager.resource_manager import ResourceManager from fate_flow.operation.job_tracker import Tracker from fate_arch.computing import ComputingEngine from fate_flow.utils.authentication_utils import PrivilegeAuth class TaskController(object): INITIATOR_COLLECT_FIELDS = ["status", "party_status", "start_time", "update_time", "end_time", "elapsed"] @classmethod def create_task(cls, role, party_id, run_on_this_party, task_info): task_info["role"] = role task_info["party_id"] = party_id task_info["status"] = TaskStatus.WAITING task_info["party_status"] = TaskStatus.WAITING task_info["create_time"] = base_utils.current_timestamp() task_info["run_on_this_party"] = run_on_this_party if "task_id" not in task_info: task_info["task_id"] = job_utils.generate_task_id(job_id=task_info["job_id"], component_name=task_info["component_name"]) if "task_version" not in task_info: task_info["task_version"] = 0 JobSaver.create_task(task_info=task_info) @classmethod def start_task(cls, job_id, component_name, task_id, task_version, role, party_id, **kwargs): """ Start task, update status and party status :param job_id: :param component_name: :param task_id: :param task_version: :param role: :param party_id: :return: """ job_dsl = job_utils.get_job_dsl(job_id, role, party_id) PrivilegeAuth.authentication_component(job_dsl, src_party_id=kwargs.get('src_party_id'), src_role=kwargs.get('src_role'), party_id=party_id, component_name=component_name) schedule_logger(job_id).info( 'try to start job {} task {} {} on {} {} executor subprocess'.format(job_id, task_id, task_version, role, party_id)) task_executor_process_start_status = False task_info = { "job_id": job_id, "task_id": task_id, "task_version": task_version, "role": role, "party_id": party_id, } try: task_dir = os.path.join(job_utils.get_job_directory(job_id=job_id), role, party_id, component_name, task_id, task_version) os.makedirs(task_dir, exist_ok=True) task_parameters_path = os.path.join(task_dir, 'task_parameters.json') run_parameters_dict = job_utils.get_job_parameters(job_id, role, party_id) with open(task_parameters_path, 'w') as fw: fw.write(json_dumps(run_parameters_dict)) run_parameters = RunParameters(**run_parameters_dict) schedule_logger(job_id=job_id).info(f"use computing 
engine {run_parameters.computing_engine}") if run_parameters.computing_engine in {ComputingEngine.EGGROLL, ComputingEngine.STANDALONE}: process_cmd = [ sys.executable, sys.modules[TaskExecutor.__module__].__file__, '-j', job_id, '-n', component_name, '-t', task_id, '-v', task_version, '-r', role, '-p', party_id, '-c', task_parameters_path, '--run_ip', RuntimeConfig.JOB_SERVER_HOST, '--job_server', '{}:{}'.format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT), ] elif run_parameters.computing_engine == ComputingEngine.SPARK: if "SPARK_HOME" not in os.environ: raise EnvironmentError("SPARK_HOME not found") spark_home = os.environ["SPARK_HOME"] # additional configs spark_submit_config = run_parameters.spark_run deploy_mode = spark_submit_config.get("deploy-mode", "client") if deploy_mode not in ["client"]: raise ValueError(f"deploy mode {deploy_mode} not supported") spark_submit_cmd = os.path.join(spark_home, "bin/spark-submit") process_cmd = [spark_submit_cmd, f'--name={task_id}#{role}'] for k, v in spark_submit_config.items(): if k != "conf": process_cmd.append(f'--{k}={v}') if "conf" in spark_submit_config: for ck, cv in spark_submit_config["conf"].items(): process_cmd.append(f'--conf') process_cmd.append(f'{ck}={cv}') process_cmd.extend([ sys.modules[TaskExecutor.__module__].__file__, '-j', job_id, '-n', component_name, '-t', task_id, '-v', task_version, '-r', role, '-p', party_id, '-c', task_parameters_path, '--run_ip', RuntimeConfig.JOB_SERVER_HOST, '--job_server', '{}:{}'.format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT), ]) else: raise ValueError(f"${run_parameters.computing_engine} is not supported") task_log_dir = os.path.join(job_utils.get_job_log_directory(job_id=job_id), role, party_id, component_name) task_job_dir = os.path.join(job_utils.get_job_directory(job_id=job_id), role, party_id, component_name) schedule_logger(job_id).info( 'job {} task {} {} on {} {} executor subprocess is ready'.format(job_id, task_id, task_version, role, party_id)) p = job_utils.run_subprocess(job_id=job_id, config_dir=task_dir, process_cmd=process_cmd, log_dir=task_log_dir, job_dir=task_job_dir) if p: task_info["party_status"] = TaskStatus.RUNNING #task_info["run_pid"] = p.pid task_info["start_time"] = current_timestamp() task_executor_process_start_status = True else: task_info["party_status"] = TaskStatus.FAILED except Exception as e: schedule_logger(job_id).exception(e) task_info["party_status"] = TaskStatus.FAILED finally: try: cls.update_task(task_info=task_info) cls.update_task_status(task_info=task_info) except Exception as e: schedule_logger(job_id).exception(e) schedule_logger(job_id).info( 'job {} task {} {} on {} {} executor subprocess start {}'.format(job_id, task_id, task_version, role, party_id, "success" if task_executor_process_start_status else "failed")) @classmethod def update_task(cls, task_info): """ Save to local database and then report to Initiator :param task_info: :return: """ update_status = False try: update_status = JobSaver.update_task(task_info=task_info) cls.report_task_to_initiator(task_info=task_info) except Exception as e: schedule_logger(job_id=task_info["job_id"]).exception(e) finally: return update_status @classmethod def update_task_status(cls, task_info): update_status = JobSaver.update_task_status(task_info=task_info) if update_status and EndStatus.contains(task_info.get("status")): ResourceManager.return_task_resource(task_info=task_info) cls.clean_task(job_id=task_info["job_id"], task_id=task_info["task_id"], 
task_version=task_info["task_version"], role=task_info["role"], party_id=task_info["party_id"], content_type="table" ) cls.report_task_to_initiator(task_info=task_info) return update_status @classmethod def report_task_to_initiator(cls, task_info): tasks = JobSaver.query_task(task_id=task_info["task_id"], task_version=task_info["task_version"], role=task_info["role"], party_id=task_info["party_id"]) if tasks[0].f_federated_status_collect_type == FederatedCommunicationType.PUSH: FederatedScheduler.report_task_to_initiator(task=tasks[0]) @classmethod def collect_task(cls, job_id, component_name, task_id, task_version, role, party_id): tasks = JobSaver.query_task(job_id=job_id, component_name=component_name, task_id=task_id, task_version=task_version, role=role, party_id=party_id) if tasks: return tasks[0].to_human_model_dict(only_primary_with=cls.INITIATOR_COLLECT_FIELDS) else: return None @classmethod def stop_task(cls, task, stop_status): """ Try to stop the task, but the status depends on the final operation result :param task: :param stop_status: :return: """ kill_status = cls.kill_task(task=task) task_info = { "job_id": task.f_job_id, "task_id": task.f_task_id, "task_version": task.f_task_version, "role": task.f_role, "party_id": task.f_party_id, "party_status": stop_status } cls.update_task_status(task_info=task_info) cls.update_task(task_info=task_info) return kill_status @classmethod def kill_task(cls, task: Task): kill_status = False try: # kill task executor kill_status_code = job_utils.kill_task_executor_process(task) # session stop if kill_status_code == KillProcessStatusCode.KILLED or task.f_status not in {TaskStatus.WAITING}: job_utils.start_session_stop(task) except Exception as e: schedule_logger(task.f_job_id).exception(e) else: kill_status = True finally: schedule_logger(task.f_job_id).info( 'job {} task {} {} on {} {} process {} kill {}'.format(task.f_job_id, task.f_task_id, task.f_task_version, task.f_role, task.f_party_id, task.f_run_pid, 'success' if kill_status else 'failed')) return kill_status @classmethod def clean_task(cls, job_id, task_id, task_version, role, party_id, content_type): status = set() if content_type == "metrics": tracker = Tracker(job_id=job_id, role=role, party_id=party_id, task_id=task_id, task_version=task_version) status.add(tracker.clean_metrics()) elif content_type == "table": jobs = JobSaver.query_job(job_id=job_id, role=role, party_id=party_id) if jobs: job = jobs[0] job_parameters = RunParameters(**job.f_runtime_conf_on_party["job_parameters"]) tracker = Tracker(job_id=job_id, role=role, party_id=party_id, task_id=task_id, task_version=task_version, job_parameters=job_parameters) status.add(tracker.clean_task(job.f_runtime_conf_on_party)) if len(status) == 1 and True in status: return True else: return False @classmethod def query_task_input_args(cls, job_id, task_id, role, party_id, job_args, job_parameters, input_dsl, filter_type=None, filter_attr=None): task_run_args = TaskExecutor.get_task_run_args(job_id=job_id, role=role, party_id=party_id, task_id=task_id, job_args=job_args, job_parameters=job_parameters, task_parameters={}, input_dsl=input_dsl, filter_type=filter_type, filter_attr=filter_attr ) return task_run_args
[ "fate_flow.operation.job_saver.JobSaver.update_task_status", "fate_flow.utils.job_utils.get_job_parameters", "fate_flow.operation.job_saver.JobSaver.create_task", "fate_flow.utils.job_utils.get_job_dsl", "fate_flow.operation.job_tracker.Tracker", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.utils.job_utils.run_subprocess", "fate_flow.manager.resource_manager.ResourceManager.return_task_resource", "fate_flow.utils.job_utils.kill_task_executor_process", "fate_flow.operation.job_saver.JobSaver.query_task", "fate_flow.entity.types.RunParameters", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.report_task_to_initiator", "fate_flow.utils.job_utils.generate_task_id", "fate_flow.operation.task_executor.TaskExecutor.get_task_run_args", "fate_flow.operation.job_saver.JobSaver.update_task", "fate_flow.utils.job_utils.start_session_stop", "fate_flow.utils.job_utils.get_job_directory" ]
[((1970, 2000), 'fate_arch.common.base_utils.current_timestamp', 'base_utils.current_timestamp', ([], {}), '()\n', (1998, 2000), False, 'from fate_arch.common import base_utils\n'), ((2327, 2368), 'fate_flow.operation.job_saver.JobSaver.create_task', 'JobSaver.create_task', ([], {'task_info': 'task_info'}), '(task_info=task_info)\n', (2347, 2368), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((2748, 2793), 'fate_flow.utils.job_utils.get_job_dsl', 'job_utils.get_job_dsl', (['job_id', 'role', 'party_id'], {}), '(job_id, role, party_id)\n', (2769, 2793), False, 'from fate_flow.utils import job_utils\n'), ((8636, 8684), 'fate_flow.operation.job_saver.JobSaver.update_task_status', 'JobSaver.update_task_status', ([], {'task_info': 'task_info'}), '(task_info=task_info)\n', (8663, 8684), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((9366, 9516), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'task_id': "task_info['task_id']", 'task_version': "task_info['task_version']", 'role': "task_info['role']", 'party_id': "task_info['party_id']"}), "(task_id=task_info['task_id'], task_version=task_info[\n 'task_version'], role=task_info['role'], party_id=task_info['party_id'])\n", (9385, 9516), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((9903, 10047), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'job_id': 'job_id', 'component_name': 'component_name', 'task_id': 'task_id', 'task_version': 'task_version', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, component_name=component_name, task_id=\n task_id, task_version=task_version, role=role, party_id=party_id)\n', (9922, 10047), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((13180, 13425), 'fate_flow.operation.task_executor.TaskExecutor.get_task_run_args', 'TaskExecutor.get_task_run_args', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'task_id': 'task_id', 'job_args': 'job_args', 'job_parameters': 'job_parameters', 'task_parameters': '{}', 'input_dsl': 'input_dsl', 'filter_type': 'filter_type', 'filter_attr': 'filter_attr'}), '(job_id=job_id, role=role, party_id=party_id,\n task_id=task_id, job_args=job_args, job_parameters=job_parameters,\n task_parameters={}, input_dsl=input_dsl, filter_type=filter_type,\n filter_attr=filter_attr)\n', (13210, 13425), False, 'from fate_flow.operation.task_executor import TaskExecutor\n'), ((2134, 2237), 'fate_flow.utils.job_utils.generate_task_id', 'job_utils.generate_task_id', ([], {'job_id': "task_info['job_id']", 'component_name': "task_info['component_name']"}), "(job_id=task_info['job_id'], component_name=\n task_info['component_name'])\n", (2160, 2237), False, 'from fate_flow.utils import job_utils\n'), ((3596, 3632), 'os.makedirs', 'os.makedirs', (['task_dir'], {'exist_ok': '(True)'}), '(task_dir, exist_ok=True)\n', (3607, 3632), False, 'import os\n'), ((3668, 3714), 'os.path.join', 'os.path.join', (['task_dir', '"""task_parameters.json"""'], {}), "(task_dir, 'task_parameters.json')\n", (3680, 3714), False, 'import os\n'), ((3749, 3801), 'fate_flow.utils.job_utils.get_job_parameters', 'job_utils.get_job_parameters', (['job_id', 'role', 'party_id'], {}), '(job_id, role, party_id)\n', (3777, 3801), False, 'from fate_flow.utils import job_utils\n'), ((3946, 3982), 'fate_flow.entity.types.RunParameters', 'RunParameters', ([], {}), '(**run_parameters_dict)\n', (3959, 3982), False, 'from fate_flow.entity.types import RunParameters\n'), ((6979, 
7113), 'fate_flow.utils.job_utils.run_subprocess', 'job_utils.run_subprocess', ([], {'job_id': 'job_id', 'config_dir': 'task_dir', 'process_cmd': 'process_cmd', 'log_dir': 'task_log_dir', 'job_dir': 'task_job_dir'}), '(job_id=job_id, config_dir=task_dir, process_cmd=\n process_cmd, log_dir=task_log_dir, job_dir=task_job_dir)\n', (7003, 7113), False, 'from fate_flow.utils import job_utils\n'), ((8296, 8337), 'fate_flow.operation.job_saver.JobSaver.update_task', 'JobSaver.update_task', ([], {'task_info': 'task_info'}), '(task_info=task_info)\n', (8316, 8337), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((8771, 8828), 'fate_flow.manager.resource_manager.ResourceManager.return_task_resource', 'ResourceManager.return_task_resource', ([], {'task_info': 'task_info'}), '(task_info=task_info)\n', (8807, 8828), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((9720, 9778), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.report_task_to_initiator', 'FederatedScheduler.report_task_to_initiator', ([], {'task': 'tasks[0]'}), '(task=tasks[0])\n', (9763, 9778), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((11030, 11072), 'fate_flow.utils.job_utils.kill_task_executor_process', 'job_utils.kill_task_executor_process', (['task'], {}), '(task)\n', (11066, 11072), False, 'from fate_flow.utils import job_utils\n'), ((12242, 12342), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'task_id': 'task_id', 'task_version': 'task_version'}), '(job_id=job_id, role=role, party_id=party_id, task_id=task_id,\n task_version=task_version)\n', (12249, 12342), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((3030, 3053), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (3045, 3053), False, 'from fate_arch.common.log import schedule_logger\n'), ((3485, 3527), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (3512, 3527), False, 'from fate_flow.utils import job_utils\n'), ((6596, 6642), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (6627, 6642), False, 'from fate_flow.utils import job_utils\n'), ((6716, 6758), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (6743, 6758), False, 'from fate_flow.utils import job_utils\n'), ((7278, 7297), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (7295, 7297), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((11226, 11260), 'fate_flow.utils.job_utils.start_session_stop', 'job_utils.start_session_stop', (['task'], {}), '(task)\n', (11254, 11260), False, 'from fate_flow.utils import job_utils\n'), ((12444, 12507), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (12462, 12507), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((3883, 3914), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['run_parameters_dict'], {}), '(run_parameters_dict)\n', (3893, 3914), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((3996, 4026), 
'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (4011, 4026), False, 'from fate_arch.common.log import schedule_logger\n'), ((5383, 5427), 'os.path.join', 'os.path.join', (['spark_home', '"""bin/spark-submit"""'], {}), "(spark_home, 'bin/spark-submit')\n", (5395, 5427), False, 'import os\n'), ((6804, 6827), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (6819, 6827), False, 'from fate_arch.common.log import schedule_logger\n'), ((7821, 7844), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (7836, 7844), False, 'from fate_arch.common.log import schedule_logger\n'), ((11422, 11452), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (11437, 11452), False, 'from fate_arch.common.log import schedule_logger\n'), ((12592, 12654), 'fate_flow.entity.types.RunParameters', 'RunParameters', ([], {}), "(**job.f_runtime_conf_on_party['job_parameters'])\n", (12605, 12654), False, 'from fate_flow.entity.types import RunParameters\n'), ((12681, 12812), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'task_id': 'task_id', 'task_version': 'task_version', 'job_parameters': 'job_parameters'}), '(job_id=job_id, role=role, party_id=party_id, task_id=task_id,\n task_version=task_version, job_parameters=job_parameters)\n', (12688, 12812), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((7479, 7502), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (7494, 7502), False, 'from fate_arch.common.log import schedule_logger\n'), ((8443, 8486), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': "task_info['job_id']"}), "(job_id=task_info['job_id'])\n", (8458, 8486), False, 'from fate_arch.common.log import schedule_logger\n'), ((11304, 11334), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (11319, 11334), False, 'from fate_arch.common.log import schedule_logger\n'), ((7772, 7795), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (7787, 7795), False, 'from fate_arch.common.log import schedule_logger\n')]
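A sketch of the task_info contract that TaskController.create_task works with, based on the fields it fills in before handing the dict to JobSaver. The module path and concrete identifiers are placeholders, and it assumes an initialized FATE Flow database; any additional fields JobSaver requires are not shown here.

from fate_flow.controller.task_controller import TaskController  # assumed module path

# Placeholder identifiers; job_id and component_name are what create_task uses to derive a task_id.
task_info = {
    'job_id': '202101010000000000',
    'component_name': 'hetero_lr_0',
}
TaskController.create_task(role='guest', party_id=9999, run_on_this_party=True, task_info=task_info)

# create_task fills in status, party_status, create_time, task_id and task_version,
# so the same dict can later be passed to update_task / update_task_status.
print(task_info['task_id'], task_info['task_version'])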
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arch.api.utils.core import get_lan_ip
from arch.api.utils.log_utils import schedule_logger
from fate_flow.settings import detect_logger, API_VERSION
from fate_flow.utils import cron, job_utils, api_utils


class JobDetector(cron.Cron):
    def run_do(self):
        try:
            running_tasks = job_utils.query_task(status='running', run_ip=get_lan_ip())
            stop_job_ids = set()
            # detect_logger.info('start to detect running job..')
            for task in running_tasks:
                try:
                    process_exist = job_utils.check_job_process(int(task.f_run_pid))
                    if not process_exist:
                        detect_logger.info(
                            'job {} component {} on {} {} task {} {} process does not exist'.format(
                                task.f_job_id, task.f_component_name, task.f_role, task.f_party_id,
                                task.f_task_id, task.f_run_pid))
                        stop_job_ids.add(task.f_job_id)
                except Exception as e:
                    detect_logger.exception(e)
            if stop_job_ids:
                schedule_logger().info('start to stop jobs: {}'.format(stop_job_ids))
            for job_id in stop_job_ids:
                jobs = job_utils.query_job(job_id=job_id)
                if jobs:
                    initiator_party_id = jobs[0].f_initiator_party_id
                    job_work_mode = jobs[0].f_work_mode
                    if len(jobs) > 1:
                        # i am initiator
                        my_party_id = initiator_party_id
                    else:
                        my_party_id = jobs[0].f_party_id
                        initiator_party_id = jobs[0].f_initiator_party_id
                    api_utils.federated_api(job_id=job_id,
                                             method='POST',
                                             endpoint='/{}/job/stop'.format(API_VERSION),
                                             src_party_id=my_party_id,
                                             dest_party_id=initiator_party_id,
                                             src_role=None,
                                             json_body={'job_id': job_id},
                                             work_mode=job_work_mode)
                    # schedule_logger(job_id).info('send stop job {} command'.format(job_id))
        except Exception as e:
            detect_logger.exception(e)
        finally:
            detect_logger.info('finish detect running job')
[ "fate_flow.utils.job_utils.query_job", "fate_flow.settings.detect_logger.exception", "fate_flow.settings.detect_logger.info" ]
[((3609, 3656), 'fate_flow.settings.detect_logger.info', 'detect_logger.info', (['"""finish detect running job"""'], {}), "('finish detect running job')\n", (3627, 3656), False, 'from fate_flow.settings import detect_logger, API_VERSION\n'), ((2331, 2365), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (2350, 2365), False, 'from fate_flow.utils import cron, job_utils, api_utils\n'), ((3553, 3579), 'fate_flow.settings.detect_logger.exception', 'detect_logger.exception', (['e'], {}), '(e)\n', (3576, 3579), False, 'from fate_flow.settings import detect_logger, API_VERSION\n'), ((966, 978), 'arch.api.utils.core.get_lan_ip', 'get_lan_ip', ([], {}), '()\n', (976, 978), False, 'from arch.api.utils.core import get_lan_ip\n'), ((2126, 2152), 'fate_flow.settings.detect_logger.exception', 'detect_logger.exception', (['e'], {}), '(e)\n', (2149, 2152), False, 'from fate_flow.settings import detect_logger, API_VERSION\n'), ((2198, 2215), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (2213, 2215), False, 'from arch.api.utils.log_utils import schedule_logger\n')]
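The detector's core decision, whether the recorded executor pid is still alive, can be exercised on its own. The sketch below isolates that check; psutil is only a stand-in for job_utils.check_job_process (which is not shown here), and the fake task dicts are placeholders for the ORM rows the real code iterates.

import psutil


def process_exists(pid: int) -> bool:
    """Rough, assumed equivalent of job_utils.check_job_process for illustration."""
    try:
        return psutil.Process(pid).is_running()
    except (psutil.NoSuchProcess, ValueError):
        return False


# The detector adds a job to stop_job_ids as soon as one of its running tasks
# has no live executor process.
running_tasks = [{'f_job_id': 'job_a', 'f_run_pid': '999999'},
                 {'f_job_id': 'job_b', 'f_run_pid': str(psutil.Process().pid)}]
stop_job_ids = {task['f_job_id'] for task in running_tasks if not process_exists(int(task['f_run_pid']))}
print(stop_job_ids)  # likely {'job_a'} unless that pid happens to exist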
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys

from fate_arch.common.log import schedule_logger
from fate_flow.controller.engine_operation.base import BaseEngine
from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.entity.types import KillProcessStatusCode, TaskStatus
from fate_flow.operation.task_executor import TaskExecutor
from fate_flow.utils import job_utils


class EggrollEngine(BaseEngine):
    @staticmethod
    def run(job_id, component_name, task_id, task_version, role, party_id, task_parameters_path, task_info, **kwargs):
        process_cmd = [
            sys.executable,
            sys.modules[TaskExecutor.__module__].__file__,
            '-j', job_id,
            '-n', component_name,
            '-t', task_id,
            '-v', task_version,
            '-r', role,
            '-p', party_id,
            '-c', task_parameters_path,
            '--run_ip', RuntimeConfig.JOB_SERVER_HOST,
            '--job_server', '{}:{}'.format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT),
        ]
        task_log_dir = os.path.join(job_utils.get_job_log_directory(job_id=job_id), role, party_id, component_name)
        task_job_dir = os.path.join(job_utils.get_job_directory(job_id=job_id), role, party_id, component_name)
        schedule_logger(job_id).info(
            'job {} task {} {} on {} {} executor subprocess is ready'.format(job_id, task_id, task_version, role, party_id))
        task_dir = os.path.dirname(task_parameters_path)
        p = job_utils.run_subprocess(job_id=job_id, config_dir=task_dir, process_cmd=process_cmd,
                                     log_dir=task_log_dir, job_dir=task_job_dir)
        task_info["run_pid"] = p.pid
        return p

    @staticmethod
    def kill(task):
        kill_status_code = job_utils.kill_task_executor_process(task)
        # session stop
        if kill_status_code == KillProcessStatusCode.KILLED or task.f_status not in {TaskStatus.WAITING}:
            job_utils.start_session_stop(task)

    @staticmethod
    def is_alive(task):
        return job_utils.check_job_process(int(task.f_run_pid))
[ "fate_flow.utils.job_utils.run_subprocess", "fate_flow.utils.job_utils.kill_task_executor_process", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.utils.job_utils.start_session_stop", "fate_flow.utils.job_utils.get_job_directory" ]
[((2123, 2160), 'os.path.dirname', 'os.path.dirname', (['task_parameters_path'], {}), '(task_parameters_path)\n', (2138, 2160), False, 'import os\n'), ((2173, 2307), 'fate_flow.utils.job_utils.run_subprocess', 'job_utils.run_subprocess', ([], {'job_id': 'job_id', 'config_dir': 'task_dir', 'process_cmd': 'process_cmd', 'log_dir': 'task_log_dir', 'job_dir': 'task_job_dir'}), '(job_id=job_id, config_dir=task_dir, process_cmd=\n process_cmd, log_dir=task_log_dir, job_dir=task_job_dir)\n', (2197, 2307), False, 'from fate_flow.utils import job_utils\n'), ((2460, 2502), 'fate_flow.utils.job_utils.kill_task_executor_process', 'job_utils.kill_task_executor_process', (['task'], {}), '(task)\n', (2496, 2502), False, 'from fate_flow.utils import job_utils\n'), ((1672, 1718), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (1703, 1718), False, 'from fate_flow.utils import job_utils\n'), ((1788, 1830), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (1815, 1830), False, 'from fate_flow.utils import job_utils\n'), ((2644, 2678), 'fate_flow.utils.job_utils.start_session_stop', 'job_utils.start_session_stop', (['task'], {}), '(task)\n', (2672, 2678), False, 'from fate_flow.utils import job_utils\n'), ((1872, 1895), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (1887, 1895), False, 'from fate_arch.common.log import schedule_logger\n')]
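For debugging it can help to see the exact command line EggrollEngine.run assembles before it is handed to job_utils.run_subprocess. The sketch below reproduces that argument list with placeholder task identifiers and only prints the command; it assumes a fate_flow environment is importable, and RuntimeConfig values are printed as whatever is currently set.

import sys
from fate_flow.operation.task_executor import TaskExecutor
from fate_flow.entity.runtime_config import RuntimeConfig

# Placeholder task identifiers for illustration.
job_id, component_name, task_id, task_version = '202101010000000000', 'hetero_lr_0', 'task_0', 0
role, party_id, task_parameters_path = 'guest', 9999, '/tmp/task_parameters.json'

process_cmd = [
    sys.executable,
    sys.modules[TaskExecutor.__module__].__file__,
    '-j', job_id,
    '-n', component_name,
    '-t', task_id,
    '-v', task_version,
    '-r', role,
    '-p', party_id,
    '-c', task_parameters_path,
    '--run_ip', RuntimeConfig.JOB_SERVER_HOST,
    '--job_server', '{}:{}'.format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT),
]
print(' '.join(str(arg) for arg in process_cmd))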
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil from flask import Flask, request, send_file from fate_flow.settings import stat_logger, API_VERSION, SERVING_PATH, SERVINGS_ZK_PATH, \ USE_CONFIGURATION_CENTER, ZOOKEEPER_HOSTS, SERVER_CONF_PATH, DEFAULT_MODEL_STORE_ADDRESS, TEMP_DIRECTORY from fate_flow.driver.job_controller import JobController from fate_flow.manager.model_manager import publish_model from fate_flow.manager.model_manager import pipelined_model from fate_flow.utils.api_utils import get_json_result, federated_api from fate_flow.utils.job_utils import generate_job_id, runtime_conf_basic from fate_flow.utils.node_check_utils import check_nodes from fate_flow.utils.setting_utils import CenterConfig from fate_flow.utils.detect_utils import check_config from fate_flow.entity.constant_config import ModelOperation manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/load', methods=['POST']) def load_model(): request_config = request.json _job_id = generate_job_id() initiator_party_id = request_config['initiator']['party_id'] initiator_role = request_config['initiator']['role'] publish_model.generate_publish_model_info(request_config) load_status = True load_status_info = {} load_status_msg = 'success' for role_name, role_partys in request_config.get("role").items(): if role_name == 'arbiter': continue load_status_info[role_name] = load_status_info.get(role_name, {}) for _party_id in role_partys: request_config['local'] = {'role': role_name, 'party_id': _party_id} try: response = federated_api(job_id=_job_id, method='POST', endpoint='/{}/model/load/do'.format(API_VERSION), src_party_id=initiator_party_id, dest_party_id=_party_id, src_role = initiator_role, json_body=request_config, work_mode=request_config['job_parameters']['work_mode']) load_status_info[role_name][_party_id] = response['retcode'] except Exception as e: stat_logger.exception(e) load_status = False load_status_msg = 'failed' load_status_info[role_name][_party_id] = 100 return get_json_result(job_id=_job_id, retcode=(0 if load_status else 101), retmsg=load_status_msg, data=load_status_info) @manager.route('/load/do', methods=['POST']) @check_nodes def do_load_model(): request_data = request.json request_data["servings"] = CenterConfig.get_settings(path=SERVING_PATH, servings_zk_path=SERVINGS_ZK_PATH, use_zk=USE_CONFIGURATION_CENTER, hosts=ZOOKEEPER_HOSTS, server_conf_path=SERVER_CONF_PATH) load_status = publish_model.load_model(config_data=request_data) return get_json_result(retcode=(0 if load_status else 101)) @manager.route('/bind', methods=['POST']) def bind_model_service(): request_config = request.json if not request_config.get('servings'): # get my party all servings request_config['servings'] = CenterConfig.get_settings(path=SERVING_PATH, servings_zk_path=SERVINGS_ZK_PATH, use_zk=USE_CONFIGURATION_CENTER, 
hosts=ZOOKEEPER_HOSTS, server_conf_path=SERVER_CONF_PATH) if not request_config.get('service_id'): return get_json_result(retcode=101, retmsg='no service id') bind_status, service_id = publish_model.bind_model_service(config_data=request_config) return get_json_result(retcode=(0 if bind_status else 101), retmsg='service id is {}'.format(service_id)) @manager.route('/transfer', methods=['post']) def transfer_model(): model_data = publish_model.download_model(request.json) return get_json_result(retcode=0, retmsg="success", data=model_data) @manager.route('/<model_operation>', methods=['post', 'get']) def operate_model(model_operation): request_config = request.json or request.form.to_dict() job_id = generate_job_id() required_arguments = ["model_id", "model_version"] if model_operation not in [ModelOperation.STORE, ModelOperation.RESTORE, ModelOperation.EXPORT, ModelOperation.IMPORT]: raise Exception('Can not support this operating now: {}'.format(model_operation)) check_config(request_config, required_arguments=required_arguments) if model_operation in [ModelOperation.EXPORT, ModelOperation.IMPORT]: if model_operation == ModelOperation.IMPORT: file = request.files.get('file') file_path = os.path.join(TEMP_DIRECTORY, file.filename) try: os.makedirs(os.path.dirname(file_path), exist_ok=True) file.save(file_path) except Exception as e: shutil.rmtree(file_path) raise e request_config['file'] = file_path model = pipelined_model.PipelinedModel(model_id=request_config["model_id"], model_version=request_config["model_version"]) model.unpack_model(file_path) return get_json_result() else: model = pipelined_model.PipelinedModel(model_id=request_config["model_id"], model_version=request_config["model_version"]) archive_file_path = model.packaging_model() return send_file(archive_file_path, attachment_filename=os.path.basename(archive_file_path), as_attachment=True) else: data = {} job_dsl, job_runtime_conf = gen_model_operation_job_config(request_config, model_operation) job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = JobController.submit_job( {'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}, job_id=job_id) data.update({'job_dsl_path': job_dsl_path, 'job_runtime_conf_path': job_runtime_conf_path, 'board_url': board_url, 'logs_directory': logs_directory}) return get_json_result(job_id=job_id, data=data) def gen_model_operation_job_config(config_data: dict, model_operation: ModelOperation): job_runtime_conf = runtime_conf_basic(if_local=True) initiator_role = "local" job_dsl = { "components": {} } if model_operation in [ModelOperation.STORE, ModelOperation.RESTORE]: component_name = "{}_0".format(model_operation) component_parameters = dict() component_parameters.update(config_data) for k, v in config_data.items(): component_parameters[k] = [v] if "store_address" not in config_data: component_parameters["store_address"] = [DEFAULT_MODEL_STORE_ADDRESS] job_runtime_conf["role_parameters"][initiator_role] = {component_name: component_parameters} job_dsl["components"][component_name] = { "module": "Model{}".format(model_operation.capitalize()) } else: raise Exception("Can not support this model operation: {}".format(model_operation)) return job_dsl, job_runtime_conf
[ "fate_flow.manager.model_manager.publish_model.download_model", "fate_flow.settings.stat_logger.exception", "fate_flow.manager.model_manager.publish_model.bind_model_service", "fate_flow.manager.model_manager.publish_model.load_model", "fate_flow.driver.job_controller.JobController.submit_job", "fate_flow.manager.model_manager.pipelined_model.PipelinedModel", "fate_flow.utils.job_utils.generate_job_id", "fate_flow.utils.api_utils.get_json_result", "fate_flow.utils.job_utils.runtime_conf_basic", "fate_flow.manager.model_manager.publish_model.generate_publish_model_info", "fate_flow.utils.detect_utils.check_config", "fate_flow.utils.setting_utils.CenterConfig.get_settings" ]
[((1442, 1457), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1447, 1457), False, 'from flask import Flask, request, send_file\n'), ((1521, 1545), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1542, 1545), False, 'from fate_flow.settings import stat_logger, API_VERSION, SERVING_PATH, SERVINGS_ZK_PATH, USE_CONFIGURATION_CENTER, ZOOKEEPER_HOSTS, SERVER_CONF_PATH, DEFAULT_MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((1711, 1728), 'fate_flow.utils.job_utils.generate_job_id', 'generate_job_id', ([], {}), '()\n', (1726, 1728), False, 'from fate_flow.utils.job_utils import generate_job_id, runtime_conf_basic\n'), ((1855, 1912), 'fate_flow.manager.model_manager.publish_model.generate_publish_model_info', 'publish_model.generate_publish_model_info', (['request_config'], {}), '(request_config)\n', (1896, 1912), False, 'from fate_flow.manager.model_manager import publish_model\n'), ((3211, 3329), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': '_job_id', 'retcode': '(0 if load_status else 101)', 'retmsg': 'load_status_msg', 'data': 'load_status_info'}), '(job_id=_job_id, retcode=0 if load_status else 101, retmsg=\n load_status_msg, data=load_status_info)\n', (3226, 3329), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((3498, 3678), 'fate_flow.utils.setting_utils.CenterConfig.get_settings', 'CenterConfig.get_settings', ([], {'path': 'SERVING_PATH', 'servings_zk_path': 'SERVINGS_ZK_PATH', 'use_zk': 'USE_CONFIGURATION_CENTER', 'hosts': 'ZOOKEEPER_HOSTS', 'server_conf_path': 'SERVER_CONF_PATH'}), '(path=SERVING_PATH, servings_zk_path=\n SERVINGS_ZK_PATH, use_zk=USE_CONFIGURATION_CENTER, hosts=\n ZOOKEEPER_HOSTS, server_conf_path=SERVER_CONF_PATH)\n', (3523, 3678), False, 'from fate_flow.utils.setting_utils import CenterConfig\n'), ((3801, 3851), 'fate_flow.manager.model_manager.publish_model.load_model', 'publish_model.load_model', ([], {'config_data': 'request_data'}), '(config_data=request_data)\n', (3825, 3851), False, 'from fate_flow.manager.model_manager import publish_model\n'), ((3863, 3913), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0 if load_status else 101)'}), '(retcode=0 if load_status else 101)\n', (3878, 3913), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((4576, 4636), 'fate_flow.manager.model_manager.publish_model.bind_model_service', 'publish_model.bind_model_service', ([], {'config_data': 'request_config'}), '(config_data=request_config)\n', (4608, 4636), False, 'from fate_flow.manager.model_manager import publish_model\n'), ((4834, 4876), 'fate_flow.manager.model_manager.publish_model.download_model', 'publish_model.download_model', (['request.json'], {}), '(request.json)\n', (4862, 4876), False, 'from fate_flow.manager.model_manager import publish_model\n'), ((4888, 4949), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'model_data'}), "(retcode=0, retmsg='success', data=model_data)\n", (4903, 4949), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((5123, 5140), 'fate_flow.utils.job_utils.generate_job_id', 'generate_job_id', ([], {}), '()\n', (5138, 5140), False, 'from fate_flow.utils.job_utils import generate_job_id, runtime_conf_basic\n'), ((5414, 5481), 'fate_flow.utils.detect_utils.check_config', 'check_config', (['request_config'], {'required_arguments': 
'required_arguments'}), '(request_config, required_arguments=required_arguments)\n', (5426, 5481), False, 'from fate_flow.utils.detect_utils import check_config\n'), ((7221, 7254), 'fate_flow.utils.job_utils.runtime_conf_basic', 'runtime_conf_basic', ([], {'if_local': '(True)'}), '(if_local=True)\n', (7239, 7254), False, 'from fate_flow.utils.job_utils import generate_job_id, runtime_conf_basic\n'), ((4136, 4316), 'fate_flow.utils.setting_utils.CenterConfig.get_settings', 'CenterConfig.get_settings', ([], {'path': 'SERVING_PATH', 'servings_zk_path': 'SERVINGS_ZK_PATH', 'use_zk': 'USE_CONFIGURATION_CENTER', 'hosts': 'ZOOKEEPER_HOSTS', 'server_conf_path': 'SERVER_CONF_PATH'}), '(path=SERVING_PATH, servings_zk_path=\n SERVINGS_ZK_PATH, use_zk=USE_CONFIGURATION_CENTER, hosts=\n ZOOKEEPER_HOSTS, server_conf_path=SERVER_CONF_PATH)\n', (4161, 4316), False, 'from fate_flow.utils.setting_utils import CenterConfig\n'), ((4493, 4545), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no service id"""'}), "(retcode=101, retmsg='no service id')\n", (4508, 4545), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((5087, 5109), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (5107, 5109), False, 'from flask import Flask, request, send_file\n'), ((6759, 6862), 'fate_flow.driver.job_controller.JobController.submit_job', 'JobController.submit_job', (["{'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}"], {'job_id': 'job_id'}), "({'job_dsl': job_dsl, 'job_runtime_conf':\n job_runtime_conf}, job_id=job_id)\n", (6783, 6862), False, 'from fate_flow.driver.job_controller import JobController\n'), ((7066, 7107), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': 'job_id', 'data': 'data'}), '(job_id=job_id, data=data)\n', (7081, 7107), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((5628, 5653), 'flask.request.files.get', 'request.files.get', (['"""file"""'], {}), "('file')\n", (5645, 5653), False, 'from flask import Flask, request, send_file\n'), ((5678, 5721), 'os.path.join', 'os.path.join', (['TEMP_DIRECTORY', 'file.filename'], {}), '(TEMP_DIRECTORY, file.filename)\n', (5690, 5721), False, 'import os\n'), ((6014, 6132), 'fate_flow.manager.model_manager.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': "request_config['model_id']", 'model_version': "request_config['model_version']"}), "(model_id=request_config['model_id'],\n model_version=request_config['model_version'])\n", (6044, 6132), False, 'from fate_flow.manager.model_manager import pipelined_model\n'), ((6190, 6207), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (6205, 6207), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((6242, 6360), 'fate_flow.manager.model_manager.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': "request_config['model_id']", 'model_version': "request_config['model_version']"}), "(model_id=request_config['model_id'],\n model_version=request_config['model_version'])\n", (6272, 6360), False, 'from fate_flow.manager.model_manager import pipelined_model\n'), ((3035, 3059), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (3056, 3059), False, 'from fate_flow.settings import stat_logger, API_VERSION, SERVING_PATH, SERVINGS_ZK_PATH, USE_CONFIGURATION_CENTER, ZOOKEEPER_HOSTS, 
SERVER_CONF_PATH, DEFAULT_MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((5767, 5793), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (5782, 5793), False, 'import os\n'), ((5898, 5922), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (5911, 5922), False, 'import shutil\n'), ((6481, 6516), 'os.path.basename', 'os.path.basename', (['archive_file_path'], {}), '(archive_file_path)\n', (6497, 6516), False, 'import os\n')]
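Each row in this dump pairs a Python source file with the API calls extracted from it: a list of fully-qualified API names, and a list of tuples giving the character span of each call in the source, the qualified name, the called attribute, the rendered arguments, and the import statement the name came from. A minimal sketch of consuming one such tuple list follows; the field layout is inferred from the rows shown here, and only the span and the qualified name are relied on.

import ast
from collections import Counter

def count_extracted_apis(extract_api_field: str) -> Counter:
    # The field is assumed to be a Python-literal list of tuples shaped like
    # ((start, end), 'pkg.mod.Attr', 'Attr', args, call_repr, ..., import_stmt),
    # matching the rows in this document; only element [1] (the qualified
    # name) is used here.
    records = ast.literal_eval(extract_api_field)
    return Counter(record[1] for record in records)

# Example with a single hand-copied tuple from the row above:
sample = "[((1442, 1457), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\\n', (1447, 1457), False, 'from flask import Flask, request, send_file\\n')]"
print(count_extracted_apis(sample))  # Counter({'flask.Flask': 1})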
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_flow.manager.data_manager import delete_tables_by_table_infos, delete_metric_data from fate_flow.operation.job_tracker import Tracker from fate_flow.operation.job_saver import JobSaver from fate_flow.settings import stat_logger from fate_flow.utils.job_utils import start_session_stop class JobClean(object): @classmethod def clean_table(cls, job_id, role, party_id, component_name): # clean data table stat_logger.info('start delete {} {} {} {} data table'.format(job_id, role, party_id, component_name)) tracker = Tracker(job_id=job_id, role=role, party_id=party_id, component_name=component_name) output_data_table_infos = tracker.get_output_data_info() if output_data_table_infos: delete_tables_by_table_infos(output_data_table_infos) stat_logger.info('delete {} {} {} {} data table success'.format(job_id, role, party_id, component_name)) @classmethod def start_clean_job(cls, **kwargs): tasks = JobSaver.query_task(**kwargs) if tasks: for task in tasks: try: # clean session stat_logger.info('start {} {} {} {} session stop'.format(task.f_job_id, task.f_role, task.f_party_id, task.f_component_name)) start_session_stop(task) stat_logger.info('stop {} {} {} {} session success'.format(task.f_job_id, task.f_role, task.f_party_id, task.f_component_name)) except Exception as e: pass try: # clean data table JobClean.clean_table(job_id=task.f_job_id, role=task.f_role, party_id=task.f_party_id, component_name=task.f_component_name) except Exception as e: stat_logger.info('delete {} {} {} {} data table failed'.format(task.f_job_id, task.f_role, task.f_party_id, task.f_component_name)) stat_logger.exception(e) try: # clean metric data stat_logger.info('start delete {} {} {} {} metric data'.format(task.f_job_id, task.f_role, task.f_party_id, task.f_component_name)) delete_metric_data({'job_id': task.f_job_id, 'role': task.f_role, 'party_id': task.f_party_id, 'component_name': task.f_component_name}) stat_logger.info('delete {} {} {} {} metric data success'.format(task.f_job_id, task.f_role, task.f_party_id, task.f_component_name)) except Exception as e: stat_logger.info('delete {} {} {} {} metric data failed'.format(task.f_job_id, task.f_role, task.f_party_id, task.f_component_name)) stat_logger.exception(e) else: raise Exception('no found task')
[ "fate_flow.operation.job_tracker.Tracker", "fate_flow.manager.data_manager.delete_tables_by_table_infos", "fate_flow.operation.job_saver.JobSaver.query_task", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.job_utils.start_session_stop", "fate_flow.manager.data_manager.delete_metric_data" ]
[((1177, 1265), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'component_name': 'component_name'}), '(job_id=job_id, role=role, party_id=party_id, component_name=\n component_name)\n', (1184, 1265), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((1619, 1648), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {}), '(**kwargs)\n', (1638, 1648), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((1374, 1427), 'fate_flow.manager.data_manager.delete_tables_by_table_infos', 'delete_tables_by_table_infos', (['output_data_table_infos'], {}), '(output_data_table_infos)\n', (1402, 1427), False, 'from fate_flow.manager.data_manager import delete_tables_by_table_infos, delete_metric_data\n'), ((1998, 2022), 'fate_flow.utils.job_utils.start_session_stop', 'start_session_stop', (['task'], {}), '(task)\n', (2016, 2022), False, 'from fate_flow.utils.job_utils import start_session_stop\n'), ((3361, 3501), 'fate_flow.manager.data_manager.delete_metric_data', 'delete_metric_data', (["{'job_id': task.f_job_id, 'role': task.f_role, 'party_id': task.f_party_id,\n 'component_name': task.f_component_name}"], {}), "({'job_id': task.f_job_id, 'role': task.f_role,\n 'party_id': task.f_party_id, 'component_name': task.f_component_name})\n", (3379, 3501), False, 'from fate_flow.manager.data_manager import delete_tables_by_table_infos, delete_metric_data\n'), ((2937, 2961), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (2958, 2961), False, 'from fate_flow.settings import stat_logger\n'), ((4322, 4346), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (4343, 4346), False, 'from fate_flow.settings import stat_logger\n')]
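A hedged usage sketch for the cleanup class above: the import path matches the `from fate_flow.operation.job_clean import JobClean` seen elsewhere in this document, but the job id, role, and party id are placeholders, and a running FATE Flow deployment (database, sessions) is assumed.

# Sketch only: requires a deployed FATE Flow environment; the identifiers
# below are placeholders, and JobSaver.query_task must be able to resolve them.
from fate_flow.operation.job_clean import JobClean

JobClean.start_clean_job(job_id="20220101000000000000001", role="guest", party_id=9999)
# Per the implementation above, session stop, data-table deletion and metric
# deletion are attempted per task; individual failures are swallowed or logged
# rather than aborting the whole cleanup, while an empty task list raises.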
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import io import os import json import tarfile from flask import request, send_file, abort from fate_arch.common.base_utils import json_loads, json_dumps from fate_flow.scheduler.dag_scheduler import DAGScheduler from fate_flow.scheduler.federated_scheduler import FederatedScheduler from fate_flow.settings import stat_logger, TEMP_DIRECTORY from fate_flow.utils import job_utils, detect_utils, schedule_utils, log_utils from fate_flow.entity.run_status import FederatedSchedulingStatusCode, JobStatus from fate_flow.utils.api_utils import get_json_result, error_response from fate_flow.entity import RetCode from fate_flow.entity import JobConfigurationBase from fate_flow.operation.job_tracker import Tracker from fate_flow.operation.job_saver import JobSaver from fate_flow.operation.job_clean import JobClean from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter from fate_flow.utils.log_utils import schedule_logger from fate_flow.controller.job_controller import JobController @manager.route('/submit', methods=['POST']) def submit_job(): submit_result = DAGScheduler.submit(JobConfigurationBase(**request.json)) return get_json_result(retcode=submit_result["code"], retmsg=submit_result["message"], job_id=submit_result["job_id"], data=submit_result if submit_result["code"] == RetCode.SUCCESS else None) @manager.route('/stop', methods=['POST']) def stop_job(): job_id = request.json.get('job_id') stop_status = request.json.get("stop_status", "canceled") jobs = JobSaver.query_job(job_id=job_id) if jobs: schedule_logger(job_id).info(f"stop job on this party") kill_status, kill_details = JobController.stop_jobs(job_id=job_id, stop_status=stop_status) schedule_logger(job_id).info(f"stop job on this party status {kill_status}") schedule_logger(job_id).info(f"request stop job to {stop_status}") status_code, response = FederatedScheduler.request_stop_job(job=jobs[0], stop_status=stop_status, command_body=jobs[0].to_dict()) if status_code == FederatedSchedulingStatusCode.SUCCESS: return get_json_result(retcode=RetCode.SUCCESS, retmsg=f"stop job on this party {kill_status}; stop job on all party success") else: return get_json_result(retcode=RetCode.OPERATING_ERROR, retmsg=f"stop job on this party {kill_status}", data=response) else: schedule_logger(job_id).info(f"can not found job to stop") return get_json_result(retcode=RetCode.DATA_ERROR, retmsg="can not found job") @manager.route('/rerun', methods=['POST']) def rerun_job(): job_id = request.json.get("job_id") jobs = JobSaver.query_job(job_id=job_id) if jobs: status_code, response = FederatedScheduler.request_rerun_job(job=jobs[0], command_body=request.json) if status_code == FederatedSchedulingStatusCode.SUCCESS: return get_json_result(retcode=RetCode.SUCCESS, retmsg="rerun job success") else: return get_json_result(retcode=RetCode.OPERATING_ERROR, retmsg="rerun job failed:\n{}".format(json_dumps(response))) else: return 
get_json_result(retcode=RetCode.DATA_ERROR, retmsg="can not found job") @manager.route('/query', methods=['POST']) def query_job(): jobs = JobSaver.query_job(**request.json) if not jobs: return get_json_result(retcode=0, retmsg='no job could be found', data=[]) return get_json_result(retcode=0, retmsg='success', data=[job.to_dict() for job in jobs]) @manager.route('/list/job', methods=['POST']) def list_job(): limit, offset = parse_limit_and_offset() query = { 'tag': ('!=', 'submit_failed'), } for i in ('job_id', 'party_id', 'description'): if request.json.get(i) is not None: query[i] = request.json[i] if query.get('party_id') is not None: try: query['party_id'] = int(query['party_id']) except Exception: return error_response(400, f"Invalid parameter 'party_id'.") if query.get('description') is not None: query['description'] = ('contains', query['description']) for i in ('role', 'status'): if request.json.get(i) is None: continue if isinstance(request.json[i], str): request.json[i] = [request.json[i]] if not isinstance(request.json[i], list): return error_response(400, f"Invalid parameter '{i}'.") request.json[i] = set(request.json[i]) for j in request.json[i]: if j not in valid_query_parameters[i]: return error_response(400, f"Invalid parameter '{i}'.") query[i] = ('in_', request.json[i]) jobs, count = job_utils.list_job(limit, offset, query, parse_order_by(('create_time', 'desc'))) jobs = [job.to_human_model_dict() for job in jobs] for job in jobs: job['party_id'] = int(job['party_id']) job['partners'] = set() for i in ('guest', 'host', 'arbiter'): job['partners'].update(job['roles'].get(i, [])) job['partners'].discard(job['party_id']) job['partners'] = sorted(job['partners']) return get_json_result(data={ 'jobs': jobs, 'count': count, }) @manager.route('/update', methods=['POST']) def update_job(): job_info = request.json jobs = JobSaver.query_job(job_id=job_info['job_id'], party_id=job_info['party_id'], role=job_info['role']) if not jobs: return get_json_result(retcode=101, retmsg='find job failed') else: JobSaver.update_job(job_info={'description': job_info.get('notes', ''), 'job_id': job_info['job_id'], 'role': job_info['role'], 'party_id': job_info['party_id']}) return get_json_result(retcode=0, retmsg='success') @manager.route('/parameter/update', methods=['POST']) @detect_utils.validate_request("job_id") def update_parameters(): job_info = request.json component_parameters = job_info.pop("component_parameters", None) job_parameters = job_info.pop("job_parameters", None) job_info["is_initiator"] = True jobs = JobSaver.query_job(**job_info) if not jobs: return get_json_result(retcode=RetCode.DATA_ERROR, retmsg=log_utils.failed_log(f"query job by {job_info}")) else: retcode, retdata = DAGScheduler.update_parameters(jobs[0], job_parameters, component_parameters) return get_json_result(retcode=retcode, data=retdata) @manager.route('/config', methods=['POST']) def job_config(): jobs = JobSaver.query_job(**request.json) if not jobs: return get_json_result(retcode=101, retmsg='find job failed') else: job = jobs[0] response_data = dict() response_data['job_id'] = job.f_job_id response_data['dsl'] = job.f_dsl response_data['runtime_conf'] = job.f_runtime_conf response_data['train_runtime_conf'] = job.f_train_runtime_conf adapter = JobRuntimeConfigAdapter(job.f_runtime_conf) job_parameters = adapter.get_common_parameters().to_dict() response_data['model_info'] = {'model_id': job_parameters.get('model_id'), 'model_version': job_parameters.get('model_version')} return 
get_json_result(retcode=0, retmsg='success', data=response_data) def check_job_log_dir(): job_id = str(request.json['job_id']) job_log_dir = job_utils.get_job_log_directory(job_id=job_id) if not os.path.exists(job_log_dir): abort(error_response(404, f"Log file path: '{job_log_dir}' not found. Please check if the job id is valid.")) return job_id, job_log_dir @manager.route('/log/download', methods=['POST']) @detect_utils.validate_request('job_id') def job_log_download(): job_id, job_log_dir = check_job_log_dir() memory_file = io.BytesIO() tar = tarfile.open(fileobj=memory_file, mode='w:gz') for root, dir, files in os.walk(job_log_dir): for file in files: full_path = os.path.join(root, file) rel_path = os.path.relpath(full_path, job_log_dir) tar.add(full_path, rel_path) tar.close() memory_file.seek(0) return send_file(memory_file, attachment_filename=f'job_{job_id}_log.tar.gz', as_attachment=True) @manager.route('/log/path', methods=['POST']) @detect_utils.validate_request('job_id') def job_log_path(): job_id, job_log_dir = check_job_log_dir() return get_json_result(data={"logs_directory": job_log_dir}) @manager.route('/task/query', methods=['POST']) def query_task(): tasks = JobSaver.query_task(**request.json) if not tasks: return get_json_result(retcode=101, retmsg='find task failed') return get_json_result(retcode=0, retmsg='success', data=[task.to_dict() for task in tasks]) @manager.route('/list/task', methods=['POST']) def list_task(): limit, offset = parse_limit_and_offset() query = {} for i in ('job_id', 'role', 'party_id'): if request.json.get(i) is not None: query[i] = request.json[i] if query.get('role') is not None: if query['role'] not in valid_query_parameters['role']: return error_response(400, f"Invalid parameter 'role'.") if query.get('party_id') is not None: try: query['party_id'] = int(query['party_id']) except Exception: return error_response(400, f"Invalid parameter 'party_id'.") tasks, count = job_utils.list_task(limit, offset, query, parse_order_by(('create_time', 'asc'))) return get_json_result(data={ 'tasks': [task.to_human_model_dict() for task in tasks], 'count': count, }) @manager.route('/data/view/query', methods=['POST']) def query_component_output_data_info(): output_data_infos = Tracker.query_output_data_infos(**request.json) if not output_data_infos: return get_json_result(retcode=101, retmsg='find data view failed') return get_json_result(retcode=0, retmsg='success', data=[output_data_info.to_dict() for output_data_info in output_data_infos]) @manager.route('/clean', methods=['POST']) def clean_job(): JobClean.start_clean_job(**request.json) return get_json_result(retcode=0, retmsg='success') @manager.route('/clean/queue', methods=['POST']) def clean_queue(): jobs = JobSaver.query_job(is_initiator=True, status=JobStatus.WAITING) clean_status = {} for job in jobs: status_code, response = FederatedScheduler.request_stop_job(job=job, stop_status=JobStatus.CANCELED) clean_status[job.f_job_id] = status_code return get_json_result(retcode=0, retmsg='success', data=clean_status) @manager.route('/dsl/generate', methods=['POST']) def dsl_generator(): data = request.json cpn_str = data.get("cpn_str", "") try: if not cpn_str: raise Exception("Component list should not be empty.") if isinstance(cpn_str, list): cpn_list = cpn_str else: if (cpn_str.find("/") and cpn_str.find("\\")) != -1: raise Exception("Component list string should not contain '/' or '\\'.") cpn_str = cpn_str.replace(" ", "").replace("\n", "").strip(",[]") cpn_list = cpn_str.split(",") train_dsl = 
json_loads(data.get("train_dsl")) parser = schedule_utils.get_dsl_parser_by_version(data.get("version", "2")) predict_dsl = parser.deploy_component(cpn_list, train_dsl) if data.get("filename"): os.makedirs(TEMP_DIRECTORY, exist_ok=True) temp_filepath = os.path.join(TEMP_DIRECTORY, data.get("filename")) with open(temp_filepath, "w") as fout: fout.write(json.dumps(predict_dsl, indent=4)) return send_file(open(temp_filepath, 'rb'), as_attachment=True, attachment_filename=data.get("filename")) return get_json_result(data=predict_dsl) except Exception as e: stat_logger.exception(e) return error_response(210, "DSL generating failed. For more details, " "please check logs/fate_flow/fate_flow_stat.log.") @manager.route('/url/get', methods=['POST']) @detect_utils.validate_request('job_id', 'role', 'party_id') def get_url(): request_data = request.json jobs = JobSaver.query_job(job_id=request_data.get('job_id'), role=request_data.get('role'), party_id=request_data.get('party_id')) if jobs: board_urls = [] for job in jobs: board_url = job_utils.get_board_url(job.f_job_id, job.f_role, job.f_party_id) board_urls.append(board_url) return get_json_result(data={'board_url': board_urls}) else: return get_json_result(retcode=101, retmsg='no found job') def parse_limit_and_offset(): try: limit = int(request.json.get('limit', 0)) page = int(request.json.get('page', 1)) - 1 except Exception: abort(error_response(400, f"Invalid parameter 'limit' or 'page'.")) return limit, limit * page def parse_order_by(default=None): order_by = [] if request.json.get('order_by') is not None: if request.json['order_by'] not in valid_query_parameters['order_by']: abort(error_response(400, f"Invalid parameter 'order_by'.")) order_by.append(request.json['order_by']) if request.json.get('order') is not None: if request.json['order'] not in valid_query_parameters['order']: abort(error_response(400, f"Invalid parameter order 'order'.")) order_by.append(request.json['order']) return order_by or default valid_query_parameters = { 'role': {'guest', 'host', 'arbiter', 'local'}, 'status': {'success', 'running', 'waiting', 'failed', 'canceled'}, 'order_by': {'create_time', 'start_time', 'end_time', 'elapsed'}, 'order': {'asc', 'desc'}, }
[ "fate_flow.settings.stat_logger.exception", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.request_stop_job", "fate_flow.utils.api_utils.error_response", "fate_flow.scheduler.dag_scheduler.DAGScheduler.update_parameters", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.request_rerun_job", "fate_flow.operation.job_clean.JobClean.start_clean_job", "fate_flow.operation.job_saver.JobSaver.query_task", "fate_flow.utils.job_utils.get_board_url", "fate_flow.utils.api_utils.get_json_result", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.controller.job_controller.JobController.stop_jobs", "fate_flow.utils.detect_utils.validate_request", "fate_flow.utils.log_utils.schedule_logger", "fate_flow.operation.job_tracker.Tracker.query_output_data_infos", "fate_flow.utils.config_adapter.JobRuntimeConfigAdapter", "fate_flow.utils.log_utils.failed_log", "fate_flow.entity.JobConfigurationBase" ]
[((6527, 6566), 'fate_flow.utils.detect_utils.validate_request', 'detect_utils.validate_request', (['"""job_id"""'], {}), "('job_id')\n", (6556, 6566), False, 'from fate_flow.utils import job_utils, detect_utils, schedule_utils, log_utils\n'), ((8377, 8416), 'fate_flow.utils.detect_utils.validate_request', 'detect_utils.validate_request', (['"""job_id"""'], {}), "('job_id')\n", (8406, 8416), False, 'from fate_flow.utils import job_utils, detect_utils, schedule_utils, log_utils\n'), ((8998, 9037), 'fate_flow.utils.detect_utils.validate_request', 'detect_utils.validate_request', (['"""job_id"""'], {}), "('job_id')\n", (9027, 9037), False, 'from fate_flow.utils import job_utils, detect_utils, schedule_utils, log_utils\n'), ((12847, 12906), 'fate_flow.utils.detect_utils.validate_request', 'detect_utils.validate_request', (['"""job_id"""', '"""role"""', '"""party_id"""'], {}), "('job_id', 'role', 'party_id')\n", (12876, 12906), False, 'from fate_flow.utils import job_utils, detect_utils, schedule_utils, log_utils\n'), ((1768, 1963), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': "submit_result['code']", 'retmsg': "submit_result['message']", 'job_id': "submit_result['job_id']", 'data': "(submit_result if submit_result['code'] == RetCode.SUCCESS else None)"}), "(retcode=submit_result['code'], retmsg=submit_result[\n 'message'], job_id=submit_result['job_id'], data=submit_result if \n submit_result['code'] == RetCode.SUCCESS else None)\n", (1783, 1963), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((2081, 2107), 'flask.request.json.get', 'request.json.get', (['"""job_id"""'], {}), "('job_id')\n", (2097, 2107), False, 'from flask import request, send_file, abort\n'), ((2126, 2169), 'flask.request.json.get', 'request.json.get', (['"""stop_status"""', '"""canceled"""'], {}), "('stop_status', 'canceled')\n", (2142, 2169), False, 'from flask import request, send_file, abort\n'), ((2181, 2214), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (2199, 2214), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((3278, 3304), 'flask.request.json.get', 'request.json.get', (['"""job_id"""'], {}), "('job_id')\n", (3294, 3304), False, 'from flask import request, send_file, abort\n'), ((3316, 3349), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (3334, 3349), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((3938, 3972), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {}), '(**request.json)\n', (3956, 3972), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((5825, 5877), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'jobs': jobs, 'count': count}"}), "(data={'jobs': jobs, 'count': count})\n", (5840, 5877), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6004, 6107), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': "job_info['job_id']", 'party_id': "job_info['party_id']", 'role': "job_info['role']"}), "(job_id=job_info['job_id'], party_id=job_info['party_id'],\n role=job_info['role'])\n", (6022, 6107), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((6795, 6825), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {}), '(**job_info)\n', (6813, 6825), False, 'from 
fate_flow.operation.job_saver import JobSaver\n'), ((7211, 7245), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {}), '(**request.json)\n', (7229, 7245), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((8086, 8132), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (8117, 8132), False, 'from fate_flow.utils import job_utils, detect_utils, schedule_utils, log_utils\n'), ((8506, 8518), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (8516, 8518), False, 'import io\n'), ((8529, 8575), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'memory_file', 'mode': '"""w:gz"""'}), "(fileobj=memory_file, mode='w:gz')\n", (8541, 8575), False, 'import tarfile\n'), ((8604, 8624), 'os.walk', 'os.walk', (['job_log_dir'], {}), '(job_log_dir)\n', (8611, 8624), False, 'import os\n'), ((8858, 8952), 'flask.send_file', 'send_file', (['memory_file'], {'attachment_filename': 'f"""job_{job_id}_log.tar.gz"""', 'as_attachment': '(True)'}), "(memory_file, attachment_filename=f'job_{job_id}_log.tar.gz',\n as_attachment=True)\n", (8867, 8952), False, 'from flask import request, send_file, abort\n'), ((9116, 9169), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'logs_directory': job_log_dir}"}), "(data={'logs_directory': job_log_dir})\n", (9131, 9169), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((9250, 9285), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {}), '(**request.json)\n', (9269, 9285), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((10458, 10505), 'fate_flow.operation.job_tracker.Tracker.query_output_data_infos', 'Tracker.query_output_data_infos', ([], {}), '(**request.json)\n', (10489, 10505), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((10811, 10851), 'fate_flow.operation.job_clean.JobClean.start_clean_job', 'JobClean.start_clean_job', ([], {}), '(**request.json)\n', (10835, 10851), False, 'from fate_flow.operation.job_clean import JobClean\n'), ((10863, 10907), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (10878, 10907), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((10989, 11052), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'status': 'JobStatus.WAITING'}), '(is_initiator=True, status=JobStatus.WAITING)\n', (11007, 11052), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((11265, 11328), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'clean_status'}), "(retcode=0, retmsg='success', data=clean_status)\n", (11280, 11328), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((1719, 1755), 'fate_flow.entity.JobConfigurationBase', 'JobConfigurationBase', ([], {}), '(**request.json)\n', (1739, 1755), False, 'from fate_flow.entity import JobConfigurationBase\n'), ((2328, 2391), 'fate_flow.controller.job_controller.JobController.stop_jobs', 'JobController.stop_jobs', ([], {'job_id': 'job_id', 'stop_status': 'stop_status'}), '(job_id=job_id, stop_status=stop_status)\n', (2351, 2391), False, 'from fate_flow.controller.job_controller import JobController\n'), ((3131, 3202), 'fate_flow.utils.api_utils.get_json_result', 
'get_json_result', ([], {'retcode': 'RetCode.DATA_ERROR', 'retmsg': '"""can not found job"""'}), "(retcode=RetCode.DATA_ERROR, retmsg='can not found job')\n", (3146, 3202), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((3395, 3471), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.request_rerun_job', 'FederatedScheduler.request_rerun_job', ([], {'job': 'jobs[0]', 'command_body': 'request.json'}), '(job=jobs[0], command_body=request.json)\n', (3431, 3471), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((3793, 3864), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'RetCode.DATA_ERROR', 'retmsg': '"""can not found job"""'}), "(retcode=RetCode.DATA_ERROR, retmsg='can not found job')\n", (3808, 3864), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4005, 4072), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no job could be found"""', 'data': '[]'}), "(retcode=0, retmsg='no job could be found', data=[])\n", (4020, 4072), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6136, 6190), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find job failed"""'}), "(retcode=101, retmsg='find job failed')\n", (6151, 6190), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6425, 6469), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (6440, 6469), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6996, 7073), 'fate_flow.scheduler.dag_scheduler.DAGScheduler.update_parameters', 'DAGScheduler.update_parameters', (['jobs[0]', 'job_parameters', 'component_parameters'], {}), '(jobs[0], job_parameters, component_parameters)\n', (7026, 7073), False, 'from fate_flow.scheduler.dag_scheduler import DAGScheduler\n'), ((7089, 7135), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'retcode', 'data': 'retdata'}), '(retcode=retcode, data=retdata)\n', (7104, 7135), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7278, 7332), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find job failed"""'}), "(retcode=101, retmsg='find job failed')\n", (7293, 7332), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7633, 7676), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['job.f_runtime_conf'], {}), '(job.f_runtime_conf)\n', (7656, 7676), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((7935, 7999), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'response_data'}), "(retcode=0, retmsg='success', data=response_data)\n", (7950, 7999), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((8145, 8172), 'os.path.exists', 'os.path.exists', (['job_log_dir'], {}), '(job_log_dir)\n', (8159, 8172), False, 'import os\n'), ((9319, 9374), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find task failed"""'}), "(retcode=101, retmsg='find task failed')\n", (9334, 9374), False, 'from 
fate_flow.utils.api_utils import get_json_result, error_response\n'), ((10551, 10611), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find data view failed"""'}), "(retcode=101, retmsg='find data view failed')\n", (10566, 10611), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((11128, 11204), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.request_stop_job', 'FederatedScheduler.request_stop_job', ([], {'job': 'job', 'stop_status': 'JobStatus.CANCELED'}), '(job=job, stop_status=JobStatus.CANCELED)\n', (11163, 11204), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((12540, 12573), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'predict_dsl'}), '(data=predict_dsl)\n', (12555, 12573), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((13327, 13374), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'board_url': board_urls}"}), "(data={'board_url': board_urls})\n", (13342, 13374), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((13400, 13451), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no found job"""'}), "(retcode=101, retmsg='no found job')\n", (13415, 13451), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((13787, 13815), 'flask.request.json.get', 'request.json.get', (['"""order_by"""'], {}), "('order_by')\n", (13803, 13815), False, 'from flask import request, send_file, abort\n'), ((2774, 2898), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'RetCode.SUCCESS', 'retmsg': 'f"""stop job on this party {kill_status}; stop job on all party success"""'}), "(retcode=RetCode.SUCCESS, retmsg=\n f'stop job on this party {kill_status}; stop job on all party success')\n", (2789, 2898), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((2927, 3043), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'RetCode.OPERATING_ERROR', 'retmsg': 'f"""stop job on this party {kill_status}"""', 'data': 'response'}), "(retcode=RetCode.OPERATING_ERROR, retmsg=\n f'stop job on this party {kill_status}', data=response)\n", (2942, 3043), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((3556, 3624), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'RetCode.SUCCESS', 'retmsg': '"""rerun job success"""'}), "(retcode=RetCode.SUCCESS, retmsg='rerun job success')\n", (3571, 3624), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4401, 4420), 'flask.request.json.get', 'request.json.get', (['i'], {}), '(i)\n', (4417, 4420), False, 'from flask import request, send_file, abort\n'), ((4838, 4857), 'flask.request.json.get', 'request.json.get', (['i'], {}), '(i)\n', (4854, 4857), False, 'from flask import request, send_file, abort\n'), ((5051, 5099), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', 'f"""Invalid parameter \'{i}\'."""'], {}), '(400, f"Invalid parameter \'{i}\'.")\n', (5065, 5099), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((8188, 8299), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(404)', 'f"""Log file path: \'{job_log_dir}\' not found. 
Please check if the job id is valid."""'], {}), '(404,\n f"Log file path: \'{job_log_dir}\' not found. Please check if the job id is valid."\n )\n', (8202, 8299), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((8677, 8701), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (8689, 8701), False, 'import os\n'), ((8725, 8764), 'os.path.relpath', 'os.path.relpath', (['full_path', 'job_log_dir'], {}), '(full_path, job_log_dir)\n', (8740, 8764), False, 'import os\n'), ((9655, 9674), 'flask.request.json.get', 'request.json.get', (['i'], {}), '(i)\n', (9671, 9674), False, 'from flask import request, send_file, abort\n'), ((9848, 9897), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', 'f"""Invalid parameter \'role\'."""'], {}), '(400, f"Invalid parameter \'role\'.")\n', (9862, 9897), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((12172, 12214), 'os.makedirs', 'os.makedirs', (['TEMP_DIRECTORY'], {'exist_ok': '(True)'}), '(TEMP_DIRECTORY, exist_ok=True)\n', (12183, 12214), False, 'import os\n'), ((12609, 12633), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (12630, 12633), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((12649, 12769), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(210)', '"""DSL generating failed. For more details, please check logs/fate_flow/fate_flow_stat.log."""'], {}), "(210,\n 'DSL generating failed. For more details, please check logs/fate_flow/fate_flow_stat.log.'\n )\n", (12663, 12769), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((13205, 13270), 'fate_flow.utils.job_utils.get_board_url', 'job_utils.get_board_url', (['job.f_job_id', 'job.f_role', 'job.f_party_id'], {}), '(job.f_job_id, job.f_role, job.f_party_id)\n', (13228, 13270), False, 'from fate_flow.utils import job_utils, detect_utils, schedule_utils, log_utils\n'), ((13513, 13541), 'flask.request.json.get', 'request.json.get', (['"""limit"""', '(0)'], {}), "('limit', 0)\n", (13529, 13541), False, 'from flask import request, send_file, abort\n'), ((14043, 14068), 'flask.request.json.get', 'request.json.get', (['"""order"""'], {}), "('order')\n", (14059, 14068), False, 'from flask import request, send_file, abort\n'), ((2236, 2259), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2251, 2259), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((2400, 2423), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2415, 2423), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((2485, 2508), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2500, 2508), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3057, 3080), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (3072, 3080), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((4628, 4681), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', 'f"""Invalid parameter \'party_id\'."""'], {}), '(400, f"Invalid parameter \'party_id\'.")\n', (4642, 4681), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5256, 5304), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', 'f"""Invalid parameter \'{i}\'."""'], 
{}), '(400, f"Invalid parameter \'{i}\'.")\n', (5270, 5304), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6909, 6957), 'fate_flow.utils.log_utils.failed_log', 'log_utils.failed_log', (['f"""query job by {job_info}"""'], {}), "(f'query job by {job_info}')\n", (6929, 6957), False, 'from fate_flow.utils import job_utils, detect_utils, schedule_utils, log_utils\n'), ((10053, 10106), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', 'f"""Invalid parameter \'party_id\'."""'], {}), '(400, f"Invalid parameter \'party_id\'.")\n', (10067, 10106), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((13562, 13589), 'flask.request.json.get', 'request.json.get', (['"""page"""', '(1)'], {}), "('page', 1)\n", (13578, 13589), False, 'from flask import request, send_file, abort\n'), ((13631, 13691), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', 'f"""Invalid parameter \'limit\' or \'page\'."""'], {}), '(400, f"Invalid parameter \'limit\' or \'page\'.")\n', (13645, 13691), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((13926, 13979), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', 'f"""Invalid parameter \'order_by\'."""'], {}), '(400, f"Invalid parameter \'order_by\'.")\n', (13940, 13979), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((12372, 12405), 'json.dumps', 'json.dumps', (['predict_dsl'], {'indent': '(4)'}), '(predict_dsl, indent=4)\n', (12382, 12405), False, 'import json\n'), ((14181, 14237), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', 'f"""Invalid parameter order \'order\'."""'], {}), '(400, f"Invalid parameter order \'order\'.")\n', (14195, 14237), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((3745, 3765), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['response'], {}), '(response)\n', (3755, 3765), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import importlib import inspect import typing from pathlib import Path from fate_flow.utils.log_utils import getLogger from fate_flow.components._base import ComponentMeta _flow_base = Path(__file__).resolve().parent.parent.parent LOGGER = getLogger() def _search_components(path): try: module_name = '.'.join( path.absolute() .relative_to(_flow_base) .with_suffix("") .parts ) module = importlib.import_module(module_name) except ImportError as e: # or skip ? raise e _obj_pairs = inspect.getmembers(module, lambda obj: isinstance(obj, ComponentMeta)) return _obj_pairs, module_name class Components: provider_version = None provider_name = None @classmethod def get_names(cls) -> typing.Dict[str, dict]: names = {} _components_base = Path(__file__).resolve().parent for p in _components_base.glob("**/*.py"): obj_pairs, module_name = _search_components(p) for name, obj in obj_pairs: names[obj.name] = {"module": module_name} LOGGER.info(f"component register {obj.name} with cache info {module_name}") return names @classmethod def get(cls, name: str, cache) -> ComponentMeta: if cache: importlib.import_module(cache[name]["module"]) else: from .model_operation import ( model_restore_cpn_meta, model_store_cpn_meta, ) from .reader import reader_cpn_meta from .upload import upload_cpn_meta from .download import download_cpn_meta cpn = ComponentMeta.get_meta(name) return cpn
[ "fate_flow.utils.log_utils.getLogger", "fate_flow.components._base.ComponentMeta.get_meta" ]
[((860, 871), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (869, 871), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((1085, 1121), 'importlib.import_module', 'importlib.import_module', (['module_name'], {}), '(module_name)\n', (1108, 1121), False, 'import importlib\n'), ((2310, 2338), 'fate_flow.components._base.ComponentMeta.get_meta', 'ComponentMeta.get_meta', (['name'], {}), '(name)\n', (2332, 2338), False, 'from fate_flow.components._base import ComponentMeta\n'), ((1951, 1997), 'importlib.import_module', 'importlib.import_module', (["cache[name]['module']"], {}), "(cache[name]['module'])\n", (1974, 1997), False, 'import importlib\n'), ((804, 818), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (808, 818), False, 'from pathlib import Path\n'), ((1497, 1511), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1501, 1511), False, 'from pathlib import Path\n')]
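A hedged sketch of exercising the component registry above; the import path is an assumption (the class sits somewhere under fate_flow.components), and a full FATE installation is assumed so that the component modules can actually be imported.

# Sketch only: the module path below is assumed, not taken from this row.
from fate_flow.components.components import Components

# get_names() walks the package, imports each module, and maps every
# registered ComponentMeta name to the module that defines it.
cache = Components.get_names()
for name, info in cache.items():
    print(name, "->", info["module"])

# get() re-imports from the supplied cache, then resolves the ComponentMeta
# by name:
# meta = Components.get(next(iter(cache)), cache)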
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from copy import deepcopy import requests from fate_flow.utils.log_utils import schedule_logger from fate_flow.controller.engine_controller.engine import EngineABC from fate_flow.db.runtime_config import RuntimeConfig from fate_flow.entity.types import KillProcessRetCode from fate_flow.entity.run_status import LinkisJobStatus from fate_flow.settings import LINKIS_EXECUTE_ENTRANCE, LINKIS_SUBMIT_PARAMS, LINKIS_RUNTYPE, \ LINKIS_LABELS, LINKIS_QUERT_STATUS, LINKIS_KILL_ENTRANCE, detect_logger from fate_flow.db.service_registry import ServiceRegistry from fate_flow.db.db_models import Task class LinkisSparkEngine(EngineABC): def run(self, task: Task, run_parameters, run_parameters_path, config_dir, log_dir, cwd_dir, **kwargs): linkis_execute_url = "http://{}:{}{}".format(ServiceRegistry.LINKIS_SPARK_CONFIG.get("host"), ServiceRegistry.LINKIS_SPARK_CONFIG.get("port"), LINKIS_EXECUTE_ENTRANCE) headers = {"Token-Code": ServiceRegistry.LINKIS_SPARK_CONFIG.get("token_code"), "Token-User": kwargs.get("user_name"), "Content-Type": "application/json"} schedule_logger(Task.f_job_id).info(f"headers:{headers}") python_path = ServiceRegistry.LINKIS_SPARK_CONFIG.get("python_path") execution_code = 'import sys\nsys.path.append("{}")\n' \ 'from fate_flow.worker.task_executor import TaskExecutor\n' \ 'task_info = TaskExecutor.run_task(job_id="{}",component_name="{}",' \ 'task_id="{}",task_version={},role="{}",party_id={},' \ 'run_ip="{}",config="{}",job_server="{}")\n' \ 'TaskExecutor.report_task_update_to_driver(task_info=task_info)'. 
\ format(python_path, task.f_job_id, task.f_component_name, task.f_task_id, task.f_task_version, task.f_role, task.f_party_id, RuntimeConfig.JOB_SERVER_HOST, run_parameters_path, '{}:{}'.format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT)) schedule_logger(task.f_job_id).info(f"execution code:{execution_code}") params = deepcopy(LINKIS_SUBMIT_PARAMS) schedule_logger(task.f_job_id).info(f"spark run parameters:{run_parameters.spark_run}") for spark_key, v in run_parameters.spark_run.items(): if spark_key in ["spark.executor.memory", "spark.driver.memory", "spark.executor.instances", "wds.linkis.rm.yarnqueue"]: params["configuration"]["startup"][spark_key] = v data = { "method": LINKIS_EXECUTE_ENTRANCE, "params": params, "executeApplicationName": "spark", "executionCode": execution_code, "runType": LINKIS_RUNTYPE, "source": {}, "labels": LINKIS_LABELS } schedule_logger(task.f_job_id).info(f'submit linkis spark, data:{data}') task_info = { "engine_conf": {} } task_info["engine_conf"]["data"] = data task_info["engine_conf"]["headers"] = headers res = requests.post(url=linkis_execute_url, headers=headers, json=data) schedule_logger(task.f_job_id).info(f"start linkis spark task: {res.text}") if res.status_code == 200: if res.json().get("status"): raise Exception(f"submit linkis spark failed: {res.json()}") task_info["engine_conf"]["execID"] = res.json().get("data").get("execID") task_info["engine_conf"]["taskID"] = res.json().get("data").get("taskID") schedule_logger(task.f_job_id).info('submit linkis spark success') else: raise Exception(f"submit linkis spark failed: {res.text}") return task_info @staticmethod def kill(task): linkis_query_url = "http://{}:{}{}".format(ServiceRegistry.LINKIS_SPARK_CONFIG.get("host"), ServiceRegistry.LINKIS_SPARK_CONFIG.get("port"), LINKIS_QUERT_STATUS.replace("execID", task.f_engine_conf.get("execID"))) headers = task.f_engine_conf.get("headers") response = requests.get(linkis_query_url, headers=headers).json() schedule_logger(task.f_job_id).info(f"querty task response:{response}") if response.get("data").get("status") != LinkisJobStatus.SUCCESS: linkis_execute_url = "http://{}:{}{}".format(ServiceRegistry.LINKIS_SPARK_CONFIG.get("host"), ServiceRegistry.LINKIS_SPARK_CONFIG.get("port"), LINKIS_KILL_ENTRANCE.replace("execID", task.f_engine_conf.get("execID"))) schedule_logger(task.f_job_id).info(f"start stop task:{linkis_execute_url}") schedule_logger(task.f_job_id).info(f"headers: {headers}") kill_result = requests.get(linkis_execute_url, headers=headers) schedule_logger(task.f_job_id).info(f"kill result:{kill_result}") if kill_result.status_code == 200: pass return KillProcessRetCode.KILLED def is_alive(self, task): process_exist = True try: linkis_query_url = "http://{}:{}{}".format(ServiceRegistry.LINKIS_SPARK_CONFIG.get("host"), ServiceRegistry.LINKIS_SPARK_CONFIG.get("port"), LINKIS_QUERT_STATUS.replace("execID", task.f_engine_conf.get("execID"))) headers = task.f_engine_conf["headers"] response = requests.get(linkis_query_url, headers=headers).json() detect_logger.info(response) if response.get("data").get("status") == LinkisJobStatus.FAILED: process_exist = False except Exception as e: detect_logger.exception(e) process_exist = False return process_exist
[ "fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get", "fate_flow.settings.detect_logger.exception", "fate_flow.settings.detect_logger.info", "fate_flow.utils.log_utils.schedule_logger" ]
[((1933, 1987), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""python_path"""'], {}), "('python_path')\n", (1972, 1987), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((2859, 2889), 'copy.deepcopy', 'deepcopy', (['LINKIS_SUBMIT_PARAMS'], {}), '(LINKIS_SUBMIT_PARAMS)\n', (2867, 2889), False, 'from copy import deepcopy\n'), ((3832, 3897), 'requests.post', 'requests.post', ([], {'url': 'linkis_execute_url', 'headers': 'headers', 'json': 'data'}), '(url=linkis_execute_url, headers=headers, json=data)\n', (3845, 3897), False, 'import requests\n'), ((1415, 1462), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""host"""'], {}), "('host')\n", (1454, 1462), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((1517, 1564), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""port"""'], {}), "('port')\n", (1556, 1564), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((1677, 1730), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""token_code"""'], {}), "('token_code')\n", (1716, 1730), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((4586, 4633), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""host"""'], {}), "('host')\n", (4625, 4633), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((4686, 4733), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""port"""'], {}), "('port')\n", (4725, 4733), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((5833, 5882), 'requests.get', 'requests.get', (['linkis_execute_url'], {'headers': 'headers'}), '(linkis_execute_url, headers=headers)\n', (5845, 5882), False, 'import requests\n'), ((6621, 6649), 'fate_flow.settings.detect_logger.info', 'detect_logger.info', (['response'], {}), '(response)\n', (6639, 6649), False, 'from fate_flow.settings import LINKIS_EXECUTE_ENTRANCE, LINKIS_SUBMIT_PARAMS, LINKIS_RUNTYPE, LINKIS_LABELS, LINKIS_QUERT_STATUS, LINKIS_KILL_ENTRANCE, detect_logger\n'), ((1853, 1883), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['Task.f_job_id'], {}), '(Task.f_job_id)\n', (1868, 1883), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((2770, 2800), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (2785, 2800), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((2898, 2928), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (2913, 2928), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3581, 3611), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (3596, 3611), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3906, 3936), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (3921, 3936), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5009, 5056), 'requests.get', 'requests.get', (['linkis_query_url'], {'headers': 'headers'}), 
'(linkis_query_url, headers=headers)\n', (5021, 5056), False, 'import requests\n'), ((5072, 5102), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (5087, 5102), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5275, 5322), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""host"""'], {}), "('host')\n", (5314, 5322), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((5381, 5428), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""port"""'], {}), "('port')\n", (5420, 5428), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((6198, 6245), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""host"""'], {}), "('host')\n", (6237, 6245), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((6302, 6349), 'fate_flow.db.service_registry.ServiceRegistry.LINKIS_SPARK_CONFIG.get', 'ServiceRegistry.LINKIS_SPARK_CONFIG.get', (['"""port"""'], {}), "('port')\n", (6341, 6349), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((6808, 6834), 'fate_flow.settings.detect_logger.exception', 'detect_logger.exception', (['e'], {}), '(e)\n', (6831, 6834), False, 'from fate_flow.settings import LINKIS_EXECUTE_ENTRANCE, LINKIS_SUBMIT_PARAMS, LINKIS_RUNTYPE, LINKIS_LABELS, LINKIS_QUERT_STATUS, LINKIS_KILL_ENTRANCE, detect_logger\n'), ((4319, 4349), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (4334, 4349), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5659, 5689), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (5674, 5689), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5748, 5778), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (5763, 5778), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5895, 5925), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (5910, 5925), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((6554, 6601), 'requests.get', 'requests.get', (['linkis_query_url'], {'headers': 'headers'}), '(linkis_query_url, headers=headers)\n', (6566, 6601), False, 'import requests\n')]
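The engine above builds its Linkis status URL by substituting the task's execID into a configured template and then reads data.status from the JSON reply. A standalone hedged sketch of that poll follows; the URL template, token header value, and status strings are placeholders, and only the substitute-and-read pattern mirrors the code above.

import requests

def poll_linkis_status(host, port, status_url_template, exec_id, token_code):
    # Mirrors the pattern above: the literal placeholder "execID" in the
    # configured template is replaced with the real execution id, and the
    # Token-Code header authenticates the request. Template and token values
    # are assumptions, not taken from FATE settings.
    url = "http://{}:{}{}".format(host, port, status_url_template.replace("execID", exec_id))
    headers = {"Token-Code": token_code, "Content-Type": "application/json"}
    response = requests.get(url, headers=headers).json()
    return response.get("data", {}).get("status")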
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json import requests import time from flask import jsonify from flask import Response from fate_arch.common.base_utils import json_loads, json_dumps from fate_arch.common.conf_utils import get_base_config from fate_arch.common.log import audit_logger, schedule_logger from fate_arch.common import FederatedMode from fate_arch.common import conf_utils from fate_arch.common import CoordinationProxyService, CoordinationCommunicationProtocol from fate_flow.settings import DEFAULT_REMOTE_REQUEST_TIMEOUT, CHECK_NODES_IDENTITY,\ FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, API_VERSION, stat_logger from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_command_federation_channel, gen_routing_metadata, \ forward_grpc_packet from fate_flow.utils.service_utils import ServiceUtils from fate_flow.entity.runtime_config import RuntimeConfig def get_json_result(retcode=0, retmsg='success', data=None, job_id=None, meta=None): result_dict = {"retcode": retcode, "retmsg": retmsg, "data": data, "jobId": job_id, "meta": meta} response = {} for key, value in result_dict.items(): if value is None and key != "retcode": continue else: response[key] = value return jsonify(response) def server_error_response(e): stat_logger.exception(e) if len(e.args) > 1: return get_json_result(retcode=100, retmsg=str(e.args[0]), data=e.args[1]) else: return get_json_result(retcode=100, retmsg=str(e)) def error_response(response_code, retmsg): return Response(json.dumps({'retmsg': retmsg, 'retcode': response_code}), status=response_code, mimetype='application/json') def federated_api(job_id, method, endpoint, src_party_id, dest_party_id, src_role, json_body, federated_mode, api_version=API_VERSION, overall_timeout=DEFAULT_REMOTE_REQUEST_TIMEOUT): if int(dest_party_id) == 0: federated_mode = FederatedMode.SINGLE if federated_mode == FederatedMode.SINGLE: return local_api(job_id=job_id, method=method, endpoint=endpoint, json_body=json_body, api_version=api_version) elif federated_mode == FederatedMode.MULTIPLE: host, port, protocol = get_federated_proxy_address(src_party_id, dest_party_id) if protocol == CoordinationCommunicationProtocol.HTTP: return federated_coordination_on_http(job_id=job_id, method=method, host=host, port=port, endpoint=endpoint, src_party_id=src_party_id, src_role=src_role, dest_party_id=dest_party_id, json_body=json_body, api_version=api_version, overall_timeout=overall_timeout) elif protocol == CoordinationCommunicationProtocol.GRPC: return federated_coordination_on_grpc(job_id=job_id, method=method, host=host, port=port, endpoint=endpoint, src_party_id=src_party_id, src_role=src_role, dest_party_id=dest_party_id, json_body=json_body, api_version=api_version, overall_timeout=overall_timeout) else: raise Exception(f"{protocol} coordination communication protocol is not supported.") else: raise Exception('{} work mode is not supported'.format(federated_mode)) def local_api(job_id, method, endpoint, json_body, 
api_version=API_VERSION, try_times=3): return federated_coordination_on_http(job_id=job_id, method=method, host=RuntimeConfig.JOB_SERVER_HOST, port=RuntimeConfig.HTTP_PORT, endpoint=endpoint, src_party_id="", src_role="", dest_party_id="", json_body=json_body, api_version=api_version, try_times=try_times) def get_federated_proxy_address(src_party_id, dest_party_id): proxy_config = get_base_config("fateflow", {}).get("proxy", None) protocol_config = get_base_config("fateflow", {}).get("protocol", "default") if isinstance(proxy_config, str): if proxy_config == CoordinationProxyService.ROLLSITE: proxy_address = get_base_config("fate_on_eggroll", {}).get(proxy_config) return proxy_address["host"], proxy_address.get("grpc_port", proxy_address["port"]), CoordinationCommunicationProtocol.GRPC elif proxy_config == CoordinationProxyService.NGINX: proxy_address = get_base_config("fate_on_spark", {}).get(proxy_config) protocol = CoordinationCommunicationProtocol.HTTP if protocol_config == "default" else protocol_config return proxy_address["host"], proxy_address[f"{protocol}_port"], protocol else: raise RuntimeError(f"can not support coordinate proxy {proxy_config}") elif isinstance(proxy_config, dict): proxy_address = proxy_config protocol = CoordinationCommunicationProtocol.HTTP if protocol_config == "default" else protocol_config proxy_name = proxy_config.get("name", CoordinationProxyService.FATEFLOW) if proxy_name == CoordinationProxyService.FATEFLOW and str(dest_party_id) == str(src_party_id): host = RuntimeConfig.JOB_SERVER_HOST port = RuntimeConfig.HTTP_PORT else: host = proxy_address["host"] port = proxy_address[f"{protocol}_port"] return host, port, protocol else: raise RuntimeError(f"can not support coordinate proxy config {proxy_config}") def federated_coordination_on_http(job_id, method, host, port, endpoint, src_party_id, src_role, dest_party_id, json_body, api_version=API_VERSION, overall_timeout=DEFAULT_REMOTE_REQUEST_TIMEOUT, try_times=3): endpoint = f"/{api_version}{endpoint}" exception = None json_body['src_role'] = src_role json_body['src_party_id'] = src_party_id for t in range(try_times): try: url = "http://{}:{}{}".format(host, port, endpoint) audit_logger(job_id).info('remote http api request: {}'.format(url)) action = getattr(requests, method.lower(), None) headers = HEADERS.copy() headers["dest-party-id"] = str(dest_party_id) headers["src-party-id"] = str(src_party_id) headers["src-role"] = str(src_role) http_response = action(url=url, data=json_dumps(json_body), headers=headers) audit_logger(job_id).info(http_response.text) response = http_response.json() audit_logger(job_id).info('remote http api response: {} {}'.format(endpoint, response)) return response except Exception as e: exception = e schedule_logger(job_id).warning(f"remote http request {endpoint} error, sleep and try again") time.sleep(2 * (t+1)) else: raise Exception('remote http request error: {}'.format(exception)) def federated_coordination_on_grpc(job_id, method, host, port, endpoint, src_party_id, src_role, dest_party_id, json_body, api_version=API_VERSION, overall_timeout=DEFAULT_REMOTE_REQUEST_TIMEOUT, try_times=3): endpoint = f"/{api_version}{endpoint}" json_body['src_role'] = src_role json_body['src_party_id'] = src_party_id if CHECK_NODES_IDENTITY: get_node_identity(json_body, src_party_id) _packet = wrap_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id, overall_timeout=overall_timeout) _routing_metadata = gen_routing_metadata(src_party_id=src_party_id, 
dest_party_id=dest_party_id) exception = None for t in range(try_times): try: channel, stub = get_command_federation_channel(host, port) _return, _call = stub.unaryCall.with_call(_packet, metadata=_routing_metadata, timeout=(overall_timeout/1000)) audit_logger(job_id).info("grpc api response: {}".format(_return)) channel.close() response = json_loads(_return.body.value) return response except Exception as e: exception = e schedule_logger(job_id).warning(f"remote request {endpoint} error, sleep and try again") time.sleep(2 * (t+1)) else: tips = 'Please check rollSite and fateflow network connectivity' """ if 'Error received from peer' in str(exception): tips = 'Please check if the fate flow server of the other party is started. ' if 'failed to connect to all addresses' in str(exception): tips = 'Please check whether the rollsite service(port: 9370) is started. ' """ raise Exception('{}rpc request error: {}'.format(tips, exception)) def proxy_api(role, _job_id, request_config): job_id = request_config.get('header').get('job_id', _job_id) method = request_config.get('header').get('method', 'POST') endpoint = request_config.get('header').get('endpoint') src_party_id = request_config.get('header').get('src_party_id') dest_party_id = request_config.get('header').get('dest_party_id') json_body = request_config.get('body') _packet = forward_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id=job_id, role=role, overall_timeout=DEFAULT_REMOTE_REQUEST_TIMEOUT) _routing_metadata = gen_routing_metadata(src_party_id=src_party_id, dest_party_id=dest_party_id) host, port, protocol = get_federated_proxy_address(src_party_id, dest_party_id) channel, stub = get_command_federation_channel(host, port) _return, _call = stub.unaryCall.with_call(_packet, metadata=_routing_metadata) channel.close() json_body = json_loads(_return.body.value) return json_body def forward_api(role, request_config): endpoint = request_config.get('header', {}).get('endpoint') ip = get_base_config(role, {}).get("host", "127.0.0.1") port = get_base_config(role, {}).get("port") url = "http://{}:{}{}".format(ip, port, endpoint) method = request_config.get('header', {}).get('method', 'post') audit_logger().info('api request: {}'.format(url)) action = getattr(requests, method.lower(), None) http_response = action(url=url, json=request_config.get('body'), headers=HEADERS) response = http_response.json() audit_logger().info(response) return response def get_node_identity(json_body, src_party_id): params = { 'partyId': int(src_party_id), 'federatedId': conf_utils.get_base_config("fatemanager", {}).get("federatedId") } try: response = requests.post(url="http://{}:{}{}".format( ServiceUtils.get_item("fatemanager", "host"), ServiceUtils.get_item("fatemanager", "port"), FATE_MANAGER_GET_NODE_INFO_ENDPOINT), json=params) json_body['appKey'] = response.json().get('data').get('appKey') json_body['appSecret'] = response.json().get('data').get('appSecret') json_body['_src_role'] = response.json().get('data').get('role') except Exception as e: raise Exception('get appkey and secret failed: {}'.format(str(e)))
[ "fate_flow.utils.grpc_utils.get_command_federation_channel", "fate_flow.utils.grpc_utils.forward_grpc_packet", "fate_flow.settings.HEADERS.copy", "fate_flow.utils.grpc_utils.gen_routing_metadata", "fate_flow.utils.service_utils.ServiceUtils.get_item", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.grpc_utils.wrap_grpc_packet" ]
[((1853, 1870), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (1860, 1870), False, 'from flask import jsonify\n'), ((1907, 1931), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1928, 1931), False, 'from fate_flow.settings import DEFAULT_REMOTE_REQUEST_TIMEOUT, CHECK_NODES_IDENTITY, FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, API_VERSION, stat_logger\n'), ((7955, 8074), 'fate_flow.utils.grpc_utils.wrap_grpc_packet', 'wrap_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id', 'job_id'], {'overall_timeout': 'overall_timeout'}), '(json_body, method, endpoint, src_party_id, dest_party_id,\n job_id, overall_timeout=overall_timeout)\n', (7971, 8074), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_command_federation_channel, gen_routing_metadata, forward_grpc_packet\n'), ((8126, 8202), 'fate_flow.utils.grpc_utils.gen_routing_metadata', 'gen_routing_metadata', ([], {'src_party_id': 'src_party_id', 'dest_party_id': 'dest_party_id'}), '(src_party_id=src_party_id, dest_party_id=dest_party_id)\n', (8146, 8202), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_command_federation_channel, gen_routing_metadata, forward_grpc_packet\n'), ((9759, 9919), 'fate_flow.utils.grpc_utils.forward_grpc_packet', 'forward_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id'], {'job_id': 'job_id', 'role': 'role', 'overall_timeout': 'DEFAULT_REMOTE_REQUEST_TIMEOUT'}), '(json_body, method, endpoint, src_party_id,\n dest_party_id, job_id=job_id, role=role, overall_timeout=\n DEFAULT_REMOTE_REQUEST_TIMEOUT)\n', (9778, 9919), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_command_federation_channel, gen_routing_metadata, forward_grpc_packet\n'), ((9969, 10045), 'fate_flow.utils.grpc_utils.gen_routing_metadata', 'gen_routing_metadata', ([], {'src_party_id': 'src_party_id', 'dest_party_id': 'dest_party_id'}), '(src_party_id=src_party_id, dest_party_id=dest_party_id)\n', (9989, 10045), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_command_federation_channel, gen_routing_metadata, forward_grpc_packet\n'), ((10150, 10192), 'fate_flow.utils.grpc_utils.get_command_federation_channel', 'get_command_federation_channel', (['host', 'port'], {}), '(host, port)\n', (10180, 10192), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_command_federation_channel, gen_routing_metadata, forward_grpc_packet\n'), ((10312, 10342), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['_return.body.value'], {}), '(_return.body.value)\n', (10322, 10342), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((2173, 2229), 'json.dumps', 'json.dumps', (["{'retmsg': retmsg, 'retcode': response_code}"], {}), "({'retmsg': retmsg, 'retcode': response_code})\n", (2183, 2229), False, 'import json\n'), ((4480, 4511), 'fate_arch.common.conf_utils.get_base_config', 'get_base_config', (['"""fateflow"""', '{}'], {}), "('fateflow', {})\n", (4495, 4511), False, 'from fate_arch.common.conf_utils import get_base_config\n'), ((4553, 4584), 'fate_arch.common.conf_utils.get_base_config', 'get_base_config', (['"""fateflow"""', '{}'], {}), "('fateflow', {})\n", (4568, 4584), False, 'from fate_arch.common.conf_utils import get_base_config\n'), ((6711, 6725), 'fate_flow.settings.HEADERS.copy', 'HEADERS.copy', ([], {}), '()\n', (6723, 6725), False, 'from fate_flow.settings import DEFAULT_REMOTE_REQUEST_TIMEOUT, 
CHECK_NODES_IDENTITY, FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, API_VERSION, stat_logger\n'), ((8296, 8338), 'fate_flow.utils.grpc_utils.get_command_federation_channel', 'get_command_federation_channel', (['host', 'port'], {}), '(host, port)\n', (8326, 8338), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_command_federation_channel, gen_routing_metadata, forward_grpc_packet\n'), ((8592, 8622), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['_return.body.value'], {}), '(_return.body.value)\n', (8602, 8622), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((10478, 10503), 'fate_arch.common.conf_utils.get_base_config', 'get_base_config', (['role', '{}'], {}), '(role, {})\n', (10493, 10503), False, 'from fate_arch.common.conf_utils import get_base_config\n'), ((10540, 10565), 'fate_arch.common.conf_utils.get_base_config', 'get_base_config', (['role', '{}'], {}), '(role, {})\n', (10555, 10565), False, 'from fate_arch.common.conf_utils import get_base_config\n'), ((10704, 10718), 'fate_arch.common.log.audit_logger', 'audit_logger', ([], {}), '()\n', (10716, 10718), False, 'from fate_arch.common.log import audit_logger, schedule_logger\n'), ((10934, 10948), 'fate_arch.common.log.audit_logger', 'audit_logger', ([], {}), '()\n', (10946, 10948), False, 'from fate_arch.common.log import audit_logger, schedule_logger\n'), ((7382, 7405), 'time.sleep', 'time.sleep', (['(2 * (t + 1))'], {}), '(2 * (t + 1))\n', (7392, 7405), False, 'import time\n'), ((8821, 8844), 'time.sleep', 'time.sleep', (['(2 * (t + 1))'], {}), '(2 * (t + 1))\n', (8831, 8844), False, 'import time\n'), ((11110, 11155), 'fate_arch.common.conf_utils.get_base_config', 'conf_utils.get_base_config', (['"""fatemanager"""', '{}'], {}), "('fatemanager', {})\n", (11136, 11155), False, 'from fate_arch.common import conf_utils\n'), ((4740, 4778), 'fate_arch.common.conf_utils.get_base_config', 'get_base_config', (['"""fate_on_eggroll"""', '{}'], {}), "('fate_on_eggroll', {})\n", (4755, 4778), False, 'from fate_arch.common.conf_utils import get_base_config\n'), ((6559, 6579), 'fate_arch.common.log.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (6571, 6579), False, 'from fate_arch.common.log import audit_logger, schedule_logger\n'), ((6937, 6958), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['json_body'], {}), '(json_body)\n', (6947, 6958), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((6989, 7009), 'fate_arch.common.log.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (7001, 7009), False, 'from fate_arch.common.log import audit_logger, schedule_logger\n'), ((7091, 7111), 'fate_arch.common.log.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (7103, 7111), False, 'from fate_arch.common.log import audit_logger, schedule_logger\n'), ((8474, 8494), 'fate_arch.common.log.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (8486, 8494), False, 'from fate_arch.common.log import audit_logger, schedule_logger\n'), ((11264, 11308), 'fate_flow.utils.service_utils.ServiceUtils.get_item', 'ServiceUtils.get_item', (['"""fatemanager"""', '"""host"""'], {}), "('fatemanager', 'host')\n", (11285, 11308), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((11322, 11366), 'fate_flow.utils.service_utils.ServiceUtils.get_item', 'ServiceUtils.get_item', (['"""fatemanager"""', '"""port"""'], {}), "('fatemanager', 'port')\n", (11343, 11366), False, 'from fate_flow.utils.service_utils import 
ServiceUtils\n'), ((5022, 5058), 'fate_arch.common.conf_utils.get_base_config', 'get_base_config', (['"""fate_on_spark"""', '{}'], {}), "('fate_on_spark', {})\n", (5037, 5058), False, 'from fate_arch.common.conf_utils import get_base_config\n'), ((7276, 7299), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (7291, 7299), False, 'from fate_arch.common.log import audit_logger, schedule_logger\n'), ((8720, 8743), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (8735, 8743), False, 'from fate_arch.common.log import audit_logger, schedule_logger\n')]
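A minimal sketch (not FATE source) of the retry pattern that federated_coordination_on_http and federated_coordination_on_grpc in the row above both follow: attempt the remote call up to try_times, sleep 2 * (t + 1) seconds between failures, and raise the last exception from the loop's else branch. The URL, payload and header values are illustrative assumptions.

import time
import requests

def post_with_retry(url, json_body, headers=None, try_times=3):
    exception = None
    for t in range(try_times):
        try:
            # one attempt; a network error or bad JSON falls through to the retry branch
            return requests.post(url=url, json=json_body, headers=headers).json()
        except Exception as e:
            exception = e
            time.sleep(2 * (t + 1))  # 2s, 4s, 6s between attempts, mirroring the loop above
    else:
        # the for/else runs only when every attempt failed (a successful attempt returns early)
        raise Exception('remote http request error: {}'.format(exception))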
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from arch.api import session from arch.api.utils.log_utils import LoggerFactory from fate_flow.entity.metric import MetricType, MetricMeta, Metric from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged from federatedml.framework.homo.blocks.base import HomoTransferBase from federatedml.framework.homo.blocks.has_converged import HasConvergedTransVar from federatedml.framework.homo.blocks.loss_scatter import LossScatterTransVar from federatedml.framework.homo.blocks.secure_aggregator import SecureAggregatorTransVar from federatedml.model_base import ModelBase from federatedml.nn.homo_nn import nn_model from federatedml.nn.homo_nn.nn_model import restore_nn_model from federatedml.optim.convergence import converge_func_factory from federatedml.param.homo_nn_param import HomoNNParam from federatedml.util import consts from federatedml.util.io_check import assert_io_num_rows_equal Logger = LoggerFactory.get_logger() MODEL_META_NAME = "HomoNNModelMeta" MODEL_PARAM_NAME = "HomoNNModelParam" def _build_model_dict(meta, param): return {MODEL_META_NAME: meta, MODEL_PARAM_NAME: param} def _extract_param(model_dict: dict): return model_dict.get(MODEL_PARAM_NAME, None) def _extract_meta(model_dict: dict): return model_dict.get(MODEL_META_NAME, None) class HomoNNBase(ModelBase): def __init__(self, trans_var): super().__init__() self.model_param = HomoNNParam() self.aggregate_iteration_num = 0 self.transfer_variable = trans_var def _suffix(self): return self.aggregate_iteration_num, def _init_model(self, param: HomoNNParam): self.param = param self.enable_secure_aggregate = param.secure_aggregate self.max_aggregate_iteration_num = param.max_iter class HomoNNServer(HomoNNBase): def __init__(self, trans_var): super().__init__(trans_var=trans_var) self.model = None self.aggregator = secure_mean_aggregator.Server(self.transfer_variable.secure_aggregator_trans_var) self.loss_scatter = loss_scatter.Server(self.transfer_variable.loss_scatter_trans_var) self.has_converged = has_converged.Server(self.transfer_variable.has_converged_trans_var) def _init_model(self, param: HomoNNParam): super()._init_model(param=param) early_stop = self.model_param.early_stop self.converge_func = converge_func_factory(early_stop.converge_func, early_stop.eps).is_converge self.loss_consumed = early_stop.converge_func != "weight_diff" def callback_loss(self, iter_num, loss): metric_meta = MetricMeta(name='train', metric_type="LOSS", extra_metas={ "unit_name": "iters", }) self.callback_meta(metric_name='loss', metric_namespace='train', metric_meta=metric_meta) self.callback_metric(metric_name='loss', metric_namespace='train', metric_data=[Metric(iter_num, loss)]) def _is_converged(self): loss = self.loss_scatter.weighted_loss_mean(suffix=self._suffix()) Logger.info(f"loss at iter {self.aggregate_iteration_num}: {loss}") self.callback_loss(self.aggregate_iteration_num, loss) if self.loss_consumed: 
is_converged = self.converge_func(loss) else: is_converged = self.converge_func(self.model) self.has_converged.remote_converge_status(is_converge=is_converged, suffix=self._suffix()) return is_converged def fit(self, data_inst): while self.aggregate_iteration_num < self.max_aggregate_iteration_num: self.model = self.aggregator.weighted_mean_model(suffix=self._suffix()) self.aggregator.send_aggregated_model(model=self.model, suffix=self._suffix()) if self._is_converged(): Logger.info(f"early stop at iter {self.aggregate_iteration_num}") break self.aggregate_iteration_num += 1 else: Logger.warn(f"reach max iter: {self.aggregate_iteration_num}, not converged") def save_model(self): return self.model class HomoNNClient(HomoNNBase): def __init__(self, trans_var): super().__init__(trans_var=trans_var) self.aggregator = secure_mean_aggregator.Client(self.transfer_variable.secure_aggregator_trans_var) self.loss_scatter = loss_scatter.Client(self.transfer_variable.loss_scatter_trans_var) self.has_converged = has_converged.Client(self.transfer_variable.has_converged_trans_var) self.nn_model = None def _init_model(self, param: HomoNNParam): super()._init_model(param=param) self.batch_size = param.batch_size self.aggregate_every_n_epoch = param.aggregate_every_n_epoch self.nn_define = param.nn_define self.config_type = param.config_type self.optimizer = param.optimizer self.loss = param.loss self.metrics = param.metrics self.encode_label = param.encode_label self.data_converter = nn_model.get_data_converter(self.config_type) self.model_builder = nn_model.get_nn_builder(config_type=self.config_type) def _is_converged(self, data, epoch_degree): metrics = self.nn_model.evaluate(data) Logger.info(f"metrics at iter {self.aggregate_iteration_num}: {metrics}") loss = metrics["loss"] self.loss_scatter.send_loss(loss=(loss, epoch_degree), suffix=self._suffix()) is_converged = self.has_converged.get_converge_status(suffix=self._suffix()) return is_converged def __build_nn_model(self, input_shape): self.nn_model = self.model_builder(input_shape=input_shape, nn_define=self.nn_define, optimizer=self.optimizer, loss=self.loss, metrics=self.metrics) def __build_pytorch_model(self, nn_define): self.nn_model = self.model_builder(nn_define=nn_define, optimizer=self.optimizer, loss=self.loss, metrics=self.metrics) def fit(self, data_inst, *args): data = self.data_converter.convert(data_inst, batch_size=self.batch_size, encode_label=self.encode_label) if self.config_type == "pytorch": self.__build_pytorch_model(self.nn_define) else: self.__build_nn_model(data.get_shape()[0]) epoch_degree = float(len(data)) * self.aggregate_every_n_epoch while self.aggregate_iteration_num < self.max_aggregate_iteration_num: Logger.info(f"start {self.aggregate_iteration_num}_th aggregation") # train self.nn_model.train(data, aggregate_every_n_epoch=self.aggregate_every_n_epoch) # send model for aggregate, then set aggregated model to local self.aggregator.send_weighted_model(weighted_model=self.nn_model.get_model_weights(), weight=epoch_degree * self.aggregate_every_n_epoch, suffix=self._suffix()) weights = self.aggregator.get_aggregated_model(suffix=self._suffix()) self.nn_model.set_model_weights(weights=weights) # calc loss and check convergence if self._is_converged(data, epoch_degree): Logger.info(f"early stop at iter {self.aggregate_iteration_num}") break Logger.info(f"role {self.role} finish {self.aggregate_iteration_num}_th aggregation") self.aggregate_iteration_num += 1 else: Logger.warn(f"reach max iter: 
{self.aggregate_iteration_num}, not converged") def export_model(self): return _build_model_dict(meta=self._get_meta(), param=self._get_param()) def _get_meta(self): from federatedml.protobuf.generated import nn_model_meta_pb2 meta_pb = nn_model_meta_pb2.NNModelMeta() meta_pb.params.CopyFrom(self.model_param.generate_pb()) meta_pb.aggregate_iter = self.aggregate_iteration_num return meta_pb def _get_param(self): from federatedml.protobuf.generated import nn_model_param_pb2 param_pb = nn_model_param_pb2.NNModelParam() param_pb.saved_model_bytes = self.nn_model.export_model() return param_pb @assert_io_num_rows_equal def predict(self, data_inst): data = self.data_converter.convert(data_inst, batch_size=self.batch_size, encode_label=self.encode_label) predict = self.nn_model.predict(data) num_output_units = predict.shape[1] threshold = self.param.predict_param.threshold if num_output_units == 1: kv = [(x[0], (0 if x[1][0] <= threshold else 1, x[1][0].item())) for x in zip(data.get_keys(), predict)] pred_tbl = session.parallelize(kv, include_key=True, partition=data_inst.get_partitions()) return data_inst.join(pred_tbl, lambda d, pred: [d.label, pred[0], pred[1], {"0": 1 - pred[1], "1": pred[1]}]) else: kv = [(x[0], (x[1].argmax(), [float(e) for e in x[1]])) for x in zip(data.get_keys(), predict)] pred_tbl = session.parallelize(kv, include_key=True, partition=data_inst.get_partitions()) return data_inst.join(pred_tbl, lambda d, pred: [d.label, pred[0].item(), pred[1][pred[0]], {str(v): pred[1][v] for v in range(len(pred[1]))}]) def load_model(self, model_dict): model_dict = list(model_dict["model"].values())[0] model_obj = _extract_param(model_dict) meta_obj = _extract_meta(model_dict) self.model_param.restore_from_pb(meta_obj.params) self._init_model(self.model_param) self.aggregate_iteration_num = meta_obj.aggregate_iter self.nn_model = restore_nn_model(self.config_type, model_obj.saved_model_bytes) # server: Arbiter, clients: Guest and Hosts class HomoNNDefaultTransVar(HomoTransferBase): def __init__(self, server=(consts.ARBITER,), clients=(consts.GUEST, consts.HOST), prefix=None): super().__init__(server=server, clients=clients, prefix=prefix) self.secure_aggregator_trans_var = SecureAggregatorTransVar(server=server, clients=clients, prefix=self.prefix) self.loss_scatter_trans_var = LossScatterTransVar(server=server, clients=clients, prefix=self.prefix) self.has_converged_trans_var = HasConvergedTransVar(server=server, clients=clients, prefix=self.prefix) class HomoNNDefaultClient(HomoNNClient): def __init__(self): super().__init__(trans_var=HomoNNDefaultTransVar()) class HomoNNDefaultServer(HomoNNServer): def __init__(self): super().__init__(trans_var=HomoNNDefaultTransVar()) # server: Arbiter, clients: Guest and Hosts class HomoNNGuestServerTransVar(HomoNNDefaultTransVar): def __init__(self, server=(consts.GUEST,), clients=(consts.HOST,), prefix=None): super().__init__(server=server, clients=clients, prefix=prefix) class HomoNNGuestServerClient(HomoNNClient): def __init__(self): super().__init__(trans_var=HomoNNGuestServerTransVar()) class HomoNNGuestServerServer(HomoNNServer): def __init__(self): super().__init__(trans_var=HomoNNGuestServerTransVar()) # server: Arbiter, clients: Hosts class HomoNNArbiterSubmitTransVar(HomoNNDefaultTransVar): def __init__(self, server=(consts.ARBITER,), clients=(consts.HOST,), prefix=None): super().__init__(server=server, clients=clients, prefix=prefix) class HomoNNArbiterSubmitClient(HomoNNClient): def __init__(self): 
super().__init__(trans_var=HomoNNArbiterSubmitTransVar()) class HomoNNArbiterSubmitServer(HomoNNServer): def __init__(self): super().__init__(trans_var=HomoNNArbiterSubmitTransVar())
[ "fate_flow.entity.metric.MetricMeta", "fate_flow.entity.metric.Metric" ]
[((1558, 1584), 'arch.api.utils.log_utils.LoggerFactory.get_logger', 'LoggerFactory.get_logger', ([], {}), '()\n', (1582, 1584), False, 'from arch.api.utils.log_utils import LoggerFactory\n'), ((2055, 2068), 'federatedml.param.homo_nn_param.HomoNNParam', 'HomoNNParam', ([], {}), '()\n', (2066, 2068), False, 'from federatedml.param.homo_nn_param import HomoNNParam\n'), ((2586, 2672), 'federatedml.framework.homo.blocks.secure_mean_aggregator.Server', 'secure_mean_aggregator.Server', (['self.transfer_variable.secure_aggregator_trans_var'], {}), '(self.transfer_variable.\n secure_aggregator_trans_var)\n', (2615, 2672), False, 'from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged\n'), ((2696, 2762), 'federatedml.framework.homo.blocks.loss_scatter.Server', 'loss_scatter.Server', (['self.transfer_variable.loss_scatter_trans_var'], {}), '(self.transfer_variable.loss_scatter_trans_var)\n', (2715, 2762), False, 'from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged\n'), ((2792, 2860), 'federatedml.framework.homo.blocks.has_converged.Server', 'has_converged.Server', (['self.transfer_variable.has_converged_trans_var'], {}), '(self.transfer_variable.has_converged_trans_var)\n', (2812, 2860), False, 'from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged\n'), ((3243, 3328), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""train"""', 'metric_type': '"""LOSS"""', 'extra_metas': "{'unit_name': 'iters'}"}), "(name='train', metric_type='LOSS', extra_metas={'unit_name': 'iters'}\n )\n", (3253, 3328), False, 'from fate_flow.entity.metric import MetricType, MetricMeta, Metric\n'), ((5031, 5117), 'federatedml.framework.homo.blocks.secure_mean_aggregator.Client', 'secure_mean_aggregator.Client', (['self.transfer_variable.secure_aggregator_trans_var'], {}), '(self.transfer_variable.\n secure_aggregator_trans_var)\n', (5060, 5117), False, 'from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged\n'), ((5141, 5207), 'federatedml.framework.homo.blocks.loss_scatter.Client', 'loss_scatter.Client', (['self.transfer_variable.loss_scatter_trans_var'], {}), '(self.transfer_variable.loss_scatter_trans_var)\n', (5160, 5207), False, 'from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged\n'), ((5237, 5305), 'federatedml.framework.homo.blocks.has_converged.Client', 'has_converged.Client', (['self.transfer_variable.has_converged_trans_var'], {}), '(self.transfer_variable.has_converged_trans_var)\n', (5257, 5305), False, 'from federatedml.framework.homo.blocks import secure_mean_aggregator, loss_scatter, has_converged\n'), ((5810, 5855), 'federatedml.nn.homo_nn.nn_model.get_data_converter', 'nn_model.get_data_converter', (['self.config_type'], {}), '(self.config_type)\n', (5837, 5855), False, 'from federatedml.nn.homo_nn import nn_model\n'), ((5885, 5938), 'federatedml.nn.homo_nn.nn_model.get_nn_builder', 'nn_model.get_nn_builder', ([], {'config_type': 'self.config_type'}), '(config_type=self.config_type)\n', (5908, 5938), False, 'from federatedml.nn.homo_nn import nn_model\n'), ((8859, 8890), 'federatedml.protobuf.generated.nn_model_meta_pb2.NNModelMeta', 'nn_model_meta_pb2.NNModelMeta', ([], {}), '()\n', (8888, 8890), False, 'from federatedml.protobuf.generated import nn_model_meta_pb2\n'), ((9156, 9189), 'federatedml.protobuf.generated.nn_model_param_pb2.NNModelParam', 
'nn_model_param_pb2.NNModelParam', ([], {}), '()\n', (9187, 9189), False, 'from federatedml.protobuf.generated import nn_model_param_pb2\n'), ((10911, 10974), 'federatedml.nn.homo_nn.nn_model.restore_nn_model', 'restore_nn_model', (['self.config_type', 'model_obj.saved_model_bytes'], {}), '(self.config_type, model_obj.saved_model_bytes)\n', (10927, 10974), False, 'from federatedml.nn.homo_nn.nn_model import restore_nn_model\n'), ((11283, 11359), 'federatedml.framework.homo.blocks.secure_aggregator.SecureAggregatorTransVar', 'SecureAggregatorTransVar', ([], {'server': 'server', 'clients': 'clients', 'prefix': 'self.prefix'}), '(server=server, clients=clients, prefix=self.prefix)\n', (11307, 11359), False, 'from federatedml.framework.homo.blocks.secure_aggregator import SecureAggregatorTransVar\n'), ((11398, 11469), 'federatedml.framework.homo.blocks.loss_scatter.LossScatterTransVar', 'LossScatterTransVar', ([], {'server': 'server', 'clients': 'clients', 'prefix': 'self.prefix'}), '(server=server, clients=clients, prefix=self.prefix)\n', (11417, 11469), False, 'from federatedml.framework.homo.blocks.loss_scatter import LossScatterTransVar\n'), ((11509, 11581), 'federatedml.framework.homo.blocks.has_converged.HasConvergedTransVar', 'HasConvergedTransVar', ([], {'server': 'server', 'clients': 'clients', 'prefix': 'self.prefix'}), '(server=server, clients=clients, prefix=self.prefix)\n', (11529, 11581), False, 'from federatedml.framework.homo.blocks.has_converged import HasConvergedTransVar\n'), ((3028, 3091), 'federatedml.optim.convergence.converge_func_factory', 'converge_func_factory', (['early_stop.converge_func', 'early_stop.eps'], {}), '(early_stop.converge_func, early_stop.eps)\n', (3049, 3091), False, 'from federatedml.optim.convergence import converge_func_factory\n'), ((3708, 3730), 'fate_flow.entity.metric.Metric', 'Metric', (['iter_num', 'loss'], {}), '(iter_num, loss)\n', (3714, 3730), False, 'from fate_flow.entity.metric import MetricType, MetricMeta, Metric\n')]
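A dependency-free sketch of the weighted model averaging that the secure_mean_aggregator performs for HomoNNServer in the row above: client weight vectors are averaged element-wise, weighted by each client's epoch degree (data size times aggregate_every_n_epoch). The toy vectors and sample counts below are illustrative assumptions, not FATE defaults.

def weighted_mean_model(client_models, client_degrees):
    # element-wise average of client weight vectors, weighted by each client's epoch degree
    total = float(sum(client_degrees))
    dim = len(client_models[0])
    return [sum(model[i] * degree for model, degree in zip(client_models, client_degrees)) / total
            for i in range(dim)]

# two toy clients: a 200-sample guest and an 800-sample host, aggregate_every_n_epoch = 1
guest_model, host_model = [0.90, 2.10], [1.10, 1.90]
print(weighted_mean_model([guest_model, host_model], [200, 800]))  # roughly [1.06, 1.94]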
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_arch import storage from fate_arch.metastore.db_utils import StorageConnector from fate_arch.session import Session from fate_arch.storage import StorageTableMeta, StorageTableOrigin from fate_flow.entity import RunParameters from fate_flow.manager.data_manager import DataTableTracker, TableStorage from fate_flow.operation.job_saver import JobSaver from fate_flow.operation.job_tracker import Tracker from fate_flow.worker.task_executor import TaskExecutor from fate_flow.utils.api_utils import get_json_result, error_response from fate_flow.utils import job_utils, schedule_utils from flask import request from fate_flow.utils.detect_utils import validate_request @manager.route('/connector/create', methods=['POST']) def create_storage_connector(): request_data = request.json address = StorageTableMeta.create_address(request_data.get("engine"), request_data.get("connector_info")) connector = StorageConnector(connector_name=request_data.get("connector_name"), engine=request_data.get("engine"), connector_info=address.connector) connector.create_or_update() return get_json_result(retcode=0, retmsg='success') @manager.route('/connector/query', methods=['POST']) def query_storage_connector(): request_data = request.json connector = StorageConnector(connector_name=request_data.get("connector_name")) return get_json_result(retcode=0, retmsg='success', data=connector.get_info()) @manager.route('/add', methods=['post']) @manager.route('/bind', methods=['post']) @validate_request("engine", "address", "namespace", "name") def table_bind(): request_data = request.json address_dict = request_data.get('address') engine = request_data.get('engine') name = request_data.get('name') namespace = request_data.get('namespace') address = storage.StorageTableMeta.create_address(storage_engine=engine, address_dict=address_dict) in_serialized = request_data.get("in_serialized", 1 if engine in {storage.StorageEngine.STANDALONE, storage.StorageEngine.EGGROLL, storage.StorageEngine.MYSQL, storage.StorageEngine.PATH} else 0) destroy = (int(request_data.get("drop", 0)) == 1) data_table_meta = storage.StorageTableMeta(name=name, namespace=namespace) if data_table_meta: if destroy: data_table_meta.destroy_metas() else: return get_json_result(retcode=100, retmsg='The data table already exists.' 
'If you still want to continue uploading, please add the parameter --drop') id_column = request_data.get("id_column") or request_data.get("id_name") feature_column = request_data.get("feature_column") or request_data.get("feature_name") schema = None if id_column and feature_column: schema = {'header': feature_column, 'sid': id_column} elif id_column: schema = {'sid': id_column, 'header': ''} sess = Session() storage_session = sess.storage(storage_engine=engine, options=request_data.get("options")) table = storage_session.create_table(address=address, name=name, namespace=namespace, partitions=request_data.get('partitions', None), hava_head=request_data.get("head"), schema=schema, id_delimiter=request_data.get("id_delimiter"), in_serialized=in_serialized, origin=request_data.get("origin", StorageTableOrigin.TABLE_BIND)) response = get_json_result(data={"table_name": name, "namespace": namespace}) if not table.check_address(): response = get_json_result(retcode=100, retmsg=f'engine {engine} address {address_dict} check failed') else: DataTableTracker.create_table_tracker( table_name=name, table_namespace=namespace, entity_info={"have_parent": False}, ) sess.destroy_all_sessions() return response @manager.route('/download', methods=['get']) def table_download(): request_data = request.json from fate_flow.component_env_utils.env_utils import import_component_output_depend import_component_output_depend() data_table_meta = storage.StorageTableMeta(name=request_data.get("name"), namespace=request_data.get("namespace")) if not data_table_meta: return error_response(response_code=210, retmsg=f'no found table:{request_data.get("namespace")}, {request_data.get("name")}') tar_file_name = 'table_{}_{}.tar.gz'.format(request_data.get("namespace"), request_data.get("name")) return TableStorage.send_table( output_tables_meta={"table": data_table_meta}, tar_file_name=tar_file_name, need_head=request_data.get("head", True) ) @manager.route('/delete', methods=['post']) def table_delete(): request_data = request.json table_name = request_data.get('table_name') namespace = request_data.get('namespace') data = None sess = Session() table = sess.get_table(name=table_name, namespace=namespace, ignore_disable=True) if table: table.destroy() data = {'table_name': table_name, 'namespace': namespace} sess.destroy_all_sessions() if data: return get_json_result(data=data) return get_json_result(retcode=101, retmsg='no find table') @manager.route('/disable', methods=['post']) @manager.route('/enable', methods=['post']) def table_disable(): request_data = request.json adapter_request_data(request_data) disable = True if request.url.endswith("disable") else False tables_meta = storage.StorageTableMeta.query_table_meta(filter_fields=dict(**request_data)) data = [] if tables_meta: for table_meta in tables_meta: storage.StorageTableMeta(name=table_meta.f_name, namespace=table_meta.f_namespace ).update_metas(disable=disable) data.append({'table_name': table_meta.f_name, 'namespace': table_meta.f_namespace}) return get_json_result(data=data) return get_json_result(retcode=101, retmsg='no find table') @manager.route('/disable/delete', methods=['post']) def table_delete_disable(): request_data = request.json adapter_request_data(request_data) tables_meta = storage.StorageTableMeta.query_table_meta(filter_fields={"disable": True}) data = [] sess = Session() for table_meta in tables_meta: table = sess.get_table(name=table_meta.f_name, namespace=table_meta.f_namespace, ignore_disable=True) if table: table.destroy() 
data.append({'table_name': table_meta.f_name, 'namespace': table_meta.f_namespace}) sess.destroy_all_sessions() if data: return get_json_result(data=data) return get_json_result(retcode=101, retmsg='no find table') @manager.route('/list', methods=['post']) @validate_request('job_id', 'role', 'party_id') def get_job_table_list(): jobs = JobSaver.query_job(**request.json) if jobs: job = jobs[0] tables = get_job_all_table(job) return get_json_result(data=tables) else: return get_json_result(retcode=101, retmsg='no find job') @manager.route('/<table_func>', methods=['post']) def table_api(table_func): config = request.json if table_func == 'table_info': table_key_count = 0 table_partition = None table_schema = None table_name, namespace = config.get("name") or config.get("table_name"), config.get("namespace") table_meta = storage.StorageTableMeta(name=table_name, namespace=namespace) address = None enable = True origin = None if table_meta: table_key_count = table_meta.get_count() table_partition = table_meta.get_partitions() table_schema = table_meta.get_schema() address = table_meta.get_address().__dict__ enable = not table_meta.get_disable() origin = table_meta.get_origin() exist = 1 else: exist = 0 return get_json_result(data={"table_name": table_name, "namespace": namespace, "exist": exist, "count": table_key_count, "partition": table_partition, "schema": table_schema, "enable": enable, "origin": origin, "address": address, }) else: return get_json_result() @manager.route('/tracking/source', methods=['post']) @validate_request("table_name", "namespace") def table_tracking(): request_info = request.json data = DataTableTracker.get_parent_table(request_info.get("table_name"), request_info.get("namespace")) return get_json_result(data=data) @manager.route('/tracking/job', methods=['post']) @validate_request("table_name", "namespace") def table_tracking_job(): request_info = request.json data = DataTableTracker.track_job(request_info.get("table_name"), request_info.get("namespace"), display=True) return get_json_result(data=data) def get_job_all_table(job): dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl, runtime_conf=job.f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf ) _, hierarchical_structure = dsl_parser.get_dsl_hierarchical_structure() component_table = {} try: component_output_tables = Tracker.query_output_data_infos(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id) except: component_output_tables = [] for component_name_list in hierarchical_structure: for component_name in component_name_list: component_table[component_name] = {} component_input_table = get_component_input_table(dsl_parser, job, component_name) component_table[component_name]['input'] = component_input_table component_table[component_name]['output'] = {} for output_table in component_output_tables: if output_table.f_component_name == component_name: component_table[component_name]['output'][output_table.f_data_name] = \ {'name': output_table.f_table_name, 'namespace': output_table.f_table_namespace} return component_table def get_component_input_table(dsl_parser, job, component_name): component = dsl_parser.get_component_info(component_name=component_name) module_name = get_component_module(component_name, job.f_dsl) if 'reader' in module_name.lower(): return job.f_runtime_conf.get("component_parameters", {}).get("role", {}).get(job.f_role, {}).get(str(job.f_roles.get(job.f_role).index(int(job.f_party_id)))).get(component_name) task_input_dsl = component.get_input() 
job_args_on_party = TaskExecutor.get_job_args_on_party(dsl_parser=dsl_parser, job_runtime_conf=job.f_runtime_conf, role=job.f_role, party_id=job.f_party_id) config = job_utils.get_job_parameters(job.f_job_id, job.f_role, job.f_party_id) task_parameters = RunParameters(**config) job_parameters = task_parameters component_input_table = TaskExecutor.get_task_run_args(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id, task_id=None, task_version=None, job_args=job_args_on_party, job_parameters=job_parameters, task_parameters=task_parameters, input_dsl=task_input_dsl, get_input_table=True ) return component_input_table def get_component_module(component_name, job_dsl): return job_dsl["components"][component_name]["module"].lower() def adapter_request_data(request_data): if request_data.get("table_name"): request_data["name"] = request_data.get("table_name")
[ "fate_flow.entity.RunParameters", "fate_flow.worker.task_executor.TaskExecutor.get_task_run_args", "fate_flow.operation.job_tracker.Tracker.query_output_data_infos", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.worker.task_executor.TaskExecutor.get_job_args_on_party", "fate_flow.manager.data_manager.DataTableTracker.create_table_tracker", "fate_flow.utils.job_utils.get_job_parameters", "fate_flow.utils.api_utils.get_json_result", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.utils.detect_utils.validate_request", "fate_flow.component_env_utils.env_utils.import_component_output_depend" ]
[((2169, 2227), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""engine"""', '"""address"""', '"""namespace"""', '"""name"""'], {}), "('engine', 'address', 'namespace', 'name')\n", (2185, 2227), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((7675, 7721), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""job_id"""', '"""role"""', '"""party_id"""'], {}), "('job_id', 'role', 'party_id')\n", (7691, 7721), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((9539, 9582), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""table_name"""', '"""namespace"""'], {}), "('table_name', 'namespace')\n", (9555, 9582), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((9836, 9879), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""table_name"""', '"""namespace"""'], {}), "('table_name', 'namespace')\n", (9852, 9879), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((1753, 1797), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (1768, 1797), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((2461, 2555), 'fate_arch.storage.StorageTableMeta.create_address', 'storage.StorageTableMeta.create_address', ([], {'storage_engine': 'engine', 'address_dict': 'address_dict'}), '(storage_engine=engine, address_dict\n =address_dict)\n', (2500, 2555), False, 'from fate_arch import storage\n'), ((2897, 2953), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'name', 'namespace': 'namespace'}), '(name=name, namespace=namespace)\n', (2921, 2953), False, 'from fate_arch import storage\n'), ((3664, 3673), 'fate_arch.session.Session', 'Session', ([], {}), '()\n', (3671, 3673), False, 'from fate_arch.session import Session\n'), ((4280, 4346), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'table_name': name, 'namespace': namespace}"}), "(data={'table_name': name, 'namespace': namespace})\n", (4295, 4346), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4919, 4951), 'fate_flow.component_env_utils.env_utils.import_component_output_depend', 'import_component_output_depend', ([], {}), '()\n', (4949, 4951), False, 'from fate_flow.component_env_utils.env_utils import import_component_output_depend\n'), ((5741, 5750), 'fate_arch.session.Session', 'Session', ([], {}), '()\n', (5748, 5750), False, 'from fate_arch.session import Session\n'), ((6039, 6091), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no find table"""'}), "(retcode=101, retmsg='no find table')\n", (6054, 6091), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6858, 6910), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no find table"""'}), "(retcode=101, retmsg='no find table')\n", (6873, 6910), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7082, 7156), 'fate_arch.storage.StorageTableMeta.query_table_meta', 'storage.StorageTableMeta.query_table_meta', ([], {'filter_fields': "{'disable': True}"}), "(filter_fields={'disable': True})\n", (7123, 7156), False, 'from fate_arch import storage\n'), ((7182, 7191), 'fate_arch.session.Session', 'Session', ([], {}), '()\n', 
(7189, 7191), False, 'from fate_arch.session import Session\n'), ((7577, 7629), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no find table"""'}), "(retcode=101, retmsg='no find table')\n", (7592, 7629), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7759, 7793), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {}), '(**request.json)\n', (7777, 7793), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((9756, 9782), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (9771, 9782), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((10064, 10090), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (10079, 10090), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((10138, 10269), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf)\n', (10171, 10269), False, 'from fate_flow.utils import job_utils, schedule_utils\n'), ((12011, 12152), 'fate_flow.worker.task_executor.TaskExecutor.get_job_args_on_party', 'TaskExecutor.get_job_args_on_party', ([], {'dsl_parser': 'dsl_parser', 'job_runtime_conf': 'job.f_runtime_conf', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(dsl_parser=dsl_parser, job_runtime_conf=\n job.f_runtime_conf, role=job.f_role, party_id=job.f_party_id)\n', (12045, 12152), False, 'from fate_flow.worker.task_executor import TaskExecutor\n'), ((12279, 12349), 'fate_flow.utils.job_utils.get_job_parameters', 'job_utils.get_job_parameters', (['job.f_job_id', 'job.f_role', 'job.f_party_id'], {}), '(job.f_job_id, job.f_role, job.f_party_id)\n', (12307, 12349), False, 'from fate_flow.utils import job_utils, schedule_utils\n'), ((12372, 12395), 'fate_flow.entity.RunParameters', 'RunParameters', ([], {}), '(**config)\n', (12385, 12395), False, 'from fate_flow.entity import RunParameters\n'), ((12461, 12741), 'fate_flow.worker.task_executor.TaskExecutor.get_task_run_args', 'TaskExecutor.get_task_run_args', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id', 'task_id': 'None', 'task_version': 'None', 'job_args': 'job_args_on_party', 'job_parameters': 'job_parameters', 'task_parameters': 'task_parameters', 'input_dsl': 'task_input_dsl', 'get_input_table': '(True)'}), '(job_id=job.f_job_id, role=job.f_role,\n party_id=job.f_party_id, task_id=None, task_version=None, job_args=\n job_args_on_party, job_parameters=job_parameters, task_parameters=\n task_parameters, input_dsl=task_input_dsl, get_input_table=True)\n', (12491, 12741), False, 'from fate_flow.worker.task_executor import TaskExecutor\n'), ((4400, 4496), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': 'f"""engine {engine} address {address_dict} check failed"""'}), "(retcode=100, retmsg=\n f'engine {engine} address {address_dict} check failed')\n", (4415, 4496), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4510, 4632), 'fate_flow.manager.data_manager.DataTableTracker.create_table_tracker', 'DataTableTracker.create_table_tracker', ([], {'table_name': 'name', 
'table_namespace': 'namespace', 'entity_info': "{'have_parent': False}"}), "(table_name=name, table_namespace=\n namespace, entity_info={'have_parent': False})\n", (4547, 4632), False, 'from fate_flow.manager.data_manager import DataTableTracker, TableStorage\n'), ((6001, 6027), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (6016, 6027), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6297, 6328), 'flask.request.url.endswith', 'request.url.endswith', (['"""disable"""'], {}), "('disable')\n", (6317, 6328), False, 'from flask import request\n'), ((6820, 6846), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (6835, 6846), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7539, 7565), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (7554, 7565), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7884, 7912), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'tables'}), '(data=tables)\n', (7899, 7912), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7938, 7988), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no find job"""'}), "(retcode=101, retmsg='no find job')\n", (7953, 7988), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((8341, 8403), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'table_name', 'namespace': 'namespace'}), '(name=table_name, namespace=namespace)\n', (8365, 8403), False, 'from fate_arch import storage\n'), ((8880, 9116), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'table_name': table_name, 'namespace': namespace, 'exist': exist, 'count':\n table_key_count, 'partition': table_partition, 'schema': table_schema,\n 'enable': enable, 'origin': origin, 'address': address}"}), "(data={'table_name': table_name, 'namespace': namespace,\n 'exist': exist, 'count': table_key_count, 'partition': table_partition,\n 'schema': table_schema, 'enable': enable, 'origin': origin, 'address':\n address})\n", (8895, 9116), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((9465, 9482), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (9480, 9482), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((10563, 10661), 'fate_flow.operation.job_tracker.Tracker.query_output_data_infos', 'Tracker.query_output_data_infos', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(job_id=job.f_job_id, role=job.f_role,\n party_id=job.f_party_id)\n', (10594, 10661), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((3075, 3226), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': '"""The data table already exists.If you still want to continue uploading, please add the parameter --drop"""'}), "(retcode=100, retmsg=\n 'The data table already exists.If you still want to continue uploading, please add the parameter --drop'\n )\n", (3090, 3226), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6521, 6608), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 
'table_meta.f_name', 'namespace': 'table_meta.f_namespace'}), '(name=table_meta.f_name, namespace=table_meta.\n f_namespace)\n', (6545, 6608), False, 'from fate_arch import storage\n')]
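A hedged client-side usage sketch for the table bind route defined in the row above: the JSON body mirrors the fields table_bind reads from request.json (engine, address, namespace, name, plus optional head, id_delimiter and drop). The base URL, port, and MySQL address keys are assumptions about a typical deployment, not values taken from the source.

import requests

# assumed fate_flow address and URL prefix; both depend on the deployment and on
# how this blueprint is registered (taken here to be http://127.0.0.1:9380/v1/table)
BASE = "http://127.0.0.1:9380/v1/table"

bind_conf = {
    "engine": "MYSQL",                      # storage engine name checked by table_bind
    "name": "breast_hetero_guest",
    "namespace": "experiment",
    "address": {                            # passed to StorageTableMeta.create_address; keys are illustrative
        "user": "fate", "passwd": "fate", "host": "127.0.0.1", "port": 3306,
        "db": "fate_flow", "name": "breast_hetero_guest",
    },
    "head": 1,
    "id_delimiter": ",",
    "drop": 0,
}
resp = requests.post(f"{BASE}/bind", json=bind_conf)
print(resp.json().get("retcode"), resp.json().get("retmsg"))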
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import time

import requests
from flask import Response, jsonify
from werkzeug.http import HTTP_STATUS_CODES

from fate_arch.common import CoordinationCommunicationProtocol, CoordinationProxyService, FederatedMode
from fate_arch.common.base_utils import json_loads
from fate_flow.db.job_default_config import JobDefaultConfig
from fate_flow.db.runtime_config import RuntimeConfig
from fate_flow.db.service_registry import ServiceRegistry
from fate_flow.entity import RetCode
from fate_flow.settings import API_VERSION, CHECK_NODES_IDENTITY, FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, HOST, \
    HTTP_PORT, PROXY, PROXY_PROTOCOL, stat_logger
from fate_flow.utils.grpc_utils import forward_grpc_packet, gen_routing_metadata, get_command_federation_channel, \
    wrap_grpc_packet
from fate_flow.utils.log_utils import audit_logger, schedule_logger
from fate_flow.utils.requests_utils import request


def get_json_result(retcode=RetCode.SUCCESS, retmsg='success', data=None, job_id=None, meta=None):
    result_dict = {"retcode": retcode, "retmsg": retmsg, "data": data, "jobId": job_id, "meta": meta}
    response = {}
    for key, value in result_dict.items():
        if value is None and key != "retcode":
            continue
        else:
            response[key] = value
    return jsonify(response)


def server_error_response(e):
    stat_logger.exception(e)
    if len(e.args) > 1:
        return get_json_result(retcode=RetCode.EXCEPTION_ERROR, retmsg=str(e.args[0]), data=e.args[1])
    else:
        return get_json_result(retcode=RetCode.EXCEPTION_ERROR, retmsg=str(e))


def error_response(response_code, retmsg=None):
    if retmsg is None:
        retmsg = HTTP_STATUS_CODES.get(response_code, 'Unknown Error')
    return Response(json.dumps({'retmsg': retmsg, 'retcode': response_code}), status=response_code, mimetype='application/json')


def federated_api(job_id, method, endpoint, src_party_id, dest_party_id, src_role, json_body, federated_mode,
                  api_version=API_VERSION, overall_timeout=None):
    overall_timeout = JobDefaultConfig.remote_request_timeout if overall_timeout is None else overall_timeout
    if int(dest_party_id) == 0:
        federated_mode = FederatedMode.SINGLE
    if federated_mode == FederatedMode.SINGLE:
        return local_api(job_id=job_id, method=method, endpoint=endpoint, json_body=json_body, api_version=api_version)
    elif federated_mode == FederatedMode.MULTIPLE:
        host, port, protocol = get_federated_proxy_address(src_party_id, dest_party_id)
        if protocol == CoordinationCommunicationProtocol.HTTP:
            return federated_coordination_on_http(job_id=job_id, method=method, host=host, port=port,
                                                  endpoint=endpoint, src_party_id=src_party_id, src_role=src_role,
                                                  dest_party_id=dest_party_id, json_body=json_body,
                                                  api_version=api_version, overall_timeout=overall_timeout)
        elif protocol == CoordinationCommunicationProtocol.GRPC:
            return federated_coordination_on_grpc(job_id=job_id, method=method, host=host, port=port,
                                                  endpoint=endpoint, src_party_id=src_party_id, src_role=src_role,
                                                  dest_party_id=dest_party_id, json_body=json_body,
                                                  api_version=api_version, overall_timeout=overall_timeout)
        else:
            raise Exception(f"{protocol} coordination communication protocol is not supported.")
    else:
        raise Exception('{} work mode is not supported'.format(federated_mode))


def local_api(job_id, method, endpoint, json_body, api_version=API_VERSION, try_times=3):
    return federated_coordination_on_http(job_id=job_id, method=method, host=RuntimeConfig.JOB_SERVER_HOST,
                                          port=RuntimeConfig.HTTP_PORT, endpoint=endpoint, src_party_id="",
                                          src_role="", dest_party_id="", json_body=json_body,
                                          api_version=api_version, try_times=try_times)


def get_federated_proxy_address(src_party_id, dest_party_id):
    if isinstance(PROXY, str):
        if PROXY == CoordinationProxyService.ROLLSITE:
            proxy_address = ServiceRegistry.FATE_ON_EGGROLL.get(PROXY)
            return proxy_address["host"], proxy_address.get("grpc_port", proxy_address["port"]), CoordinationCommunicationProtocol.GRPC
        elif PROXY == CoordinationProxyService.NGINX:
            proxy_address = ServiceRegistry.FATE_ON_SPARK.get(PROXY)
            protocol = CoordinationCommunicationProtocol.HTTP if PROXY_PROTOCOL == "default" else PROXY_PROTOCOL
            return proxy_address["host"], proxy_address[f"{protocol}_port"], protocol
        else:
            raise RuntimeError(f"can not support coordinate proxy {PROXY}")
    elif isinstance(PROXY, dict):
        proxy_address = PROXY
        protocol = CoordinationCommunicationProtocol.HTTP if PROXY_PROTOCOL == "default" else PROXY_PROTOCOL
        proxy_name = PROXY.get("name", CoordinationProxyService.FATEFLOW)
        if proxy_name == CoordinationProxyService.FATEFLOW and str(dest_party_id) == str(src_party_id):
            host = RuntimeConfig.JOB_SERVER_HOST
            port = RuntimeConfig.HTTP_PORT
        else:
            host = proxy_address["host"]
            port = proxy_address[f"{protocol}_port"]
        return host, port, protocol
    else:
        raise RuntimeError(f"can not support coordinate proxy config {PROXY}")


def federated_coordination_on_http(job_id, method, host, port, endpoint, src_party_id, src_role, dest_party_id,
                                   json_body, api_version=API_VERSION, overall_timeout=None, try_times=3):
    overall_timeout = JobDefaultConfig.remote_request_timeout if overall_timeout is None else overall_timeout
    endpoint = f"/{api_version}{endpoint}"
    exception = None
    json_body['src_fate_ver'] = RuntimeConfig.get_env('FATE')
    json_body['src_role'] = src_role
    json_body['src_party_id'] = src_party_id
    for t in range(try_times):
        try:
            url = "http://{}:{}{}".format(host, port, endpoint)
            audit_logger(job_id).info('remote http api request: {}'.format(url))
            headers = HEADERS.copy()
            headers["dest-party-id"] = str(dest_party_id)
            headers["src-fate-ver"] = RuntimeConfig.get_env("FATE")
            headers["src-party-id"] = str(src_party_id)
            headers["src-role"] = str(src_role)
            response = request(method=method, url=url, json=json_body, headers=headers)
            audit_logger(job_id).info(response.text)
            audit_logger(job_id).info('remote http api response: {} {}'.format(endpoint, response.json()))
            return response.json()
        except Exception as e:
            exception = e
            schedule_logger(job_id).warning(f"remote http request {endpoint} error, sleep and try again")
            time.sleep(2 * (t+1))
    else:
        raise exception


def federated_coordination_on_grpc(job_id, method, host, port, endpoint, src_party_id, src_role, dest_party_id,
                                   json_body, api_version=API_VERSION, overall_timeout=None, try_times=3):
    overall_timeout = JobDefaultConfig.remote_request_timeout if overall_timeout is None else overall_timeout
    endpoint = f"/{api_version}{endpoint}"
    json_body['src_fate_ver'] = RuntimeConfig.get_env('FATE')
    json_body['src_role'] = src_role
    json_body['src_party_id'] = src_party_id
    if CHECK_NODES_IDENTITY:
        get_node_identity(json_body, src_party_id)
    _packet = wrap_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id,
                              overall_timeout=overall_timeout)
    _routing_metadata = gen_routing_metadata(src_party_id=src_party_id, dest_party_id=dest_party_id)
    exception = None
    for t in range(try_times):
        try:
            channel, stub = get_command_federation_channel(host, port)
            _return, _call = stub.unaryCall.with_call(_packet, metadata=_routing_metadata, timeout=(overall_timeout/1000))
            audit_logger(job_id).info("grpc api response: {}".format(_return))
            channel.close()
            response = json_loads(_return.body.value)
            return response
        except Exception as e:
            exception = e
            schedule_logger(job_id).warning(f"remote request {endpoint} error, sleep and try again")
            time.sleep(2 * (t+1))
    else:
        tips = 'Please check rollSite and fateflow network connectivity'
        """
        if 'Error received from peer' in str(exception):
            tips = 'Please check if the fate flow server of the other party is started. '
        if 'failed to connect to all addresses' in str(exception):
            tips = 'Please check whether the rollsite service(port: 9370) is started. '
        """
        raise Exception('{}rpc request error: {}'.format(tips, exception))


def proxy_api(role, _job_id, request_config):
    job_id = request_config.get('header').get('job_id', _job_id)
    method = request_config.get('header').get('method', 'POST')
    endpoint = request_config.get('header').get('endpoint')
    src_party_id = request_config.get('header').get('src_party_id')
    dest_party_id = request_config.get('header').get('dest_party_id')
    json_body = request_config.get('body')
    _packet = forward_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id=job_id, role=role)
    _routing_metadata = gen_routing_metadata(src_party_id=src_party_id, dest_party_id=dest_party_id)
    host, port, protocol = get_federated_proxy_address(src_party_id, dest_party_id)
    channel, stub = get_command_federation_channel(host, port)
    _return, _call = stub.unaryCall.with_call(_packet, metadata=_routing_metadata)
    channel.close()
    json_body = json_loads(_return.body.value)
    return json_body


def forward_api(role, request_config):
    method = request_config.get('header', {}).get('method', 'post')
    endpoint = request_config.get('header', {}).get('endpoint')
    ip = getattr(ServiceRegistry, role.upper()).get("host")
    port = getattr(ServiceRegistry, role.upper()).get("port")
    url = "http://{}:{}{}".format(ip, port, endpoint)
    audit_logger().info('api request: {}'.format(url))
    http_response = request(method=method, url=url, json=request_config.get('body'), headers=request_config.get('header'))
    if http_response.status_code == 200:
        response = http_response.json()
    else:
        response = {"retcode": http_response.status_code, "retmsg": http_response.text}
    audit_logger().info(response)
    return response


def get_node_identity(json_body, src_party_id):
    params = {
        'partyId': int(src_party_id),
        'federatedId': ServiceRegistry.FATEMANAGER.get("federatedId"),
    }
    try:
        response = requests.post(url="http://{}:{}{}".format(
            ServiceRegistry.FATEMANAGER.get("host"),
            ServiceRegistry.FATEMANAGER.get("port"),
            FATE_MANAGER_GET_NODE_INFO_ENDPOINT), json=params)
        json_body['appKey'] = response.json().get('data').get('appKey')
        json_body['appSecret'] = response.json().get('data').get('appSecret')
        json_body['_src_role'] = response.json().get('data').get('role')
    except Exception as e:
        raise Exception('get appkey and secret failed: {}'.format(str(e)))
[ "fate_flow.db.service_registry.ServiceRegistry.FATE_ON_EGGROLL.get", "fate_flow.utils.grpc_utils.gen_routing_metadata", "fate_flow.db.service_registry.ServiceRegistry.FATE_ON_SPARK.get", "fate_flow.settings.stat_logger.exception", "fate_flow.db.service_registry.ServiceRegistry.FATEMANAGER.get", "fate_flow.utils.log_utils.audit_logger", "fate_flow.utils.grpc_utils.get_command_federation_channel", "fate_flow.utils.grpc_utils.forward_grpc_packet", "fate_flow.settings.PROXY.get", "fate_flow.settings.HEADERS.copy", "fate_flow.utils.requests_utils.request", "fate_flow.utils.grpc_utils.wrap_grpc_packet", "fate_flow.utils.log_utils.schedule_logger", "fate_flow.db.runtime_config.RuntimeConfig.get_env" ]
[((1920, 1937), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (1927, 1937), False, 'from flask import Response, jsonify\n'), ((1974, 1998), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1995, 1998), False, 'from fate_flow.settings import API_VERSION, CHECK_NODES_IDENTITY, FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, HOST, HTTP_PORT, PROXY, PROXY_PROTOCOL, stat_logger\n'), ((6525, 6554), 'fate_flow.db.runtime_config.RuntimeConfig.get_env', 'RuntimeConfig.get_env', (['"""FATE"""'], {}), "('FATE')\n", (6546, 6554), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((8018, 8047), 'fate_flow.db.runtime_config.RuntimeConfig.get_env', 'RuntimeConfig.get_env', (['"""FATE"""'], {}), "('FATE')\n", (8039, 8047), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((8225, 8344), 'fate_flow.utils.grpc_utils.wrap_grpc_packet', 'wrap_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id', 'job_id'], {'overall_timeout': 'overall_timeout'}), '(json_body, method, endpoint, src_party_id, dest_party_id,\n job_id, overall_timeout=overall_timeout)\n', (8241, 8344), False, 'from fate_flow.utils.grpc_utils import forward_grpc_packet, gen_routing_metadata, get_command_federation_channel, wrap_grpc_packet\n'), ((8396, 8472), 'fate_flow.utils.grpc_utils.gen_routing_metadata', 'gen_routing_metadata', ([], {'src_party_id': 'src_party_id', 'dest_party_id': 'dest_party_id'}), '(src_party_id=src_party_id, dest_party_id=dest_party_id)\n', (8416, 8472), False, 'from fate_flow.utils.grpc_utils import forward_grpc_packet, gen_routing_metadata, get_command_federation_channel, wrap_grpc_packet\n'), ((10029, 10136), 'fate_flow.utils.grpc_utils.forward_grpc_packet', 'forward_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id'], {'job_id': 'job_id', 'role': 'role'}), '(json_body, method, endpoint, src_party_id,\n dest_party_id, job_id=job_id, role=role)\n', (10048, 10136), False, 'from fate_flow.utils.grpc_utils import forward_grpc_packet, gen_routing_metadata, get_command_federation_channel, wrap_grpc_packet\n'), ((10157, 10233), 'fate_flow.utils.grpc_utils.gen_routing_metadata', 'gen_routing_metadata', ([], {'src_party_id': 'src_party_id', 'dest_party_id': 'dest_party_id'}), '(src_party_id=src_party_id, dest_party_id=dest_party_id)\n', (10177, 10233), False, 'from fate_flow.utils.grpc_utils import forward_grpc_packet, gen_routing_metadata, get_command_federation_channel, wrap_grpc_packet\n'), ((10338, 10380), 'fate_flow.utils.grpc_utils.get_command_federation_channel', 'get_command_federation_channel', (['host', 'port'], {}), '(host, port)\n', (10368, 10380), False, 'from fate_flow.utils.grpc_utils import forward_grpc_packet, gen_routing_metadata, get_command_federation_channel, wrap_grpc_packet\n'), ((10500, 10530), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['_return.body.value'], {}), '(_return.body.value)\n', (10510, 10530), False, 'from fate_arch.common.base_utils import json_loads\n'), ((2305, 2358), 'werkzeug.http.HTTP_STATUS_CODES.get', 'HTTP_STATUS_CODES.get', (['response_code', '"""Unknown Error"""'], {}), "(response_code, 'Unknown Error')\n", (2326, 2358), False, 'from werkzeug.http import HTTP_STATUS_CODES\n'), ((2379, 2435), 'json.dumps', 'json.dumps', (["{'retmsg': retmsg, 'retcode': response_code}"], {}), "({'retmsg': retmsg, 'retcode': response_code})\n", (2389, 2435), False, 'import json\n'), ((11440, 11486), 
'fate_flow.db.service_registry.ServiceRegistry.FATEMANAGER.get', 'ServiceRegistry.FATEMANAGER.get', (['"""federatedId"""'], {}), "('federatedId')\n", (11471, 11486), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((4865, 4907), 'fate_flow.db.service_registry.ServiceRegistry.FATE_ON_EGGROLL.get', 'ServiceRegistry.FATE_ON_EGGROLL.get', (['PROXY'], {}), '(PROXY)\n', (4900, 4907), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((5650, 5702), 'fate_flow.settings.PROXY.get', 'PROXY.get', (['"""name"""', 'CoordinationProxyService.FATEFLOW'], {}), "('name', CoordinationProxyService.FATEFLOW)\n", (5659, 5702), False, 'from fate_flow.settings import API_VERSION, CHECK_NODES_IDENTITY, FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, HOST, HTTP_PORT, PROXY, PROXY_PROTOCOL, stat_logger\n'), ((6850, 6864), 'fate_flow.settings.HEADERS.copy', 'HEADERS.copy', ([], {}), '()\n', (6862, 6864), False, 'from fate_flow.settings import API_VERSION, CHECK_NODES_IDENTITY, FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, HOST, HTTP_PORT, PROXY, PROXY_PROTOCOL, stat_logger\n'), ((6961, 6990), 'fate_flow.db.runtime_config.RuntimeConfig.get_env', 'RuntimeConfig.get_env', (['"""FATE"""'], {}), "('FATE')\n", (6982, 6990), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((7119, 7183), 'fate_flow.utils.requests_utils.request', 'request', ([], {'method': 'method', 'url': 'url', 'json': 'json_body', 'headers': 'headers'}), '(method=method, url=url, json=json_body, headers=headers)\n', (7126, 7183), False, 'from fate_flow.utils.requests_utils import request\n'), ((8566, 8608), 'fate_flow.utils.grpc_utils.get_command_federation_channel', 'get_command_federation_channel', (['host', 'port'], {}), '(host, port)\n', (8596, 8608), False, 'from fate_flow.utils.grpc_utils import forward_grpc_packet, gen_routing_metadata, get_command_federation_channel, wrap_grpc_packet\n'), ((8862, 8892), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['_return.body.value'], {}), '(_return.body.value)\n', (8872, 8892), False, 'from fate_arch.common.base_utils import json_loads\n'), ((10905, 10919), 'fate_flow.utils.log_utils.audit_logger', 'audit_logger', ([], {}), '()\n', (10917, 10919), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n'), ((11264, 11278), 'fate_flow.utils.log_utils.audit_logger', 'audit_logger', ([], {}), '()\n', (11276, 11278), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n'), ((5126, 5166), 'fate_flow.db.service_registry.ServiceRegistry.FATE_ON_SPARK.get', 'ServiceRegistry.FATE_ON_SPARK.get', (['PROXY'], {}), '(PROXY)\n', (5159, 5166), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((7555, 7578), 'time.sleep', 'time.sleep', (['(2 * (t + 1))'], {}), '(2 * (t + 1))\n', (7565, 7578), False, 'import time\n'), ((9091, 9114), 'time.sleep', 'time.sleep', (['(2 * (t + 1))'], {}), '(2 * (t + 1))\n', (9101, 9114), False, 'import time\n'), ((6758, 6778), 'fate_flow.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (6770, 6778), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n'), ((7196, 7216), 'fate_flow.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (7208, 7216), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n'), ((7249, 7269), 'fate_flow.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (7261, 7269), False, 'from fate_flow.utils.log_utils 
import audit_logger, schedule_logger\n'), ((8744, 8764), 'fate_flow.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (8756, 8764), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n'), ((11577, 11616), 'fate_flow.db.service_registry.ServiceRegistry.FATEMANAGER.get', 'ServiceRegistry.FATEMANAGER.get', (['"""host"""'], {}), "('host')\n", (11608, 11616), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((11630, 11669), 'fate_flow.db.service_registry.ServiceRegistry.FATEMANAGER.get', 'ServiceRegistry.FATEMANAGER.get', (['"""port"""'], {}), "('port')\n", (11661, 11669), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((7449, 7472), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (7464, 7472), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n'), ((8990, 9013), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (9005, 9013), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common import log
from fate_arch.common.data_utils import default_output_info, default_output_fs_path
from fate_arch.session import Session
from fate_arch.storage import StorageEngine, StorageTableMeta
from fate_flow.components._base import (
    BaseParam,
    ComponentBase,
    ComponentInputProtocol,
    ComponentMeta,
)
from fate_flow.entity import Metric
from fate_flow.manager.data_manager import TableStorage, DataTableTracker

LOGGER = log.getLogger()

writer_cpn_meta = ComponentMeta("Writer")


@writer_cpn_meta.bind_param
class WriterParam(BaseParam):
    def __init__(self, table_name=None, namespace=None, storage_engine=None, address=None,
                 output_table_name=None, output_namespace=None, partitions=None):
        self.table_name = table_name
        self.namespace = namespace
        self.storage_engine = storage_engine
        self.address = address
        self.output_table_name = output_table_name
        self.output_namespace = output_namespace
        self.partitions = partitions

    def check(self):
        return True


@writer_cpn_meta.bind_runner.on_guest.on_host.on_local
class Writer(ComponentBase):
    def __init__(self):
        super(Writer, self).__init__()
        self.parameters = None
        self.job_parameters = None

    def _run(self, cpn_input: ComponentInputProtocol):
        self.parameters = cpn_input.parameters
        if self.parameters.get("namespace") and self.parameters.get("table_name"):
            namespace = self.parameters.get("namespace")
            name = self.parameters.get("table_name")
        elif cpn_input.flow_feeded_parameters.get("table_info"):
            namespace = cpn_input.flow_feeded_parameters.get("table_info")[0].get("namespace")
            name = cpn_input.flow_feeded_parameters.get("table_info")[0].get("name")
        else:
            raise Exception("no found name or namespace in input parameters")
        LOGGER.info(f"writer parameters:{self.parameters}")
        src_table = self._get_storage_table(namespace=namespace, name=name)
        output_name = self.parameters.get("output_table_name")
        output_namespace = self.parameters.get("output_namespace")
        if not output_namespace or not output_name:
            LOGGER.info("start create table info")
            output_namespace, output_name = self._create_output_table_info()
        LOGGER.info(f"output_namespace: {output_namespace}, output_name: {output_name}")
        engine = self.parameters.get("storage_engine").upper()
        dest_table = self._create_storage_table(
            engine=engine,
            address_dict=self.parameters.get("address"),
            name=output_name,
            namespace=output_namespace,
            partitions=self.parameters.get("partitions", src_table.meta.get_partitions()),
            id_delimiter=src_table.meta.get_id_delimiter() if src_table.meta.get_id_delimiter() else ","
        )
        _, dest_table.meta = dest_table.meta.update_metas(
            schema=src_table.meta.get_schema(),
            id_delimiter=src_table.meta.get_id_delimiter() if src_table.meta.get_id_delimiter() else ','
        )
        count = TableStorage.copy_table(src_table, dest_table, deserialize_value=True)
        LOGGER.info("save success")
        # output table track
        DataTableTracker.create_table_tracker(
            output_name,
            output_namespace,
            entity_info={
                "have_parent": True,
                "parent_table_namespace": namespace,
                "parent_table_name": name,
                "job_id": self.tracker.job_id,
            },
        )
        self.tracker.log_output_data_info(
            data_name="writer",
            table_namespace=output_namespace,
            table_name=output_name,
        )
        self.tracker.log_metric_data(
            metric_namespace="writer",
            metric_name="writer",
            metrics=[Metric("output_table_name", output_name),
                     Metric("output_namespace", output_namespace),
                     Metric("count", count)],
        )

    @staticmethod
    def _get_storage_table(namespace, name):
        return Session.get_global().get_table(name=name, namespace=namespace)

    @staticmethod
    def _create_storage_table(engine, address_dict, name, namespace, partitions, id_delimiter):
        if not address_dict:
            address_dict = {}
        if engine == StorageEngine.MYSQL:
            if not address_dict.get("db") or not address_dict.get("name"):
                address_dict.update({"db": namespace, "name": name})
        elif engine == StorageEngine.EGGROLL:
            address_dict.update({"name": name, "namespace": namespace})
        elif engine == StorageEngine.STANDALONE:
            address_dict.update({"name": name, "namespace": namespace})
        elif engine == StorageEngine.HIVE:
            address_dict.update({"database": namespace, "name": f"{name}"})
        elif engine == StorageEngine.HDFS:
            if not address_dict.get("path"):
                address_dict.update({"path": default_output_fs_path(name=name, namespace=namespace, prefix=address_dict.get("path_prefix"))})
        elif engine == StorageEngine.LOCALFS:
            if not address_dict.get("path"):
                address_dict.update({"path": default_output_fs_path(name=name, namespace=namespace, storage_engine=StorageEngine.LOCALFS)})
        else:
            raise RuntimeError(f"{engine} storage is not supported")
        output_table_address = StorageTableMeta.create_address(
            storage_engine=engine, address_dict=address_dict
        )
        storage_session = Session.get_global().storage(storage_engine=engine)
        output_table = storage_session.create_table(
            address=output_table_address,
            name=name,
            namespace=namespace,
            partitions=partitions,
            id_delimiter=id_delimiter
        )
        return output_table

    def _create_output_table_info(self):
        (
            output_namespace,
            output_name
        ) = default_output_info(
            task_id=self.tracker.task_id, task_version=self.tracker.task_version, output_type="data"
        )
        return output_namespace, output_name
[ "fate_flow.components._base.ComponentMeta", "fate_flow.manager.data_manager.DataTableTracker.create_table_tracker", "fate_flow.entity.Metric", "fate_flow.manager.data_manager.TableStorage.copy_table" ]
[((1123, 1138), 'fate_arch.common.log.getLogger', 'log.getLogger', ([], {}), '()\n', (1136, 1138), False, 'from fate_arch.common import log\n'), ((1158, 1181), 'fate_flow.components._base.ComponentMeta', 'ComponentMeta', (['"""Writer"""'], {}), "('Writer')\n", (1171, 1181), False, 'from fate_flow.components._base import BaseParam, ComponentBase, ComponentInputProtocol, ComponentMeta\n'), ((4121, 4191), 'fate_flow.manager.data_manager.TableStorage.copy_table', 'TableStorage.copy_table', (['src_table', 'dest_table'], {'deserialize_value': '(True)'}), '(src_table, dest_table, deserialize_value=True)\n', (4144, 4191), False, 'from fate_flow.manager.data_manager import TableStorage, DataTableTracker\n'), ((4265, 4471), 'fate_flow.manager.data_manager.DataTableTracker.create_table_tracker', 'DataTableTracker.create_table_tracker', (['output_name', 'output_namespace'], {'entity_info': "{'have_parent': True, 'parent_table_namespace': namespace,\n 'parent_table_name': name, 'job_id': self.tracker.job_id}"}), "(output_name, output_namespace,\n entity_info={'have_parent': True, 'parent_table_namespace': namespace,\n 'parent_table_name': name, 'job_id': self.tracker.job_id})\n", (4302, 4471), False, 'from fate_flow.manager.data_manager import TableStorage, DataTableTracker\n'), ((6494, 6580), 'fate_arch.storage.StorageTableMeta.create_address', 'StorageTableMeta.create_address', ([], {'storage_engine': 'engine', 'address_dict': 'address_dict'}), '(storage_engine=engine, address_dict=\n address_dict)\n', (6525, 6580), False, 'from fate_arch.storage import StorageEngine, StorageTableMeta\n'), ((7057, 7171), 'fate_arch.common.data_utils.default_output_info', 'default_output_info', ([], {'task_id': 'self.tracker.task_id', 'task_version': 'self.tracker.task_version', 'output_type': '"""data"""'}), "(task_id=self.tracker.task_id, task_version=self.tracker\n .task_version, output_type='data')\n", (7076, 7171), False, 'from fate_arch.common.data_utils import default_output_info, default_output_fs_path\n'), ((5134, 5154), 'fate_arch.session.Session.get_global', 'Session.get_global', ([], {}), '()\n', (5152, 5154), False, 'from fate_arch.session import Session\n'), ((6625, 6645), 'fate_arch.session.Session.get_global', 'Session.get_global', ([], {}), '()\n', (6643, 6645), False, 'from fate_arch.session import Session\n'), ((4889, 4929), 'fate_flow.entity.Metric', 'Metric', (['"""output_table_name"""', 'output_name'], {}), "('output_table_name', output_name)\n", (4895, 4929), False, 'from fate_flow.entity import Metric\n'), ((4952, 4996), 'fate_flow.entity.Metric', 'Metric', (['"""output_namespace"""', 'output_namespace'], {}), "('output_namespace', output_namespace)\n", (4958, 4996), False, 'from fate_flow.entity import Metric\n'), ((5019, 5041), 'fate_flow.entity.Metric', 'Metric', (['"""count"""', 'count'], {}), "('count', count)\n", (5025, 5041), False, 'from fate_flow.entity import Metric\n'), ((6285, 6382), 'fate_arch.common.data_utils.default_output_fs_path', 'default_output_fs_path', ([], {'name': 'name', 'namespace': 'namespace', 'storage_engine': 'StorageEngine.LOCALFS'}), '(name=name, namespace=namespace, storage_engine=\n StorageEngine.LOCALFS)\n', (6307, 6382), False, 'from fate_arch.common.data_utils import default_output_info, default_output_fs_path\n')]
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common import WorkMode
from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp
from fate_arch.common.log import schedule_logger
from fate_flow.controller.job_controller import JobController
from fate_flow.db.db_models import DB, Job
from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, ResourceOperation, \
    FederatedSchedulingStatusCode, RunParameters, RetCode
from fate_flow.operation.job_saver import JobSaver
from fate_flow.operation.job_tracker import Tracker
from fate_flow.scheduler.federated_scheduler import FederatedScheduler
from fate_flow.scheduler.task_scheduler import TaskScheduler
from fate_flow.settings import END_STATUS_JOB_SCHEDULING_TIME_LIMIT, END_STATUS_JOB_SCHEDULING_UPDATES
from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils
from fate_flow.utils import model_utils
from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter
from fate_flow.utils.cron import Cron


class DAGScheduler(Cron):
    @classmethod
    def submit(cls, job_data, job_id=None):
        if not job_id:
            job_id = job_utils.generate_job_id()
        schedule_logger(job_id).info('submit job, job_id {}, body {}'.format(job_id, job_data))
        job_dsl = job_data.get('job_dsl', {})
        job_runtime_conf = job_data.get('job_runtime_conf', {})
        job_utils.check_job_runtime_conf(job_runtime_conf)
        authentication_utils.check_constraint(job_runtime_conf, job_dsl)
        job_initiator = job_runtime_conf['initiator']
        conf_adapter = JobRuntimeConfigAdapter(job_runtime_conf)
        common_job_parameters = conf_adapter.get_common_parameters()
        if common_job_parameters.job_type != 'predict':
            # generate job model info
            common_job_parameters.model_id = model_utils.gen_model_id(job_runtime_conf['role'])
            common_job_parameters.model_version = job_id
            train_runtime_conf = {}
        else:
            # check predict job parameters
            detect_utils.check_config(common_job_parameters.to_dict(), ['model_id', 'model_version'])
            # get inference dsl from pipeline model as job dsl
            tracker = Tracker(job_id=job_id, role=job_initiator['role'], party_id=job_initiator['party_id'],
                              model_id=common_job_parameters.model_id, model_version=common_job_parameters.model_version)
            pipeline_model = tracker.get_output_model('pipeline')
            train_runtime_conf = json_loads(pipeline_model['Pipeline'].train_runtime_conf)
            if not model_utils.check_if_deployed(role=job_initiator['role'], party_id=job_initiator['party_id'],
                                                 model_id=common_job_parameters.model_id,
                                                 model_version=common_job_parameters.model_version):
                raise Exception(
                    f"Model {common_job_parameters.model_id} {common_job_parameters.model_version} has not been deployed yet.")
            job_dsl = json_loads(pipeline_model['Pipeline'].inference_dsl)
        job = Job()
        job.f_job_id = job_id
        job.f_dsl = job_dsl
        job.f_train_runtime_conf = train_runtime_conf
        job.f_roles = job_runtime_conf['role']
        job.f_work_mode = common_job_parameters.work_mode
        job.f_initiator_role = job_initiator['role']
        job.f_initiator_party_id = job_initiator['party_id']
        job.f_role = job_initiator['role']
        job.f_party_id = job_initiator['party_id']
        path_dict = job_utils.save_job_conf(job_id=job_id,
                                            role=job.f_initiator_role,
                                            job_dsl=job_dsl,
                                            job_runtime_conf=job_runtime_conf,
                                            job_runtime_conf_on_party={},
                                            train_runtime_conf=train_runtime_conf,
                                            pipeline_dsl=None)
        if job.f_initiator_party_id not in job_runtime_conf['role'][job.f_initiator_role]:
            schedule_logger(job_id).info("initiator party id error:{}".format(job.f_initiator_party_id))
            raise Exception("initiator party id error {}".format(job.f_initiator_party_id))

        # create common parameters on initiator
        JobController.backend_compatibility(job_parameters=common_job_parameters)
        JobController.adapt_job_parameters(role=job.f_initiator_role, job_parameters=common_job_parameters,
                                           create_initiator_baseline=True)

        job.f_runtime_conf = conf_adapter.update_common_parameters(common_parameters=common_job_parameters)
        dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl,
                                                       runtime_conf=job.f_runtime_conf,
                                                       train_runtime_conf=job.f_train_runtime_conf)

        # initiator runtime conf as template
        job.f_runtime_conf_on_party = job.f_runtime_conf.copy()
        job.f_runtime_conf_on_party["job_parameters"] = common_job_parameters.to_dict()

        if common_job_parameters.work_mode == WorkMode.CLUSTER:
            # Save the status information of all participants in the initiator for scheduling
            for role, party_ids in job.f_roles.items():
                for party_id in party_ids:
                    if role == job.f_initiator_role and party_id == job.f_initiator_party_id:
                        continue
                    JobController.initialize_tasks(job_id, role, party_id, False, job.f_initiator_role,
                                                   job.f_initiator_party_id, common_job_parameters, dsl_parser)

        status_code, response = FederatedScheduler.create_job(job=job)
        if status_code != FederatedSchedulingStatusCode.SUCCESS:
            job.f_status = JobStatus.FAILED
            job.f_tag = "submit_failed"
            FederatedScheduler.sync_job_status(job=job)
            raise Exception("create job failed", response)

        schedule_logger(job_id).info(
            'submit job successfully, job id is {}, model id is {}'.format(job.f_job_id, common_job_parameters.model_id))
        logs_directory = job_utils.get_job_log_directory(job_id)
        submit_result = {
            "job_id": job_id,
            "model_info": {"model_id": common_job_parameters.model_id, "model_version": common_job_parameters.model_version},
            "logs_directory": logs_directory,
            "board_url": job_utils.get_board_url(job_id, job_initiator['role'], job_initiator['party_id'])
        }
        submit_result.update(path_dict)
        return submit_result

    def run_do(self):
        schedule_logger().info("start schedule waiting jobs")
        jobs = JobSaver.query_job(is_initiator=True, status=JobStatus.WAITING, order_by="create_time", reverse=False)
        schedule_logger().info(f"have {len(jobs)} waiting jobs")
        if len(jobs):
            # FIFO
            job = jobs[0]
            schedule_logger().info(f"schedule waiting job {job.f_job_id}")
            try:
                self.schedule_waiting_jobs(job=job)
            except Exception as e:
                schedule_logger(job.f_job_id).exception(e)
                schedule_logger(job.f_job_id).error(f"schedule waiting job {job.f_job_id} failed")
        schedule_logger().info("schedule waiting jobs finished")

        schedule_logger().info("start schedule running jobs")
        jobs = JobSaver.query_job(is_initiator=True, status=JobStatus.RUNNING, order_by="create_time", reverse=False)
        schedule_logger().info(f"have {len(jobs)} running jobs")
        for job in jobs:
            schedule_logger().info(f"schedule running job {job.f_job_id}")
            try:
                self.schedule_running_job(job=job)
            except Exception as e:
                schedule_logger(job.f_job_id).exception(e)
                schedule_logger(job.f_job_id).error(f"schedule job {job.f_job_id} failed")
        schedule_logger().info("schedule running jobs finished")

        # some ready job exit before start
        schedule_logger().info("start schedule ready jobs")
        jobs = JobSaver.query_job(is_initiator=True, ready_signal=True, order_by="create_time", reverse=False)
        schedule_logger().info(f"have {len(jobs)} ready jobs")
        for job in jobs:
            schedule_logger().info(f"schedule ready job {job.f_job_id}")
            try:
                self.schedule_ready_job(job=job)
            except Exception as e:
                schedule_logger(job.f_job_id).exception(e)
                schedule_logger(job.f_job_id).error(f"schedule ready job {job.f_job_id} failed:\n{e}")
        schedule_logger().info("schedule ready jobs finished")

        schedule_logger().info("start schedule rerun jobs")
        jobs = JobSaver.query_job(is_initiator=True, rerun_signal=True, order_by="create_time", reverse=False)
        schedule_logger().info(f"have {len(jobs)} rerun jobs")
        for job in jobs:
            schedule_logger().info(f"schedule rerun job {job.f_job_id}")
            try:
                self.schedule_rerun_job(job=job)
            except Exception as e:
                schedule_logger(job.f_job_id).exception(e)
                schedule_logger(job.f_job_id).error(f"schedule job {job.f_job_id} failed")
        schedule_logger().info("schedule rerun jobs finished")

        schedule_logger().info("start schedule end status jobs to update status")
        jobs = JobSaver.query_job(is_initiator=True, status=set(EndStatus.status_list()),
                                   end_time=[current_timestamp() - END_STATUS_JOB_SCHEDULING_TIME_LIMIT, current_timestamp()])
        schedule_logger().info(f"have {len(jobs)} end status jobs")
        for job in jobs:
            schedule_logger().info(f"schedule end status job {job.f_job_id}")
            try:
                update_status = self.end_scheduling_updates(job_id=job.f_job_id)
                if not update_status:
                    schedule_logger(job.f_job_id).info(f"the number of updates has been exceeded")
                    continue
                self.schedule_running_job(job=job)
            except Exception as e:
                schedule_logger(job.f_job_id).exception(e)
                schedule_logger(job.f_job_id).error(f"schedule job {job.f_job_id} failed")
        schedule_logger().info("schedule end status jobs finished")

    @classmethod
    def schedule_waiting_jobs(cls, job):
        job_id, initiator_role, initiator_party_id, = job.f_job_id, job.f_initiator_role, job.f_initiator_party_id,
        if not cls.ready_signal(job_id=job_id, set_or_reset=True):
            schedule_logger(job_id).info(f"job {job_id} may be handled by another scheduler")
            return
        try:
            if job.f_cancel_signal:
                job.f_status = JobStatus.CANCELED
                FederatedScheduler.sync_job_status(job=job)
                schedule_logger(job_id).info(f"job {job_id} have cancel signal")
                return
            apply_status_code, federated_response = FederatedScheduler.resource_for_job(job=job, operation_type=ResourceOperation.APPLY)
            if apply_status_code == FederatedSchedulingStatusCode.SUCCESS:
                cls.start_job(job_id=job_id, initiator_role=initiator_role, initiator_party_id=initiator_party_id)
            else:
                # rollback resource
                rollback_party = {}
                failed_party = {}
                for dest_role in federated_response.keys():
                    for dest_party_id in federated_response[dest_role].keys():
                        retcode = federated_response[dest_role][dest_party_id]["retcode"]
                        if retcode == 0:
                            rollback_party[dest_role] = rollback_party.get(dest_role, [])
                            rollback_party[dest_role].append(dest_party_id)
                        else:
                            failed_party[dest_role] = failed_party.get(dest_role, [])
                            failed_party[dest_role].append(dest_party_id)
                schedule_logger(job_id).info("job {} apply resource failed on {}, rollback {}".format(
                    job_id,
                    ",".join([",".join([f"{_r}:{_p}" for _p in _ps]) for _r, _ps in failed_party.items()]),
                    ",".join([",".join([f"{_r}:{_p}" for _p in _ps]) for _r, _ps in rollback_party.items()]),
                ))
                if rollback_party:
                    return_status_code, federated_response = FederatedScheduler.resource_for_job(job=job, operation_type=ResourceOperation.RETURN, specific_dest=rollback_party)
                    if return_status_code != FederatedSchedulingStatusCode.SUCCESS:
                        schedule_logger(job_id).info(f"job {job_id} return resource failed:\n{federated_response}")
                else:
                    schedule_logger(job_id).info(f"job {job_id} no party should be rollback resource")
                if apply_status_code == FederatedSchedulingStatusCode.ERROR:
                    cls.stop_job(job_id=job_id, role=initiator_role, party_id=initiator_party_id, stop_status=JobStatus.FAILED)
                    schedule_logger(job_id).info(f"apply resource error, stop job {job_id}")
        except Exception as e:
            raise e
        finally:
            update_status = cls.ready_signal(job_id=job_id, set_or_reset=False)
            schedule_logger(job_id).info(f"reset job {job_id} ready signal {update_status}")

    @classmethod
    def schedule_ready_job(cls, job):
        job_id, initiator_role, initiator_party_id, = job.f_job_id, job.f_initiator_role, job.f_initiator_party_id
        update_status = cls.ready_signal(job_id=job_id, set_or_reset=False, ready_timeout_ttl=60 * 1000)
        schedule_logger(job_id).info(f"reset job {job_id} ready signal {update_status}")

    @classmethod
    def schedule_rerun_job(cls, job):
        if EndStatus.contains(job.f_status):
            job.f_status = JobStatus.WAITING
            job.f_ready_signal = False
            job.f_ready_time = None
            job.f_rerun_signal = False
            job.f_progress = 0
            job.f_end_time = None
            job.f_elapsed = None
            schedule_logger(job_id=job.f_job_id).info(f"job {job.f_job_id} has been finished, set waiting to rerun")
            status, response = FederatedScheduler.sync_job_status(job=job)
            if status == FederatedSchedulingStatusCode.SUCCESS:
                cls.rerun_signal(job_id=job.f_job_id, set_or_reset=False)
                FederatedScheduler.sync_job(job=job, update_fields=["ready_signal", "ready_time", "rerun_signal",
                                                                    "progress", "end_time", "elapsed"])
                schedule_logger(job_id=job.f_job_id).info(f"job {job.f_job_id} set waiting to rerun successfully")
            else:
                schedule_logger(job_id=job.f_job_id).info(f"job {job.f_job_id} set waiting to rerun failed")
        else:
            cls.rerun_signal(job_id=job.f_job_id, set_or_reset=False)
            cls.schedule_running_job(job)

    @classmethod
    def start_job(cls, job_id, initiator_role, initiator_party_id):
        schedule_logger(job_id=job_id).info(
            "try to start job {} on initiator {} {}".format(job_id, initiator_role, initiator_party_id))
        job_info = {}
        job_info["job_id"] = job_id
        job_info["role"] = initiator_role
        job_info["party_id"] = initiator_party_id
        job_info["status"] = JobStatus.RUNNING
        job_info["party_status"] = JobStatus.RUNNING
        job_info["start_time"] = current_timestamp()
        job_info["tag"] = 'end_waiting'
        jobs = JobSaver.query_job(job_id=job_id, role=initiator_role, party_id=initiator_party_id)
        if jobs:
            job = jobs[0]
            FederatedScheduler.start_job(job=job)
            schedule_logger(job_id=job_id).info(
                "start job {} on initiator {} {}".format(job_id, initiator_role, initiator_party_id))
        else:
            schedule_logger(job_id=job_id).error(
                "can not found job {} on initiator {} {}".format(job_id, initiator_role, initiator_party_id))

    @classmethod
    def schedule_running_job(cls, job):
        schedule_logger(job_id=job.f_job_id).info("scheduling job {}".format(job.f_job_id))
        dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl,
                                                       runtime_conf=job.f_runtime_conf_on_party,
                                                       train_runtime_conf=job.f_train_runtime_conf)
        task_scheduling_status_code, tasks = TaskScheduler.schedule(job=job, dsl_parser=dsl_parser, canceled=job.f_cancel_signal)
        tasks_status = [task.f_status for task in tasks]
        new_job_status = cls.calculate_job_status(task_scheduling_status_code=task_scheduling_status_code, tasks_status=tasks_status)
        if new_job_status == JobStatus.WAITING and job.f_cancel_signal:
            new_job_status = JobStatus.CANCELED
        total, finished_count = cls.calculate_job_progress(tasks_status=tasks_status)
        new_progress = float(finished_count) / total * 100
        schedule_logger(job_id=job.f_job_id).info(
            "Job {} status is {}, calculate by task status list: {}".format(job.f_job_id, new_job_status, tasks_status))
        if new_job_status != job.f_status or new_progress != job.f_progress:
            # Make sure to update separately, because these two fields update with anti-weight logic
            if int(new_progress) - job.f_progress > 0:
                job.f_progress = new_progress
                FederatedScheduler.sync_job(job=job, update_fields=["progress"])
                cls.update_job_on_initiator(initiator_job=job, update_fields=["progress"])
            if new_job_status != job.f_status:
                job.f_status = new_job_status
                if EndStatus.contains(job.f_status):
                    FederatedScheduler.save_pipelined_model(job=job)
                FederatedScheduler.sync_job_status(job=job)
                cls.update_job_on_initiator(initiator_job=job, update_fields=["status"])
        if EndStatus.contains(job.f_status):
            cls.finish(job=job, end_status=job.f_status)
        schedule_logger(job_id=job.f_job_id).info("finish scheduling job {}".format(job.f_job_id))

    @classmethod
    def rerun_job(cls, job_id, initiator_role, initiator_party_id, component_name):
        schedule_logger(job_id=job_id).info(f"try to rerun job {job_id} on initiator {initiator_role} {initiator_party_id}")
        jobs = JobSaver.query_job(job_id=job_id, role=initiator_role, party_id=initiator_party_id)
        if jobs:
            job = jobs[0]
        else:
            raise RuntimeError(f"can not found job {job_id} on initiator {initiator_role} {initiator_party_id}")
        if component_name != job_utils.job_virtual_component_name():
            tasks = JobSaver.query_task(job_id=job_id, role=initiator_role, party_id=initiator_party_id, component_name=component_name)
        else:
            tasks = JobSaver.query_task(job_id=job_id, role=initiator_role, party_id=initiator_party_id)
        job_can_rerun = False
        dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl,
                                                       runtime_conf=job.f_runtime_conf_on_party,
                                                       train_runtime_conf=job.f_train_runtime_conf)
        for task in tasks:
            if task.f_status in {TaskStatus.WAITING, TaskStatus.SUCCESS}:
                if task.f_status == TaskStatus.WAITING:
                    job_can_rerun = True
                schedule_logger(job_id=job_id).info(
                    f"task {task.f_task_id} {task.f_task_version} on {task.f_role} {task.f_party_id} is {task.f_status}, pass rerun")
            else:
                # stop old version task
                FederatedScheduler.stop_task(job=job, task=task, stop_status=TaskStatus.CANCELED)
                FederatedScheduler.clean_task(job=job, task=task, content_type="metrics")
                # create new version task
                task.f_task_version = task.f_task_version + 1
                task.f_run_pid = None
                task.f_run_ip = None
                FederatedScheduler.create_task(job=job, task=task)
                # Save the status information of all participants in the initiator for scheduling
                schedule_logger(job_id=job_id).info(f"create task {task.f_task_id} new version {task.f_task_version}")
                for _role, _party_ids in job.f_runtime_conf_on_party["role"].items():
                    for _party_id in _party_ids:
                        if _role == initiator_role and _party_id == initiator_party_id:
                            continue
                        JobController.initialize_tasks(job_id, _role, _party_id, False, job.f_initiator_role,
                                                       job.f_initiator_party_id,
                                                       RunParameters(**job.f_runtime_conf_on_party["job_parameters"]),
                                                       dsl_parser, component_name=task.f_component_name,
                                                       task_version=task.f_task_version)
                schedule_logger(job_id=job_id).info(
                    f"create task {task.f_task_id} new version {task.f_task_version} successfully")
                job_can_rerun = True
        if job_can_rerun:
            schedule_logger(job_id=job_id).info(f"job {job_id} set rerun signal")
            status = cls.rerun_signal(job_id=job_id, set_or_reset=True)
            if status:
                schedule_logger(job_id=job_id).info(f"job {job_id} set rerun signal successfully")
            else:
                schedule_logger(job_id=job_id).info(f"job {job_id} set rerun signal failed")
        else:
            FederatedScheduler.sync_job_status(job=job)
            schedule_logger(job_id=job_id).info(f"job {job_id} no task to rerun")

    @classmethod
    def update_job_on_initiator(cls, initiator_job: Job, update_fields: list):
        jobs = JobSaver.query_job(job_id=initiator_job.f_job_id)
        if not jobs:
            raise Exception("Failed to update job status on initiator")
        job_info = initiator_job.to_human_model_dict(only_primary_with=update_fields)
        for field in update_fields:
            job_info[field] = getattr(initiator_job, "f_%s" % field)
        for job in jobs:
            job_info["role"] = job.f_role
            job_info["party_id"] = job.f_party_id
            JobSaver.update_job_status(job_info=job_info)
            JobSaver.update_job(job_info=job_info)

    @classmethod
    def calculate_job_status(cls, task_scheduling_status_code, tasks_status):
        # 1. all waiting
        # 2. have running
        # 3. waiting + end status
        # 4. all end status and difference
        # 5. all the same end status
        tmp_status_set = set(tasks_status)
        if len(tmp_status_set) == 1:
            # 1 and 5
            return tmp_status_set.pop()
        else:
            if TaskStatus.RUNNING in tmp_status_set:
                # 2
                return JobStatus.RUNNING
            if TaskStatus.WAITING in tmp_status_set:
                # 3
                if task_scheduling_status_code == SchedulingStatusCode.HAVE_NEXT:
                    return JobStatus.RUNNING
                else:
                    # have waiting with no next
                    pass
            # have waiting with no next or 4
            for status in sorted(EndStatus.status_list(), key=lambda s: StatusSet.get_level(status=s), reverse=True):
                if status == TaskStatus.SUCCESS:
                    continue
                elif status in tmp_status_set:
                    return status
            if len(
                    tmp_status_set) == 2 and TaskStatus.WAITING in tmp_status_set and TaskStatus.SUCCESS in tmp_status_set and task_scheduling_status_code == SchedulingStatusCode.NO_NEXT:
                return JobStatus.CANCELED
            raise Exception("Calculate job status failed: {}".format(tasks_status))

    @classmethod
    def calculate_job_progress(cls, tasks_status):
        total = 0
        finished_count = 0
        for task_status in tasks_status:
            total += 1
            if EndStatus.contains(task_status):
                finished_count += 1
        return total, finished_count

    @classmethod
    def stop_job(cls, job_id, role, party_id, stop_status):
        schedule_logger(job_id=job_id).info(f"request stop job {job_id} with {stop_status}")
        jobs = JobSaver.query_job(job_id=job_id, role=role, party_id=party_id, is_initiator=True)
        if len(jobs) > 0:
            if stop_status == JobStatus.CANCELED:
                schedule_logger(job_id=job_id).info(f"cancel job {job_id}")
                set_cancel_status = cls.cancel_signal(job_id=job_id, set_or_reset=True)
                schedule_logger(job_id=job_id).info(f"set job {job_id} cancel signal {set_cancel_status}")
            job = jobs[0]
            job.f_status = stop_status
            schedule_logger(job_id=job_id).info(f"request stop job {job_id} with {stop_status} to all party")
            status_code, response = FederatedScheduler.stop_job(job=jobs[0], stop_status=stop_status)
            if status_code == FederatedSchedulingStatusCode.SUCCESS:
                schedule_logger(job_id=job_id).info(f"stop job {job_id} with {stop_status} successfully")
                return RetCode.SUCCESS, "success"
            else:
                initiator_tasks_group = JobSaver.get_tasks_asc(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id)
                for initiator_task in initiator_tasks_group.values():
                    TaskScheduler.collect_task_of_all_party(job, initiator_task=initiator_task, set_status=stop_status)
                schedule_logger(job_id=job_id).info(f"stop job {job_id} with {stop_status} failed, {response}")
                return RetCode.FEDERATED_ERROR, json_dumps(response)
        else:
            return RetCode.SUCCESS, "can not found job"

    @classmethod
    @DB.connection_context()
    def ready_signal(cls, job_id, set_or_reset: bool, ready_timeout_ttl=None):
        filters = [Job.f_job_id == job_id]
        if set_or_reset:
            update_fields = {Job.f_ready_signal: True, Job.f_ready_time: current_timestamp()}
            filters.append(Job.f_ready_signal == False)
        else:
            update_fields = {Job.f_ready_signal: False, Job.f_ready_time: None}
            filters.append(Job.f_ready_signal == True)
            if ready_timeout_ttl:
                filters.append(current_timestamp() - Job.f_ready_time > ready_timeout_ttl)
        update_status = Job.update(update_fields).where(*filters).execute() > 0
        return update_status

    @classmethod
    @DB.connection_context()
    def cancel_signal(cls, job_id, set_or_reset: bool):
        update_status = Job.update({Job.f_cancel_signal: set_or_reset, Job.f_cancel_time: current_timestamp()}).where(
            Job.f_job_id == job_id).execute() > 0
        return update_status

    @classmethod
    @DB.connection_context()
    def rerun_signal(cls, job_id, set_or_reset: bool):
        if set_or_reset is True:
            update_fields = {Job.f_rerun_signal: True, Job.f_cancel_signal: False, Job.f_end_scheduling_updates: 0}
        elif set_or_reset is False:
            update_fields = {Job.f_rerun_signal: False}
        else:
            raise RuntimeError(f"can not support rereun signal {set_or_reset}")
        update_status = Job.update(update_fields).where(Job.f_job_id == job_id).execute() > 0
        return update_status

    @classmethod
    @DB.connection_context()
    def end_scheduling_updates(cls, job_id):
        operate = Job.update({Job.f_end_scheduling_updates: Job.f_end_scheduling_updates + 1}) \
            .where(Job.f_job_id == job_id, Job.f_end_scheduling_updates < END_STATUS_JOB_SCHEDULING_UPDATES)
        update_status = operate.execute() > 0
        return update_status

    @classmethod
    def finish(cls, job, end_status):
        schedule_logger(job_id=job.f_job_id).info("Job {} finished with {}, do something...".format(job.f_job_id, end_status))
        cls.stop_job(job_id=job.f_job_id, role=job.f_initiator_role, party_id=job.f_initiator_party_id, stop_status=end_status)
        FederatedScheduler.clean_job(job=job)
        schedule_logger(job_id=job.f_job_id).info("Job {} finished with {}, done".format(job.f_job_id, end_status))
[ "fate_flow.operation.job_saver.JobSaver.update_job", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status", "fate_flow.entity.types.EndStatus.contains", "fate_flow.db.db_models.Job", "fate_flow.utils.job_utils.get_board_url", "fate_flow.operation.job_saver.JobSaver.get_tasks_asc", "fate_flow.operation.job_saver.JobSaver.query_task", "fate_flow.entity.types.EndStatus.status_list", "fate_flow.utils.job_utils.save_job_conf", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.resource_for_job", "fate_flow.utils.job_utils.job_virtual_component_name", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.save_pipelined_model", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.create_job", "fate_flow.scheduler.task_scheduler.TaskScheduler.collect_task_of_all_party", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.start_job", "fate_flow.utils.job_utils.generate_job_id", "fate_flow.operation.job_tracker.Tracker", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.utils.config_adapter.JobRuntimeConfigAdapter", "fate_flow.operation.job_saver.JobSaver.update_job_status", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.create_task", "fate_flow.db.db_models.DB.connection_context", "fate_flow.utils.authentication_utils.check_constraint", "fate_flow.scheduler.task_scheduler.TaskScheduler.schedule", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.clean_job", "fate_flow.entity.types.StatusSet.get_level", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.stop_job", "fate_flow.utils.model_utils.gen_model_id", "fate_flow.utils.model_utils.check_if_deployed", "fate_flow.utils.job_utils.check_job_runtime_conf", "fate_flow.controller.job_controller.JobController.initialize_tasks", "fate_flow.entity.types.RunParameters", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.stop_task", "fate_flow.controller.job_controller.JobController.backend_compatibility", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.clean_task", "fate_flow.controller.job_controller.JobController.adapt_job_parameters", "fate_flow.db.db_models.Job.update" ]
[((27653, 27676), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (27674, 27676), False, 'from fate_flow.db.db_models import DB, Job\n'), ((28380, 28403), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (28401, 28403), False, 'from fate_flow.db.db_models import DB, Job\n'), ((28681, 28704), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (28702, 28704), False, 'from fate_flow.db.db_models import DB, Job\n'), ((29241, 29264), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (29262, 29264), False, 'from fate_flow.db.db_models import DB, Job\n'), ((2020, 2070), 'fate_flow.utils.job_utils.check_job_runtime_conf', 'job_utils.check_job_runtime_conf', (['job_runtime_conf'], {}), '(job_runtime_conf)\n', (2052, 2070), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((2079, 2143), 'fate_flow.utils.authentication_utils.check_constraint', 'authentication_utils.check_constraint', (['job_runtime_conf', 'job_dsl'], {}), '(job_runtime_conf, job_dsl)\n', (2116, 2143), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((2222, 2263), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['job_runtime_conf'], {}), '(job_runtime_conf)\n', (2245, 2263), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((3831, 3836), 'fate_flow.db.db_models.Job', 'Job', ([], {}), '()\n', (3834, 3836), False, 'from fate_flow.db.db_models import DB, Job\n'), ((4283, 4498), 'fate_flow.utils.job_utils.save_job_conf', 'job_utils.save_job_conf', ([], {'job_id': 'job_id', 'role': 'job.f_initiator_role', 'job_dsl': 'job_dsl', 'job_runtime_conf': 'job_runtime_conf', 'job_runtime_conf_on_party': '{}', 'train_runtime_conf': 'train_runtime_conf', 'pipeline_dsl': 'None'}), '(job_id=job_id, role=job.f_initiator_role, job_dsl=\n job_dsl, job_runtime_conf=job_runtime_conf, job_runtime_conf_on_party={\n }, train_runtime_conf=train_runtime_conf, pipeline_dsl=None)\n', (4306, 4498), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((5099, 5172), 'fate_flow.controller.job_controller.JobController.backend_compatibility', 'JobController.backend_compatibility', ([], {'job_parameters': 'common_job_parameters'}), '(job_parameters=common_job_parameters)\n', (5134, 5172), False, 'from fate_flow.controller.job_controller import JobController\n'), ((5181, 5316), 'fate_flow.controller.job_controller.JobController.adapt_job_parameters', 'JobController.adapt_job_parameters', ([], {'role': 'job.f_initiator_role', 'job_parameters': 'common_job_parameters', 'create_initiator_baseline': '(True)'}), '(role=job.f_initiator_role,\n job_parameters=common_job_parameters, create_initiator_baseline=True)\n', (5215, 5316), False, 'from fate_flow.controller.job_controller import JobController\n'), ((5529, 5660), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf)\n', (5562, 5660), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((6598, 6636), 
'fate_flow.scheduler.federated_scheduler.FederatedScheduler.create_job', 'FederatedScheduler.create_job', ([], {'job': 'job'}), '(job=job)\n', (6627, 6636), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((7087, 7126), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', (['job_id'], {}), '(job_id)\n', (7118, 7126), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((7641, 7748), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'status': 'JobStatus.WAITING', 'order_by': '"""create_time"""', 'reverse': '(False)'}), "(is_initiator=True, status=JobStatus.WAITING, order_by=\n 'create_time', reverse=False)\n", (7659, 7748), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((8356, 8463), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'status': 'JobStatus.RUNNING', 'order_by': '"""create_time"""', 'reverse': '(False)'}), "(is_initiator=True, status=JobStatus.RUNNING, order_by=\n 'create_time', reverse=False)\n", (8374, 8463), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((9061, 9161), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'ready_signal': '(True)', 'order_by': '"""create_time"""', 'reverse': '(False)'}), "(is_initiator=True, ready_signal=True, order_by=\n 'create_time', reverse=False)\n", (9079, 9161), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((9720, 9820), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'rerun_signal': '(True)', 'order_by': '"""create_time"""', 'reverse': '(False)'}), "(is_initiator=True, rerun_signal=True, order_by=\n 'create_time', reverse=False)\n", (9738, 9820), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((15182, 15214), 'fate_flow.entity.types.EndStatus.contains', 'EndStatus.contains', (['job.f_status'], {}), '(job.f_status)\n', (15200, 15214), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, ResourceOperation, FederatedSchedulingStatusCode, RunParameters, RetCode\n'), ((16943, 16962), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (16960, 16962), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((17018, 17106), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role, party_id=\n initiator_party_id)\n', (17036, 17106), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((17692, 17832), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf_on_party', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf_on_party, train_runtime_conf=job.f_train_runtime_conf)\n', (17725, 17832), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((17983, 18072), 'fate_flow.scheduler.task_scheduler.TaskScheduler.schedule', 'TaskScheduler.schedule', ([], {'job': 'job', 'dsl_parser': 'dsl_parser', 'canceled': 'job.f_cancel_signal'}), '(job=job, dsl_parser=dsl_parser, 
canceled=job.\n f_cancel_signal)\n', (18005, 18072), False, 'from fate_flow.scheduler.task_scheduler import TaskScheduler\n'), ((19572, 19604), 'fate_flow.entity.types.EndStatus.contains', 'EndStatus.contains', (['job.f_status'], {}), '(job.f_status)\n', (19590, 19604), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, ResourceOperation, FederatedSchedulingStatusCode, RunParameters, RetCode\n'), ((20004, 20092), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role, party_id=\n initiator_party_id)\n', (20022, 20092), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((20673, 20813), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf_on_party', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf_on_party, train_runtime_conf=job.f_train_runtime_conf)\n', (20706, 20813), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((23589, 23638), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'initiator_job.f_job_id'}), '(job_id=initiator_job.f_job_id)\n', (23607, 23638), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((26113, 26199), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'is_initiator': '(True)'}), '(job_id=job_id, role=role, party_id=party_id,\n is_initiator=True)\n', (26131, 26199), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((29929, 29966), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.clean_job', 'FederatedScheduler.clean_job', ([], {'job': 'job'}), '(job=job)\n', (29957, 29966), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((1778, 1805), 'fate_flow.utils.job_utils.generate_job_id', 'job_utils.generate_job_id', ([], {}), '()\n', (1803, 1805), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((2473, 2523), 'fate_flow.utils.model_utils.gen_model_id', 'model_utils.gen_model_id', (["job_runtime_conf['role']"], {}), "(job_runtime_conf['role'])\n", (2497, 2523), False, 'from fate_flow.utils import model_utils\n'), ((2861, 3049), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': "job_initiator['role']", 'party_id': "job_initiator['party_id']", 'model_id': 'common_job_parameters.model_id', 'model_version': 'common_job_parameters.model_version'}), "(job_id=job_id, role=job_initiator['role'], party_id=job_initiator[\n 'party_id'], model_id=common_job_parameters.model_id, model_version=\n common_job_parameters.model_version)\n", (2868, 3049), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((3169, 3226), 'fate_arch.common.base_utils.json_loads', 'json_loads', (["pipeline_model['Pipeline'].train_runtime_conf"], {}), "(pipeline_model['Pipeline'].train_runtime_conf)\n", (3179, 3226), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((3763, 3815), 'fate_arch.common.base_utils.json_loads', 'json_loads', (["pipeline_model['Pipeline'].inference_dsl"], {}), "(pipeline_model['Pipeline'].inference_dsl)\n", (3773, 
3815), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((6798, 6841), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (6832, 6841), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((7380, 7466), 'fate_flow.utils.job_utils.get_board_url', 'job_utils.get_board_url', (['job_id', "job_initiator['role']", "job_initiator['party_id']"], {}), "(job_id, job_initiator['role'], job_initiator[\n 'party_id'])\n", (7403, 7466), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((11999, 12088), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.resource_for_job', 'FederatedScheduler.resource_for_job', ([], {'job': 'job', 'operation_type': 'ResourceOperation.APPLY'}), '(job=job, operation_type=\n ResourceOperation.APPLY)\n', (12034, 12088), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((15621, 15664), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (15655, 15664), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((17157, 17194), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.start_job', 'FederatedScheduler.start_job', ([], {'job': 'job'}), '(job=job)\n', (17185, 17194), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((20287, 20325), 'fate_flow.utils.job_utils.job_virtual_component_name', 'job_utils.job_virtual_component_name', ([], {}), '()\n', (20323, 20325), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((20347, 20467), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id', 'component_name': 'component_name'}), '(job_id=job_id, role=initiator_role, party_id=\n initiator_party_id, component_name=component_name)\n', (20366, 20467), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((20537, 20626), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role, party_id=\n initiator_party_id)\n', (20556, 20626), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((23351, 23394), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (23385, 23394), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((24052, 24097), 'fate_flow.operation.job_saver.JobSaver.update_job_status', 'JobSaver.update_job_status', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (24078, 24097), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((24110, 24148), 'fate_flow.operation.job_saver.JobSaver.update_job', 'JobSaver.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (24129, 24148), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((25821, 25852), 'fate_flow.entity.types.EndStatus.contains', 'EndStatus.contains', (['task_status'], {}), '(task_status)\n', (25839, 25852), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, 
StatusSet, SchedulingStatusCode, ResourceOperation, FederatedSchedulingStatusCode, RunParameters, RetCode\n'), ((26754, 26819), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.stop_job', 'FederatedScheduler.stop_job', ([], {'job': 'jobs[0]', 'stop_status': 'stop_status'}), '(job=jobs[0], stop_status=stop_status)\n', (26781, 26819), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((1814, 1837), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (1829, 1837), False, 'from fate_arch.common.log import schedule_logger\n'), ((3246, 3440), 'fate_flow.utils.model_utils.check_if_deployed', 'model_utils.check_if_deployed', ([], {'role': "job_initiator['role']", 'party_id': "job_initiator['party_id']", 'model_id': 'common_job_parameters.model_id', 'model_version': 'common_job_parameters.model_version'}), "(role=job_initiator['role'], party_id=\n job_initiator['party_id'], model_id=common_job_parameters.model_id,\n model_version=common_job_parameters.model_version)\n", (3275, 3440), False, 'from fate_flow.utils import model_utils\n'), ((6910, 6933), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (6925, 6933), False, 'from fate_arch.common.log import schedule_logger\n'), ((7572, 7589), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (7587, 7589), False, 'from fate_arch.common.log import schedule_logger\n'), ((7752, 7769), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (7767, 7769), False, 'from fate_arch.common.log import schedule_logger\n'), ((8221, 8238), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (8236, 8238), False, 'from fate_arch.common.log import schedule_logger\n'), ((8287, 8304), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (8302, 8304), False, 'from fate_arch.common.log import schedule_logger\n'), ((8467, 8484), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (8482, 8484), False, 'from fate_arch.common.log import schedule_logger\n'), ((8885, 8902), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (8900, 8902), False, 'from fate_arch.common.log import schedule_logger\n'), ((8994, 9011), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (9009, 9011), False, 'from fate_arch.common.log import schedule_logger\n'), ((9165, 9182), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (9180, 9182), False, 'from fate_arch.common.log import schedule_logger\n'), ((9589, 9606), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (9604, 9606), False, 'from fate_arch.common.log import schedule_logger\n'), ((9653, 9670), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (9668, 9670), False, 'from fate_arch.common.log import schedule_logger\n'), ((9824, 9841), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (9839, 9841), False, 'from fate_arch.common.log import schedule_logger\n'), ((10236, 10253), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (10251, 10253), False, 'from fate_arch.common.log import schedule_logger\n'), ((10300, 10317), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (10315, 10317), False, 'from fate_arch.common.log import schedule_logger\n'), ((10598, 10615), 
'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (10613, 10615), False, 'from fate_arch.common.log import schedule_logger\n'), ((11269, 11286), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (11284, 11286), False, 'from fate_arch.common.log import schedule_logger\n'), ((11799, 11842), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (11833, 11842), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((15034, 15057), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15049, 15057), False, 'from fate_arch.common.log import schedule_logger\n'), ((15819, 15956), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job', 'FederatedScheduler.sync_job', ([], {'job': 'job', 'update_fields': "['ready_signal', 'ready_time', 'rerun_signal', 'progress', 'end_time',\n 'elapsed']"}), "(job=job, update_fields=['ready_signal',\n 'ready_time', 'rerun_signal', 'progress', 'end_time', 'elapsed'])\n", (15846, 15956), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((16518, 16548), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (16533, 16548), False, 'from fate_arch.common.log import schedule_logger\n'), ((17586, 17622), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (17601, 17622), False, 'from fate_arch.common.log import schedule_logger\n'), ((18582, 18618), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (18597, 18618), False, 'from fate_arch.common.log import schedule_logger\n'), ((19041, 19105), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job', 'FederatedScheduler.sync_job', ([], {'job': 'job', 'update_fields': "['progress']"}), "(job=job, update_fields=['progress'])\n", (19068, 19105), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((19309, 19341), 'fate_flow.entity.types.EndStatus.contains', 'EndStatus.contains', (['job.f_status'], {}), '(job.f_status)\n', (19327, 19341), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, ResourceOperation, FederatedSchedulingStatusCode, RunParameters, RetCode\n'), ((19428, 19471), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (19462, 19471), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((19671, 19707), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (19686, 19707), False, 'from fate_arch.common.log import schedule_logger\n'), ((19872, 19902), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (19887, 19902), False, 'from fate_arch.common.log import schedule_logger\n'), ((21378, 21464), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.stop_task', 'FederatedScheduler.stop_task', ([], {'job': 'job', 'task': 'task', 'stop_status': 'TaskStatus.CANCELED'}), '(job=job, task=task, stop_status=TaskStatus.\n CANCELED)\n', (21406, 21464), False, 'from fate_flow.scheduler.federated_scheduler import 
FederatedScheduler\n'), ((21476, 21549), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.clean_task', 'FederatedScheduler.clean_task', ([], {'job': 'job', 'task': 'task', 'content_type': '"""metrics"""'}), "(job=job, task=task, content_type='metrics')\n", (21505, 21549), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((21745, 21795), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.create_task', 'FederatedScheduler.create_task', ([], {'job': 'job', 'task': 'task'}), '(job=job, task=task)\n', (21775, 21795), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((25053, 25076), 'fate_flow.entity.types.EndStatus.status_list', 'EndStatus.status_list', ([], {}), '()\n', (25074, 25076), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, ResourceOperation, FederatedSchedulingStatusCode, RunParameters, RetCode\n'), ((26013, 26043), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (26028, 26043), False, 'from fate_arch.common.log import schedule_logger\n'), ((27103, 27193), 'fate_flow.operation.job_saver.JobSaver.get_tasks_asc', 'JobSaver.get_tasks_asc', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(job_id=job.f_job_id, role=job.f_role, party_id=job.\n f_party_id)\n', (27125, 27193), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((27897, 27916), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (27914, 27916), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((29328, 29404), 'fate_flow.db.db_models.Job.update', 'Job.update', (['{Job.f_end_scheduling_updates: Job.f_end_scheduling_updates + 1}'], {}), '({Job.f_end_scheduling_updates: Job.f_end_scheduling_updates + 1})\n', (29338, 29404), False, 'from fate_flow.db.db_models import DB, Job\n'), ((29674, 29710), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (29689, 29710), False, 'from fate_arch.common.log import schedule_logger\n'), ((29975, 30011), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (29990, 30011), False, 'from fate_arch.common.log import schedule_logger\n'), ((4857, 4880), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (4872, 4880), False, 'from fate_arch.common.log import schedule_logger\n'), ((6369, 6522), 'fate_flow.controller.job_controller.JobController.initialize_tasks', 'JobController.initialize_tasks', (['job_id', 'role', 'party_id', '(False)', 'job.f_initiator_role', 'job.f_initiator_party_id', 'common_job_parameters', 'dsl_parser'], {}), '(job_id, role, party_id, False, job.\n f_initiator_role, job.f_initiator_party_id, common_job_parameters,\n dsl_parser)\n', (6399, 6522), False, 'from fate_flow.controller.job_controller import JobController\n'), ((7888, 7905), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (7903, 7905), False, 'from fate_arch.common.log import schedule_logger\n'), ((8561, 8578), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (8576, 8578), False, 'from fate_arch.common.log import schedule_logger\n'), ((9257, 9274), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (9272, 9274), False, 'from 
fate_arch.common.log import schedule_logger\n'), ((9916, 9933), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (9931, 9933), False, 'from fate_arch.common.log import schedule_logger\n'), ((10438, 10461), 'fate_flow.entity.types.EndStatus.status_list', 'EndStatus.status_list', ([], {}), '()\n', (10459, 10461), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, ResourceOperation, FederatedSchedulingStatusCode, RunParameters, RetCode\n'), ((10568, 10587), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (10585, 10587), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((10695, 10712), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (10710, 10712), False, 'from fate_arch.common.log import schedule_logger\n'), ((11583, 11606), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (11598, 11606), False, 'from fate_arch.common.log import schedule_logger\n'), ((13576, 13696), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.resource_for_job', 'FederatedScheduler.resource_for_job', ([], {'job': 'job', 'operation_type': 'ResourceOperation.RETURN', 'specific_dest': 'rollback_party'}), '(job=job, operation_type=\n ResourceOperation.RETURN, specific_dest=rollback_party)\n', (13611, 13696), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((14669, 14692), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14684, 14692), False, 'from fate_arch.common.log import schedule_logger\n'), ((15485, 15521), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (15500, 15521), False, 'from fate_arch.common.log import schedule_logger\n'), ((17207, 17237), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (17222, 17237), False, 'from fate_arch.common.log import schedule_logger\n'), ((17372, 17402), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (17387, 17402), False, 'from fate_arch.common.log import schedule_logger\n'), ((19363, 19411), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.save_pipelined_model', 'FederatedScheduler.save_pipelined_model', ([], {'job': 'job'}), '(job=job)\n', (19402, 19411), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((22950, 22980), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (22965, 22980), False, 'from fate_arch.common.log import schedule_logger\n'), ((23407, 23437), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (23422, 23437), False, 'from fate_arch.common.log import schedule_logger\n'), ((26620, 26650), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (26635, 26650), False, 'from fate_arch.common.log import schedule_logger\n'), ((27279, 27382), 'fate_flow.scheduler.task_scheduler.TaskScheduler.collect_task_of_all_party', 'TaskScheduler.collect_task_of_all_party', (['job'], {'initiator_task': 'initiator_task', 'set_status': 'stop_status'}), '(job, initiator_task=initiator_task,\n set_status=stop_status)\n', (27318, 27382), False, 'from 
fate_flow.scheduler.task_scheduler import TaskScheduler\n'), ((27539, 27559), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['response'], {}), '(response)\n', (27549, 27559), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((10508, 10527), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (10525, 10527), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((11859, 11882), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (11874, 11882), False, 'from fate_arch.common.log import schedule_logger\n'), ((13128, 13151), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (13143, 13151), False, 'from fate_arch.common.log import schedule_logger\n'), ((16072, 16108), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (16087, 16108), False, 'from fate_arch.common.log import schedule_logger\n'), ((16205, 16241), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (16220, 16241), False, 'from fate_arch.common.log import schedule_logger\n'), ((21133, 21163), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (21148, 21163), False, 'from fate_arch.common.log import schedule_logger\n'), ((21910, 21940), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (21925, 21940), False, 'from fate_arch.common.log import schedule_logger\n'), ((22738, 22768), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (22753, 22768), False, 'from fate_arch.common.log import schedule_logger\n'), ((23131, 23161), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (23146, 23161), False, 'from fate_arch.common.log import schedule_logger\n'), ((23248, 23278), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (23263, 23278), False, 'from fate_arch.common.log import schedule_logger\n'), ((25092, 25121), 'fate_flow.entity.types.StatusSet.get_level', 'StatusSet.get_level', ([], {'status': 's'}), '(status=s)\n', (25111, 25121), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, ResourceOperation, FederatedSchedulingStatusCode, RunParameters, RetCode\n'), ((26288, 26318), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (26303, 26318), False, 'from fate_arch.common.log import schedule_logger\n'), ((26452, 26482), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (26467, 26482), False, 'from fate_arch.common.log import schedule_logger\n'), ((26905, 26935), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (26920, 26935), False, 'from fate_arch.common.log import schedule_logger\n'), ((27395, 27425), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (27410, 27425), False, 'from fate_arch.common.log import schedule_logger\n'), ((8071, 8100), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), 
'(job.f_job_id)\n', (8086, 8100), False, 'from fate_arch.common.log import schedule_logger\n'), ((8130, 8159), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (8145, 8159), False, 'from fate_arch.common.log import schedule_logger\n'), ((8743, 8772), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (8758, 8772), False, 'from fate_arch.common.log import schedule_logger\n'), ((8802, 8831), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (8817, 8831), False, 'from fate_arch.common.log import schedule_logger\n'), ((9435, 9464), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (9450, 9464), False, 'from fate_arch.common.log import schedule_logger\n'), ((9494, 9523), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (9509, 9523), False, 'from fate_arch.common.log import schedule_logger\n'), ((10094, 10123), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (10109, 10123), False, 'from fate_arch.common.log import schedule_logger\n'), ((10153, 10182), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (10168, 10182), False, 'from fate_arch.common.log import schedule_logger\n'), ((10917, 10946), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (10932, 10946), False, 'from fate_arch.common.log import schedule_logger\n'), ((11127, 11156), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (11142, 11156), False, 'from fate_arch.common.log import schedule_logger\n'), ((11186, 11215), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (11201, 11215), False, 'from fate_arch.common.log import schedule_logger\n'), ((14128, 14151), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14143, 14151), False, 'from fate_arch.common.log import schedule_logger\n'), ((14436, 14459), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14451, 14459), False, 'from fate_arch.common.log import schedule_logger\n'), ((22519, 22581), 'fate_flow.entity.types.RunParameters', 'RunParameters', ([], {}), "(**job.f_runtime_conf_on_party['job_parameters'])\n", (22532, 22581), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, ResourceOperation, FederatedSchedulingStatusCode, RunParameters, RetCode\n'), ((28188, 28207), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (28205, 28207), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((28272, 28297), 'fate_flow.db.db_models.Job.update', 'Job.update', (['update_fields'], {}), '(update_fields)\n', (28282, 28297), False, 'from fate_flow.db.db_models import DB, Job\n'), ((29119, 29144), 'fate_flow.db.db_models.Job.update', 'Job.update', (['update_fields'], {}), '(update_fields)\n', (29129, 29144), False, 'from fate_flow.db.db_models import DB, Job\n'), ((13994, 14017), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14009, 14017), False, 'from fate_arch.common.log import schedule_logger\n'), ((28550, 
28569), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (28567, 28569), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n')]
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import io
import json
import operator
import os
import shutil
import tarfile
import uuid

from flask import send_file

from fate_arch.abc import StorageTableABC
from fate_arch.common.base_utils import fate_uuid
from fate_arch.session import Session
from fate_flow.component_env_utils import feature_utils
from fate_flow.settings import stat_logger
from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking
from fate_flow.utils import data_utils
from fate_flow.utils.data_utils import get_header_schema


class DataTableTracker(object):
    @classmethod
    @DB.connection_context()
    def create_table_tracker(cls, table_name, table_namespace, entity_info):
        tracker = DataTableTracking()
        tracker.f_table_name = table_name
        tracker.f_table_namespace = table_namespace
        for k, v in entity_info.items():
            attr_name = 'f_%s' % k
            if hasattr(DataTableTracking, attr_name):
                setattr(tracker, attr_name, v)
        if entity_info.get("have_parent"):
            parent_trackers = DataTableTracking.select().where(
                DataTableTracking.f_table_name == entity_info.get("parent_table_name"),
                DataTableTracking.f_table_namespace == entity_info.get("parent_table_namespace")
            ).order_by(DataTableTracking.f_create_time.desc())
            if not parent_trackers:
                raise Exception(f"table {table_name} {table_namespace} no found parent")
            parent_tracker = parent_trackers[0]
            if parent_tracker.f_have_parent:
                tracker.f_source_table_name = parent_tracker.f_source_table_name
                tracker.f_source_table_namespace = parent_tracker.f_source_table_namespace
            else:
                tracker.f_source_table_name = parent_tracker.f_table_name
                tracker.f_source_table_namespace = parent_tracker.f_table_namespace
        rows = tracker.save(force_insert=True)
        if rows != 1:
            raise Exception("Create {} failed".format(tracker))
        return tracker

    @classmethod
    @DB.connection_context()
    def query_tracker(cls, table_name, table_namespace, is_parent=False):
        if not is_parent:
            filters = [operator.attrgetter('f_table_name')(DataTableTracking) == table_name,
                       operator.attrgetter('f_table_namespace')(DataTableTracking) == table_namespace]
        else:
            filters = [operator.attrgetter('f_parent_table_name')(DataTableTracking) == table_name,
                       operator.attrgetter('f_parent_table_namespace')(DataTableTracking) == table_namespace]
        trackers = DataTableTracking.select().where(*filters)
        return [tracker for tracker in trackers]

    @classmethod
    @DB.connection_context()
    def get_parent_table(cls, table_name, table_namespace):
        trackers = DataTableTracker.query_tracker(table_name, table_namespace)
        if not trackers:
            raise Exception(f"no found table: table name {table_name}, table namespace {table_namespace}")
        else:
            parent_table_info = []
            for tracker in trackers:
                if not tracker.f_have_parent:
                    return []
                else:
                    parent_table_info.append({"parent_table_name": tracker.f_parent_table_name,
                                              "parent_table_namespace": tracker.f_parent_table_namespace,
                                              "source_table_name": tracker.f_source_table_name,
                                              "source_table_namespace": tracker.f_source_table_namespace
                                              })
            return parent_table_info

    @classmethod
    @DB.connection_context()
    def track_job(cls, table_name, table_namespace, display=False):
        trackers = DataTableTracker.query_tracker(table_name, table_namespace, is_parent=True)
        job_id_list = []
        for tracker in trackers:
            job_id_list.append(tracker.f_job_id)
        job_id_list = list(set(job_id_list))
        return {"count": len(job_id_list)} if not display else {"count": len(job_id_list), "job": job_id_list}


class TableStorage:
    @staticmethod
    def copy_table(src_table: StorageTableABC, dest_table: StorageTableABC, deserialize_value=False):
        count = 0
        data_temp = []
        part_of_data = []
        src_table_meta = src_table.meta
        schema = {}
        update_schema = False
        if not src_table_meta.get_in_serialized():
            if src_table_meta.get_have_head():
                get_head = False
            else:
                get_head = True
            line_index = 0
            fate_uuid = uuid.uuid1().hex
            if not src_table.meta.get_extend_sid():
                get_line = data_utils.get_data_line
            elif not src_table_meta.get_auto_increasing_sid():
                get_line = data_utils.get_sid_data_line
            else:
                get_line = data_utils.get_auto_increasing_sid_data_line
            for line in src_table.read():
                if not get_head:
                    schema = data_utils.get_header_schema(
                        header_line=line,
                        id_delimiter=src_table_meta.get_id_delimiter(),
                        extend_sid=src_table_meta.get_extend_sid(),
                    )
                    get_head = True
                    continue
                values = line.rstrip().split(src_table.meta.get_id_delimiter())
                k, v = get_line(
                    values=values,
                    line_index=line_index,
                    extend_sid=src_table.meta.get_extend_sid(),
                    auto_increasing_sid=src_table.meta.get_auto_increasing_sid(),
                    id_delimiter=src_table.meta.get_id_delimiter(),
                    fate_uuid=fate_uuid,
                )
                line_index += 1
                count = TableStorage.put_in_table(
                    table=dest_table, k=k, v=v,
                    temp=data_temp, count=count,
                    part_of_data=part_of_data,
                )
        else:
            for k, v in src_table.collect():
                if deserialize_value:
                    # writer component: deserialize value
                    v, extend_header = feature_utils.get_deserialize_value(v, dest_table.meta.get_id_delimiter())
                    if not update_schema:
                        header_list = get_component_output_data_schema(src_table.meta, extend_header)
                        schema = get_header_schema(dest_table.meta.get_id_delimiter().join(header_list),
                                                   dest_table.meta.get_id_delimiter())
                        _, dest_table.meta = dest_table.meta.update_metas(schema=schema)
                        update_schema = True
                count = TableStorage.put_in_table(
                    table=dest_table, k=k, v=v,
                    temp=data_temp, count=count,
                    part_of_data=part_of_data,
                )
            schema = src_table.meta.get_schema()
        if data_temp:
            dest_table.put_all(data_temp)
        dest_table.meta.update_metas(schema=schema if not update_schema else None, part_of_data=part_of_data)
        return dest_table.count()

    @staticmethod
    def put_in_table(table: StorageTableABC, k, v, temp, count, part_of_data, max_num=10000):
        temp.append((k, v))
        if count < 100:
            part_of_data.append((k, v))
        if len(temp) == max_num:
            table.put_all(temp)
            temp.clear()
        return count + 1

    @staticmethod
    def send_table(output_tables_meta, tar_file_name, limit=-1, need_head=True):
        output_data_file_list = []
        output_data_meta_file_list = []
        output_tmp_dir = os.path.join(os.getcwd(), 'tmp/{}'.format(fate_uuid()))
        for output_name, output_table_meta in output_tables_meta.items():
            output_data_count = 0
            is_str = False
            output_data_file_path = "{}/{}.csv".format(output_tmp_dir, output_name)
            os.makedirs(os.path.dirname(output_data_file_path), exist_ok=True)
            with open(output_data_file_path, 'w') as fw:
                with Session() as sess:
                    output_table = sess.get_table(name=output_table_meta.get_name(),
                                                  namespace=output_table_meta.get_namespace())
                    if output_table:
                        for k, v in output_table.collect():
                            data_line, is_str, extend_header = feature_utils.get_component_output_data_line(src_key=k, src_value=v)
                            fw.write('{}\n'.format(','.join(map(lambda x: str(x), data_line))))
                            output_data_count += 1
                            if output_data_count == limit:
                                break
            if output_data_count:
                # get meta
                output_data_file_list.append(output_data_file_path)
                header = get_component_output_data_schema(output_table_meta=output_table_meta,
                                                           is_str=is_str,
                                                           extend_header=extend_header)
                output_data_meta_file_path = "{}/{}.meta".format(output_tmp_dir, output_name)
                output_data_meta_file_list.append(output_data_meta_file_path)
                with open(output_data_meta_file_path, 'w') as fw:
                    json.dump({'header': header}, fw, indent=4)
                if need_head and header:
                    with open(output_data_file_path, 'r+') as f:
                        content = f.read()
                        f.seek(0, 0)
                        f.write('{}\n'.format(','.join(header)) + content)
        # tar
        memory_file = io.BytesIO()
        tar = tarfile.open(fileobj=memory_file, mode='w:gz')
        for index in range(0, len(output_data_file_list)):
            tar.add(output_data_file_list[index], os.path.relpath(output_data_file_list[index], output_tmp_dir))
            tar.add(output_data_meta_file_list[index], os.path.relpath(output_data_meta_file_list[index], output_tmp_dir))
        tar.close()
        memory_file.seek(0)
        output_data_file_list.extend(output_data_meta_file_list)
        for path in output_data_file_list:
            try:
                shutil.rmtree(os.path.dirname(path))
            except Exception as e:
                # warning
                stat_logger.warning(e)
        return send_file(memory_file, attachment_filename=tar_file_name, as_attachment=True)


def delete_tables_by_table_infos(output_data_table_infos):
    data = []
    status = False
    with Session() as sess:
        for output_data_table_info in output_data_table_infos:
            table_name = output_data_table_info.f_table_name
            namespace = output_data_table_info.f_table_namespace
            table_info = {'table_name': table_name, 'namespace': namespace}
            if table_name and namespace and table_info not in data:
                table = sess.get_table(table_name, namespace)
                if table:
                    try:
                        table.destroy()
                        data.append(table_info)
                        status = True
                    except:
                        pass
    return status, data


def delete_metric_data(metric_info):
    if metric_info.get('model'):
        sql = drop_metric_data_mode(metric_info.get('model'))
    else:
        sql = delete_metric_data_from_db(metric_info)
    return sql


@DB.connection_context()
def drop_metric_data_mode(model):
    try:
        drop_sql = 'drop table t_tracking_metric_{}'.format(model)
        DB.execute_sql(drop_sql)
        stat_logger.info(drop_sql)
        return drop_sql
    except Exception as e:
        stat_logger.exception(e)
        raise e


@DB.connection_context()
def delete_metric_data_from_db(metric_info):
    try:
        job_id = metric_info['job_id']
        metric_info.pop('job_id')
        delete_sql = 'delete from t_tracking_metric_{} where f_job_id="{}"'.format(job_id[:8], job_id)
        for k, v in metric_info.items():
            if hasattr(TrackingMetric, "f_" + k):
                connect_str = " and f_"
                delete_sql = delete_sql + connect_str + k + '="{}"'.format(v)
        DB.execute_sql(delete_sql)
        stat_logger.info(delete_sql)
        return delete_sql
    except Exception as e:
        stat_logger.exception(e)
        raise e


def get_component_output_data_schema(output_table_meta, extend_header, is_str=False):
    # get schema
    schema = output_table_meta.get_schema()
    if not schema:
        return ['sid']
    header = [schema.get('sid_name', 'sid')]
    if "label" in extend_header and schema.get("label_name"):
        extend_header[extend_header.index("label")] = schema.get("label_name")
    header.extend(extend_header)
    if is_str or isinstance(schema.get('header'), str):
        if not schema.get('header'):
            if schema.get('sid'):
                return [schema.get('sid')]
            else:
                return None
        header.extend([feature for feature in schema.get('header').split(',')])
    else:
        header.extend(schema.get('header', []))
    return header
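# Several classmethods and functions in the module above are wrapped with
# DB.connection_context(), which scopes a database connection to the decorated
# call. Below is a minimal, self-contained sketch of that decorator pattern,
# assuming a plain peewee SqliteDatabase and a made-up TrackingRecord model
# and file name -- it is not FATE's actual DB singleton or its models.
from peewee import CharField, Model, SqliteDatabase

demo_db = SqliteDatabase("tracking_demo.db")  # hypothetical local database file


class TrackingRecord(Model):
    f_table_name = CharField()

    class Meta:
        database = demo_db


@demo_db.connection_context()  # opens a connection for this call, closes it on exit
def create_record(name):
    demo_db.create_tables([TrackingRecord], safe=True)
    return TrackingRecord.create(f_table_name=name)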
[ "fate_flow.db.db_models.DB.execute_sql", "fate_flow.settings.stat_logger.warning", "fate_flow.settings.stat_logger.info", "fate_flow.db.db_models.DataTableTracking", "fate_flow.db.db_models.DB.connection_context", "fate_flow.db.db_models.DataTableTracking.select", "fate_flow.db.db_models.DataTableTracking.f_create_time.desc", "fate_flow.settings.stat_logger.exception", "fate_flow.component_env_utils.feature_utils.get_component_output_data_line" ]
[((12627, 12650), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (12648, 12650), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((12932, 12955), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (12953, 12955), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((1189, 1212), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (1210, 1212), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((2686, 2709), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (2707, 2709), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((3365, 3388), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (3386, 3388), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((4352, 4375), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (4373, 4375), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((1308, 1327), 'fate_flow.db.db_models.DataTableTracking', 'DataTableTracking', ([], {}), '()\n', (1325, 1327), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((10824, 10836), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (10834, 10836), False, 'import io\n'), ((10851, 10897), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'memory_file', 'mode': '"""w:gz"""'}), "(fileobj=memory_file, mode='w:gz')\n", (10863, 10897), False, 'import tarfile\n'), ((11739, 11748), 'fate_arch.session.Session', 'Session', ([], {}), '()\n', (11746, 11748), False, 'from fate_arch.session import Session\n'), ((12769, 12793), 'fate_flow.db.db_models.DB.execute_sql', 'DB.execute_sql', (['drop_sql'], {}), '(drop_sql)\n', (12783, 12793), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((12802, 12828), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['drop_sql'], {}), '(drop_sql)\n', (12818, 12828), False, 'from fate_flow.settings import stat_logger\n'), ((13404, 13430), 'fate_flow.db.db_models.DB.execute_sql', 'DB.execute_sql', (['delete_sql'], {}), '(delete_sql)\n', (13418, 13430), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((13439, 13467), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['delete_sql'], {}), '(delete_sql)\n', (13455, 13467), False, 'from fate_flow.settings import stat_logger\n'), ((8637, 8648), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8646, 8648), False, 'import os\n'), ((11558, 11635), 'flask.send_file', 'send_file', (['memory_file'], {'attachment_filename': 'tar_file_name', 'as_attachment': '(True)'}), '(memory_file, attachment_filename=tar_file_name, as_attachment=True)\n', (11567, 11635), False, 'from flask import send_file\n'), ((12888, 12912), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (12909, 12912), False, 'from fate_flow.settings import stat_logger\n'), ((13529, 13553), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (13550, 13553), False, 'from fate_flow.settings import stat_logger\n'), ((1901, 1939), 'fate_flow.db.db_models.DataTableTracking.f_create_time.desc', 'DataTableTracking.f_create_time.desc', ([], {}), '()\n', (1937, 1939), 
False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((3249, 3275), 'fate_flow.db.db_models.DataTableTracking.select', 'DataTableTracking.select', ([], {}), '()\n', (3273, 3275), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((5333, 5345), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (5343, 5345), False, 'import uuid\n'), ((8666, 8677), 'fate_arch.common.base_utils.fate_uuid', 'fate_uuid', ([], {}), '()\n', (8675, 8677), False, 'from fate_arch.common.base_utils import fate_uuid\n'), ((8923, 8961), 'os.path.dirname', 'os.path.dirname', (['output_data_file_path'], {}), '(output_data_file_path)\n', (8938, 8961), False, 'import os\n'), ((11007, 11068), 'os.path.relpath', 'os.path.relpath', (['output_data_file_list[index]', 'output_tmp_dir'], {}), '(output_data_file_list[index], output_tmp_dir)\n', (11022, 11068), False, 'import os\n'), ((11145, 11211), 'os.path.relpath', 'os.path.relpath', (['output_data_meta_file_list[index]', 'output_tmp_dir'], {}), '(output_data_meta_file_list[index], output_tmp_dir)\n', (11160, 11211), False, 'import os\n'), ((9056, 9065), 'fate_arch.session.Session', 'Session', ([], {}), '()\n', (9063, 9065), False, 'from fate_arch.session import Session\n'), ((10479, 10522), 'json.dump', 'json.dump', (["{'header': header}", 'fw'], {'indent': '(4)'}), "({'header': header}, fw, indent=4)\n", (10488, 10522), False, 'import json\n'), ((11416, 11437), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (11431, 11437), False, 'import os\n'), ((11516, 11538), 'fate_flow.settings.stat_logger.warning', 'stat_logger.warning', (['e'], {}), '(e)\n', (11535, 11538), False, 'from fate_flow.settings import stat_logger\n'), ((2833, 2868), 'operator.attrgetter', 'operator.attrgetter', (['"""f_table_name"""'], {}), "('f_table_name')\n", (2852, 2868), False, 'import operator\n'), ((2926, 2966), 'operator.attrgetter', 'operator.attrgetter', (['"""f_table_namespace"""'], {}), "('f_table_namespace')\n", (2945, 2966), False, 'import operator\n'), ((3043, 3085), 'operator.attrgetter', 'operator.attrgetter', (['"""f_parent_table_name"""'], {}), "('f_parent_table_name')\n", (3062, 3085), False, 'import operator\n'), ((3143, 3190), 'operator.attrgetter', 'operator.attrgetter', (['"""f_parent_table_namespace"""'], {}), "('f_parent_table_namespace')\n", (3162, 3190), False, 'import operator\n'), ((1672, 1698), 'fate_flow.db.db_models.DataTableTracking.select', 'DataTableTracking.select', ([], {}), '()\n', (1696, 1698), False, 'from fate_flow.db.db_models import DB, TrackingMetric, DataTableTracking\n'), ((9415, 9483), 'fate_flow.component_env_utils.feature_utils.get_component_output_data_line', 'feature_utils.get_component_output_data_line', ([], {'src_key': 'k', 'src_value': 'v'}), '(src_key=k, src_value=v)\n', (9459, 9483), False, 'from fate_flow.component_env_utils import feature_utils\n')]
import numpy as np from fate_arch.session import computing_session as session from federatedml.util import consts from federatedml.transfer_learning.hetero_ftl.ftl_base import FTL from federatedml.statistic.intersect import intersect_guest from federatedml.util import LOGGER from federatedml.transfer_learning.hetero_ftl.ftl_dataloder import FTLDataLoader from fate_flow.entity.metric import Metric from fate_flow.entity.metric import MetricMeta from federatedml.optim.convergence import converge_func_factory from federatedml.nn.hetero_nn.backend.paillier_tensor import PaillierTensor from federatedml.optim.activation import sigmoid from federatedml.statistic import data_overview class FTLGuest(FTL): def __init__(self): super(FTLGuest, self).__init__() self.phi = None # Φ_A self.phi_product = None # (Φ_A)‘(Φ_A) [feature_dim, feature_dim] self.overlap_y = None # y_i ∈ N_c self.overlap_y_2 = None # (y_i ∈ N_c )^2 self.overlap_ua = None # u_i ∈ N_AB self.constant_k = None # κ self.feat_dim = None # output feature dimension self.send_components = None # components to send self.convergence = None self.overlap_y_pt = None # paillier tensor self.history_loss = [] # list to record history loss self.role = consts.GUEST def init_intersect_obj(self): intersect_obj = intersect_guest.RsaIntersectionGuest(self.intersect_param) intersect_obj.guest_party_id = self.component_properties.local_partyid intersect_obj.host_party_id_list = self.component_properties.host_party_idlist LOGGER.debug('intersect done') return intersect_obj def check_convergence(self, loss): LOGGER.info("check convergence") if self.convergence is None: self.convergence = converge_func_factory("diff", self.tol) return self.convergence.is_converge(loss) def compute_phi_and_overlap_ua(self, data_loader: FTLDataLoader): """ compute Φ and ua of overlap samples """ phi = None # [1, feature_dim] Φ_A overlap_ua = [] for i in range(len(data_loader)): batch_x, batch_y = data_loader[i] ua_batch = self.nn.predict(batch_x) # [batch_size, feature_dim] relative_overlap_index = data_loader.get_relative_overlap_index(i) if len(relative_overlap_index) != 0: if self.verbose: LOGGER.debug('batch {}/{} overlap index is {}'.format(i, len(data_loader), relative_overlap_index)) overlap_ua.append(ua_batch[relative_overlap_index]) phi_tmp = np.expand_dims(np.sum(batch_y * ua_batch, axis=0), axis=0) if phi is None: phi = phi_tmp else: phi += phi_tmp phi = phi / self.data_num return phi, overlap_ua def batch_compute_components(self, data_loader: FTLDataLoader): """ compute guest components """ phi, overlap_ua = self.compute_phi_and_overlap_ua(data_loader) # Φ_A [1, feature_dim] phi_product = np.matmul(phi.transpose(), phi) # (Φ_A)‘(Φ_A) [feature_dim, feature_dim] if self.overlap_y is None: self.overlap_y = data_loader.get_overlap_y() # {C(y)=y} [1, feat_dim] if self.overlap_y_2 is None: self.overlap_y_2 = self.overlap_y * self.overlap_y # {D(y)=y^2} # [1, feat_dim] overlap_ua = np.concatenate(overlap_ua, axis=0) # [overlap_num, feat_dim] # 3 components will be sent to host y_overlap_2_phi_2 = 0.25 * np.expand_dims(self.overlap_y_2, axis=2) * phi_product y_overlap_phi = -0.5 * self.overlap_y * phi mapping_comp_a = -overlap_ua * self.constant_k return phi, phi_product, overlap_ua, [y_overlap_2_phi_2, y_overlap_phi, mapping_comp_a] def exchange_components(self, comp_to_send, epoch_idx): """ send guest components and get host components """ if self.mode == 'encrypted': comp_to_send = self.encrypt_tensor(comp_to_send) # sending [y_overlap_2_phi_2, y_overlap_phi, 
mapping_comp_a] self.transfer_variable.y_overlap_2_phi_2.remote(comp_to_send[0], suffix=(epoch_idx, )) self.transfer_variable.y_overlap_phi.remote(comp_to_send[1], suffix=(epoch_idx, )) self.transfer_variable.mapping_comp_a.remote(comp_to_send[2], suffix=(epoch_idx, )) # receiving [overlap_ub, overlap_ub_2, mapping_comp_b] overlap_ub = self.transfer_variable.overlap_ub.get(idx=0, suffix=(epoch_idx, )) overlap_ub_2 = self.transfer_variable.overlap_ub_2.get(idx=0, suffix=(epoch_idx, )) mapping_comp_b = self.transfer_variable.mapping_comp_b.get(idx=0, suffix=(epoch_idx, )) host_components = [overlap_ub, overlap_ub_2, mapping_comp_b] if self.mode == 'encrypted': host_paillier_tensors = [PaillierTensor(tb_obj=tb, partitions=self.partitions) for tb in host_components] return host_paillier_tensors else: return host_components def decrypt_inter_result(self, encrypted_const, grad_a_overlap, epoch_idx, local_round=-1): """ add random mask to encrypted inter-result, get decrypted data from host add subtract random mask """ rand_0 = self.rng_generator.generate_random_number(encrypted_const.shape) encrypted_const = encrypted_const + rand_0 rand_1 = PaillierTensor(ori_data=self.rng_generator.generate_random_number(grad_a_overlap.shape), partitions=self.partitions) grad_a_overlap = grad_a_overlap + rand_1 self.transfer_variable.guest_side_const.remote(encrypted_const, suffix=(epoch_idx, local_round,)) self.transfer_variable.guest_side_gradients.remote(grad_a_overlap.get_obj(), suffix=(epoch_idx, local_round,)) const = self.transfer_variable.decrypted_guest_const.get(suffix=(epoch_idx, local_round, ), idx=0) grad = self.transfer_variable.decrypted_guest_gradients.get(suffix=(epoch_idx, local_round, ), idx=0) const = const - rand_0 grad_a_overlap = PaillierTensor(tb_obj=grad, partitions=self.partitions) - rand_1 return const, grad_a_overlap def decrypt_host_data(self, epoch_idx, local_round=-1): inter_grad = self.transfer_variable.host_side_gradients.get(suffix=(epoch_idx, local_round, 'host_de_send'), idx=0) inter_grad_pt = PaillierTensor(tb_obj=inter_grad, partitions=self.partitions) self.transfer_variable.decrypted_host_gradients.remote(inter_grad_pt.decrypt(self.encrypter).get_obj(), suffix=(epoch_idx, local_round, 'host_de_get')) def decrypt_loss_val(self, encrypted_loss, epoch_idx): self.transfer_variable.encrypted_loss.remote(encrypted_loss, suffix=(epoch_idx, 'send_loss')) decrypted_loss = self.transfer_variable.decrypted_loss.get(idx=0, suffix=(epoch_idx, 'get_loss')) return decrypted_loss def compute_backward_gradients(self, host_components, data_loader: FTLDataLoader, epoch_idx, local_round=-1): """ compute backward gradients using host components """ # they are Paillier tensors or np array overlap_ub, overlap_ub_2, mapping_comp_b = host_components[0], host_components[1], host_components[2] y_overlap_2_phi = np.expand_dims(self.overlap_y_2 * self.phi, axis=1) if self.mode == 'plain': loss_grads_const_part1 = 0.25 * np.squeeze(np.matmul(y_overlap_2_phi, overlap_ub_2), axis=1) loss_grads_const_part2 = self.overlap_y * overlap_ub const = np.sum(loss_grads_const_part1, axis=0) - 0.5 * np.sum(loss_grads_const_part2, axis=0) grad_a_nonoverlap = self.alpha * const * data_loader.y[data_loader.get_non_overlap_indexes()] / self.data_num grad_a_overlap = self.alpha * const * self.overlap_y / self.data_num + mapping_comp_b return np.concatenate([grad_a_overlap, grad_a_nonoverlap], axis=0) elif self.mode == 'encrypted': loss_grads_const_part1 = overlap_ub_2.matmul_3d(0.25 * y_overlap_2_phi, multiply='right') 
loss_grads_const_part1 = loss_grads_const_part1.squeeze(axis=1) if self.overlap_y_pt is None: self.overlap_y_pt = PaillierTensor(self.overlap_y, partitions=self.partitions) loss_grads_const_part2 = overlap_ub * self.overlap_y_pt encrypted_const = loss_grads_const_part1.reduce_sum() - 0.5 * loss_grads_const_part2.reduce_sum() grad_a_overlap = self.overlap_y_pt.map_ndarray_product((self.alpha/self.data_num * encrypted_const)) + mapping_comp_b const, grad_a_overlap = self.decrypt_inter_result(encrypted_const, grad_a_overlap, epoch_idx=epoch_idx , local_round=local_round) self.decrypt_host_data(epoch_idx, local_round=local_round) grad_a_nonoverlap = self.alpha * const * data_loader.y[data_loader.get_non_overlap_indexes()]/self.data_num return np.concatenate([grad_a_overlap.numpy(), grad_a_nonoverlap], axis=0) def compute_loss(self, host_components, epoch_idx, overlap_num): """ compute training loss """ overlap_ub, overlap_ub_2, mapping_comp_b = host_components[0], host_components[1], host_components[2] if self.mode == 'plain': loss_overlap = np.sum((-self.overlap_ua * self.constant_k) * overlap_ub) ub_phi = np.matmul(overlap_ub, self.phi.transpose()) part1 = -0.5*np.sum(self.overlap_y*ub_phi) part2 = 1.0/8*np.sum(ub_phi * ub_phi) part3 = len(self.overlap_y)*np.log(2) loss_y = part1 + part2 + part3 return self.alpha * (loss_y/overlap_num) + loss_overlap/overlap_num elif self.mode == 'encrypted': loss_overlap = overlap_ub.element_wise_product((-self.overlap_ua*self.constant_k)) sum = np.sum(loss_overlap.reduce_sum()) ub_phi = overlap_ub.T.fast_matmul_2d(self.phi.transpose()) part1 = -0.5 * np.sum((self.overlap_y * ub_phi)) ub_2 = overlap_ub_2.reduce_sum() enc_phi_uB_2_phi = np.matmul(np.matmul(self.phi, ub_2), self.phi.transpose()) part2 = 1/8 * np.sum(enc_phi_uB_2_phi) part3 = len(self.overlap_y)*np.log(2) loss_y = part1 + part2 + part3 en_loss = (self.alpha/self.overlap_num) * loss_y + sum / overlap_num loss_val = self.decrypt_loss_val(en_loss, epoch_idx) return loss_val @staticmethod def sigmoid(x): return np.array(list(map(sigmoid, x))) def generate_summary(self): summary = {'loss_history': self.history_loss, "best_iteration": -1 if self.validation_strategy is None else self.validation_strategy.best_iteration} if self.validation_strategy: summary['validation_metrics'] = self.validation_strategy.summary() return summary def check_host_number(self): host_num = len(self.component_properties.host_party_idlist) LOGGER.info('host number is {}'.format(host_num)) if host_num != 1: raise ValueError('only 1 host party is allowed') def fit(self, data_inst, validate_data=None): LOGGER.debug('in training, partitions is {}'.format(data_inst.partitions)) LOGGER.info('start to fit a ftl model, ' 'run mode is {},' 'communication efficient mode is {}'.format(self.mode, self.comm_eff)) self.check_host_number() data_loader, self.x_shape, self.data_num, self.overlap_num = self.prepare_data(self.init_intersect_obj(), data_inst, guest_side=True) self.input_dim = self.x_shape[0] # cache data_loader for faster validation self.cache_dataloader[self.get_dataset_key(data_inst)] = data_loader self.partitions = data_inst.partitions LOGGER.debug('self partitions is {}'.format(self.partitions)) self.initialize_nn(input_shape=self.x_shape) self.feat_dim = self.nn._model.output_shape[1] self.constant_k = 1 / self.feat_dim self.validation_strategy = self.init_validation_strategy(data_inst, validate_data) self.callback_meta("loss", "train", MetricMeta(name="train", metric_type="LOSS", extra_metas={"unit_name": "iters"})) # 
compute intermediate result of first epoch self.phi, self.phi_product, self.overlap_ua, self.send_components = self.batch_compute_components(data_loader) for epoch_idx in range(self.epochs): LOGGER.debug('fitting epoch {}'.format(epoch_idx)) host_components = self.exchange_components(self.send_components, epoch_idx=epoch_idx) loss = None for local_round_idx in range(self.local_round): if self.comm_eff: LOGGER.debug('running local iter {}'.format(local_round_idx)) grads = self.compute_backward_gradients(host_components, data_loader, epoch_idx=epoch_idx, local_round=local_round_idx) self.update_nn_weights(grads, data_loader, epoch_idx, decay=self.comm_eff) if local_round_idx == 0: loss = self.compute_loss(host_components, epoch_idx, len(data_loader.get_overlap_indexes())) if local_round_idx + 1 != self.local_round: self.phi, self.overlap_ua = self.compute_phi_and_overlap_ua(data_loader) self.callback_metric("loss", "train", [Metric(epoch_idx, loss)]) self.history_loss.append(loss) # updating variables for next epochs if epoch_idx + 1 == self.epochs: # only need to update phi in last epochs self.phi, _ = self.compute_phi_and_overlap_ua(data_loader) else: # compute phi, phi_product, overlap_ua etc. for next epoch self.phi, self.phi_product, self.overlap_ua, self.send_components = self.batch_compute_components( data_loader) # check early_stopping_rounds if self.validation_strategy is not None: self.validation_strategy.validate(self, epoch_idx) if self.validation_strategy.need_stop(): LOGGER.debug('early stopping triggered') break # check n_iter_no_change if self.n_iter_no_change is True: if self.check_convergence(loss): self.sync_stop_flag(epoch_idx, stop_flag=True) break else: self.sync_stop_flag(epoch_idx, stop_flag=False) LOGGER.debug('fitting epoch {} done, loss is {}'.format(epoch_idx, loss)) self.callback_meta("loss", "train", MetricMeta(name="train", metric_type="LOSS", extra_metas={"Best": min(self.history_loss)})) self.set_summary(self.generate_summary()) LOGGER.debug('fitting ftl model done') def predict(self, data_inst): LOGGER.debug('guest start to predict') data_loader_key = self.get_dataset_key(data_inst) data_inst_ = data_overview.header_alignment(data_inst, self.store_header) if data_loader_key in self.cache_dataloader: data_loader = self.cache_dataloader[data_loader_key] else: data_loader, _, _, _ = self.prepare_data(self.init_intersect_obj(), data_inst_, guest_side=True) self.cache_dataloader[data_loader_key] = data_loader LOGGER.debug('try to get predict u from host, suffix is {}'.format((0, 'host_u'))) host_predicts = self.transfer_variable.predict_host_u.get(idx=0, suffix=(0, 'host_u')) predict_score = np.matmul(host_predicts, self.phi.transpose()) predicts = self.sigmoid(predict_score) # convert to predict scores predicts = list(map(float, predicts)) predict_tb = session.parallelize(zip(data_loader.get_overlap_keys(), predicts,), include_key=True, partition=data_inst.partitions) threshold = self.predict_param.threshold predict_result = self.predict_score_to_output(data_inst_, predict_tb, classes=[0, 1], threshold=threshold) LOGGER.debug('ftl guest prediction done') return predict_result def export_model(self): model_param = self.get_model_param() model_param.phi_a.extend(self.phi.tolist()[0]) return {"FTLGuestMeta": self.get_model_meta(), "FTLHostParam": model_param} def load_model(self, model_dict): model_param = None model_meta = None for _, value in model_dict["model"].items(): for model in value: if model.endswith("Meta"): model_meta = value[model] if 
model.endswith("Param"): model_param = value[model] LOGGER.info("load model") self.set_model_meta(model_meta) self.set_model_param(model_param) self.phi = np.array([model_param.phi_a])
[ "fate_flow.entity.metric.Metric", "fate_flow.entity.metric.MetricMeta" ]
[((1408, 1466), 'federatedml.statistic.intersect.intersect_guest.RsaIntersectionGuest', 'intersect_guest.RsaIntersectionGuest', (['self.intersect_param'], {}), '(self.intersect_param)\n', (1444, 1466), False, 'from federatedml.statistic.intersect import intersect_guest\n'), ((1641, 1671), 'federatedml.util.LOGGER.debug', 'LOGGER.debug', (['"""intersect done"""'], {}), "('intersect done')\n", (1653, 1671), False, 'from federatedml.util import LOGGER\n'), ((1749, 1781), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""check convergence"""'], {}), "('check convergence')\n", (1760, 1781), False, 'from federatedml.util import LOGGER\n'), ((3511, 3545), 'numpy.concatenate', 'np.concatenate', (['overlap_ua'], {'axis': '(0)'}), '(overlap_ua, axis=0)\n', (3525, 3545), True, 'import numpy as np\n'), ((6835, 6896), 'federatedml.nn.hetero_nn.backend.paillier_tensor.PaillierTensor', 'PaillierTensor', ([], {'tb_obj': 'inter_grad', 'partitions': 'self.partitions'}), '(tb_obj=inter_grad, partitions=self.partitions)\n', (6849, 6896), False, 'from federatedml.nn.hetero_nn.backend.paillier_tensor import PaillierTensor\n'), ((7944, 7995), 'numpy.expand_dims', 'np.expand_dims', (['(self.overlap_y_2 * self.phi)'], {'axis': '(1)'}), '(self.overlap_y_2 * self.phi, axis=1)\n', (7958, 7995), True, 'import numpy as np\n'), ((16091, 16129), 'federatedml.util.LOGGER.debug', 'LOGGER.debug', (['"""fitting ftl model done"""'], {}), "('fitting ftl model done')\n", (16103, 16129), False, 'from federatedml.util import LOGGER\n'), ((16174, 16212), 'federatedml.util.LOGGER.debug', 'LOGGER.debug', (['"""guest start to predict"""'], {}), "('guest start to predict')\n", (16186, 16212), False, 'from federatedml.util import LOGGER\n'), ((16294, 16354), 'federatedml.statistic.data_overview.header_alignment', 'data_overview.header_alignment', (['data_inst', 'self.store_header'], {}), '(data_inst, self.store_header)\n', (16324, 16354), False, 'from federatedml.statistic import data_overview\n'), ((17399, 17440), 'federatedml.util.LOGGER.debug', 'LOGGER.debug', (['"""ftl guest prediction done"""'], {}), "('ftl guest prediction done')\n", (17411, 17440), False, 'from federatedml.util import LOGGER\n'), ((18050, 18075), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""load model"""'], {}), "('load model')\n", (18061, 18075), False, 'from federatedml.util import LOGGER\n'), ((18178, 18207), 'numpy.array', 'np.array', (['[model_param.phi_a]'], {}), '([model_param.phi_a])\n', (18186, 18207), True, 'import numpy as np\n'), ((1850, 1889), 'federatedml.optim.convergence.converge_func_factory', 'converge_func_factory', (['"""diff"""', 'self.tol'], {}), "('diff', self.tol)\n", (1871, 1889), False, 'from federatedml.optim.convergence import converge_func_factory\n'), ((6370, 6425), 'federatedml.nn.hetero_nn.backend.paillier_tensor.PaillierTensor', 'PaillierTensor', ([], {'tb_obj': 'grad', 'partitions': 'self.partitions'}), '(tb_obj=grad, partitions=self.partitions)\n', (6384, 6425), False, 'from federatedml.nn.hetero_nn.backend.paillier_tensor import PaillierTensor\n'), ((8549, 8608), 'numpy.concatenate', 'np.concatenate', (['[grad_a_overlap, grad_a_nonoverlap]'], {'axis': '(0)'}), '([grad_a_overlap, grad_a_nonoverlap], axis=0)\n', (8563, 8608), True, 'import numpy as np\n'), ((10061, 10116), 'numpy.sum', 'np.sum', (['(-self.overlap_ua * self.constant_k * overlap_ub)'], {}), '(-self.overlap_ua * self.constant_k * overlap_ub)\n', (10067, 10116), True, 'import numpy as np\n'), ((13130, 13215), 'fate_flow.entity.metric.MetricMeta', 
'MetricMeta', ([], {'name': '"""train"""', 'metric_type': '"""LOSS"""', 'extra_metas': "{'unit_name': 'iters'}"}), "(name='train', metric_type='LOSS', extra_metas={'unit_name': 'iters'}\n )\n", (13140, 13215), False, 'from fate_flow.entity.metric import MetricMeta\n'), ((2702, 2736), 'numpy.sum', 'np.sum', (['(batch_y * ua_batch)'], {'axis': '(0)'}), '(batch_y * ua_batch, axis=0)\n', (2708, 2736), True, 'import numpy as np\n'), ((3653, 3693), 'numpy.expand_dims', 'np.expand_dims', (['self.overlap_y_2'], {'axis': '(2)'}), '(self.overlap_y_2, axis=2)\n', (3667, 3693), True, 'import numpy as np\n'), ((4983, 5036), 'federatedml.nn.hetero_nn.backend.paillier_tensor.PaillierTensor', 'PaillierTensor', ([], {'tb_obj': 'tb', 'partitions': 'self.partitions'}), '(tb_obj=tb, partitions=self.partitions)\n', (4997, 5036), False, 'from federatedml.nn.hetero_nn.backend.paillier_tensor import PaillierTensor\n'), ((8222, 8260), 'numpy.sum', 'np.sum', (['loss_grads_const_part1'], {'axis': '(0)'}), '(loss_grads_const_part1, axis=0)\n', (8228, 8260), True, 'import numpy as np\n'), ((10210, 10241), 'numpy.sum', 'np.sum', (['(self.overlap_y * ub_phi)'], {}), '(self.overlap_y * ub_phi)\n', (10216, 10241), True, 'import numpy as np\n'), ((10266, 10289), 'numpy.sum', 'np.sum', (['(ub_phi * ub_phi)'], {}), '(ub_phi * ub_phi)\n', (10272, 10289), True, 'import numpy as np\n'), ((10330, 10339), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (10336, 10339), True, 'import numpy as np\n'), ((8086, 8126), 'numpy.matmul', 'np.matmul', (['y_overlap_2_phi', 'overlap_ub_2'], {}), '(y_overlap_2_phi, overlap_ub_2)\n', (8095, 8126), True, 'import numpy as np\n'), ((8269, 8307), 'numpy.sum', 'np.sum', (['loss_grads_const_part2'], {'axis': '(0)'}), '(loss_grads_const_part2, axis=0)\n', (8275, 8307), True, 'import numpy as np\n'), ((8907, 8965), 'federatedml.nn.hetero_nn.backend.paillier_tensor.PaillierTensor', 'PaillierTensor', (['self.overlap_y'], {'partitions': 'self.partitions'}), '(self.overlap_y, partitions=self.partitions)\n', (8921, 8965), False, 'from federatedml.nn.hetero_nn.backend.paillier_tensor import PaillierTensor\n'), ((10750, 10781), 'numpy.sum', 'np.sum', (['(self.overlap_y * ub_phi)'], {}), '(self.overlap_y * ub_phi)\n', (10756, 10781), True, 'import numpy as np\n'), ((10870, 10895), 'numpy.matmul', 'np.matmul', (['self.phi', 'ub_2'], {}), '(self.phi, ub_2)\n', (10879, 10895), True, 'import numpy as np\n'), ((10945, 10969), 'numpy.sum', 'np.sum', (['enc_phi_uB_2_phi'], {}), '(enc_phi_uB_2_phi)\n', (10951, 10969), True, 'import numpy as np\n'), ((11010, 11019), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (11016, 11019), True, 'import numpy as np\n'), ((14518, 14541), 'fate_flow.entity.metric.Metric', 'Metric', (['epoch_idx', 'loss'], {}), '(epoch_idx, loss)\n', (14524, 14541), False, 'from fate_flow.entity.metric import Metric\n'), ((15295, 15335), 'federatedml.util.LOGGER.debug', 'LOGGER.debug', (['"""early stopping triggered"""'], {}), "('early stopping triggered')\n", (15307, 15335), False, 'from federatedml.util import LOGGER\n')]
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from fate_arch.common import log
from fate_flow.utils import api_utils

LOGGER = log.getLogger()


class OperationClient(object):
    @classmethod
    def get_job_conf(cls, job_id, role):
        response = api_utils.local_api(
            job_id=job_id,
            method='POST',
            endpoint='/operation/job_config/get',
            json_body={"job_id": job_id, "role": role})
        return response.get("data")

    @classmethod
    def load_json_conf(cls, job_id, config_path):
        response = api_utils.local_api(
            job_id=job_id,
            method='POST',
            endpoint='/operation/json_conf/load',
            json_body={"config_path": config_path})
        return response.get("data")
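A minimal usage sketch for the client above; the job id and config path are hypothetical placeholders, and both calls assume a running fate_flow service reachable through api_utils.local_api:

# hypothetical identifiers; OperationClient is the class defined above
job_conf = OperationClient.get_job_conf(job_id="202204011200000000001", role="guest")
dsl_conf = OperationClient.load_json_conf(job_id="202204011200000000001",
                                          config_path="/path/to/job_dsl.json")
print(job_conf, dsl_conf)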
[ "fate_flow.utils.api_utils.local_api" ]
[((697, 712), 'fate_arch.common.log.getLogger', 'log.getLogger', ([], {}), '()\n', (710, 712), False, 'from fate_arch.common import log\n'), ((823, 959), 'fate_flow.utils.api_utils.local_api', 'api_utils.local_api', ([], {'job_id': 'job_id', 'method': '"""POST"""', 'endpoint': '"""/operation/job_config/get"""', 'json_body': "{'job_id': job_id, 'role': role}"}), "(job_id=job_id, method='POST', endpoint=\n '/operation/job_config/get', json_body={'job_id': job_id, 'role': role})\n", (842, 959), False, 'from fate_flow.utils import api_utils\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import operator import time import typing from fate_arch.common.base_utils import current_timestamp from fate_flow.db.db_models import DB, Job, Task, DataBaseModel from fate_flow.entity.run_status import JobStatus, TaskStatus, EndStatus from fate_flow.utils.log_utils import schedule_logger, sql_logger from fate_flow.utils import schedule_utils import peewee class JobSaver(object): STATUS_FIELDS = ["status", "party_status"] @classmethod def create_job(cls, job_info) -> Job: return cls.create_job_family_entity(Job, job_info) @classmethod def create_task(cls, task_info) -> Task: return cls.create_job_family_entity(Task, task_info) @classmethod @DB.connection_context() def delete_job(cls, job_id): Job.delete().where(Job.f_job_id == job_id) @classmethod def update_job_status(cls, job_info): schedule_logger(job_info["job_id"]).info("try to update job status to {}".format(job_info.get("status"))) update_status = cls.update_status(Job, job_info) if update_status: schedule_logger(job_info["job_id"]).info("update job status successfully") if EndStatus.contains(job_info.get("status")): new_job_info = {} # only update tag for k in ["job_id", "role", "party_id", "tag"]: if k in job_info: new_job_info[k] = job_info[k] if not new_job_info.get("tag"): new_job_info["tag"] = "job_end" cls.update_entity_table(Job, new_job_info) else: schedule_logger(job_info["job_id"]).warning("update job status does not take effect") return update_status @classmethod def update_job(cls, job_info): schedule_logger(job_info["job_id"]).info("try to update job") if "status" in job_info: # Avoid unintentional usage that updates the status del job_info["status"] schedule_logger(job_info["job_id"]).warning("try to update job, pop job status") update_status = cls.update_entity_table(Job, job_info) if update_status: schedule_logger(job_info.get("job_id")).info(f"job update successfully: {job_info}") else: schedule_logger(job_info.get("job_id")).warning(f"job update does not take effect: {job_info}") return update_status @classmethod def update_task_status(cls, task_info): schedule_logger(task_info["job_id"]).info("try to update task {} {} status".format(task_info["task_id"], task_info["task_version"])) update_status = cls.update_status(Task, task_info) if update_status: schedule_logger(task_info["job_id"]).info("update task {} {} status successfully: {}".format(task_info["task_id"], task_info["task_version"], task_info)) else: schedule_logger(task_info["job_id"]).warning("update task {} {} status update does not take effect: {}".format(task_info["task_id"], task_info["task_version"], task_info)) return update_status @classmethod def update_task(cls, task_info): schedule_logger(task_info["job_id"]).info("try to update task {} {}".format(task_info["task_id"], task_info["task_version"])) update_status = cls.update_entity_table(Task, task_info) if update_status: schedule_logger(task_info["job_id"]).info("task {} {} update 
successfully".format(task_info["task_id"], task_info["task_version"])) else: schedule_logger(task_info["job_id"]).warning("task {} {} update does not take effect".format(task_info["task_id"], task_info["task_version"])) return update_status @classmethod @DB.connection_context() def create_job_family_entity(cls, entity_model, entity_info): obj = entity_model() obj.f_create_time = current_timestamp() for k, v in entity_info.items(): attr_name = 'f_%s' % k if hasattr(entity_model, attr_name): setattr(obj, attr_name, v) try: rows = obj.save(force_insert=True) if rows != 1: raise Exception("Create {} failed".format(entity_model)) return obj except peewee.IntegrityError as e: if e.args[0] == 1062 or (isinstance(e.args[0], str) and "UNIQUE constraint failed" in e.args[0]): sql_logger(job_id=entity_info.get("job_id", "fate_flow")).warning(e) else: raise Exception("Create {} failed:\n{}".format(entity_model, e)) except Exception as e: raise Exception("Create {} failed:\n{}".format(entity_model, e)) @classmethod @DB.connection_context() def update_status(cls, entity_model: DataBaseModel, entity_info: dict): query_filters = [] primary_keys = entity_model.get_primary_keys_name() for p_k in primary_keys: query_filters.append(operator.attrgetter(p_k)(entity_model) == entity_info[p_k.lstrip("f").lstrip("_")]) objs = entity_model.select().where(*query_filters) if objs: obj = objs[0] else: raise Exception(f"can not found the {entity_model.__name__} record to update") update_filters = query_filters[:] update_info = {"job_id": entity_info["job_id"]} for status_field in cls.STATUS_FIELDS: if entity_info.get(status_field) and hasattr(entity_model, f"f_{status_field}"): if status_field in ["status", "party_status"]: update_info[status_field] = entity_info[status_field] old_status = getattr(obj, f"f_{status_field}") new_status = update_info[status_field] if_pass = False if isinstance(obj, Task): if TaskStatus.StateTransitionRule.if_pass(src_status=old_status, dest_status=new_status): if_pass = True elif isinstance(obj, Job): if JobStatus.StateTransitionRule.if_pass(src_status=old_status, dest_status=new_status): if_pass = True if EndStatus.contains(new_status) and new_status not in {JobStatus.SUCCESS, JobStatus.CANCELED}: update_filters.append(Job.f_rerun_signal == False) if if_pass: update_filters.append(operator.attrgetter(f"f_{status_field}")(type(obj)) == old_status) else: # not allow update status update_info.pop(status_field) return cls.execute_update(old_obj=obj, model=entity_model, update_info=update_info, update_filters=update_filters) @classmethod @DB.connection_context() def update_entity_table(cls, entity_model, entity_info): query_filters = [] primary_keys = entity_model.get_primary_keys_name() for p_k in primary_keys: query_filters.append(operator.attrgetter(p_k)(entity_model) == entity_info[p_k.lstrip("f").lstrip("_")]) objs = entity_model.select().where(*query_filters) if objs: obj = objs[0] else: raise Exception("can not found the {}".format(entity_model.__name__)) update_filters = query_filters[:] update_info = {} update_info.update(entity_info) for _ in cls.STATUS_FIELDS: # not allow update status fields by this function update_info.pop(_, None) if update_info.get("tag") in {"job_end", "submit_failed"} and hasattr(entity_model, "f_tag"): if obj.f_start_time: update_info["end_time"] = current_timestamp() update_info['elapsed'] = update_info['end_time'] - obj.f_start_time if update_info.get("progress") and hasattr(entity_model, "f_progress") and update_info["progress"] > 0: 
update_filters.append(operator.attrgetter("f_progress")(entity_model) <= update_info["progress"]) return cls.execute_update(old_obj=obj, model=entity_model, update_info=update_info, update_filters=update_filters) @classmethod def execute_update(cls, old_obj, model, update_info, update_filters): update_fields = {} for k, v in update_info.items(): attr_name = 'f_%s' % k if hasattr(model, attr_name) and attr_name not in model.get_primary_keys_name(): update_fields[operator.attrgetter(attr_name)(model)] = v if update_fields: if update_filters: operate = old_obj.update(update_fields).where(*update_filters) else: operate = old_obj.update(update_fields) sql_logger(job_id=update_info.get("job_id", "fate_flow")).info(operate) return operate.execute() > 0 else: return False @classmethod @DB.connection_context() def query_job(cls, reverse=None, order_by=None, **kwargs): return Job.query(reverse=reverse, order_by=order_by, **kwargs) @classmethod @DB.connection_context() def get_tasks_asc(cls, job_id, role, party_id): tasks = Task.query(order_by="create_time", reverse=False, job_id=job_id, role=role, party_id=party_id) tasks_group = cls.get_latest_tasks(tasks=tasks) return tasks_group @classmethod @DB.connection_context() def query_task(cls, only_latest=True, reverse=None, order_by=None, **kwargs) -> typing.List[Task]: tasks = Task.query(reverse=reverse, order_by=order_by, **kwargs) if only_latest: tasks_group = cls.get_latest_tasks(tasks=tasks) return list(tasks_group.values()) else: return tasks @classmethod @DB.connection_context() def check_task(cls, job_id, role, party_id, components: list): filters = [ Task.f_job_id == job_id, Task.f_role == role, Task.f_party_id == party_id, Task.f_component_name << components ] tasks = Task.select().where(*filters) if tasks and len(tasks) == len(components): return True else: return False @classmethod def get_latest_tasks(cls, tasks): tasks_group = {} for task in tasks: task_key = cls.task_key(task_id=task.f_task_id, role=task.f_role, party_id=task.f_party_id) if task_key not in tasks_group: tasks_group[task_key] = task elif task.f_task_version > tasks_group[task_key].f_task_version: # update new version task tasks_group[task_key] = task return tasks_group @classmethod def fill_job_inference_dsl(cls, job_id, role, party_id, dsl_parser, origin_inference_dsl): # must fill dsl for fate serving components_parameters = {} tasks = cls.query_task(job_id=job_id, role=role, party_id=party_id, only_latest=True) for task in tasks: components_parameters[task.f_component_name] = task.f_component_parameters return schedule_utils.fill_inference_dsl(dsl_parser, origin_inference_dsl=origin_inference_dsl, components_parameters=components_parameters) @classmethod def task_key(cls, task_id, role, party_id): return f"{task_id}_{role}_{party_id}" def str_to_time_stamp(time_str): time_array = time.strptime(time_str, "%Y-%m-%d %H:%M:%S") time_stamp = int(time.mktime(time_array) * 1000) return time_stamp
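A hedged sketch of how JobSaver is typically driven; all identifiers below are invented. Note that update_job and update_task deliberately pop the status key, so status changes have to go through update_job_status / update_task_status, which apply the JobStatus / TaskStatus state-transition rules before writing:

# hypothetical values; JobStatus is imported from fate_flow.entity.run_status above
JobSaver.update_job_status({"job_id": "202204011200000000001", "role": "guest",
                            "party_id": 9999, "status": JobStatus.SUCCESS})
JobSaver.update_job({"job_id": "202204011200000000001", "role": "guest",
                     "party_id": 9999, "progress": 100})  # non-status fields only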
[ "fate_flow.utils.log_utils.schedule_logger", "fate_flow.entity.run_status.JobStatus.StateTransitionRule.if_pass", "fate_flow.db.db_models.DB.connection_context", "fate_flow.db.db_models.Task.select", "fate_flow.entity.run_status.EndStatus.contains", "fate_flow.utils.schedule_utils.fill_inference_dsl", "fate_flow.db.db_models.Job.delete", "fate_flow.entity.run_status.TaskStatus.StateTransitionRule.if_pass", "fate_flow.db.db_models.Job.query", "fate_flow.db.db_models.Task.query" ]
[((1317, 1340), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (1338, 1340), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((4363, 4386), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (4384, 4386), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((5348, 5371), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (5369, 5371), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((7456, 7479), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (7477, 7479), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((9602, 9625), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (9623, 9625), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((9783, 9806), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (9804, 9806), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((10076, 10099), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (10097, 10099), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((10468, 10491), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (10489, 10491), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((12111, 12155), 'time.strptime', 'time.strptime', (['time_str', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(time_str, '%Y-%m-%d %H:%M:%S')\n", (12124, 12155), False, 'import time\n'), ((4510, 4529), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (4527, 4529), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((9704, 9759), 'fate_flow.db.db_models.Job.query', 'Job.query', ([], {'reverse': 'reverse', 'order_by': 'order_by'}), '(reverse=reverse, order_by=order_by, **kwargs)\n', (9713, 9759), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((9875, 9973), 'fate_flow.db.db_models.Task.query', 'Task.query', ([], {'order_by': '"""create_time"""', 'reverse': '(False)', 'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), "(order_by='create_time', reverse=False, job_id=job_id, role=role,\n party_id=party_id)\n", (9885, 9973), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((10219, 10275), 'fate_flow.db.db_models.Task.query', 'Task.query', ([], {'reverse': 'reverse', 'order_by': 'order_by'}), '(reverse=reverse, order_by=order_by, **kwargs)\n', (10229, 10275), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((11813, 11951), 'fate_flow.utils.schedule_utils.fill_inference_dsl', 'schedule_utils.fill_inference_dsl', (['dsl_parser'], {'origin_inference_dsl': 'origin_inference_dsl', 'components_parameters': 'components_parameters'}), '(dsl_parser, origin_inference_dsl=\n origin_inference_dsl, components_parameters=components_parameters)\n', (11846, 11951), False, 'from fate_flow.utils import schedule_utils\n'), ((12177, 12200), 'time.mktime', 'time.mktime', (['time_array'], {}), '(time_array)\n', (12188, 12200), False, 'import time\n'), ((1382, 1394), 'fate_flow.db.db_models.Job.delete', 'Job.delete', ([], {}), '()\n', (1392, 1394), False, 'from fate_flow.db.db_models import DB, Job, Task, 
DataBaseModel\n'), ((1493, 1528), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["job_info['job_id']"], {}), "(job_info['job_id'])\n", (1508, 1528), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((2413, 2448), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["job_info['job_id']"], {}), "(job_info['job_id'])\n", (2428, 2448), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((3107, 3143), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["task_info['job_id']"], {}), "(task_info['job_id'])\n", (3122, 3143), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((3781, 3817), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["task_info['job_id']"], {}), "(task_info['job_id'])\n", (3796, 3817), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((8395, 8414), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (8412, 8414), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((10764, 10777), 'fate_flow.db.db_models.Task.select', 'Task.select', ([], {}), '()\n', (10775, 10777), False, 'from fate_flow.db.db_models import DB, Job, Task, DataBaseModel\n'), ((1694, 1729), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["job_info['job_id']"], {}), "(job_info['job_id'])\n", (1709, 1729), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((2237, 2272), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["job_info['job_id']"], {}), "(job_info['job_id'])\n", (2252, 2272), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((2619, 2654), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["job_info['job_id']"], {}), "(job_info['job_id'])\n", (2634, 2654), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((3337, 3373), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["task_info['job_id']"], {}), "(task_info['job_id'])\n", (3352, 3373), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((3517, 3553), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["task_info['job_id']"], {}), "(task_info['job_id'])\n", (3532, 3553), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((4010, 4046), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["task_info['job_id']"], {}), "(task_info['job_id'])\n", (4025, 4046), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((4168, 4204), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (["task_info['job_id']"], {}), "(task_info['job_id'])\n", (4183, 4204), False, 'from fate_flow.utils.log_utils import schedule_logger, sql_logger\n'), ((5601, 5625), 'operator.attrgetter', 'operator.attrgetter', (['p_k'], {}), '(p_k)\n', (5620, 5625), False, 'import operator\n'), ((6502, 6592), 'fate_flow.entity.run_status.TaskStatus.StateTransitionRule.if_pass', 'TaskStatus.StateTransitionRule.if_pass', ([], {'src_status': 'old_status', 'dest_status': 'new_status'}), '(src_status=old_status, dest_status=\n new_status)\n', (6540, 6592), False, 'from fate_flow.entity.run_status import JobStatus, TaskStatus, EndStatus\n'), ((7694, 7718), 'operator.attrgetter', 'operator.attrgetter', (['p_k'], {}), '(p_k)\n', (7713, 7718), False, 'import operator\n'), ((8645, 8678), 'operator.attrgetter', 
'operator.attrgetter', (['"""f_progress"""'], {}), "('f_progress')\n", (8664, 8678), False, 'import operator\n'), ((9162, 9192), 'operator.attrgetter', 'operator.attrgetter', (['attr_name'], {}), '(attr_name)\n', (9181, 9192), False, 'import operator\n'), ((6706, 6795), 'fate_flow.entity.run_status.JobStatus.StateTransitionRule.if_pass', 'JobStatus.StateTransitionRule.if_pass', ([], {'src_status': 'old_status', 'dest_status': 'new_status'}), '(src_status=old_status, dest_status=\n new_status)\n', (6743, 6795), False, 'from fate_flow.entity.run_status import JobStatus, TaskStatus, EndStatus\n'), ((6862, 6892), 'fate_flow.entity.run_status.EndStatus.contains', 'EndStatus.contains', (['new_status'], {}), '(new_status)\n', (6880, 6892), False, 'from fate_flow.entity.run_status import JobStatus, TaskStatus, EndStatus\n'), ((7113, 7153), 'operator.attrgetter', 'operator.attrgetter', (['f"""f_{status_field}"""'], {}), "(f'f_{status_field}')\n", (7132, 7153), False, 'import operator\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import operator import copy import typing from fate_arch.abc import CTableABC from fate_arch.common import EngineType, Party from fate_arch.common.data_utils import default_output_fs_path, default_output_info from fate_arch.computing import ComputingEngine from fate_arch.federation import FederationEngine from fate_arch.storage import StorageEngine from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads from fate_flow.utils.log_utils import schedule_logger from fate_flow.db.db_models import (DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel) from fate_flow.entity import Metric, MetricMeta from fate_flow.entity import DataCache from fate_flow.db.runtime_config import RuntimeConfig from fate_flow.db.job_default_config import JobDefaultConfig from fate_flow.pipelined_model import pipelined_model from fate_flow.manager.cache_manager import CacheManager from fate_flow.manager.metric_manager import MetricManager from fate_arch import storage, session from fate_flow.utils import model_utils, job_utils, data_utils from fate_flow.entity import RunParameters class Tracker(object): """ Tracker for Job/Task/Metric """ METRIC_DATA_PARTITION = 48 METRIC_LIST_PARTITION = 48 JOB_VIEW_PARTITION = 8 def __init__(self, job_id: str, role: str, party_id: int, model_id: str = None, model_version: str = None, component_name: str = None, component_module_name: str = None, task_id: str = None, task_version: int = None, job_parameters: RunParameters = None ): self.job_id = job_id self.job_parameters = job_parameters self.role = role self.party_id = party_id self.component_name = component_name if component_name else job_utils.job_pipeline_component_name() self.module_name = component_module_name if component_module_name else job_utils.job_pipeline_component_module_name() self.task_id = task_id self.task_version = task_version self.model_id = model_id self.party_model_id = model_utils.gen_party_model_id(model_id=model_id, role=role, party_id=party_id) self.model_version = model_version self.pipelined_model = None if self.party_model_id and self.model_version: self.pipelined_model = pipelined_model.PipelinedModel(model_id=self.party_model_id, model_version=self.model_version) self.metric_manager = MetricManager(job_id=self.job_id, role=self.role, party_id=self.party_id, component_name=self.component_name, task_id=self.task_id, task_version=self.task_version) def save_metric_data(self, metric_namespace: str, metric_name: str, metrics: typing.List[Metric], job_level=False): schedule_logger(self.job_id).info( 'save component {} on {} {} {} {} metric data'.format(self.component_name, self.role, self.party_id, metric_namespace, metric_name)) kv = [] for metric in metrics: kv.append((metric.key, metric.value)) self.metric_manager.insert_metrics_into_db(metric_namespace, metric_name, 1, kv, job_level) def get_job_metric_data(self, metric_namespace: 
str, metric_name: str): return self.read_metric_data(metric_namespace=metric_namespace, metric_name=metric_name, job_level=True) def get_metric_data(self, metric_namespace: str, metric_name: str): return self.read_metric_data(metric_namespace=metric_namespace, metric_name=metric_name, job_level=False) @DB.connection_context() def read_metric_data(self, metric_namespace: str, metric_name: str, job_level=False): metrics = [] for k, v in self.metric_manager.read_metrics_from_db(metric_namespace, metric_name, 1, job_level): metrics.append(Metric(key=k, value=v)) return metrics def save_metric_meta(self, metric_namespace: str, metric_name: str, metric_meta: MetricMeta, job_level: bool = False): schedule_logger(self.job_id).info( 'save component {} on {} {} {} {} metric meta'.format(self.component_name, self.role, self.party_id, metric_namespace, metric_name)) self.metric_manager.insert_metrics_into_db(metric_namespace, metric_name, 0, metric_meta.to_dict().items(), job_level) @DB.connection_context() def get_metric_meta(self, metric_namespace: str, metric_name: str, job_level: bool = False): kv = dict() for k, v in self.metric_manager.read_metrics_from_db(metric_namespace, metric_name, 0, job_level): kv[k] = v return MetricMeta(name=kv.get('name'), metric_type=kv.get('metric_type'), extra_metas=kv) def log_job_view(self, view_data: dict): self.metric_manager.insert_metrics_into_db('job', 'job_view', 2, view_data.items(), job_level=True) @DB.connection_context() def get_job_view(self): view_data = {} for k, v in self.metric_manager.read_metrics_from_db('job', 'job_view', 2, job_level=True): view_data[k] = v return view_data def save_output_data(self, computing_table, output_storage_engine, output_storage_address=None, output_table_namespace=None, output_table_name=None, schema=None, token=None, need_read=True): if computing_table: if not output_table_namespace or not output_table_name: output_table_namespace, output_table_name = default_output_info(task_id=self.task_id, task_version=self.task_version, output_type="data") schedule_logger(self.job_id).info( 'persisting the component output temporary table to {} {}'.format(output_table_namespace, output_table_name)) part_of_limit = JobDefaultConfig.output_data_summary_count_limit part_of_data = [] if need_read: match_id_name = computing_table.schema.get("match_id_name") schedule_logger(self.job_id).info(f'match id name:{match_id_name}') for k, v in computing_table.collect(): part_of_data.append((k, v)) part_of_limit -= 1 if part_of_limit == 0: break session.Session.persistent(computing_table=computing_table, namespace=output_table_namespace, name=output_table_name, schema=schema, part_of_data=part_of_data, engine=output_storage_engine, engine_address=output_storage_address, token=token) return output_table_namespace, output_table_name else: schedule_logger(self.job_id).info('task id {} output data table is none'.format(self.task_id)) return None, None def save_table_meta(self, meta): schedule_logger(self.job_id).info(f'start save table meta:{meta}') address = storage.StorageTableMeta.create_address(storage_engine=meta.get("engine"), address_dict=meta.get("address")) table_meta = storage.StorageTableMeta(name=meta.get("name"), namespace=meta.get("namespace"), new=True) table_meta.set_metas(**meta) meta["address"] = address meta["part_of_data"] = deserialize_b64(meta["part_of_data"]) meta["schema"] = deserialize_b64(meta["schema"]) table_meta.create() schedule_logger(self.job_id).info(f'save table meta success') def get_table_meta(self, table_info): 
schedule_logger(self.job_id).info(f'start get table meta:{table_info}') table_meta_dict = storage.StorageTableMeta(namespace=table_info.get("namespace"), name=table_info.get("table_name"), create_address=False).to_dict() schedule_logger(self.job_id).info(f'get table meta success: {table_meta_dict}') table_meta_dict["part_of_data"] = serialize_b64(table_meta_dict["part_of_data"], to_str=True) table_meta_dict["schema"] = serialize_b64(table_meta_dict["schema"], to_str=True) return table_meta_dict def get_output_data_table(self, output_data_infos, tracker_client=None): """ Get component output data table, will run in the task executor process :param output_data_infos: :return: """ output_tables_meta = {} if output_data_infos: for output_data_info in output_data_infos: schedule_logger(self.job_id).info("get task {} {} output table {} {}".format(output_data_info.f_task_id, output_data_info.f_task_version, output_data_info.f_table_namespace, output_data_info.f_table_name)) if not tracker_client: data_table_meta = storage.StorageTableMeta(name=output_data_info.f_table_name, namespace=output_data_info.f_table_namespace) else: data_table_meta = tracker_client.get_table_meta(output_data_info.f_table_name, output_data_info.f_table_namespace) output_tables_meta[output_data_info.f_data_name] = data_table_meta return output_tables_meta def init_pipeline_model(self): self.pipelined_model.create_pipelined_model() def save_output_model(self, model_buffers: dict, model_alias: str): if model_buffers: self.pipelined_model.save_component_model(component_name=self.component_name, component_module_name=self.module_name, model_alias=model_alias, model_buffers=model_buffers) def get_output_model(self, model_alias, parse=True, output_json=False): return self.read_output_model(model_alias=model_alias, parse=parse, output_json=output_json) def write_output_model(self, component_model): self.pipelined_model.write_component_model(component_model) def read_output_model(self, model_alias, parse=True, output_json=False): return self.pipelined_model.read_component_model(component_name=self.component_name, model_alias=model_alias, parse=parse, output_json=output_json) def collect_model(self): model_buffers = self.pipelined_model.collect_models() return model_buffers def save_pipeline_model(self, pipeline_buffer_object): self.pipelined_model.save_pipeline_model(pipeline_buffer_object) def get_pipeline_model(self): return self.pipelined_model.read_pipeline_model() def get_component_define(self): return self.pipelined_model.get_component_define(component_name=self.component_name) def save_output_cache(self, cache_data: typing.Dict[str, CTableABC], cache_meta: dict, cache_name, output_storage_engine, output_storage_address: dict, token=None): output_namespace, output_name = default_output_info(task_id=self.task_id, task_version=self.task_version, output_type="cache") cache = CacheManager.persistent(cache_name, cache_data, cache_meta, output_namespace, output_name, output_storage_engine, output_storage_address, token=token) cache_key = self.tracking_output_cache(cache=cache, cache_name=cache_name) return cache_key def tracking_output_cache(self, cache: DataCache, cache_name: str) -> str: cache_key = CacheManager.record(cache=cache, job_id=self.job_id, role=self.role, party_id=self.party_id, component_name=self.component_name, task_id=self.task_id, task_version=self.task_version, cache_name=cache_name) schedule_logger(self.job_id).info(f"tracking {self.task_id} {self.task_version} output cache, cache key is {cache_key}") return 
cache_key def get_output_cache(self, cache_key=None, cache_name=None): caches = self.query_output_cache(cache_key=cache_key, cache_name=cache_name) if caches: return CacheManager.load(cache=caches[0]) else: return None, None def query_output_cache(self, cache_key=None, cache_name=None) -> typing.List[DataCache]: caches = CacheManager.query(job_id=self.job_id, role=self.role, party_id=self.party_id, component_name=self.component_name, cache_name=cache_name, cache_key=cache_key) group = {} # only the latest version of the task output is retrieved for cache in caches: group_key = f"{cache.task_id}-{cache.name}" if group_key not in group: group[group_key] = cache elif cache.task_version > group[group_key].task_version: group[group_key] = cache return list(group.values()) def query_output_cache_record(self): return CacheManager.query_record(job_id=self.job_id, role=self.role, party_id=self.party_id, component_name=self.component_name, task_version=self.task_version) @DB.connection_context() def insert_summary_into_db(self, summary_data: dict, need_serialize=True): try: summary_model = self.get_dynamic_db_model(ComponentSummary, self.job_id) DB.create_tables([summary_model]) summary_obj = summary_model.get_or_none( summary_model.f_job_id == self.job_id, summary_model.f_component_name == self.component_name, summary_model.f_role == self.role, summary_model.f_party_id == self.party_id, summary_model.f_task_id == self.task_id, summary_model.f_task_version == self.task_version ) if summary_obj: summary_obj.f_summary = serialize_b64(summary_data, to_str=True) if need_serialize else summary_data summary_obj.f_update_time = current_timestamp() summary_obj.save() else: self.get_dynamic_db_model(ComponentSummary, self.job_id).create( f_job_id=self.job_id, f_component_name=self.component_name, f_role=self.role, f_party_id=self.party_id, f_task_id=self.task_id, f_task_version=self.task_version, f_summary=serialize_b64(summary_data, to_str=True), f_create_time=current_timestamp() ) except Exception as e: schedule_logger(self.job_id).exception("An exception where querying summary job id: {} " "component name: {} to database:\n{}".format( self.job_id, self.component_name, e) ) @DB.connection_context() def read_summary_from_db(self, need_deserialize=True): try: summary_model = self.get_dynamic_db_model(ComponentSummary, self.job_id) summary = summary_model.get_or_none( summary_model.f_job_id == self.job_id, summary_model.f_component_name == self.component_name, summary_model.f_role == self.role, summary_model.f_party_id == self.party_id ) if summary: cpn_summary = deserialize_b64(summary.f_summary) if need_deserialize else summary.f_summary else: cpn_summary = "" except Exception as e: schedule_logger(self.job_id).exception(e) raise e return cpn_summary @DB.connection_context() def reload_summary(self, source_tracker): cpn_summary = source_tracker.read_summary_from_db(need_deserialize=False) self.insert_summary_into_db(cpn_summary, need_serialize=False) def log_output_data_info(self, data_name: str, table_namespace: str, table_name: str): self.insert_output_data_info_into_db(data_name=data_name, table_namespace=table_namespace, table_name=table_name) @DB.connection_context() def insert_output_data_info_into_db(self, data_name: str, table_namespace: str, table_name: str): try: tracking_output_data_info = self.get_dynamic_db_model(TrackingOutputDataInfo, self.job_id)() tracking_output_data_info.f_job_id = self.job_id tracking_output_data_info.f_component_name = self.component_name 
tracking_output_data_info.f_task_id = self.task_id tracking_output_data_info.f_task_version = self.task_version tracking_output_data_info.f_data_name = data_name tracking_output_data_info.f_role = self.role tracking_output_data_info.f_party_id = self.party_id tracking_output_data_info.f_table_namespace = table_namespace tracking_output_data_info.f_table_name = table_name tracking_output_data_info.f_create_time = current_timestamp() self.bulk_insert_into_db(self.get_dynamic_db_model(TrackingOutputDataInfo, self.job_id), [tracking_output_data_info.to_dict()]) except Exception as e: schedule_logger(self.job_id).exception("An exception where inserted output data info {} {} {} to database:\n{}".format( data_name, table_namespace, table_name, e )) @DB.connection_context() def bulk_insert_into_db(self, model, data_source): try: try: DB.create_tables([model]) except Exception as e: schedule_logger(self.job_id).exception(e) batch_size = 50 if RuntimeConfig.USE_LOCAL_DATABASE else 1000 for i in range(0, len(data_source), batch_size): with DB.atomic(): model.insert_many(data_source[i:i+batch_size]).execute() return len(data_source) except Exception as e: schedule_logger(self.job_id).exception(e) return 0 def save_as_table(self, computing_table, name, namespace): if self.job_parameters.storage_engine == StorageEngine.LINKIS_HIVE: return self.save_output_data(computing_table=computing_table, output_storage_engine=self.job_parameters.storage_engine, output_storage_address=self.job_parameters.engines_address.get(EngineType.STORAGE, {}), output_table_namespace=namespace, output_table_name=name) @DB.connection_context() def clean_metrics(self): return self.metric_manager.clean_metrics() @DB.connection_context() def get_metric_list(self, job_level: bool = False): return self.metric_manager.get_metric_list(job_level=job_level) @DB.connection_context() def reload_metric(self, source_tracker): return self.metric_manager.reload_metric(source_tracker.metric_manager) def get_output_data_info(self, data_name=None): return self.read_output_data_info_from_db(data_name=data_name) def read_output_data_info_from_db(self, data_name=None): filter_dict = {} filter_dict["job_id"] = self.job_id filter_dict["component_name"] = self.component_name filter_dict["role"] = self.role filter_dict["party_id"] = self.party_id if data_name: filter_dict["data_name"] = data_name return self.query_output_data_infos(**filter_dict) @classmethod @DB.connection_context() def query_output_data_infos(cls, **kwargs) -> typing.List[TrackingOutputDataInfo]: try: tracking_output_data_info_model = cls.get_dynamic_db_model(TrackingOutputDataInfo, kwargs.get("job_id")) filters = [] for f_n, f_v in kwargs.items(): attr_name = 'f_%s' % f_n if hasattr(tracking_output_data_info_model, attr_name): filters.append(operator.attrgetter('f_%s' % f_n)(tracking_output_data_info_model) == f_v) if filters: output_data_infos_tmp = tracking_output_data_info_model.select().where(*filters) else: output_data_infos_tmp = tracking_output_data_info_model.select() output_data_infos_group = {} # only the latest version of the task output data is retrieved for output_data_info in output_data_infos_tmp: group_key = cls.get_output_data_group_key(output_data_info.f_task_id, output_data_info.f_data_name) if group_key not in output_data_infos_group: output_data_infos_group[group_key] = output_data_info elif output_data_info.f_task_version > output_data_infos_group[group_key].f_task_version: output_data_infos_group[group_key] = output_data_info return 
list(output_data_infos_group.values()) except Exception as e: return [] @classmethod def get_output_data_group_key(cls, task_id, data_name): return task_id + data_name def clean_task(self, runtime_conf): schedule_logger(self.job_id).info('clean task {} {} on {} {}'.format(self.task_id, self.task_version, self.role, self.party_id)) try: with session.Session() as sess: # clean up temporary tables computing_temp_namespace = job_utils.generate_session_id(task_id=self.task_id, task_version=self.task_version, role=self.role, party_id=self.party_id) if self.job_parameters.computing_engine == ComputingEngine.EGGROLL: session_options = {"eggroll.session.processors.per.node": 1} else: session_options = {} try: if self.job_parameters.computing_engine != ComputingEngine.LINKIS_SPARK: sess.init_computing(computing_session_id=f"{computing_temp_namespace}_clean", options=session_options) sess.computing.cleanup(namespace=computing_temp_namespace, name="*") schedule_logger(self.job_id).info('clean table by namespace {} on {} {} done'.format(computing_temp_namespace, self.role, self.party_id)) # clean up the last tables of the federation federation_temp_namespace = job_utils.generate_task_version_id(self.task_id, self.task_version) sess.computing.cleanup(namespace=federation_temp_namespace, name="*") schedule_logger(self.job_id).info('clean table by namespace {} on {} {} done'.format(federation_temp_namespace, self.role, self.party_id)) if self.job_parameters.federation_engine == FederationEngine.RABBITMQ and self.role != "local": schedule_logger(self.job_id).info('rabbitmq start clean up') parties = [Party(k, p) for k, v in runtime_conf['role'].items() for p in v] federation_session_id = job_utils.generate_task_version_id(self.task_id, self.task_version) component_parameters_on_party = copy.deepcopy(runtime_conf) component_parameters_on_party["local"] = {"role": self.role, "party_id": self.party_id} sess.init_federation(federation_session_id=federation_session_id, runtime_conf=component_parameters_on_party, service_conf=self.job_parameters.engines_address.get(EngineType.FEDERATION, {})) sess._federation_session.cleanup(parties) schedule_logger(self.job_id).info('rabbitmq clean up success') #TODO optimize the clean process if self.job_parameters.federation_engine == FederationEngine.PULSAR and self.role != "local": schedule_logger(self.job_id).info('start to clean up pulsar topics') parties = [Party(k, p) for k, v in runtime_conf['role'].items() for p in v] federation_session_id = job_utils.generate_task_version_id(self.task_id, self.task_version) component_parameters_on_party = copy.deepcopy(runtime_conf) component_parameters_on_party["local"] = {"role": self.role, "party_id": self.party_id} sess.init_federation(federation_session_id=federation_session_id, runtime_conf=component_parameters_on_party, service_conf=self.job_parameters.engines_address.get(EngineType.FEDERATION, {})) sess._federation_session.cleanup(parties) schedule_logger(self.job_id).info('pulsar topic clean up success') except Exception as e: schedule_logger(self.job_id).exception("cleanup error") finally: sess.destroy_all_sessions() return True except Exception as e: schedule_logger(self.job_id).exception(e) return False @DB.connection_context() def save_machine_learning_model_info(self): try: record = MLModel.get_or_none(MLModel.f_model_version == self.job_id, MLModel.f_role == self.role, MLModel.f_model_id == self.model_id, MLModel.f_party_id == self.party_id) if not record: job = Job.get_or_none(Job.f_job_id == self.job_id) pipeline = 
self.pipelined_model.read_pipeline_model() if job: job_data = job.to_dict() model_info = { 'job_id': job_data.get("f_job_id"), 'role': self.role, 'party_id': self.party_id, 'roles': job_data.get("f_roles"), 'model_id': self.model_id, 'model_version': self.model_version, 'initiator_role': job_data.get('f_initiator_role'), 'initiator_party_id': job_data.get('f_initiator_party_id'), 'runtime_conf': job_data.get('f_runtime_conf'), 'work_mode': job_data.get('f_work_mode'), 'train_dsl': job_data.get('f_dsl'), 'train_runtime_conf': job_data.get('f_train_runtime_conf'), 'size': self.get_model_size(), 'job_status': job_data.get('f_status'), 'parent': pipeline.parent, 'fate_version': pipeline.fate_version, 'runtime_conf_on_party': json_loads(pipeline.runtime_conf_on_party), 'parent_info': json_loads(pipeline.parent_info), 'inference_dsl': json_loads(pipeline.inference_dsl) } model_utils.save_model_info(model_info) schedule_logger(self.job_id).info( 'save {} model info done. model id: {}, model version: {}.'.format(self.job_id, self.model_id, self.model_version)) else: schedule_logger(self.job_id).info( 'save {} model info failed, no job found in db. ' 'model id: {}, model version: {}.'.format(self.job_id, self.model_id, self.model_version)) else: schedule_logger(self.job_id).info('model {} info has already existed in database.'.format(self.job_id)) except Exception as e: schedule_logger(self.job_id).exception(e) @classmethod def get_dynamic_db_model(cls, base, job_id): return type(base.model(table_index=cls.get_dynamic_tracking_table_index(job_id=job_id))) @classmethod def get_dynamic_tracking_table_index(cls, job_id): return job_id[:8] def get_model_size(self): return self.pipelined_model.calculate_model_file_size()
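A short, hedged example of the tracking calls above, roughly as a task-side process would issue them; every identifier is a placeholder and a configured fate_flow database is assumed:

# Metric and MetricMeta are the entities imported at the top of this module
tracker = Tracker(job_id="202204011200000000001", role="guest", party_id=9999,
                  component_name="hetero_ftl_0",
                  task_id="202204011200000000001_hetero_ftl_0", task_version=0)
tracker.save_metric_data("train", "loss", [Metric(key=0, value=0.6931)])
tracker.save_metric_meta("train", "loss",
                         MetricMeta(name="loss", metric_type="LOSS",
                                    extra_metas={"unit_name": "iters"}))
summary = tracker.read_summary_from_db()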
[ "fate_flow.utils.log_utils.schedule_logger", "fate_flow.utils.job_utils.job_pipeline_component_name", "fate_flow.db.db_models.Job.get_or_none", "fate_flow.manager.cache_manager.CacheManager.load", "fate_flow.db.db_models.MachineLearningModelInfo.get_or_none", "fate_flow.manager.cache_manager.CacheManager.query", "fate_flow.db.db_models.DB.create_tables", "fate_flow.manager.cache_manager.CacheManager.persistent", "fate_flow.pipelined_model.pipelined_model.PipelinedModel", "fate_flow.utils.job_utils.job_pipeline_component_module_name", "fate_flow.manager.metric_manager.MetricManager", "fate_flow.db.db_models.DB.connection_context", "fate_flow.utils.job_utils.generate_task_version_id", "fate_flow.manager.cache_manager.CacheManager.query_record", "fate_flow.entity.Metric", "fate_flow.db.db_models.DB.atomic", "fate_flow.utils.model_utils.save_model_info", "fate_flow.utils.model_utils.gen_party_model_id", "fate_flow.utils.job_utils.generate_session_id", "fate_flow.manager.cache_manager.CacheManager.record" ]
[((4422, 4445), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (4443, 4445), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((5274, 5297), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (5295, 5297), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((5802, 5825), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (5823, 5825), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((14636, 14659), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (14657, 14659), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((16380, 16403), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (16401, 16403), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((17180, 17203), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (17201, 17203), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((17623, 17646), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (17644, 17646), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((19004, 19027), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (19025, 19027), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((20158, 20181), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (20179, 20181), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((20268, 20291), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (20289, 20291), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((20426, 20449), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (20447, 20449), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((21131, 21154), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (21152, 21154), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((27754, 27777), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (27775, 27777), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((2863, 2942), 'fate_flow.utils.model_utils.gen_party_model_id', 'model_utils.gen_party_model_id', ([], {'model_id': 'model_id', 'role': 'role', 'party_id': 'party_id'}), '(model_id=model_id, 
role=role, party_id=party_id)\n', (2893, 2942), False, 'from fate_flow.utils import model_utils, job_utils, data_utils\n'), ((3303, 3475), 'fate_flow.manager.metric_manager.MetricManager', 'MetricManager', ([], {'job_id': 'self.job_id', 'role': 'self.role', 'party_id': 'self.party_id', 'component_name': 'self.component_name', 'task_id': 'self.task_id', 'task_version': 'self.task_version'}), '(job_id=self.job_id, role=self.role, party_id=self.party_id,\n component_name=self.component_name, task_id=self.task_id, task_version=\n self.task_version)\n', (3316, 3475), False, 'from fate_flow.manager.metric_manager import MetricManager\n'), ((8519, 8556), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (["meta['part_of_data']"], {}), "(meta['part_of_data'])\n", (8534, 8556), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((8582, 8613), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (["meta['schema']"], {}), "(meta['schema'])\n", (8597, 8613), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((9122, 9181), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (["table_meta_dict['part_of_data']"], {'to_str': '(True)'}), "(table_meta_dict['part_of_data'], to_str=True)\n", (9135, 9181), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((9218, 9271), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (["table_meta_dict['schema']"], {'to_str': '(True)'}), "(table_meta_dict['schema'], to_str=True)\n", (9231, 9271), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((12334, 12432), 'fate_arch.common.data_utils.default_output_info', 'default_output_info', ([], {'task_id': 'self.task_id', 'task_version': 'self.task_version', 'output_type': '"""cache"""'}), "(task_id=self.task_id, task_version=self.task_version,\n output_type='cache')\n", (12353, 12432), False, 'from fate_arch.common.data_utils import default_output_fs_path, default_output_info\n'), ((12445, 12603), 'fate_flow.manager.cache_manager.CacheManager.persistent', 'CacheManager.persistent', (['cache_name', 'cache_data', 'cache_meta', 'output_namespace', 'output_name', 'output_storage_engine', 'output_storage_address'], {'token': 'token'}), '(cache_name, cache_data, cache_meta,\n output_namespace, output_name, output_storage_engine,\n output_storage_address, token=token)\n', (12468, 12603), False, 'from fate_flow.manager.cache_manager import CacheManager\n'), ((12804, 13018), 'fate_flow.manager.cache_manager.CacheManager.record', 'CacheManager.record', ([], {'cache': 'cache', 'job_id': 'self.job_id', 'role': 'self.role', 'party_id': 'self.party_id', 'component_name': 'self.component_name', 'task_id': 'self.task_id', 'task_version': 'self.task_version', 'cache_name': 'cache_name'}), '(cache=cache, job_id=self.job_id, role=self.role,\n party_id=self.party_id, component_name=self.component_name, task_id=\n self.task_id, task_version=self.task_version, cache_name=cache_name)\n', (12823, 13018), False, 'from fate_flow.manager.cache_manager import CacheManager\n'), ((13823, 13990), 'fate_flow.manager.cache_manager.CacheManager.query', 'CacheManager.query', ([], {'job_id': 'self.job_id', 'role': 'self.role', 'party_id': 'self.party_id', 'component_name': 'self.component_name', 'cache_name': 'cache_name', 'cache_key': 'cache_key'}), 
'(job_id=self.job_id, role=self.role, party_id=self.\n party_id, component_name=self.component_name, cache_name=cache_name,\n cache_key=cache_key)\n', (13841, 13990), False, 'from fate_flow.manager.cache_manager import CacheManager\n'), ((14435, 14598), 'fate_flow.manager.cache_manager.CacheManager.query_record', 'CacheManager.query_record', ([], {'job_id': 'self.job_id', 'role': 'self.role', 'party_id': 'self.party_id', 'component_name': 'self.component_name', 'task_version': 'self.task_version'}), '(job_id=self.job_id, role=self.role, party_id=self\n .party_id, component_name=self.component_name, task_version=self.\n task_version)\n', (14460, 14598), False, 'from fate_flow.manager.cache_manager import CacheManager\n'), ((2561, 2600), 'fate_flow.utils.job_utils.job_pipeline_component_name', 'job_utils.job_pipeline_component_name', ([], {}), '()\n', (2598, 2600), False, 'from fate_flow.utils import model_utils, job_utils, data_utils\n'), ((2680, 2726), 'fate_flow.utils.job_utils.job_pipeline_component_module_name', 'job_utils.job_pipeline_component_module_name', ([], {}), '()\n', (2724, 2726), False, 'from fate_flow.utils import model_utils, job_utils, data_utils\n'), ((3112, 3211), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': 'self.party_model_id', 'model_version': 'self.model_version'}), '(model_id=self.party_model_id, model_version=\n self.model_version)\n', (3142, 3211), False, 'from fate_flow.pipelined_model import pipelined_model\n'), ((7279, 7534), 'fate_arch.session.Session.persistent', 'session.Session.persistent', ([], {'computing_table': 'computing_table', 'namespace': 'output_table_namespace', 'name': 'output_table_name', 'schema': 'schema', 'part_of_data': 'part_of_data', 'engine': 'output_storage_engine', 'engine_address': 'output_storage_address', 'token': 'token'}), '(computing_table=computing_table, namespace=\n output_table_namespace, name=output_table_name, schema=schema,\n part_of_data=part_of_data, engine=output_storage_engine, engine_address\n =output_storage_address, token=token)\n', (7305, 7534), False, 'from fate_arch import storage, session\n'), ((13633, 13667), 'fate_flow.manager.cache_manager.CacheManager.load', 'CacheManager.load', ([], {'cache': 'caches[0]'}), '(cache=caches[0])\n', (13650, 13667), False, 'from fate_flow.manager.cache_manager import CacheManager\n'), ((14849, 14882), 'fate_flow.db.db_models.DB.create_tables', 'DB.create_tables', (['[summary_model]'], {}), '([summary_model])\n', (14865, 14882), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((18517, 18536), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (18534, 18536), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((27860, 28030), 'fate_flow.db.db_models.MachineLearningModelInfo.get_or_none', 'MLModel.get_or_none', (['(MLModel.f_model_version == self.job_id)', '(MLModel.f_role == self.role)', '(MLModel.f_model_id == self.model_id)', '(MLModel.f_party_id == self.party_id)'], {}), '(MLModel.f_model_version == self.job_id, MLModel.f_role ==\n self.role, MLModel.f_model_id == self.model_id, MLModel.f_party_id ==\n self.party_id)\n', (27879, 28030), True, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((3596, 3624), 'fate_flow.utils.log_utils.schedule_logger', 
'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (3611, 3624), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((4691, 4713), 'fate_flow.entity.Metric', 'Metric', ([], {'key': 'k', 'value': 'v'}), '(key=k, value=v)\n', (4697, 4713), False, 'from fate_flow.entity import Metric, MetricMeta\n'), ((4895, 4923), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (4910, 4923), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((6408, 6505), 'fate_arch.common.data_utils.default_output_info', 'default_output_info', ([], {'task_id': 'self.task_id', 'task_version': 'self.task_version', 'output_type': '"""data"""'}), "(task_id=self.task_id, task_version=self.task_version,\n output_type='data')\n", (6427, 6505), False, 'from fate_arch.common.data_utils import default_output_fs_path, default_output_info\n'), ((8053, 8081), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (8068, 8081), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((8650, 8678), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (8665, 8678), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((8763, 8791), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (8778, 8791), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((9000, 9028), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (9015, 9028), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((13298, 13326), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (13313, 13326), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((15498, 15517), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (15515, 15517), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((19129, 19154), 'fate_flow.db.db_models.DB.create_tables', 'DB.create_tables', (['[model]'], {}), '([model])\n', (19145, 19154), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((22763, 22791), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (22778, 22791), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((23153, 23170), 'fate_arch.session.Session', 'session.Session', ([], {}), '()\n', (23168, 23170), False, 'from fate_arch import storage, session\n'), ((23267, 23395), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', ([], {'task_id': 'self.task_id', 'task_version': 'self.task_version', 'role': 'self.role', 'party_id': 'self.party_id'}), '(task_id=self.task_id, task_version=self.\n task_version, role=self.role, party_id=self.party_id)\n', (23296, 23395), False, 'from fate_flow.utils import model_utils, job_utils, data_utils\n'), ((28195, 28239), 'fate_flow.db.db_models.Job.get_or_none', 'Job.get_or_none', (['(Job.f_job_id == self.job_id)'], {}), '(Job.f_job_id == self.job_id)\n', (28210, 28239), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((6514, 6542), 
'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (6529, 6542), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((7882, 7910), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (7897, 7910), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((9951, 10062), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'output_data_info.f_table_name', 'namespace': 'output_data_info.f_table_namespace'}), '(name=output_data_info.f_table_name, namespace=\n output_data_info.f_table_namespace)\n', (9975, 10062), False, 'from fate_arch import storage, session\n'), ((15377, 15417), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (['summary_data'], {'to_str': '(True)'}), '(summary_data, to_str=True)\n', (15390, 15417), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((16913, 16947), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (['summary.f_summary'], {}), '(summary.f_summary)\n', (16928, 16947), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((19404, 19415), 'fate_flow.db.db_models.DB.atomic', 'DB.atomic', ([], {}), '()\n', (19413, 19415), False, 'from fate_flow.db.db_models import DB, Job, TrackingOutputDataInfo, ComponentSummary, MachineLearningModelInfo as MLModel\n'), ((29697, 29736), 'fate_flow.utils.model_utils.save_model_info', 'model_utils.save_model_info', (['model_info'], {}), '(model_info)\n', (29724, 29736), False, 'from fate_flow.utils import model_utils, job_utils, data_utils\n'), ((6983, 7011), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (6998, 7011), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((9668, 9696), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (9683, 9696), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((15964, 16004), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (['summary_data'], {'to_str': '(True)'}), '(summary_data, to_str=True)\n', (15977, 16004), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((16040, 16059), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (16057, 16059), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((16121, 16149), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (16136, 16149), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((17085, 17113), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (17100, 17113), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((18757, 18785), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (18772, 18785), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((19573, 19601), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (19588, 19601), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((24673, 24740), 
'fate_flow.utils.job_utils.generate_task_version_id', 'job_utils.generate_task_version_id', (['self.task_id', 'self.task_version'], {}), '(self.task_id, self.task_version)\n', (24707, 24740), False, 'from fate_flow.utils import model_utils, job_utils, data_utils\n'), ((25565, 25632), 'fate_flow.utils.job_utils.generate_task_version_id', 'job_utils.generate_task_version_id', (['self.task_id', 'self.task_version'], {}), '(self.task_id, self.task_version)\n', (25599, 25632), False, 'from fate_flow.utils import model_utils, job_utils, data_utils\n'), ((25689, 25716), 'copy.deepcopy', 'copy.deepcopy', (['runtime_conf'], {}), '(runtime_conf)\n', (25702, 25716), False, 'import copy\n'), ((26696, 26763), 'fate_flow.utils.job_utils.generate_task_version_id', 'job_utils.generate_task_version_id', (['self.task_id', 'self.task_version'], {}), '(self.task_id, self.task_version)\n', (26730, 26763), False, 'from fate_flow.utils import model_utils, job_utils, data_utils\n'), ((26820, 26847), 'copy.deepcopy', 'copy.deepcopy', (['runtime_conf'], {}), '(runtime_conf)\n', (26833, 26847), False, 'import copy\n'), ((27681, 27709), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (27696, 27709), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((29462, 29504), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.runtime_conf_on_party'], {}), '(pipeline.runtime_conf_on_party)\n', (29472, 29504), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((29545, 29577), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.parent_info'], {}), '(pipeline.parent_info)\n', (29555, 29577), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((29620, 29654), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.inference_dsl'], {}), '(pipeline.inference_dsl)\n', (29630, 29654), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64, json_loads\n'), ((30547, 30575), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (30562, 30575), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((30694, 30722), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (30709, 30722), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((19206, 19234), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (19221, 19234), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((25452, 25463), 'fate_arch.common.Party', 'Party', (['k', 'p'], {}), '(k, p)\n', (25457, 25463), False, 'from fate_arch.common import EngineType, Party\n'), ((26583, 26594), 'fate_arch.common.Party', 'Party', (['k', 'p'], {}), '(k, p)\n', (26588, 26594), False, 'from fate_arch.common import EngineType, Party\n'), ((29758, 29786), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (29773, 29786), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((30157, 30185), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (30172, 30185), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((21589, 21622), 'operator.attrgetter', 'operator.attrgetter', (["('f_%s' % 
f_n)"], {}), "('f_%s' % f_n)\n", (21608, 21622), False, 'import operator\n'), ((24196, 24224), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (24211, 24224), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((24859, 24887), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (24874, 24887), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((25356, 25384), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (25371, 25384), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((26224, 26252), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (26239, 26252), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((26479, 26507), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (26494, 26507), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((27355, 27383), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (27370, 27383), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((27481, 27509), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (27496, 27509), False, 'from fate_flow.utils.log_utils import schedule_logger\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from flask import Flask, request from arch.api.utils import file_utils from fate_flow.settings import stat_logger from fate_flow.utils.api_utils import get_json_result from fate_flow.utils import detect_utils from fate_flow.driver.job_controller import JobController manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/<access_module>', methods=['post']) def download_upload(access_module): request_config = request.json required_arguments = ['work_mode', 'namespace', 'table_name'] if access_module == 'upload': required_arguments.extend(['file', 'head', 'partition']) elif access_module == 'download': required_arguments.extend(['output_path']) else: raise Exception('can not support this operating: {}'.format(access_module)) detect_utils.check_config(request_config, required_arguments=required_arguments) data = {} if access_module == "upload": data['table_name'] = request_config["table_name"] data['namespace'] = request_config["namespace"] job_dsl, job_runtime_conf = gen_data_access_job_config(request_config, access_module) job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = JobController.submit_job({'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}) data.update({'job_dsl_path': job_dsl_path, 'job_runtime_conf_path': job_runtime_conf_path, 'board_url': board_url, 'logs_directory': logs_directory}) return get_json_result(job_id=job_id, data=data) def gen_data_access_job_config(config_data, access_module): job_runtime_conf = { "initiator": {}, "job_parameters": {}, "role": {}, "role_parameters": {} } initiator_role = "local" initiator_party_id = 0 job_runtime_conf["initiator"]["role"] = initiator_role job_runtime_conf["initiator"]["party_id"] = initiator_party_id job_runtime_conf["job_parameters"]["work_mode"] = config_data["work_mode"] job_runtime_conf["role"][initiator_role] = [initiator_party_id] job_dsl = { "components": {} } if access_module == 'upload': job_runtime_conf["role_parameters"][initiator_role] = { "upload_0": { "work_mode": [config_data["work_mode"]], "head": [config_data["head"]], "partition": [config_data["partition"]], "file": [config_data["file"]], "namespace": [config_data["namespace"]], "table_name": [config_data["table_name"]], "in_version": [config_data.get("in_version")], } } job_dsl["components"]["upload_0"] = { "module": "Upload" } if access_module == 'download': job_runtime_conf["role_parameters"][initiator_role] = { "download_0": { "work_mode": [config_data["work_mode"]], "delimitor": [config_data.get("delimitor", ",")], "output_path": [config_data["output_path"]], "namespace": [config_data["namespace"]], "table_name": [config_data["table_name"]] } } job_dsl["components"]["download_0"] = { "module": "Download" } return job_dsl, job_runtime_conf
[ "fate_flow.utils.detect_utils.check_config", "fate_flow.driver.job_controller.JobController.submit_job", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.api_utils.get_json_result" ]
[((906, 921), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (911, 921), False, 'from flask import Flask, request\n'), ((985, 1009), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1006, 1009), False, 'from fate_flow.settings import stat_logger\n'), ((1542, 1627), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['request_config'], {'required_arguments': 'required_arguments'}), '(request_config, required_arguments=required_arguments\n )\n', (1567, 1627), False, 'from fate_flow.utils import detect_utils\n'), ((1964, 2052), 'fate_flow.driver.job_controller.JobController.submit_job', 'JobController.submit_job', (["{'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}"], {}), "({'job_dsl': job_dsl, 'job_runtime_conf':\n job_runtime_conf})\n", (1988, 2052), False, 'from fate_flow.driver.job_controller import JobController\n'), ((2231, 2272), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': 'job_id', 'data': 'data'}), '(job_id=job_id, data=data)\n', (2246, 2272), False, 'from fate_flow.utils.api_utils import get_json_result\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from arch.api.utils import log_utils from fate_flow.entity.metric import Metric, MetricMeta, MetricType from federatedml.optim.convergence import converge_func_factory from federatedml.util import consts from federatedrec.matrix_factorization.hetero_matrixfactor.hetero_mf_base import \ HeteroMFBase LOGGER = log_utils.getLogger() class HeteroMFArbiter(HeteroMFBase): """ Third party coordinator of federated training process. """ def __init__(self): super(HeteroMFArbiter, self).__init__() self.role = consts.ARBITER self.loss_consumed = None self.converge_func = None def _init_model(self, params): super()._init_model(params) early_stop = self.model_param.early_stop self.converge_func = converge_func_factory( early_stop.converge_func, early_stop.eps).is_converge self.loss_consumed = early_stop.converge_func != "weight_diff" def callback_loss(self, iter_num, loss): """ call back function of loss and metrics. :param iter_num: iter number. :param loss: loss type. :return: """ metric_meta = MetricMeta(name='train', metric_type="LOSS", extra_metas={ "unit_name": "iters", }) self.callback_meta( metric_name='loss', metric_namespace='train', metric_meta=metric_meta) self.callback_metric(metric_name='loss', metric_namespace='train', metric_data=[Metric(iter_num, loss)]) def _check_monitored_status(self): loss = self.aggregator.aggregate_loss(suffix=self._iter_suffix()) LOGGER.info(f"loss at iter {self.aggregator_iter}: {loss}") self.callback_loss(self.aggregator_iter, loss) if self.loss_consumed: converge_args = (loss,) if self.loss_consumed else (self.aggregator.model,) return self.aggregator.send_converge_status( self.converge_func, converge_args=converge_args, suffix=self._iter_suffix()) return None def fit(self, data_inst): """ Aggregate model for host and guest, then broadcast back. :param data_inst: input param is not used. :return: """ while self.aggregator_iter < self.max_iter: self.aggregator.aggregate_and_broadcast(suffix=self._iter_suffix()) if self._check_monitored_status(): LOGGER.info(f"early stop at iter {self.aggregator_iter}") break self.aggregator_iter += 1 else: LOGGER.warn( f"reach max iter: {self.aggregator_iter}, not converged") def save_model(self): """ :return: aggregated model. """ return self.aggregator.model
[ "fate_flow.entity.metric.MetricMeta", "fate_flow.entity.metric.Metric" ]
[((976, 997), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (995, 997), False, 'from arch.api.utils import log_utils\n'), ((1824, 1909), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""train"""', 'metric_type': '"""LOSS"""', 'extra_metas': "{'unit_name': 'iters'}"}), "(name='train', metric_type='LOSS', extra_metas={'unit_name': 'iters'}\n )\n", (1834, 1909), False, 'from fate_flow.entity.metric import Metric, MetricMeta, MetricType\n'), ((1437, 1500), 'federatedml.optim.convergence.converge_func_factory', 'converge_func_factory', (['early_stop.converge_func', 'early_stop.eps'], {}), '(early_stop.converge_func, early_stop.eps)\n', (1458, 1500), False, 'from federatedml.optim.convergence import converge_func_factory\n'), ((2326, 2348), 'fate_flow.entity.metric.Metric', 'Metric', (['iter_num', 'loss'], {}), '(iter_num, loss)\n', (2332, 2348), False, 'from fate_flow.entity.metric import Metric, MetricMeta, MetricType\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import threading import time from fate_flow.utils.authentication_utils import authentication_check from federatedml.protobuf.generated import pipeline_pb2 from arch.api.utils import dtable_utils from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads from arch.api.utils.log_utils import schedule_logger from fate_flow.db.db_models import Job from fate_flow.driver.task_executor import TaskExecutor from fate_flow.driver.task_scheduler import TaskScheduler from fate_flow.entity.constant_config import JobStatus, TaskStatus from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.manager.tracking_manager import Tracking from fate_flow.utils.service_utils import ServiceUtils from fate_flow.settings import USE_AUTHENTICATION, FATE_BOARD_DASHBOARD_ENDPOINT from fate_flow.utils import detect_utils, job_utils, job_controller_utils from fate_flow.utils.job_utils import generate_job_id, save_job_conf, get_job_dsl_parser, get_job_log_directory class JobController(object): task_executor_pool = None @staticmethod def init(): pass @staticmethod def submit_job(job_data, job_id=None): if not job_id: job_id = generate_job_id() schedule_logger(job_id).info('submit job, job_id {}, body {}'.format(job_id, job_data)) job_dsl = job_data.get('job_dsl', {}) job_runtime_conf = job_data.get('job_runtime_conf', {}) job_utils.check_pipeline_job_runtime_conf(job_runtime_conf) job_parameters = job_runtime_conf['job_parameters'] job_initiator = job_runtime_conf['initiator'] job_type = job_parameters.get('job_type', '') if job_type != 'predict': # generate job model info job_parameters['model_id'] = '#'.join([dtable_utils.all_party_key(job_runtime_conf['role']), 'model']) job_parameters['model_version'] = job_id train_runtime_conf = {} else: detect_utils.check_config(job_parameters, ['model_id', 'model_version']) # get inference dsl from pipeline model as job dsl job_tracker = Tracking(job_id=job_id, role=job_initiator['role'], party_id=job_initiator['party_id'], model_id=job_parameters['model_id'], model_version=job_parameters['model_version']) pipeline_model = job_tracker.get_output_model('pipeline') job_dsl = json_loads(pipeline_model['Pipeline'].inference_dsl) train_runtime_conf = json_loads(pipeline_model['Pipeline'].train_runtime_conf) path_dict = save_job_conf(job_id=job_id, job_dsl=job_dsl, job_runtime_conf=job_runtime_conf, train_runtime_conf=train_runtime_conf, pipeline_dsl=None) job = Job() job.f_job_id = job_id job.f_roles = json_dumps(job_runtime_conf['role']) job.f_work_mode = job_parameters['work_mode'] job.f_initiator_party_id = job_initiator['party_id'] job.f_dsl = json_dumps(job_dsl) job.f_runtime_conf = json_dumps(job_runtime_conf) job.f_train_runtime_conf = json_dumps(train_runtime_conf) job.f_run_ip = '' job.f_status = JobStatus.WAITING job.f_progress = 0 job.f_create_time = current_timestamp() initiator_role = job_initiator['role'] initiator_party_id = 
job_initiator['party_id'] if initiator_party_id not in job_runtime_conf['role'][initiator_role]: schedule_logger(job_id).info("initiator party id error:{}".format(initiator_party_id)) raise Exception("initiator party id error {}".format(initiator_party_id)) get_job_dsl_parser(dsl=job_dsl, runtime_conf=job_runtime_conf, train_runtime_conf=train_runtime_conf) TaskScheduler.distribute_job(job=job, roles=job_runtime_conf['role'], job_initiator=job_initiator) # push into queue job_event = job_utils.job_event(job_id, initiator_role, initiator_party_id) try: RuntimeConfig.JOB_QUEUE.put_event(job_event) except Exception as e: raise Exception('push job into queue failed') schedule_logger(job_id).info( 'submit job successfully, job id is {}, model id is {}'.format(job.f_job_id, job_parameters['model_id'])) board_url = "http://{}:{}{}".format( ServiceUtils.get_item("fateboard", "host"), ServiceUtils.get_item("fateboard", "port"), FATE_BOARD_DASHBOARD_ENDPOINT).format(job_id, job_initiator['role'], job_initiator['party_id']) logs_directory = get_job_log_directory(job_id) return job_id, path_dict['job_dsl_path'], path_dict['job_runtime_conf_path'], logs_directory, \ {'model_id': job_parameters['model_id'],'model_version': job_parameters['model_version']}, board_url @staticmethod def kill_job(job_id, role, party_id, job_initiator, timeout=False, component_name=''): schedule_logger(job_id).info('{} {} get kill job {} {} command'.format(role, party_id, job_id, component_name)) task_info = job_utils.get_task_info(job_id, role, party_id, component_name) tasks = job_utils.query_task(**task_info) job = job_utils.query_job(job_id=job_id) for task in tasks: kill_status = False try: # task clean up runtime_conf = json_loads(job[0].f_runtime_conf) roles = ','.join(runtime_conf['role'].keys()) party_ids = ','.join([','.join([str(j) for j in i]) for i in runtime_conf['role'].values()]) # Tracking(job_id=job_id, role=role, party_id=party_id, task_id=task.f_task_id).clean_task(roles, party_ids) # stop task kill_status = job_utils.kill_task_executor_process(task) # session stop job_utils.start_session_stop(task) except Exception as e: schedule_logger(job_id).exception(e) finally: schedule_logger(job_id).info( 'job {} component {} on {} {} process {} kill {}'.format(job_id, task.f_component_name, task.f_role, task.f_party_id, task.f_run_pid, 'success' if kill_status else 'failed')) status = TaskStatus.FAILED if not timeout else TaskStatus.TIMEOUT if task.f_status != TaskStatus.COMPLETE: task.f_status = status try: TaskExecutor.sync_task_status(job_id=job_id, component_name=task.f_component_name, task_id=task.f_task_id, role=role, party_id=party_id, initiator_party_id=job_initiator.get('party_id', None), task_info=task.to_json(), initiator_role=job_initiator.get('role', None)) except Exception as e: schedule_logger(job_id).exception(e) @staticmethod def update_task_status(job_id, component_name, task_id, role, party_id, task_info): tracker = Tracking(job_id=job_id, role=role, party_id=party_id, component_name=component_name, task_id=task_id) tracker.save_task(role=role, party_id=party_id, task_info=task_info) schedule_logger(job_id).info( 'job {} component {} {} {} status {}'.format(job_id, component_name, role, party_id, task_info.get('f_status', ''))) @staticmethod def query_task_input_args(job_id, task_id, role, party_id, job_args, job_parameters, input_dsl, filter_type=None, filter_attr=None): task_run_args = TaskExecutor.get_task_run_args(job_id=job_id, role=role, party_id=party_id, task_id=task_id, 
job_args=job_args, job_parameters=job_parameters, task_parameters={}, input_dsl=input_dsl, if_save_as_task_input_data=False, filter_type=filter_type, filter_attr=filter_attr ) return task_run_args @staticmethod def update_job_status(job_id, role, party_id, job_info, create=False): job_info['f_run_ip'] = RuntimeConfig.JOB_SERVER_HOST if create: dsl = json_loads(job_info['f_dsl']) runtime_conf = json_loads(job_info['f_runtime_conf']) train_runtime_conf = json_loads(job_info['f_train_runtime_conf']) if USE_AUTHENTICATION: authentication_check(src_role=job_info.get('src_role', None), src_party_id=job_info.get('src_party_id', None), dsl=dsl, runtime_conf=runtime_conf, role=role, party_id=party_id) save_job_conf(job_id=job_id, job_dsl=dsl, job_runtime_conf=runtime_conf, train_runtime_conf=train_runtime_conf, pipeline_dsl=None) job_parameters = runtime_conf['job_parameters'] job_tracker = Tracking(job_id=job_id, role=role, party_id=party_id, model_id=job_parameters["model_id"], model_version=job_parameters["model_version"]) if job_parameters.get("job_type", "") != "predict": job_tracker.init_pipelined_model() roles = json_loads(job_info['f_roles']) partner = {} show_role = {} is_initiator = job_info.get('f_is_initiator', 0) for _role, _role_party in roles.items(): if is_initiator or _role == role: show_role[_role] = show_role.get(_role, []) for _party_id in _role_party: if is_initiator or _party_id == party_id: show_role[_role].append(_party_id) if _role != role: partner[_role] = partner.get(_role, []) partner[_role].extend(_role_party) else: for _party_id in _role_party: if _party_id != party_id: partner[_role] = partner.get(_role, []) partner[_role].append(_party_id) dag = get_job_dsl_parser(dsl=dsl, runtime_conf=runtime_conf, train_runtime_conf=train_runtime_conf) job_args = dag.get_args_input() dataset = {} for _role, _role_party_args in job_args.items(): if is_initiator or _role == role: for _party_index in range(len(_role_party_args)): _party_id = roles[_role][_party_index] if is_initiator or _party_id == party_id: dataset[_role] = dataset.get(_role, {}) dataset[_role][_party_id] = dataset[_role].get(_party_id, {}) for _data_type, _data_location in _role_party_args[_party_index]['args']['data'].items(): dataset[_role][_party_id][_data_type] = '{}.{}'.format(_data_location['namespace'], _data_location['name']) job_tracker.log_job_view({'partner': partner, 'dataset': dataset, 'roles': show_role}) else: job_tracker = Tracking(job_id=job_id, role=role, party_id=party_id) job_tracker.save_job_info(role=role, party_id=party_id, job_info=job_info, create=create) @staticmethod def save_pipeline(job_id, role, party_id, model_id, model_version): schedule_logger(job_id).info('job {} on {} {} start to save pipeline'.format(job_id, role, party_id)) job_dsl, job_runtime_conf, train_runtime_conf = job_utils.get_job_configuration(job_id=job_id, role=role, party_id=party_id) job_parameters = job_runtime_conf.get('job_parameters', {}) job_type = job_parameters.get('job_type', '') if job_type == 'predict': return dag = job_utils.get_job_dsl_parser(dsl=job_dsl, runtime_conf=job_runtime_conf, train_runtime_conf=train_runtime_conf) predict_dsl = dag.get_predict_dsl(role=role) pipeline = pipeline_pb2.Pipeline() pipeline.inference_dsl = json_dumps(predict_dsl, byte=True) pipeline.train_dsl = json_dumps(job_dsl, byte=True) pipeline.train_runtime_conf = json_dumps(job_runtime_conf, byte=True) pipeline.fate_version = RuntimeConfig.get_env("FATE") pipeline.model_id = model_id pipeline.model_version = model_version 
job_tracker = Tracking(job_id=job_id, role=role, party_id=party_id, model_id=model_id, model_version=model_version) job_tracker.save_pipeline(pipelined_buffer_object=pipeline) schedule_logger(job_id).info('job {} on {} {} save pipeline successfully'.format(job_id, role, party_id)) @staticmethod def clean_job(job_id, role, party_id, roles, party_ids): schedule_logger(job_id).info('job {} on {} {} start to clean'.format(job_id, role, party_id)) tasks = job_utils.query_task(job_id=job_id, role=role, party_id=party_id) for task in tasks: try: Tracking(job_id=job_id, role=role, party_id=party_id, task_id=task.f_task_id).clean_task(roles, party_ids) schedule_logger(job_id).info( 'job {} component {} on {} {} clean done'.format(job_id, task.f_component_name, role, party_id)) except Exception as e: schedule_logger(job_id).info( 'job {} component {} on {} {} clean failed'.format(job_id, task.f_component_name, role, party_id)) schedule_logger(job_id).exception(e) schedule_logger(job_id).info('job {} on {} {} clean done'.format(job_id, role, party_id)) @staticmethod def check_job_run(job_id, role,party_id, job_info): return job_controller_utils.job_quantity_constraint(job_id, role, party_id, job_info) @staticmethod def cancel_job(job_id, role, party_id, job_initiator): schedule_logger(job_id).info('{} {} get cancel waiting job {} command'.format(role, party_id, job_id)) jobs = job_utils.query_job(job_id=job_id) if jobs: job = jobs[0] job_runtime_conf = json_loads(job.f_runtime_conf) event = job_utils.job_event(job.f_job_id, job_runtime_conf['initiator']['role'], job_runtime_conf['initiator']['party_id']) try: RuntimeConfig.JOB_QUEUE.del_event(event) except: return False schedule_logger(job_id).info('cancel waiting job successfully, job id is {}'.format(job.f_job_id)) return True else: raise Exception('role {} party id {} cancel waiting job failed, no find jod {}'.format(role, party_id, job_id)) class JobClean(threading.Thread): def run(self): time.sleep(5) jobs = job_utils.query_job(status='running', is_initiator=1) job_ids = set([job.f_job_id for job in jobs]) for job_id in job_ids: schedule_logger(job_id).info('fate flow server start clean job') TaskScheduler.stop(job_id, JobStatus.FAILED)
[ "fate_flow.utils.job_utils.query_job", "fate_flow.utils.job_utils.check_pipeline_job_runtime_conf", "fate_flow.utils.detect_utils.check_config", "fate_flow.utils.job_utils.get_job_dsl_parser", "fate_flow.utils.job_utils.query_task", "fate_flow.utils.job_controller_utils.job_quantity_constraint", "fate_flow.db.db_models.Job", "fate_flow.utils.job_utils.job_event", "fate_flow.entity.runtime_config.RuntimeConfig.JOB_QUEUE.del_event", "fate_flow.entity.runtime_config.RuntimeConfig.JOB_QUEUE.put_event", "fate_flow.utils.job_utils.save_job_conf", "fate_flow.driver.task_scheduler.TaskScheduler.distribute_job", "fate_flow.utils.job_utils.kill_task_executor_process", "fate_flow.utils.job_utils.generate_job_id", "fate_flow.entity.runtime_config.RuntimeConfig.get_env", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.utils.job_utils.get_task_info", "fate_flow.utils.job_utils.get_job_configuration", "fate_flow.driver.task_executor.TaskExecutor.get_task_run_args", "fate_flow.driver.task_scheduler.TaskScheduler.stop", "fate_flow.manager.tracking_manager.Tracking", "fate_flow.utils.service_utils.ServiceUtils.get_item", "fate_flow.utils.job_utils.start_session_stop" ]
[((2048, 2107), 'fate_flow.utils.job_utils.check_pipeline_job_runtime_conf', 'job_utils.check_pipeline_job_runtime_conf', (['job_runtime_conf'], {}), '(job_runtime_conf)\n', (2089, 2107), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((3203, 3346), 'fate_flow.utils.job_utils.save_job_conf', 'save_job_conf', ([], {'job_id': 'job_id', 'job_dsl': 'job_dsl', 'job_runtime_conf': 'job_runtime_conf', 'train_runtime_conf': 'train_runtime_conf', 'pipeline_dsl': 'None'}), '(job_id=job_id, job_dsl=job_dsl, job_runtime_conf=\n job_runtime_conf, train_runtime_conf=train_runtime_conf, pipeline_dsl=None)\n', (3216, 3346), False, 'from fate_flow.utils.job_utils import generate_job_id, save_job_conf, get_job_dsl_parser, get_job_log_directory\n'), ((3493, 3498), 'fate_flow.db.db_models.Job', 'Job', ([], {}), '()\n', (3496, 3498), False, 'from fate_flow.db.db_models import Job\n'), ((3551, 3587), 'arch.api.utils.core_utils.json_dumps', 'json_dumps', (["job_runtime_conf['role']"], {}), "(job_runtime_conf['role'])\n", (3561, 3587), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((3723, 3742), 'arch.api.utils.core_utils.json_dumps', 'json_dumps', (['job_dsl'], {}), '(job_dsl)\n', (3733, 3742), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((3772, 3800), 'arch.api.utils.core_utils.json_dumps', 'json_dumps', (['job_runtime_conf'], {}), '(job_runtime_conf)\n', (3782, 3800), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((3836, 3866), 'arch.api.utils.core_utils.json_dumps', 'json_dumps', (['train_runtime_conf'], {}), '(train_runtime_conf)\n', (3846, 3866), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((3989, 4008), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (4006, 4008), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((4385, 4490), 'fate_flow.utils.job_utils.get_job_dsl_parser', 'get_job_dsl_parser', ([], {'dsl': 'job_dsl', 'runtime_conf': 'job_runtime_conf', 'train_runtime_conf': 'train_runtime_conf'}), '(dsl=job_dsl, runtime_conf=job_runtime_conf,\n train_runtime_conf=train_runtime_conf)\n', (4403, 4490), False, 'from fate_flow.utils.job_utils import generate_job_id, save_job_conf, get_job_dsl_parser, get_job_log_directory\n'), ((4550, 4652), 'fate_flow.driver.task_scheduler.TaskScheduler.distribute_job', 'TaskScheduler.distribute_job', ([], {'job': 'job', 'roles': "job_runtime_conf['role']", 'job_initiator': 'job_initiator'}), "(job=job, roles=job_runtime_conf['role'],\n job_initiator=job_initiator)\n", (4578, 4652), False, 'from fate_flow.driver.task_scheduler import TaskScheduler\n'), ((4696, 4759), 'fate_flow.utils.job_utils.job_event', 'job_utils.job_event', (['job_id', 'initiator_role', 'initiator_party_id'], {}), '(job_id, initiator_role, initiator_party_id)\n', (4715, 4759), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((5367, 5396), 'fate_flow.utils.job_utils.get_job_log_directory', 'get_job_log_directory', (['job_id'], {}), '(job_id)\n', (5388, 5396), False, 'from fate_flow.utils.job_utils import generate_job_id, save_job_conf, get_job_dsl_parser, get_job_log_directory\n'), ((5867, 5930), 'fate_flow.utils.job_utils.get_task_info', 'job_utils.get_task_info', (['job_id', 'role', 'party_id', 'component_name'], {}), '(job_id, role, party_id, 
component_name)\n', (5890, 5930), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((5947, 5980), 'fate_flow.utils.job_utils.query_task', 'job_utils.query_task', ([], {}), '(**task_info)\n', (5967, 5980), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((5995, 6029), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (6014, 6029), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((8008, 8114), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'component_name': 'component_name', 'task_id': 'task_id'}), '(job_id=job_id, role=role, party_id=party_id, component_name=\n component_name, task_id=task_id)\n', (8016, 8114), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((8591, 8871), 'fate_flow.driver.task_executor.TaskExecutor.get_task_run_args', 'TaskExecutor.get_task_run_args', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'task_id': 'task_id', 'job_args': 'job_args', 'job_parameters': 'job_parameters', 'task_parameters': '{}', 'input_dsl': 'input_dsl', 'if_save_as_task_input_data': '(False)', 'filter_type': 'filter_type', 'filter_attr': 'filter_attr'}), '(job_id=job_id, role=role, party_id=party_id,\n task_id=task_id, job_args=job_args, job_parameters=job_parameters,\n task_parameters={}, input_dsl=input_dsl, if_save_as_task_input_data=\n False, filter_type=filter_type, filter_attr=filter_attr)\n', (8621, 8871), False, 'from fate_flow.driver.task_executor import TaskExecutor\n'), ((13201, 13277), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (13232, 13277), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((13555, 13670), 'fate_flow.utils.job_utils.get_job_dsl_parser', 'job_utils.get_job_dsl_parser', ([], {'dsl': 'job_dsl', 'runtime_conf': 'job_runtime_conf', 'train_runtime_conf': 'train_runtime_conf'}), '(dsl=job_dsl, runtime_conf=job_runtime_conf,\n train_runtime_conf=train_runtime_conf)\n', (13583, 13670), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((13825, 13848), 'federatedml.protobuf.generated.pipeline_pb2.Pipeline', 'pipeline_pb2.Pipeline', ([], {}), '()\n', (13846, 13848), False, 'from federatedml.protobuf.generated import pipeline_pb2\n'), ((13882, 13916), 'arch.api.utils.core_utils.json_dumps', 'json_dumps', (['predict_dsl'], {'byte': '(True)'}), '(predict_dsl, byte=True)\n', (13892, 13916), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((13946, 13976), 'arch.api.utils.core_utils.json_dumps', 'json_dumps', (['job_dsl'], {'byte': '(True)'}), '(job_dsl, byte=True)\n', (13956, 13976), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((14015, 14054), 'arch.api.utils.core_utils.json_dumps', 'json_dumps', (['job_runtime_conf'], {'byte': '(True)'}), '(job_runtime_conf, byte=True)\n', (14025, 14054), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((14087, 14116), 'fate_flow.entity.runtime_config.RuntimeConfig.get_env', 'RuntimeConfig.get_env', (['"""FATE"""'], {}), "('FATE')\n", (14108, 14116), False, 'from 
fate_flow.entity.runtime_config import RuntimeConfig\n'), ((14223, 14328), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'model_id': 'model_id', 'model_version': 'model_version'}), '(job_id=job_id, role=role, party_id=party_id, model_id=model_id,\n model_version=model_version)\n', (14231, 14328), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((14736, 14801), 'fate_flow.utils.job_utils.query_task', 'job_utils.query_task', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (14756, 14801), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((15573, 15651), 'fate_flow.utils.job_controller_utils.job_quantity_constraint', 'job_controller_utils.job_quantity_constraint', (['job_id', 'role', 'party_id', 'job_info'], {}), '(job_id, role, party_id, job_info)\n', (15617, 15651), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((15856, 15890), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (15875, 15890), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((16671, 16684), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (16681, 16684), False, 'import time\n'), ((16700, 16753), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {'status': '"""running"""', 'is_initiator': '(1)'}), "(status='running', is_initiator=1)\n", (16719, 16753), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((1816, 1833), 'fate_flow.utils.job_utils.generate_job_id', 'generate_job_id', ([], {}), '()\n', (1831, 1833), False, 'from fate_flow.utils.job_utils import generate_job_id, save_job_conf, get_job_dsl_parser, get_job_log_directory\n'), ((2578, 2650), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['job_parameters', "['model_id', 'model_version']"], {}), "(job_parameters, ['model_id', 'model_version'])\n", (2603, 2650), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((2740, 2921), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': 'job_id', 'role': "job_initiator['role']", 'party_id': "job_initiator['party_id']", 'model_id': "job_parameters['model_id']", 'model_version': "job_parameters['model_version']"}), "(job_id=job_id, role=job_initiator['role'], party_id=job_initiator[\n 'party_id'], model_id=job_parameters['model_id'], model_version=\n job_parameters['model_version'])\n", (2748, 2921), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((3039, 3091), 'arch.api.utils.core_utils.json_loads', 'json_loads', (["pipeline_model['Pipeline'].inference_dsl"], {}), "(pipeline_model['Pipeline'].inference_dsl)\n", (3049, 3091), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((3125, 3182), 'arch.api.utils.core_utils.json_loads', 'json_loads', (["pipeline_model['Pipeline'].train_runtime_conf"], {}), "(pipeline_model['Pipeline'].train_runtime_conf)\n", (3135, 3182), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((4786, 4830), 'fate_flow.entity.runtime_config.RuntimeConfig.JOB_QUEUE.put_event', 'RuntimeConfig.JOB_QUEUE.put_event', (['job_event'], {}), '(job_event)\n', (4819, 4830), False, 'from fate_flow.entity.runtime_config import 
RuntimeConfig\n'), ((9576, 9605), 'arch.api.utils.core_utils.json_loads', 'json_loads', (["job_info['f_dsl']"], {}), "(job_info['f_dsl'])\n", (9586, 9605), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((9633, 9671), 'arch.api.utils.core_utils.json_loads', 'json_loads', (["job_info['f_runtime_conf']"], {}), "(job_info['f_runtime_conf'])\n", (9643, 9671), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((9705, 9749), 'arch.api.utils.core_utils.json_loads', 'json_loads', (["job_info['f_train_runtime_conf']"], {}), "(job_info['f_train_runtime_conf'])\n", (9715, 9749), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((10027, 10161), 'fate_flow.utils.job_utils.save_job_conf', 'save_job_conf', ([], {'job_id': 'job_id', 'job_dsl': 'dsl', 'job_runtime_conf': 'runtime_conf', 'train_runtime_conf': 'train_runtime_conf', 'pipeline_dsl': 'None'}), '(job_id=job_id, job_dsl=dsl, job_runtime_conf=runtime_conf,\n train_runtime_conf=train_runtime_conf, pipeline_dsl=None)\n', (10040, 10161), False, 'from fate_flow.utils.job_utils import generate_job_id, save_job_conf, get_job_dsl_parser, get_job_log_directory\n'), ((10349, 10491), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'model_id': "job_parameters['model_id']", 'model_version': "job_parameters['model_version']"}), "(job_id=job_id, role=role, party_id=party_id, model_id=\n job_parameters['model_id'], model_version=job_parameters['model_version'])\n", (10357, 10491), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((10692, 10723), 'arch.api.utils.core_utils.json_loads', 'json_loads', (["job_info['f_roles']"], {}), "(job_info['f_roles'])\n", (10702, 10723), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((11603, 11701), 'fate_flow.utils.job_utils.get_job_dsl_parser', 'get_job_dsl_parser', ([], {'dsl': 'dsl', 'runtime_conf': 'runtime_conf', 'train_runtime_conf': 'train_runtime_conf'}), '(dsl=dsl, runtime_conf=runtime_conf, train_runtime_conf=\n train_runtime_conf)\n', (11621, 11701), False, 'from fate_flow.utils.job_utils import generate_job_id, save_job_conf, get_job_dsl_parser, get_job_log_directory\n'), ((12792, 12845), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (12800, 12845), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((15965, 15995), 'arch.api.utils.core_utils.json_loads', 'json_loads', (['job.f_runtime_conf'], {}), '(job.f_runtime_conf)\n', (15975, 15995), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((16016, 16135), 'fate_flow.utils.job_utils.job_event', 'job_utils.job_event', (['job.f_job_id', "job_runtime_conf['initiator']['role']", "job_runtime_conf['initiator']['party_id']"], {}), "(job.f_job_id, job_runtime_conf['initiator']['role'],\n job_runtime_conf['initiator']['party_id'])\n", (16035, 16135), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((16928, 16972), 'fate_flow.driver.task_scheduler.TaskScheduler.stop', 'TaskScheduler.stop', (['job_id', 'JobStatus.FAILED'], {}), '(job_id, JobStatus.FAILED)\n', (16946, 16972), False, 'from fate_flow.driver.task_scheduler import TaskScheduler\n'), ((1842, 1865), 
'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (1857, 1865), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((4929, 4952), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (4944, 4952), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((5735, 5758), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (5750, 5758), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((6169, 6202), 'arch.api.utils.core_utils.json_loads', 'json_loads', (['job[0].f_runtime_conf'], {}), '(job[0].f_runtime_conf)\n', (6179, 6202), False, 'from arch.api.utils.core_utils import current_timestamp, json_dumps, json_loads\n'), ((6557, 6599), 'fate_flow.utils.job_utils.kill_task_executor_process', 'job_utils.kill_task_executor_process', (['task'], {}), '(task)\n', (6593, 6599), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((6647, 6681), 'fate_flow.utils.job_utils.start_session_stop', 'job_utils.start_session_stop', (['task'], {}), '(task)\n', (6675, 6681), False, 'from fate_flow.utils import detect_utils, job_utils, job_controller_utils\n'), ((8195, 8218), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (8210, 8218), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((13043, 13066), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (13058, 13066), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((14432, 14455), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14447, 14455), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((14626, 14649), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14641, 14649), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((15393, 15416), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15408, 15416), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((15738, 15761), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15753, 15761), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((16245, 16285), 'fate_flow.entity.runtime_config.RuntimeConfig.JOB_QUEUE.del_event', 'RuntimeConfig.JOB_QUEUE.del_event', (['event'], {}), '(event)\n', (16278, 16285), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((2399, 2451), 'arch.api.utils.dtable_utils.all_party_key', 'dtable_utils.all_party_key', (["job_runtime_conf['role']"], {}), "(job_runtime_conf['role'])\n", (2425, 2451), False, 'from arch.api.utils import dtable_utils\n'), ((4203, 4226), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (4218, 4226), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((5134, 5176), 'fate_flow.utils.service_utils.ServiceUtils.get_item', 'ServiceUtils.get_item', (['"""fateboard"""', '"""host"""'], {}), "('fateboard', 'host')\n", (5155, 5176), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((5190, 5232), 'fate_flow.utils.service_utils.ServiceUtils.get_item', 'ServiceUtils.get_item', (['"""fateboard"""', '"""port"""'], {}), "('fateboard', 'port')\n", (5211, 5232), False, 'from fate_flow.utils.service_utils import 
ServiceUtils\n'), ((16347, 16370), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (16362, 16370), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((16851, 16874), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (16866, 16874), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((6807, 6830), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (6822, 6830), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((14862, 14939), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'task_id': 'task.f_task_id'}), '(job_id=job_id, role=role, party_id=party_id, task_id=task.f_task_id)\n', (14870, 14939), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((14985, 15008), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15000, 15008), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((6733, 6756), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (6748, 6756), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((7846, 7869), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (7861, 7869), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((15183, 15206), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15198, 15206), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((15348, 15371), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15363, 15371), False, 'from arch.api.utils.log_utils import schedule_logger\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import typing from copy import deepcopy from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string from fate_arch.common import FederatedMode from fate_flow.db.db_models import DB, Job, Task from fate_flow.scheduler.federated_scheduler import FederatedScheduler from fate_flow.scheduler.task_scheduler import TaskScheduler from fate_flow.operation.job_saver import JobSaver from fate_flow.entity.types import ResourceOperation from fate_flow.entity import RetCode from fate_flow.entity.run_status import StatusSet, JobStatus, TaskStatus, EndStatus, InterruptStatus from fate_flow.entity.run_status import FederatedSchedulingStatusCode from fate_flow.entity.run_status import SchedulingStatusCode from fate_flow.entity import JobConfigurationBase from fate_flow.operation.job_tracker import Tracker from fate_flow.controller.job_controller import JobController from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter from fate_flow.utils import model_utils from fate_flow.utils.cron import Cron from fate_flow.db.job_default_config import JobDefaultConfig from fate_flow.manager.provider_manager import ProviderManager class DAGScheduler(Cron): @classmethod def submit(cls, submit_job_conf: JobConfigurationBase, job_id: str = None): if not job_id: job_id = job_utils.generate_job_id() submit_result = { "job_id": job_id } schedule_logger(job_id).info(f"submit job, body {submit_job_conf.to_dict()}") try: dsl = submit_job_conf.dsl runtime_conf = deepcopy(submit_job_conf.runtime_conf) job_utils.check_job_runtime_conf(runtime_conf) authentication_utils.check_constraint(runtime_conf, dsl) job_initiator = runtime_conf["initiator"] conf_adapter = JobRuntimeConfigAdapter(runtime_conf) common_job_parameters = conf_adapter.get_common_parameters() if common_job_parameters.job_type != "predict": # generate job model info conf_version = schedule_utils.get_conf_version(runtime_conf) if conf_version != 2: raise Exception("only the v2 version runtime conf is supported") common_job_parameters.model_id = model_utils.gen_model_id(runtime_conf["role"]) common_job_parameters.model_version = job_id train_runtime_conf = {} else: # check predict job parameters detect_utils.check_config(common_job_parameters.to_dict(), ["model_id", "model_version"]) # get inference dsl from pipeline model as job dsl tracker = Tracker(job_id=job_id, role=job_initiator["role"], party_id=job_initiator["party_id"], model_id=common_job_parameters.model_id, model_version=common_job_parameters.model_version) pipeline_model = tracker.get_pipeline_model() train_runtime_conf = json_loads(pipeline_model.train_runtime_conf) if not model_utils.check_if_deployed(role=job_initiator["role"], party_id=job_initiator["party_id"], model_id=common_job_parameters.model_id, 
model_version=common_job_parameters.model_version): raise Exception(f"Model {common_job_parameters.model_id} {common_job_parameters.model_version} has not been deployed yet.") dsl = json_loads(pipeline_model.inference_dsl) dsl = ProviderManager.fill_fate_flow_provider(dsl) job = Job() job.f_job_id = job_id job.f_dsl = dsl job.f_train_runtime_conf = train_runtime_conf job.f_roles = runtime_conf["role"] job.f_initiator_role = job_initiator["role"] job.f_initiator_party_id = job_initiator["party_id"] job.f_role = job_initiator["role"] job.f_party_id = job_initiator["party_id"] path_dict = job_utils.save_job_conf(job_id=job_id, role=job.f_initiator_role, party_id=job.f_initiator_party_id, dsl=dsl, runtime_conf=runtime_conf, runtime_conf_on_party={}, train_runtime_conf=train_runtime_conf, pipeline_dsl=None) if job.f_initiator_party_id not in runtime_conf["role"][job.f_initiator_role]: msg = f"initiator party id {job.f_initiator_party_id} not in roles {runtime_conf['role']}" schedule_logger(job_id).info(msg) raise Exception(msg) # create common parameters on initiator JobController.create_common_job_parameters(job_id=job.f_job_id, initiator_role=job.f_initiator_role, common_job_parameters=common_job_parameters) job.f_runtime_conf = conf_adapter.update_common_parameters(common_parameters=common_job_parameters) dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl, runtime_conf=job.f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf) # initiator runtime conf as template job.f_runtime_conf_on_party = job.f_runtime_conf.copy() job.f_runtime_conf_on_party["job_parameters"] = common_job_parameters.to_dict() status_code, response = FederatedScheduler.create_job(job=job) if status_code != FederatedSchedulingStatusCode.SUCCESS: job.f_status = JobStatus.FAILED job.f_tag = "submit_failed" FederatedScheduler.sync_job_status(job=job) raise Exception("create job failed", response) else: need_run_components = {} for role in response: need_run_components[role] = {} for party, res in response[role].items(): need_run_components[role][party] = [name for name, value in response[role][party]["data"]["components"].items() if value["need_run"] is True] if common_job_parameters.federated_mode == FederatedMode.MULTIPLE: # create the task holder in db to record information of all participants in the initiator for scheduling for role, party_ids in job.f_roles.items(): for party_id in party_ids: if role == job.f_initiator_role and party_id == job.f_initiator_party_id: continue if not need_run_components[role][party_id]: continue JobController.initialize_tasks(job_id=job_id, role=role, party_id=party_id, run_on_this_party=False, initiator_role=job.f_initiator_role, initiator_party_id=job.f_initiator_party_id, job_parameters=common_job_parameters, dsl_parser=dsl_parser, components=need_run_components[role][party_id]) job.f_status = JobStatus.WAITING status_code, response = FederatedScheduler.sync_job_status(job=job) if status_code != FederatedSchedulingStatusCode.SUCCESS: raise Exception("set job to waiting status failed") schedule_logger(job_id).info(f"submit job successfully, job id is {job.f_job_id}, model id is {common_job_parameters.model_id}") logs_directory = job_utils.get_job_log_directory(job_id) result = { "code": RetCode.SUCCESS, "message": "success", "model_info": {"model_id": common_job_parameters.model_id, "model_version": common_job_parameters.model_version}, "logs_directory": logs_directory, "board_url": job_utils.get_board_url(job_id, job_initiator["role"], job_initiator["party_id"]) } 
warn_parameter = JobRuntimeConfigAdapter(submit_job_conf.runtime_conf).check_removed_parameter() if warn_parameter: result["message"] = f"[WARN]{warn_parameter} is removed,it does not take effect!" submit_result.update(result) submit_result.update(path_dict) except Exception as e: submit_result["code"] = RetCode.OPERATING_ERROR submit_result["message"] = exception_to_trace_string(e) schedule_logger(job_id).exception(e) return submit_result @classmethod def update_parameters(cls, job, job_parameters, component_parameters): updated_job_parameters, updated_component_parameters, updated_components = JobController.gen_updated_parameters(job_id=job.f_job_id, initiator_role=job.f_initiator_role, initiator_party_id=job.f_initiator_party_id, input_job_parameters=job_parameters, input_component_parameters=component_parameters) schedule_logger(job.f_job_id).info(f"components {updated_components} parameters has been updated") updated_parameters = { "job_parameters": updated_job_parameters, "component_parameters": updated_component_parameters, "components": updated_components } status_code, response = FederatedScheduler.update_parameter(job, updated_parameters=updated_parameters) if status_code == FederatedSchedulingStatusCode.SUCCESS: return RetCode.SUCCESS, updated_parameters else: return RetCode.OPERATING_ERROR, response def run_do(self): schedule_logger().info("start schedule waiting jobs") jobs = JobSaver.query_job(is_initiator=True, status=JobStatus.WAITING, order_by="create_time", reverse=False) schedule_logger().info(f"have {len(jobs)} waiting jobs") if len(jobs): # FIFO job = jobs[0] schedule_logger().info(f"schedule waiting job {job.f_job_id}") try: self.schedule_waiting_jobs(job=job) except Exception as e: schedule_logger(job.f_job_id).exception(e) schedule_logger(job.f_job_id).error(f"schedule waiting job failed") schedule_logger().info("schedule waiting jobs finished") schedule_logger().info("start schedule running jobs") jobs = JobSaver.query_job(is_initiator=True, status=JobStatus.RUNNING, order_by="create_time", reverse=False) schedule_logger().info(f"have {len(jobs)} running jobs") for job in jobs: schedule_logger().info(f"schedule running job {job.f_job_id}") try: self.schedule_running_job(job=job) except Exception as e: schedule_logger(job.f_job_id).exception(e) schedule_logger(job.f_job_id).error(f"schedule job failed") schedule_logger().info("schedule running jobs finished") # some ready job exit before start schedule_logger().info("start schedule ready jobs") jobs = JobSaver.query_job(is_initiator=True, ready_signal=True, order_by="create_time", reverse=False) schedule_logger().info(f"have {len(jobs)} ready jobs") for job in jobs: schedule_logger().info(f"schedule ready job {job.f_job_id}") try: self.schedule_ready_job(job=job) except Exception as e: schedule_logger(job.f_job_id).exception(e) schedule_logger(job.f_job_id).error(f"schedule ready job failed:\n{e}") schedule_logger().info("schedule ready jobs finished") schedule_logger().info("start schedule rerun jobs") jobs = JobSaver.query_job(is_initiator=True, rerun_signal=True, order_by="create_time", reverse=False) schedule_logger().info(f"have {len(jobs)} rerun jobs") for job in jobs: schedule_logger().info(f"schedule rerun job {job.f_job_id}") try: self.schedule_rerun_job(job=job) except Exception as e: schedule_logger(job.f_job_id).exception(e) schedule_logger(job.f_job_id).error(f"schedule job failed") schedule_logger().info("schedule rerun jobs finished") schedule_logger().info("start schedule end status jobs to update 
status") jobs = JobSaver.query_job(is_initiator=True, status=set(EndStatus.status_list()), end_time=[current_timestamp() - JobDefaultConfig.end_status_job_scheduling_time_limit, current_timestamp()]) schedule_logger().info(f"have {len(jobs)} end status jobs") for job in jobs: schedule_logger().info(f"schedule end status job {job.f_job_id}") try: update_status = self.end_scheduling_updates(job_id=job.f_job_id) if update_status: schedule_logger(job.f_job_id).info(f"try update status by scheduling like running job") else: schedule_logger(job.f_job_id).info(f"the number of updates has been exceeded") continue self.schedule_running_job(job=job, force_sync_status=True) except Exception as e: schedule_logger(job.f_job_id).exception(e) schedule_logger(job.f_job_id).error(f"schedule job failed") schedule_logger().info("schedule end status jobs finished") @classmethod def schedule_waiting_jobs(cls, job): job_id, initiator_role, initiator_party_id, = job.f_job_id, job.f_initiator_role, job.f_initiator_party_id, if not cls.ready_signal(job_id=job_id, set_or_reset=True): schedule_logger(job_id).info(f"job may be handled by another scheduler") return try: if job.f_cancel_signal: job.f_status = JobStatus.CANCELED FederatedScheduler.sync_job_status(job=job) schedule_logger(job_id).info(f"job have cancel signal") return schedule_logger(job_id).info(f"job dependence check") dependence_status_code, federated_dependence_response = FederatedScheduler.dependence_for_job(job=job) schedule_logger(job_id).info(f"dependence check: {dependence_status_code}, {federated_dependence_response}") if dependence_status_code == FederatedSchedulingStatusCode.SUCCESS: apply_status_code, federated_response = FederatedScheduler.resource_for_job(job=job, operation_type=ResourceOperation.APPLY) if apply_status_code == FederatedSchedulingStatusCode.SUCCESS: cls.start_job(job_id=job_id, initiator_role=initiator_role, initiator_party_id=initiator_party_id) else: # rollback resource rollback_party = {} failed_party = {} for dest_role in federated_response.keys(): for dest_party_id in federated_response[dest_role].keys(): retcode = federated_response[dest_role][dest_party_id]["retcode"] if retcode == 0: rollback_party[dest_role] = rollback_party.get(dest_role, []) rollback_party[dest_role].append(dest_party_id) else: failed_party[dest_role] = failed_party.get(dest_role, []) failed_party[dest_role].append(dest_party_id) schedule_logger(job_id).info("job apply resource failed on {}, rollback {}".format( ",".join([",".join([f"{_r}:{_p}" for _p in _ps]) for _r, _ps in failed_party.items()]), ",".join([",".join([f"{_r}:{_p}" for _p in _ps]) for _r, _ps in rollback_party.items()]), )) if rollback_party: return_status_code, federated_response = FederatedScheduler.resource_for_job(job=job, operation_type=ResourceOperation.RETURN, specific_dest=rollback_party) if return_status_code != FederatedSchedulingStatusCode.SUCCESS: schedule_logger(job_id).info(f"job return resource failed:\n{federated_response}") else: schedule_logger(job_id).info(f"job no party should be rollback resource") if apply_status_code == FederatedSchedulingStatusCode.ERROR: cls.stop_job(job_id=job_id, role=initiator_role, party_id=initiator_party_id, stop_status=JobStatus.FAILED) schedule_logger(job_id).info(f"apply resource error, stop job") except Exception as e: raise e finally: update_status = cls.ready_signal(job_id=job_id, set_or_reset=False) schedule_logger(job_id).info(f"reset job ready signal {update_status}") @classmethod def schedule_ready_job(cls, job): job_id, 
initiator_role, initiator_party_id, = job.f_job_id, job.f_initiator_role, job.f_initiator_party_id update_status = cls.ready_signal(job_id=job_id, set_or_reset=False, ready_timeout_ttl=60 * 1000) schedule_logger(job_id).info(f"reset job ready signal {update_status}") @classmethod def schedule_rerun_job(cls, job): if EndStatus.contains(job.f_status): job.f_status = JobStatus.WAITING job.f_ready_signal = False job.f_ready_time = None job.f_rerun_signal = False job.f_progress = 0 job.f_end_time = None job.f_elapsed = None schedule_logger(job.f_job_id).info(f"job has been finished, set waiting to rerun") status, response = FederatedScheduler.sync_job_status(job=job) if status == FederatedSchedulingStatusCode.SUCCESS: cls.rerun_signal(job_id=job.f_job_id, set_or_reset=False) FederatedScheduler.sync_job(job=job, update_fields=["ready_signal", "ready_time", "rerun_signal", "progress", "end_time", "elapsed"]) schedule_logger(job.f_job_id).info(f"job set waiting to rerun successfully") else: schedule_logger(job.f_job_id).info(f"job set waiting to rerun failed") else: cls.rerun_signal(job_id=job.f_job_id, set_or_reset=False) cls.schedule_running_job(job) @classmethod def start_job(cls, job_id, initiator_role, initiator_party_id): schedule_logger(job_id).info(f"try to start job on initiator {initiator_role} {initiator_party_id}") job_info = {} job_info["job_id"] = job_id job_info["role"] = initiator_role job_info["party_id"] = initiator_party_id job_info["status"] = JobStatus.RUNNING job_info["party_status"] = JobStatus.RUNNING job_info["start_time"] = current_timestamp() job_info["tag"] = "end_waiting" jobs = JobSaver.query_job(job_id=job_id, role=initiator_role, party_id=initiator_party_id) if jobs: job = jobs[0] FederatedScheduler.start_job(job=job) schedule_logger(job_id).info(f"start job on initiator {initiator_role} {initiator_party_id}") else: schedule_logger(job_id).error(f"can not found job on initiator {initiator_role} {initiator_party_id}") @classmethod def schedule_running_job(cls, job: Job, force_sync_status=False): schedule_logger(job.f_job_id).info(f"scheduling running job") dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl, runtime_conf=job.f_runtime_conf_on_party, train_runtime_conf=job.f_train_runtime_conf) task_scheduling_status_code, auto_rerun_tasks, tasks = TaskScheduler.schedule(job=job, dsl_parser=dsl_parser, canceled=job.f_cancel_signal) tasks_status = dict([(task.f_component_name, task.f_status) for task in tasks]) new_job_status = cls.calculate_job_status(task_scheduling_status_code=task_scheduling_status_code, tasks_status=tasks_status.values()) if new_job_status == JobStatus.WAITING and job.f_cancel_signal: new_job_status = JobStatus.CANCELED total, finished_count = cls.calculate_job_progress(tasks_status=tasks_status) new_progress = float(finished_count) / total * 100 schedule_logger(job.f_job_id).info(f"job status is {new_job_status}, calculate by task status list: {tasks_status}") if new_job_status != job.f_status or new_progress != job.f_progress: # Make sure to update separately, because these two fields update with anti-weight logic if int(new_progress) - job.f_progress > 0: job.f_progress = new_progress FederatedScheduler.sync_job(job=job, update_fields=["progress"]) cls.update_job_on_initiator(initiator_job=job, update_fields=["progress"]) if new_job_status != job.f_status: job.f_status = new_job_status if EndStatus.contains(job.f_status): FederatedScheduler.save_pipelined_model(job=job) FederatedScheduler.sync_job_status(job=job) 
cls.update_job_on_initiator(initiator_job=job, update_fields=["status"]) if EndStatus.contains(job.f_status): cls.finish(job=job, end_status=job.f_status) if auto_rerun_tasks: schedule_logger(job.f_job_id).info("job have auto rerun tasks") cls.set_job_rerun(job_id=job.f_job_id, initiator_role=job.f_initiator_role, initiator_party_id=job.f_initiator_party_id, tasks=auto_rerun_tasks, auto=True) if force_sync_status: FederatedScheduler.sync_job_status(job=job) schedule_logger(job.f_job_id).info("finish scheduling running job") @classmethod def set_job_rerun(cls, job_id, initiator_role, initiator_party_id, auto, force=False, tasks: typing.List[Task] = None, component_name: typing.Union[str, list] = None): schedule_logger(job_id).info(f"try to rerun job on initiator {initiator_role} {initiator_party_id}") jobs = JobSaver.query_job(job_id=job_id, role=initiator_role, party_id=initiator_party_id) if not jobs: raise RuntimeError(f"can not found job on initiator {initiator_role} {initiator_party_id}") job = jobs[0] dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl, runtime_conf=job.f_runtime_conf_on_party, train_runtime_conf=job.f_train_runtime_conf) if tasks: schedule_logger(job_id).info(f"require {[task.f_component_name for task in tasks]} to rerun") else: task_query = { 'job_id': job_id, 'role': initiator_role, 'party_id': initiator_party_id, } if not component_name or component_name == job_utils.job_pipeline_component_name(): # rerun all tasks schedule_logger(job_id).info("require all component of pipeline to rerun") else: _require_reruns = {component_name} if isinstance(component_name, str) else set(component_name) _should_reruns = _require_reruns.copy() for _cpn in _require_reruns: _components = dsl_parser.get_downstream_dependent_components(_cpn) for _c in _components: _should_reruns.add(_c.get_name()) schedule_logger(job_id).info(f"require {_require_reruns} to rerun, " f"and then found {_should_reruns} need be to rerun") task_query['component_name'] = _should_reruns tasks = JobSaver.query_task(**task_query) job_can_rerun = any([TaskScheduler.prepare_rerun_task( job=job, task=task, dsl_parser=dsl_parser, auto=auto, force=force, ) for task in tasks]) if not job_can_rerun: FederatedScheduler.sync_job_status(job=job) schedule_logger(job_id).info("job no task to rerun") return False schedule_logger(job_id).info("job set rerun signal") status = cls.rerun_signal(job_id=job_id, set_or_reset=True) schedule_logger(job_id).info(f"job set rerun signal {'successfully' if status else 'failed'}") return True @classmethod def update_job_on_initiator(cls, initiator_job: Job, update_fields: list): schedule_logger(initiator_job.f_job_id).info(f"try to update job {update_fields} on initiator") jobs = JobSaver.query_job(job_id=initiator_job.f_job_id) if not jobs: raise Exception("Failed to update job status on initiator") job_info = initiator_job.to_human_model_dict(only_primary_with=update_fields) for field in update_fields: job_info[field] = getattr(initiator_job, "f_%s" % field) for job in jobs: job_info["role"] = job.f_role job_info["party_id"] = job.f_party_id JobSaver.update_job_status(job_info=job_info) JobSaver.update_job(job_info=job_info) schedule_logger(initiator_job.f_job_id).info(f"update job {update_fields} on initiator finished") @classmethod def calculate_job_status(cls, task_scheduling_status_code, tasks_status): # 1. all waiting # 2. have running # 3. waiting + end status # 4. all end status and difference # 5. 
all the same end status tmp_status_set = set(tasks_status) if TaskStatus.PASS in tmp_status_set: tmp_status_set.remove(TaskStatus.PASS) tmp_status_set.add(TaskStatus.SUCCESS) if len(tmp_status_set) == 1: # 1 and 5 return tmp_status_set.pop() else: if TaskStatus.RUNNING in tmp_status_set: # 2 return JobStatus.RUNNING if TaskStatus.WAITING in tmp_status_set: # 3 if task_scheduling_status_code == SchedulingStatusCode.HAVE_NEXT: return JobStatus.RUNNING else: # have waiting with no next pass # have waiting with no next or 4 for status in sorted(InterruptStatus.status_list(), key=lambda s: StatusSet.get_level(status=s), reverse=True): if status in tmp_status_set: return status if tmp_status_set == {TaskStatus.WAITING, TaskStatus.SUCCESS} and task_scheduling_status_code == SchedulingStatusCode.NO_NEXT: return JobStatus.CANCELED raise Exception("calculate job status failed, all task status: {}".format(tasks_status)) @classmethod def calculate_job_progress(cls, tasks_status): total = 0 finished_count = 0 for task_status in tasks_status.values(): total += 1 if EndStatus.contains(task_status): finished_count += 1 return total, finished_count @classmethod def stop_job(cls, job_id, role, party_id, stop_status): schedule_logger(job_id).info(f"request stop job with {stop_status}") jobs = JobSaver.query_job(job_id=job_id, role=role, party_id=party_id, is_initiator=True) if len(jobs) > 0: if stop_status == JobStatus.CANCELED: schedule_logger(job_id).info(f"cancel job") set_cancel_status = cls.cancel_signal(job_id=job_id, set_or_reset=True) schedule_logger(job_id).info(f"set job cancel signal {set_cancel_status}") job = jobs[0] job.f_status = stop_status schedule_logger(job_id).info(f"request stop job with {stop_status} to all party") status_code, response = FederatedScheduler.stop_job(job=jobs[0], stop_status=stop_status) if status_code == FederatedSchedulingStatusCode.SUCCESS: schedule_logger(job_id).info(f"stop job with {stop_status} successfully") return RetCode.SUCCESS, "success" else: initiator_tasks_group = JobSaver.get_tasks_asc(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id) for initiator_task in initiator_tasks_group.values(): TaskScheduler.collect_task_of_all_party(job, initiator_task=initiator_task, set_status=stop_status) schedule_logger(job_id).info(f"stop job with {stop_status} failed, {response}") return RetCode.FEDERATED_ERROR, json_dumps(response) else: return RetCode.SUCCESS, "can not found job" @classmethod @DB.connection_context() def ready_signal(cls, job_id, set_or_reset: bool, ready_timeout_ttl=None): filters = [Job.f_job_id == job_id] if set_or_reset: update_fields = {Job.f_ready_signal: True, Job.f_ready_time: current_timestamp()} filters.append(Job.f_ready_signal == False) else: update_fields = {Job.f_ready_signal: False, Job.f_ready_time: None} filters.append(Job.f_ready_signal == True) if ready_timeout_ttl: filters.append(current_timestamp() - Job.f_ready_time > ready_timeout_ttl) update_status = Job.update(update_fields).where(*filters).execute() > 0 return update_status @classmethod @DB.connection_context() def cancel_signal(cls, job_id, set_or_reset: bool): update_status = Job.update({Job.f_cancel_signal: set_or_reset, Job.f_cancel_time: current_timestamp()}).where(Job.f_job_id == job_id).execute() > 0 return update_status @classmethod @DB.connection_context() def rerun_signal(cls, job_id, set_or_reset: bool): if set_or_reset is True: update_fields = {Job.f_rerun_signal: True, Job.f_cancel_signal: False, Job.f_end_scheduling_updates: 0} elif set_or_reset is 
False: update_fields = {Job.f_rerun_signal: False} else: raise RuntimeError(f"can not support rereun signal {set_or_reset}") update_status = Job.update(update_fields).where(Job.f_job_id == job_id).execute() > 0 return update_status @classmethod @DB.connection_context() def end_scheduling_updates(cls, job_id): operate = Job.update({Job.f_end_scheduling_updates: Job.f_end_scheduling_updates + 1}).where(Job.f_job_id == job_id, Job.f_end_scheduling_updates < JobDefaultConfig.end_status_job_scheduling_updates) update_status = operate.execute() > 0 return update_status @classmethod def finish(cls, job, end_status): schedule_logger(job.f_job_id).info(f"job finished with {end_status}, do something...") cls.stop_job(job_id=job.f_job_id, role=job.f_initiator_role, party_id=job.f_initiator_party_id, stop_status=end_status) FederatedScheduler.clean_job(job=job) schedule_logger(job.f_job_id).info(f"job finished with {end_status}, done")
[ "fate_flow.utils.log_utils.schedule_logger", "fate_flow.utils.job_utils.job_pipeline_component_name", "fate_flow.operation.job_saver.JobSaver.update_job", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status", "fate_flow.entity.run_status.EndStatus.contains", "fate_flow.utils.log_utils.exception_to_trace_string", "fate_flow.utils.schedule_utils.get_conf_version", "fate_flow.db.db_models.Job", "fate_flow.utils.job_utils.get_board_url", "fate_flow.operation.job_saver.JobSaver.get_tasks_asc", "fate_flow.operation.job_saver.JobSaver.query_task", "fate_flow.scheduler.task_scheduler.TaskScheduler.prepare_rerun_task", "fate_flow.utils.job_utils.save_job_conf", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.resource_for_job", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.save_pipelined_model", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.create_job", "fate_flow.manager.provider_manager.ProviderManager.fill_fate_flow_provider", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.start_job", "fate_flow.scheduler.task_scheduler.TaskScheduler.collect_task_of_all_party", "fate_flow.utils.job_utils.generate_job_id", "fate_flow.entity.run_status.EndStatus.status_list", "fate_flow.operation.job_tracker.Tracker", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.utils.config_adapter.JobRuntimeConfigAdapter", "fate_flow.operation.job_saver.JobSaver.update_job_status", "fate_flow.db.db_models.DB.connection_context", "fate_flow.utils.authentication_utils.check_constraint", "fate_flow.scheduler.task_scheduler.TaskScheduler.schedule", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.clean_job", "fate_flow.controller.job_controller.JobController.gen_updated_parameters", "fate_flow.controller.job_controller.JobController.create_common_job_parameters", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.stop_job", "fate_flow.utils.model_utils.gen_model_id", "fate_flow.utils.model_utils.check_if_deployed", "fate_flow.utils.job_utils.check_job_runtime_conf", "fate_flow.controller.job_controller.JobController.initialize_tasks", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.dependence_for_job", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.update_parameter", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job", "fate_flow.entity.run_status.InterruptStatus.status_list", "fate_flow.entity.run_status.StatusSet.get_level", "fate_flow.db.db_models.Job.update" ]
[((31042, 31065), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (31063, 31065), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((31769, 31792), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (31790, 31792), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((32057, 32080), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (32078, 32080), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((32617, 32640), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (32638, 32640), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((10312, 10551), 'fate_flow.controller.job_controller.JobController.gen_updated_parameters', 'JobController.gen_updated_parameters', ([], {'job_id': 'job.f_job_id', 'initiator_role': 'job.f_initiator_role', 'initiator_party_id': 'job.f_initiator_party_id', 'input_job_parameters': 'job_parameters', 'input_component_parameters': 'component_parameters'}), '(job_id=job.f_job_id, initiator_role=\n job.f_initiator_role, initiator_party_id=job.f_initiator_party_id,\n input_job_parameters=job_parameters, input_component_parameters=\n component_parameters)\n', (10348, 10551), False, 'from fate_flow.controller.job_controller import JobController\n'), ((11363, 11442), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.update_parameter', 'FederatedScheduler.update_parameter', (['job'], {'updated_parameters': 'updated_parameters'}), '(job, updated_parameters=updated_parameters)\n', (11398, 11442), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((11730, 11837), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'status': 'JobStatus.WAITING', 'order_by': '"""create_time"""', 'reverse': '(False)'}), "(is_initiator=True, status=JobStatus.WAITING, order_by=\n 'create_time', reverse=False)\n", (11748, 11837), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((12430, 12537), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'status': 'JobStatus.RUNNING', 'order_by': '"""create_time"""', 'reverse': '(False)'}), "(is_initiator=True, status=JobStatus.RUNNING, order_by=\n 'create_time', reverse=False)\n", (12448, 12537), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((13120, 13220), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'ready_signal': '(True)', 'order_by': '"""create_time"""', 'reverse': '(False)'}), "(is_initiator=True, ready_signal=True, order_by=\n 'create_time', reverse=False)\n", (13138, 13220), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((13764, 13864), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'is_initiator': '(True)', 'rerun_signal': '(True)', 'order_by': '"""create_time"""', 'reverse': '(False)'}), "(is_initiator=True, rerun_signal=True, order_by=\n 'create_time', reverse=False)\n", (13782, 13864), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((19451, 19483), 'fate_flow.entity.run_status.EndStatus.contains', 'EndStatus.contains', (['job.f_status'], {}), '(job.f_status)\n', (19469, 19483), False, 'from fate_flow.entity.run_status import StatusSet, JobStatus, TaskStatus, EndStatus, InterruptStatus\n'), ((21002, 21021), 
'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (21019, 21021), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((21077, 21165), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role, party_id=\n initiator_party_id)\n', (21095, 21165), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((21669, 21809), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf_on_party', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf_on_party, train_runtime_conf=job.f_train_runtime_conf)\n', (21702, 21809), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((21978, 22067), 'fate_flow.scheduler.task_scheduler.TaskScheduler.schedule', 'TaskScheduler.schedule', ([], {'job': 'job', 'dsl_parser': 'dsl_parser', 'canceled': 'job.f_cancel_signal'}), '(job=job, dsl_parser=dsl_parser, canceled=job.\n f_cancel_signal)\n', (22000, 22067), False, 'from fate_flow.scheduler.task_scheduler import TaskScheduler\n'), ((23510, 23542), 'fate_flow.entity.run_status.EndStatus.contains', 'EndStatus.contains', (['job.f_status'], {}), '(job.f_status)\n', (23528, 23542), False, 'from fate_flow.entity.run_status import StatusSet, JobStatus, TaskStatus, EndStatus, InterruptStatus\n'), ((24373, 24461), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role, party_id=\n initiator_party_id)\n', (24391, 24461), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((24626, 24766), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf_on_party', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf_on_party, train_runtime_conf=job.f_train_runtime_conf)\n', (24659, 24766), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((26933, 26982), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'initiator_job.f_job_id'}), '(job_id=initiator_job.f_job_id)\n', (26951, 26982), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((29582, 29668), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'is_initiator': '(True)'}), '(job_id=job_id, role=role, party_id=party_id,\n is_initiator=True)\n', (29600, 29668), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((33357, 33394), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.clean_job', 'FederatedScheduler.clean_job', ([], {'job': 'job'}), '(job=job)\n', (33385, 33394), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((2109, 2136), 'fate_flow.utils.job_utils.generate_job_id', 'job_utils.generate_job_id', ([], {}), '()\n', (2134, 2136), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((2366, 2404), 'copy.deepcopy', 'deepcopy', (['submit_job_conf.runtime_conf'], 
{}), '(submit_job_conf.runtime_conf)\n', (2374, 2404), False, 'from copy import deepcopy\n'), ((2417, 2463), 'fate_flow.utils.job_utils.check_job_runtime_conf', 'job_utils.check_job_runtime_conf', (['runtime_conf'], {}), '(runtime_conf)\n', (2449, 2463), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((2476, 2532), 'fate_flow.utils.authentication_utils.check_constraint', 'authentication_utils.check_constraint', (['runtime_conf', 'dsl'], {}), '(runtime_conf, dsl)\n', (2513, 2532), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((2614, 2651), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['runtime_conf'], {}), '(runtime_conf)\n', (2637, 2651), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((4441, 4485), 'fate_flow.manager.provider_manager.ProviderManager.fill_fate_flow_provider', 'ProviderManager.fill_fate_flow_provider', (['dsl'], {}), '(dsl)\n', (4480, 4485), False, 'from fate_flow.manager.provider_manager import ProviderManager\n'), ((4505, 4510), 'fate_flow.db.db_models.Job', 'Job', ([], {}), '()\n', (4508, 4510), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((4927, 5160), 'fate_flow.utils.job_utils.save_job_conf', 'job_utils.save_job_conf', ([], {'job_id': 'job_id', 'role': 'job.f_initiator_role', 'party_id': 'job.f_initiator_party_id', 'dsl': 'dsl', 'runtime_conf': 'runtime_conf', 'runtime_conf_on_party': '{}', 'train_runtime_conf': 'train_runtime_conf', 'pipeline_dsl': 'None'}), '(job_id=job_id, role=job.f_initiator_role, party_id=\n job.f_initiator_party_id, dsl=dsl, runtime_conf=runtime_conf,\n runtime_conf_on_party={}, train_runtime_conf=train_runtime_conf,\n pipeline_dsl=None)\n', (4950, 5160), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((5835, 5989), 'fate_flow.controller.job_controller.JobController.create_common_job_parameters', 'JobController.create_common_job_parameters', ([], {'job_id': 'job.f_job_id', 'initiator_role': 'job.f_initiator_role', 'common_job_parameters': 'common_job_parameters'}), '(job_id=job.f_job_id,\n initiator_role=job.f_initiator_role, common_job_parameters=\n common_job_parameters)\n', (5877, 5989), False, 'from fate_flow.controller.job_controller import JobController\n'), ((6118, 6249), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf)\n', (6151, 6249), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((6610, 6648), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.create_job', 'FederatedScheduler.create_job', ([], {'job': 'job'}), '(job=job)\n', (6639, 6648), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((9129, 9168), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', (['job_id'], {}), '(job_id)\n', (9160, 9168), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((16210, 16256), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.dependence_for_job', 'FederatedScheduler.dependence_for_job', ([], {'job': 'job'}), '(job=job)\n', (16247, 16256), False, 
'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((19868, 19911), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (19902, 19911), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((21216, 21253), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.start_job', 'FederatedScheduler.start_job', ([], {'job': 'job'}), '(job=job)\n', (21244, 21253), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((23916, 23959), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (23950, 23959), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((26081, 26114), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {}), '(**task_query)\n', (26100, 26114), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((26330, 26373), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (26364, 26373), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((27396, 27441), 'fate_flow.operation.job_saver.JobSaver.update_job_status', 'JobSaver.update_job_status', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (27422, 27441), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((27454, 27492), 'fate_flow.operation.job_saver.JobSaver.update_job', 'JobSaver.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (27473, 27492), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((29306, 29337), 'fate_flow.entity.run_status.EndStatus.contains', 'EndStatus.contains', (['task_status'], {}), '(task_status)\n', (29324, 29337), False, 'from fate_flow.entity.run_status import StatusSet, JobStatus, TaskStatus, EndStatus, InterruptStatus\n'), ((30175, 30240), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.stop_job', 'FederatedScheduler.stop_job', ([], {'job': 'jobs[0]', 'stop_status': 'stop_status'}), '(job=jobs[0], stop_status=stop_status)\n', (30202, 30240), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((2210, 2233), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2225, 2233), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((2859, 2904), 'fate_flow.utils.schedule_utils.get_conf_version', 'schedule_utils.get_conf_version', (['runtime_conf'], {}), '(runtime_conf)\n', (2890, 2904), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((3077, 3123), 'fate_flow.utils.model_utils.gen_model_id', 'model_utils.gen_model_id', (["runtime_conf['role']"], {}), "(runtime_conf['role'])\n", (3101, 3123), False, 'from fate_flow.utils import model_utils\n'), ((3489, 3677), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': "job_initiator['role']", 'party_id': "job_initiator['party_id']", 'model_id': 'common_job_parameters.model_id', 'model_version': 'common_job_parameters.model_version'}), "(job_id=job_id, role=job_initiator['role'], party_id=job_initiator[\n 'party_id'], model_id=common_job_parameters.model_id, model_version=\n common_job_parameters.model_version)\n", 
(3496, 3677), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((3801, 3846), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline_model.train_runtime_conf'], {}), '(pipeline_model.train_runtime_conf)\n', (3811, 3846), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((4382, 4422), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline_model.inference_dsl'], {}), '(pipeline_model.inference_dsl)\n', (4392, 4422), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((6826, 6869), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (6860, 6869), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((8769, 8812), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (8803, 8812), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((9480, 9566), 'fate_flow.utils.job_utils.get_board_url', 'job_utils.get_board_url', (['job_id', "job_initiator['role']", "job_initiator['party_id']"], {}), "(job_id, job_initiator['role'], job_initiator[\n 'party_id'])\n", (9503, 9566), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((10029, 10057), 'fate_flow.utils.log_utils.exception_to_trace_string', 'exception_to_trace_string', (['e'], {}), '(e)\n', (10054, 10057), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((11026, 11055), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (11041, 11055), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((11661, 11678), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (11676, 11678), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((11841, 11858), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (11856, 11858), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((12295, 12312), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12310, 12312), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((12361, 12378), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12376, 12378), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((12541, 12558), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12556, 12558), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((12944, 12961), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12959, 12961), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13053, 13070), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (13068, 13070), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13224, 13241), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (13239, 13241), False, 'from 
fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13633, 13650), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (13648, 13650), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13697, 13714), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (13712, 13714), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13868, 13885), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (13883, 13885), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((14265, 14282), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (14280, 14282), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((14329, 14346), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (14344, 14346), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((14610, 14627), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (14625, 14627), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((15416, 15433), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (15431, 15433), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((15937, 15980), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (15971, 15980), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((16514, 16603), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.resource_for_job', 'FederatedScheduler.resource_for_job', ([], {'job': 'job', 'operation_type': 'ResourceOperation.APPLY'}), '(job=job, operation_type=\n ResourceOperation.APPLY)\n', (16549, 16603), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((19312, 19335), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (19327, 19335), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((20066, 20203), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job', 'FederatedScheduler.sync_job', ([], {'job': 'job', 'update_fields': "['ready_signal', 'ready_time', 'rerun_signal', 'progress', 'end_time',\n 'elapsed']"}), "(job=job, update_fields=['ready_signal',\n 'ready_time', 'rerun_signal', 'progress', 'end_time', 'elapsed'])\n", (20093, 20203), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((20618, 20641), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (20633, 20641), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((21585, 21614), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (21600, 21614), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((22567, 22596), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (22582, 22596), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), 
((22979, 23043), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job', 'FederatedScheduler.sync_job', ([], {'job': 'job', 'update_fields': "['progress']"}), "(job=job, update_fields=['progress'])\n", (23006, 23043), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((23247, 23279), 'fate_flow.entity.run_status.EndStatus.contains', 'EndStatus.contains', (['job.f_status'], {}), '(job.f_status)\n', (23265, 23279), False, 'from fate_flow.entity.run_status import StatusSet, JobStatus, TaskStatus, EndStatus, InterruptStatus\n'), ((23366, 23409), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_job_status', 'FederatedScheduler.sync_job_status', ([], {'job': 'job'}), '(job=job)\n', (23400, 23409), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((23968, 23997), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (23983, 23997), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((24256, 24279), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (24271, 24279), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((26145, 26248), 'fate_flow.scheduler.task_scheduler.TaskScheduler.prepare_rerun_task', 'TaskScheduler.prepare_rerun_task', ([], {'job': 'job', 'task': 'task', 'dsl_parser': 'dsl_parser', 'auto': 'auto', 'force': 'force'}), '(job=job, task=task, dsl_parser=dsl_parser,\n auto=auto, force=force)\n', (26177, 26248), False, 'from fate_flow.scheduler.task_scheduler import TaskScheduler\n'), ((26473, 26496), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (26488, 26496), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((26602, 26625), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (26617, 26625), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((26822, 26861), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['initiator_job.f_job_id'], {}), '(initiator_job.f_job_id)\n', (26837, 26861), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((27501, 27540), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['initiator_job.f_job_id'], {}), '(initiator_job.f_job_id)\n', (27516, 27540), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((28651, 28680), 'fate_flow.entity.run_status.InterruptStatus.status_list', 'InterruptStatus.status_list', ([], {}), '()\n', (28678, 28680), False, 'from fate_flow.entity.run_status import StatusSet, JobStatus, TaskStatus, EndStatus, InterruptStatus\n'), ((29498, 29521), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (29513, 29521), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((30508, 30598), 'fate_flow.operation.job_saver.JobSaver.get_tasks_asc', 'JobSaver.get_tasks_asc', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(job_id=job.f_job_id, role=job.f_role, party_id=job.\n f_party_id)\n', (30530, 30598), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((31286, 31305), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', 
([], {}), '()\n', (31303, 31305), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((32704, 32780), 'fate_flow.db.db_models.Job.update', 'Job.update', (['{Job.f_end_scheduling_updates: Job.f_end_scheduling_updates + 1}'], {}), '({Job.f_end_scheduling_updates: Job.f_end_scheduling_updates + 1})\n', (32714, 32780), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((33134, 33163), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (33149, 33163), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((33403, 33432), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (33418, 33432), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((3870, 4064), 'fate_flow.utils.model_utils.check_if_deployed', 'model_utils.check_if_deployed', ([], {'role': "job_initiator['role']", 'party_id': "job_initiator['party_id']", 'model_id': 'common_job_parameters.model_id', 'model_version': 'common_job_parameters.model_version'}), "(role=job_initiator['role'], party_id=\n job_initiator['party_id'], model_id=common_job_parameters.model_id,\n model_version=common_job_parameters.model_version)\n", (3899, 4064), False, 'from fate_flow.utils import model_utils\n'), ((8971, 8994), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (8986, 8994), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((9605, 9658), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['submit_job_conf.runtime_conf'], {}), '(submit_job_conf.runtime_conf)\n', (9628, 9658), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((11977, 11994), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (11992, 11994), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((12635, 12652), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12650, 12652), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13316, 13333), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (13331, 13333), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13960, 13977), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (13975, 13977), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((14467, 14490), 'fate_flow.entity.run_status.EndStatus.status_list', 'EndStatus.status_list', ([], {}), '()\n', (14488, 14490), False, 'from fate_flow.entity.run_status import StatusSet, JobStatus, TaskStatus, EndStatus, InterruptStatus\n'), ((14580, 14599), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (14597, 14599), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((14707, 14724), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (14722, 14724), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((15730, 15753), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15745, 15753), False, 'from 
fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((16088, 16111), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (16103, 16111), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((16269, 16292), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (16284, 16292), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((18956, 18979), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (18971, 18979), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((19754, 19783), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (19769, 19783), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((21266, 21289), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (21281, 21289), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((21386, 21409), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (21401, 21409), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((23301, 23349), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.save_pipelined_model', 'FederatedScheduler.save_pipelined_model', ([], {'job': 'job'}), '(job=job)\n', (23340, 23349), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((23642, 23671), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (23657, 23671), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((24903, 24926), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (24918, 24926), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((25230, 25269), 'fate_flow.utils.job_utils.job_pipeline_component_name', 'job_utils.job_pipeline_component_name', ([], {}), '()\n', (25267, 25269), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils, authentication_utils\n'), ((26386, 26409), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (26401, 26409), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((30057, 30080), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (30072, 30080), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((30684, 30787), 'fate_flow.scheduler.task_scheduler.TaskScheduler.collect_task_of_all_party', 'TaskScheduler.collect_task_of_all_party', (['job'], {'initiator_task': 'initiator_task', 'set_status': 'stop_status'}), '(job, initiator_task=initiator_task,\n set_status=stop_status)\n', (30723, 30787), False, 'from fate_flow.scheduler.task_scheduler import TaskScheduler\n'), ((30928, 30948), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['response'], {}), '(response)\n', (30938, 30948), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((5699, 5722), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], 
{}), '(job_id)\n', (5714, 5722), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((10070, 10093), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (10085, 10093), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((14503, 14522), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (14520, 14522), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((15997, 16020), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (16012, 16020), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((18056, 18176), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.resource_for_job', 'FederatedScheduler.resource_for_job', ([], {'job': 'job', 'operation_type': 'ResourceOperation.RETURN', 'specific_dest': 'rollback_party'}), '(job=job, operation_type=\n ResourceOperation.RETURN, specific_dest=rollback_party)\n', (18091, 18176), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((20216, 20245), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (20231, 20245), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((20327, 20356), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (20342, 20356), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((25321, 25344), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (25336, 25344), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((25831, 25854), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (25846, 25854), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((28696, 28725), 'fate_flow.entity.run_status.StatusSet.get_level', 'StatusSet.get_level', ([], {'status': 's'}), '(status=s)\n', (28715, 28725), False, 'from fate_flow.entity.run_status import StatusSet, JobStatus, TaskStatus, EndStatus, InterruptStatus\n'), ((29757, 29780), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (29772, 29780), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((29905, 29928), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (29920, 29928), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((30326, 30349), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (30341, 30349), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((30800, 30823), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (30815, 30823), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((7916, 8225), 'fate_flow.controller.job_controller.JobController.initialize_tasks', 'JobController.initialize_tasks', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'run_on_this_party': '(False)', 'initiator_role': 'job.f_initiator_role', 
'initiator_party_id': 'job.f_initiator_party_id', 'job_parameters': 'common_job_parameters', 'dsl_parser': 'dsl_parser', 'components': 'need_run_components[role][party_id]'}), '(job_id=job_id, role=role, party_id=party_id,\n run_on_this_party=False, initiator_role=job.f_initiator_role,\n initiator_party_id=job.f_initiator_party_id, job_parameters=\n common_job_parameters, dsl_parser=dsl_parser, components=\n need_run_components[role][party_id])\n', (7946, 8225), False, 'from fate_flow.controller.job_controller import JobController\n'), ((12160, 12189), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (12175, 12189), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((12219, 12248), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (12234, 12248), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((12817, 12846), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (12832, 12846), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((12876, 12905), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (12891, 12905), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13494, 13523), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (13509, 13523), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((13553, 13582), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (13568, 13582), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((14138, 14167), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (14153, 14167), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((14197, 14226), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (14212, 14226), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((14925, 14954), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (14940, 14954), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((15055, 15084), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (15070, 15084), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((15289, 15318), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (15304, 15318), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((15348, 15377), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (15363, 15377), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((17619, 17642), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (17634, 17642), False, 'from fate_flow.utils.log_utils import schedule_logger, 
exception_to_trace_string\n'), ((31577, 31596), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (31594, 31596), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n'), ((31661, 31686), 'fate_flow.db.db_models.Job.update', 'Job.update', (['update_fields'], {}), '(update_fields)\n', (31671, 31686), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((32495, 32520), 'fate_flow.db.db_models.Job.update', 'Job.update', (['update_fields'], {}), '(update_fields)\n', (32505, 32520), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((18421, 18444), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (18436, 18444), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((18732, 18755), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (18747, 18755), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((18288, 18311), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (18303, 18311), False, 'from fate_flow.utils.log_utils import schedule_logger, exception_to_trace_string\n'), ((31939, 31958), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (31956, 31958), False, 'from fate_arch.common.base_utils import json_loads, json_dumps, current_timestamp\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import io import json import os import shutil import tarfile from flask import Flask, request, send_file, jsonify from google.protobuf import json_format from fate_arch.common.base_utils import fate_uuid from fate_arch import storage from fate_flow.db.db_models import Job, DB from fate_flow.manager.data_manager import delete_metric_data from fate_flow.operation.job_tracker import Tracker from fate_flow.operation.job_saver import JobSaver from fate_flow.scheduler.federated_scheduler import FederatedScheduler from fate_flow.settings import stat_logger, TEMP_DIRECTORY from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils from fate_flow.utils.api_utils import get_json_result, error_response from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter from federatedml.feature.instance import Instance manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/job/data_view', methods=['post']) def job_view(): request_data = request.json check_request_parameters(request_data) job_tracker = Tracker(job_id=request_data['job_id'], role=request_data['role'], party_id=request_data['party_id']) job_view_data = job_tracker.get_job_view() if job_view_data: job_metric_list = job_tracker.get_metric_list(job_level=True) job_view_data['model_summary'] = {} for metric_namespace, namespace_metrics in job_metric_list.items(): job_view_data['model_summary'][metric_namespace] = job_view_data['model_summary'].get(metric_namespace, {}) for metric_name in namespace_metrics: job_view_data['model_summary'][metric_namespace][metric_name] = job_view_data['model_summary'][ metric_namespace].get(metric_name, {}) for metric_data in job_tracker.get_job_metric_data(metric_namespace=metric_namespace, metric_name=metric_name): job_view_data['model_summary'][metric_namespace][metric_name][metric_data.key] = metric_data.value return get_json_result(retcode=0, retmsg='success', data=job_view_data) else: return get_json_result(retcode=101, retmsg='error') @manager.route('/component/metric/all', methods=['post']) def component_metric_all(): request_data = request.json check_request_parameters(request_data) tracker = Tracker(job_id=request_data['job_id'], component_name=request_data['component_name'], role=request_data['role'], party_id=request_data['party_id']) metrics = tracker.get_metric_list() all_metric_data = {} if metrics: for metric_namespace, metric_names in metrics.items(): all_metric_data[metric_namespace] = all_metric_data.get(metric_namespace, {}) for metric_name in metric_names: all_metric_data[metric_namespace][metric_name] = all_metric_data[metric_namespace].get(metric_name, {}) metric_data, metric_meta = get_metric_all_data(tracker=tracker, metric_namespace=metric_namespace, metric_name=metric_name) all_metric_data[metric_namespace][metric_name]['data'] = metric_data 
all_metric_data[metric_namespace][metric_name]['meta'] = metric_meta return get_json_result(retcode=0, retmsg='success', data=all_metric_data) else: return get_json_result(retcode=0, retmsg='no data', data={}) @manager.route('/component/metrics', methods=['post']) def component_metrics(): request_data = request.json check_request_parameters(request_data) tracker = Tracker(job_id=request_data['job_id'], component_name=request_data['component_name'], role=request_data['role'], party_id=request_data['party_id']) metrics = tracker.get_metric_list() if metrics: return get_json_result(retcode=0, retmsg='success', data=metrics) else: return get_json_result(retcode=0, retmsg='no data', data={}) @manager.route('/component/metric_data', methods=['post']) def component_metric_data(): request_data = request.json check_request_parameters(request_data) tracker = Tracker(job_id=request_data['job_id'], component_name=request_data['component_name'], role=request_data['role'], party_id=request_data['party_id']) metric_data, metric_meta = get_metric_all_data(tracker=tracker, metric_namespace=request_data['metric_namespace'], metric_name=request_data['metric_name']) if metric_data or metric_meta: return get_json_result(retcode=0, retmsg='success', data=metric_data, meta=metric_meta) else: return get_json_result(retcode=0, retmsg='no data', data=[], meta={}) def get_metric_all_data(tracker, metric_namespace, metric_name): metric_data = tracker.get_metric_data(metric_namespace=metric_namespace, metric_name=metric_name) metric_meta = tracker.get_metric_meta(metric_namespace=metric_namespace, metric_name=metric_name) if metric_data or metric_meta: metric_data_list = [(metric.key, metric.value) for metric in metric_data] metric_data_list.sort(key=lambda x: x[0]) return metric_data_list, metric_meta.to_dict() if metric_meta else {} else: return [], {} @manager.route('/component/metric/delete', methods=['post']) def component_metric_delete(): sql = delete_metric_data(request.json) return get_json_result(retcode=0, retmsg='success', data=sql) @manager.route('/component/parameters', methods=['post']) def component_parameters(): request_data = request.json check_request_parameters(request_data) job_id = request_data.get('job_id', '') job_dsl_parser = schedule_utils.get_job_dsl_parser_by_job_id(job_id=job_id) if job_dsl_parser: component = job_dsl_parser.get_component_info(request_data['component_name']) parameters = component.get_role_parameters() for role, partys_parameters in parameters.items(): for party_parameters in partys_parameters: if party_parameters.get('local', {}).get('role', '') == request_data['role'] and party_parameters.get( 'local', {}).get('party_id', '') == int(request_data['party_id']): output_parameters = {} output_parameters['module'] = party_parameters.get('module', '') for p_k, p_v in party_parameters.items(): if p_k.endswith('Param'): output_parameters[p_k] = p_v return get_json_result(retcode=0, retmsg='success', data=output_parameters) else: return get_json_result(retcode=0, retmsg='can not found this component parameters') else: return get_json_result(retcode=101, retmsg='can not found this job') @manager.route('/component/output/model', methods=['post']) def component_output_model(): request_data = request.json check_request_parameters(request_data) job_dsl, job_runtime_conf, runtime_conf_on_party, train_runtime_conf = job_utils.get_job_configuration(job_id=request_data['job_id'], role=request_data['role'], party_id=request_data['party_id']) try: model_id = 
runtime_conf_on_party['job_parameters']['model_id'] model_version = runtime_conf_on_party['job_parameters']['model_version'] except Exception as e: job_dsl, job_runtime_conf, train_runtime_conf = job_utils.get_model_configuration(job_id=request_data['job_id'], role=request_data['role'], party_id=request_data['party_id']) if any([job_dsl, job_runtime_conf, train_runtime_conf]): adapter = JobRuntimeConfigAdapter(job_runtime_conf) model_id = adapter.get_common_parameters().to_dict().get('model_id') model_version = adapter.get_common_parameters().to_dict.get('model_version') else: stat_logger.exception(e) stat_logger.error(f"Can not find model info by filters: job id: {request_data.get('job_id')}, " f"role: {request_data.get('role')}, party id: {request_data.get('party_id')}") raise Exception(f"Can not find model info by filters: job id: {request_data.get('job_id')}, " f"role: {request_data.get('role')}, party id: {request_data.get('party_id')}") tracker = Tracker(job_id=request_data['job_id'], component_name=request_data['component_name'], role=request_data['role'], party_id=request_data['party_id'], model_id=model_id, model_version=model_version) dag = schedule_utils.get_job_dsl_parser(dsl=job_dsl, runtime_conf=job_runtime_conf, train_runtime_conf=train_runtime_conf) component = dag.get_component_info(request_data['component_name']) output_model_json = {} # There is only one model output at the current dsl version. output_model = tracker.get_output_model(component.get_output()['model'][0] if component.get_output().get('model') else 'default') for buffer_name, buffer_object in output_model.items(): if buffer_name.endswith('Param'): output_model_json = json_format.MessageToDict(buffer_object, including_default_value_fields=True) if output_model_json: component_define = tracker.get_component_define() this_component_model_meta = {} for buffer_name, buffer_object in output_model.items(): if buffer_name.endswith('Meta'): this_component_model_meta['meta_data'] = json_format.MessageToDict(buffer_object, including_default_value_fields=True) this_component_model_meta.update(component_define) return get_json_result(retcode=0, retmsg='success', data=output_model_json, meta=this_component_model_meta) else: return get_json_result(retcode=0, retmsg='no data', data={}) @manager.route('/component/output/data', methods=['post']) def component_output_data(): request_data = request.json output_tables_meta = get_component_output_tables_meta(task_data=request_data) if not output_tables_meta: return get_json_result(retcode=0, retmsg='no data', data=[]) output_data_list = [] headers = [] totals = [] data_names = [] for output_name, output_table_meta in output_tables_meta.items(): output_data = [] num = 100 have_data_label = False is_str = False have_weight = False if output_table_meta: # part_of_data format: [(k, v)] for k, v in output_table_meta.get_part_of_data(): if num == 0: break data_line, have_data_label, is_str, have_weight = get_component_output_data_line(src_key=k, src_value=v) output_data.append(data_line) num -= 1 total = output_table_meta.get_count() output_data_list.append(output_data) data_names.append(output_name) totals.append(total) if output_data: header = get_component_output_data_schema(output_table_meta=output_table_meta, have_data_label=have_data_label, is_str=is_str, have_weight=have_weight) headers.append(header) else: headers.append(None) if len(output_data_list) == 1 and not output_data_list[0]: return get_json_result(retcode=0, retmsg='no data', data=[]) return get_json_result(retcode=0, 
retmsg='success', data=output_data_list, meta={'header': headers, 'total': totals, 'names':data_names}) @manager.route('/component/output/data/download', methods=['get']) def component_output_data_download(): request_data = request.json try: output_tables_meta = get_component_output_tables_meta(task_data=request_data) except Exception as e: stat_logger.exception(e) return error_response(210, str(e)) limit = request_data.get('limit', -1) if not output_tables_meta: return error_response(response_code=210, retmsg='no data') if limit == 0: return error_response(response_code=210, retmsg='limit is 0') have_data_label = False have_weight = False output_data_file_list = [] output_data_meta_file_list = [] output_tmp_dir = os.path.join(os.getcwd(), 'tmp/{}'.format(fate_uuid())) for output_name, output_table_meta in output_tables_meta.items(): output_data_count = 0 is_str = False output_data_file_path = "{}/{}.csv".format(output_tmp_dir, output_name) os.makedirs(os.path.dirname(output_data_file_path), exist_ok=True) with open(output_data_file_path, 'w') as fw: with storage.Session.build(name=output_table_meta.get_name(), namespace=output_table_meta.get_namespace()) as storage_session: output_table = storage_session.get_table() for k, v in output_table.collect(): data_line, have_data_label, is_str, have_weight = get_component_output_data_line(src_key=k, src_value=v) fw.write('{}\n'.format(','.join(map(lambda x: str(x), data_line)))) output_data_count += 1 if output_data_count == limit: break if output_data_count: # get meta output_data_file_list.append(output_data_file_path) header = get_component_output_data_schema(output_table_meta=output_table_meta, have_data_label=have_data_label, is_str=is_str, have_weight=have_weight) output_data_meta_file_path = "{}/{}.meta".format(output_tmp_dir, output_name) output_data_meta_file_list.append(output_data_meta_file_path) with open(output_data_meta_file_path, 'w') as fw: json.dump({'header': header}, fw, indent=4) if request_data.get('head', True) and header: with open(output_data_file_path, 'r+') as f: content = f.read() f.seek(0, 0) f.write('{}\n'.format(','.join(header)) + content) # tar memory_file = io.BytesIO() tar = tarfile.open(fileobj=memory_file, mode='w:gz') for index in range(0, len(output_data_file_list)): tar.add(output_data_file_list[index], os.path.relpath(output_data_file_list[index], output_tmp_dir)) tar.add(output_data_meta_file_list[index], os.path.relpath(output_data_meta_file_list[index], output_tmp_dir)) tar.close() memory_file.seek(0) output_data_file_list.extend(output_data_meta_file_list) for path in output_data_file_list: try: shutil.rmtree(os.path.dirname(path)) except Exception as e: # warning stat_logger.warning(e) tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(request_data['job_id'], request_data['component_name'], request_data['role'], request_data['party_id']) return send_file(memory_file, attachment_filename=tar_file_name, as_attachment=True) @manager.route('/component/output/data/table', methods=['post']) def component_output_data_table(): request_data = request.json detect_utils.check_config(config=request_data, required_arguments=['job_id', 'role', 'party_id', 'component_name']) jobs = JobSaver.query_job(job_id=request_data.get('job_id')) if jobs: job = jobs[0] return jsonify(FederatedScheduler.tracker_command(job, request_data, 'output/table')) else: return get_json_result(retcode=100, retmsg='No found job') @manager.route('/component/summary/download', methods=['POST']) def get_component_summary(): 
request_data = request.json try: required_params = ["job_id", "component_name", "role", "party_id"] detect_utils.check_config(request_data, required_params) tracker = Tracker(job_id=request_data["job_id"], component_name=request_data["component_name"], role=request_data["role"], party_id=request_data["party_id"], task_id=request_data.get("task_id", None), task_version=request_data.get("task_version", None)) summary = tracker.read_summary_from_db() if summary: if request_data.get("filename"): temp_filepath = os.path.join(TEMP_DIRECTORY, request_data.get("filename")) with open(temp_filepath, "w") as fout: fout.write(json.dumps(summary, indent=4)) return send_file(open(temp_filepath, "rb"), as_attachment=True, attachment_filename=request_data.get("filename")) else: return get_json_result(data=summary) return error_response(210, "No component summary found, please check if arguments are specified correctly.") except Exception as e: stat_logger.exception(e) return error_response(210, str(e)) @manager.route('/component/list', methods=['POST']) def component_list(): request_data = request.json parser = schedule_utils.get_job_dsl_parser_by_job_id(job_id=request_data.get('job_id')) if parser: return get_json_result(data={'components': list(parser.get_dsl().get('components').keys())}) else: return get_json_result(retcode=100, retmsg='No job matched, please make sure the job id is valid.') def get_component_output_tables_meta(task_data): check_request_parameters(task_data) tracker = Tracker(job_id=task_data['job_id'], component_name=task_data['component_name'], role=task_data['role'], party_id=task_data['party_id']) job_dsl_parser = schedule_utils.get_job_dsl_parser_by_job_id(job_id=task_data['job_id']) if not job_dsl_parser: raise Exception('can not get dag parser, please check if the parameters are correct') component = job_dsl_parser.get_component_info(task_data['component_name']) if not component: raise Exception('can not found component, please check if the parameters are correct') output_data_table_infos = tracker.get_output_data_info() output_tables_meta = tracker.get_output_data_table(output_data_infos=output_data_table_infos) return output_tables_meta def get_component_output_data_line(src_key, src_value): have_data_label = False have_weight = False data_line = [src_key] is_str = False if isinstance(src_value, Instance): if src_value.label is not None: data_line.append(src_value.label) have_data_label = True data_line.extend(data_utils.dataset_to_list(src_value.features)) if src_value.weight is not None: have_weight = True data_line.append(src_value.weight) elif isinstance(src_value, str): data_line.extend([value for value in src_value.split(',')]) is_str = True else: data_line.extend(data_utils.dataset_to_list(src_value)) return data_line, have_data_label, is_str, have_weight def get_component_output_data_schema(output_table_meta, have_data_label, is_str=False, have_weight=False): # get schema schema = output_table_meta.get_schema() if not schema: return ['sid'] header = [schema.get('sid_name', 'sid')] if have_data_label: header.append(schema.get('label_name')) if is_str: if not schema.get('header'): if schema.get('sid'): return [schema.get('sid')] else: return None header.extend([feature for feature in schema.get('header').split(',')]) else: header.extend(schema.get('header', [])) if have_weight: header.append('weight') return header @DB.connection_context() def check_request_parameters(request_data): if 'role' not in request_data and 'party_id' not in request_data: jobs = 
Job.select(Job.f_runtime_conf_on_party).where(Job.f_job_id == request_data.get('job_id', ''), Job.f_is_initiator == True) if jobs: job = jobs[0] job_runtime_conf = job.f_runtime_conf_on_party job_initiator = job_runtime_conf.get('initiator', {}) role = job_initiator.get('role', '') party_id = job_initiator.get('party_id', 0) request_data['role'] = role request_data['party_id'] = party_id
[ "fate_flow.utils.detect_utils.check_config", "fate_flow.utils.data_utils.dataset_to_list", "fate_flow.manager.data_manager.delete_metric_data", "fate_flow.settings.stat_logger.warning", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.utils.api_utils.error_response", "fate_flow.operation.job_tracker.Tracker", "fate_flow.utils.config_adapter.JobRuntimeConfigAdapter", "fate_flow.db.db_models.DB.connection_context", "fate_flow.utils.job_utils.get_job_configuration", "fate_flow.utils.api_utils.get_json_result", "fate_flow.utils.schedule_utils.get_job_dsl_parser_by_job_id", "fate_flow.utils.job_utils.get_model_configuration", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.tracker_command", "fate_flow.db.db_models.Job.select", "fate_flow.settings.stat_logger.exception" ]
[((1468, 1483), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1473, 1483), False, 'from flask import Flask, request, send_file, jsonify\n'), ((21332, 21355), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (21353, 21355), False, 'from fate_flow.db.db_models import Job, DB\n'), ((1547, 1571), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1568, 1571), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((1789, 1894), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': "request_data['job_id']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], role=request_data['role'], party_id=\n request_data['party_id'])\n", (1796, 1894), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((3131, 3288), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': "request_data['job_id']", 'component_name': "request_data['component_name']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], component_name=request_data[\n 'component_name'], role=request_data['role'], party_id=request_data[\n 'party_id'])\n", (3138, 3288), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((4405, 4562), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': "request_data['job_id']", 'component_name': "request_data['component_name']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], component_name=request_data[\n 'component_name'], role=request_data['role'], party_id=request_data[\n 'party_id'])\n", (4412, 4562), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((4963, 5120), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': "request_data['job_id']", 'component_name': "request_data['component_name']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], component_name=request_data[\n 'component_name'], role=request_data['role'], party_id=request_data[\n 'party_id'])\n", (4970, 5120), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((6330, 6362), 'fate_flow.manager.data_manager.delete_metric_data', 'delete_metric_data', (['request.json'], {}), '(request.json)\n', (6348, 6362), False, 'from fate_flow.manager.data_manager import delete_metric_data\n'), ((6374, 6428), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'sql'}), "(retcode=0, retmsg='success', data=sql)\n", (6389, 6428), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6657, 6715), 'fate_flow.utils.schedule_utils.get_job_dsl_parser_by_job_id', 'schedule_utils.get_job_dsl_parser_by_job_id', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (6700, 6715), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((8034, 8163), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': "request_data['job_id']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], role=\n request_data['role'], party_id=request_data['party_id'])\n", (8065, 8163), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((9719, 9924), 
'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': "request_data['job_id']", 'component_name': "request_data['component_name']", 'role': "request_data['role']", 'party_id': "request_data['party_id']", 'model_id': 'model_id', 'model_version': 'model_version'}), "(job_id=request_data['job_id'], component_name=request_data[\n 'component_name'], role=request_data['role'], party_id=request_data[\n 'party_id'], model_id=model_id, model_version=model_version)\n", (9726, 9924), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((9969, 10090), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job_dsl', 'runtime_conf': 'job_runtime_conf', 'train_runtime_conf': 'train_runtime_conf'}), '(dsl=job_dsl, runtime_conf=\n job_runtime_conf, train_runtime_conf=train_runtime_conf)\n', (10002, 10090), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((12893, 13029), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'output_data_list', 'meta': "{'header': headers, 'total': totals, 'names': data_names}"}), "(retcode=0, retmsg='success', data=output_data_list, meta={\n 'header': headers, 'total': totals, 'names': data_names})\n", (12908, 13029), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((15617, 15629), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (15627, 15629), False, 'import io\n'), ((15640, 15686), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'memory_file', 'mode': '"""w:gz"""'}), "(fileobj=memory_file, mode='w:gz')\n", (15652, 15686), False, 'import tarfile\n'), ((16799, 16918), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', ([], {'config': 'request_data', 'required_arguments': "['job_id', 'role', 'party_id', 'component_name']"}), "(config=request_data, required_arguments=['job_id',\n 'role', 'party_id', 'component_name'])\n", (16824, 16918), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((19091, 19231), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': "task_data['job_id']", 'component_name': "task_data['component_name']", 'role': "task_data['role']", 'party_id': "task_data['party_id']"}), "(job_id=task_data['job_id'], component_name=task_data[\n 'component_name'], role=task_data['role'], party_id=task_data['party_id'])\n", (19098, 19231), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((19270, 19341), 'fate_flow.utils.schedule_utils.get_job_dsl_parser_by_job_id', 'schedule_utils.get_job_dsl_parser_by_job_id', ([], {'job_id': "task_data['job_id']"}), "(job_id=task_data['job_id'])\n", (19313, 19341), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((2819, 2883), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'job_view_data'}), "(retcode=0, retmsg='success', data=job_view_data)\n", (2834, 2883), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((2909, 2953), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""error"""'}), "(retcode=101, retmsg='error')\n", (2924, 2953), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4088, 4154), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 
'(0)', 'retmsg': '"""success"""', 'data': 'all_metric_data'}), "(retcode=0, retmsg='success', data=all_metric_data)\n", (4103, 4154), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4180, 4233), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '{}'}), "(retcode=0, retmsg='no data', data={})\n", (4195, 4233), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4646, 4704), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'metrics'}), "(retcode=0, retmsg='success', data=metrics)\n", (4661, 4704), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4730, 4783), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '{}'}), "(retcode=0, retmsg='no data', data={})\n", (4745, 4783), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5394, 5479), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'metric_data', 'meta': 'metric_meta'}), "(retcode=0, retmsg='success', data=metric_data, meta=metric_meta\n )\n", (5409, 5479), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5531, 5593), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '[]', 'meta': '{}'}), "(retcode=0, retmsg='no data', data=[], meta={})\n", (5546, 5593), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7730, 7791), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""can not found this job"""'}), "(retcode=101, retmsg='can not found this job')\n", (7745, 7791), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((11163, 11268), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'output_model_json', 'meta': 'this_component_model_meta'}), "(retcode=0, retmsg='success', data=output_model_json, meta=\n this_component_model_meta)\n", (11178, 11268), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((11289, 11342), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '{}'}), "(retcode=0, retmsg='no data', data={})\n", (11304, 11342), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((11593, 11646), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '[]'}), "(retcode=0, retmsg='no data', data=[])\n", (11608, 11646), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((12828, 12881), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '[]'}), "(retcode=0, retmsg='no data', data=[])\n", (12843, 12881), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((13449, 13500), 'fate_flow.utils.api_utils.error_response', 'error_response', ([], {'response_code': '(210)', 'retmsg': '"""no data"""'}), "(response_code=210, retmsg='no data')\n", (13463, 13500), False, 'from fate_flow.utils.api_utils 
import get_json_result, error_response\n'), ((13535, 13589), 'fate_flow.utils.api_utils.error_response', 'error_response', ([], {'response_code': '(210)', 'retmsg': '"""limit is 0"""'}), "(response_code=210, retmsg='limit is 0')\n", (13549, 13589), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((13744, 13755), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13753, 13755), False, 'import os\n'), ((16583, 16660), 'flask.send_file', 'send_file', (['memory_file'], {'attachment_filename': 'tar_file_name', 'as_attachment': '(True)'}), '(memory_file, attachment_filename=tar_file_name, as_attachment=True)\n', (16592, 16660), False, 'from flask import Flask, request, send_file, jsonify\n'), ((17134, 17185), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': '"""No found job"""'}), "(retcode=100, retmsg='No found job')\n", (17149, 17185), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((17405, 17461), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['request_data', 'required_params'], {}), '(request_data, required_params)\n', (17430, 17461), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((18347, 18457), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(210)', '"""No component summary found, please check if arguments are specified correctly."""'], {}), "(210,\n 'No component summary found, please check if arguments are specified correctly.'\n )\n", (18361, 18457), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((18893, 18990), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': '"""No job matched, please make sure the job id is valid."""'}), "(retcode=100, retmsg=\n 'No job matched, please make sure the job id is valid.')\n", (18908, 18990), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7628, 7704), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""can not found this component parameters"""'}), "(retcode=0, retmsg='can not found this component parameters')\n", (7643, 7704), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((8617, 8748), 'fate_flow.utils.job_utils.get_model_configuration', 'job_utils.get_model_configuration', ([], {'job_id': "request_data['job_id']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], role=\n request_data['role'], party_id=request_data['party_id'])\n", (8650, 8748), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((10561, 10638), 'google.protobuf.json_format.MessageToDict', 'json_format.MessageToDict', (['buffer_object'], {'including_default_value_fields': '(True)'}), '(buffer_object, including_default_value_fields=True)\n', (10586, 10638), False, 'from google.protobuf import json_format\n'), ((13293, 13317), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (13314, 13317), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((13773, 13784), 'fate_arch.common.base_utils.fate_uuid', 'fate_uuid', ([], {}), '()\n', (13782, 13784), False, 'from fate_arch.common.base_utils import fate_uuid\n'), ((14010, 14048), 'os.path.dirname', 'os.path.dirname', (['output_data_file_path'], {}), 
'(output_data_file_path)\n', (14025, 14048), False, 'import os\n'), ((15788, 15849), 'os.path.relpath', 'os.path.relpath', (['output_data_file_list[index]', 'output_tmp_dir'], {}), '(output_data_file_list[index], output_tmp_dir)\n', (15803, 15849), False, 'import os\n'), ((15902, 15968), 'os.path.relpath', 'os.path.relpath', (['output_data_meta_file_list[index]', 'output_tmp_dir'], {}), '(output_data_meta_file_list[index], output_tmp_dir)\n', (15917, 15968), False, 'import os\n'), ((17038, 17107), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.tracker_command', 'FederatedScheduler.tracker_command', (['job', 'request_data', '"""output/table"""'], {}), "(job, request_data, 'output/table')\n", (17072, 17107), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((18484, 18508), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (18505, 18508), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((20189, 20235), 'fate_flow.utils.data_utils.dataset_to_list', 'data_utils.dataset_to_list', (['src_value.features'], {}), '(src_value.features)\n', (20215, 20235), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((9011, 9052), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['job_runtime_conf'], {}), '(job_runtime_conf)\n', (9034, 9052), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((9249, 9273), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (9270, 9273), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((10928, 11005), 'google.protobuf.json_format.MessageToDict', 'json_format.MessageToDict', (['buffer_object'], {'including_default_value_fields': '(True)'}), '(buffer_object, including_default_value_fields=True)\n', (10953, 11005), False, 'from google.protobuf import json_format\n'), ((15283, 15326), 'json.dump', 'json.dump', (["{'header': header}", 'fw'], {'indent': '(4)'}), "({'header': header}, fw, indent=4)\n", (15292, 15326), False, 'import json\n'), ((16149, 16170), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (16164, 16170), False, 'import os\n'), ((16237, 16259), 'fate_flow.settings.stat_logger.warning', 'stat_logger.warning', (['e'], {}), '(e)\n', (16256, 16259), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((18302, 18331), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'summary'}), '(data=summary)\n', (18317, 18331), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((20518, 20555), 'fate_flow.utils.data_utils.dataset_to_list', 'data_utils.dataset_to_list', (['src_value'], {}), '(src_value)\n', (20544, 20555), False, 'from fate_flow.utils import job_utils, data_utils, detect_utils, schedule_utils\n'), ((21485, 21524), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_runtime_conf_on_party'], {}), '(Job.f_runtime_conf_on_party)\n', (21495, 21524), False, 'from fate_flow.db.db_models import Job, DB\n'), ((7526, 7594), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'output_parameters'}), "(retcode=0, retmsg='success', data=output_parameters)\n", (7541, 7594), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((18067, 18096), 'json.dumps', 'json.dumps', (['summary'], {'indent': 
'(4)'}), '(summary, indent=4)\n', (18077, 18096), False, 'import json\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil import sys import time import uuid from fate_arch import storage, session from fate_arch.common import EngineType, file_utils, log, path_utils from fate_arch.common.data_utils import default_input_fs_path from fate_arch.session import Session from fate_arch.storage import DEFAULT_ID_DELIMITER, EggRollStoreType, StorageEngine, StorageTableOrigin from fate_flow.components._base import ( BaseParam, ComponentBase, ComponentMeta, ComponentInputProtocol, ) from fate_flow.entity import Metric, MetricMeta from fate_flow.manager.data_manager import DataTableTracker from fate_flow.scheduling_apps.client import ControllerClient from fate_flow.db.job_default_config import JobDefaultConfig from fate_flow.utils import data_utils, job_utils, process_utils, session_utils, upload_utils from fate_flow.utils.base_utils import get_fate_flow_directory LOGGER = log.getLogger() upload_cpn_meta = ComponentMeta("Upload") @upload_cpn_meta.bind_param class UploadParam(BaseParam): def __init__( self, file="", head=1, id_delimiter=DEFAULT_ID_DELIMITER, partition=10, namespace="", name="", storage_engine="", storage_address=None, destroy=False, extend_sid=False, auto_increasing_sid=False, block_size=1 ): self.file = file self.head = head self.id_delimiter = id_delimiter self.partition = partition self.namespace = namespace self.name = name self.storage_engine = storage_engine self.storage_address = storage_address self.destroy = destroy self.extend_sid = extend_sid self.auto_increasing_sid = auto_increasing_sid self.block_size = block_size def check(self): return True @upload_cpn_meta.bind_runner.on_local class Upload(ComponentBase): def __init__(self): super(Upload, self).__init__() self.MAX_PARTITIONS = 1024 self.MAX_BYTES = 1024 * 1024 * 8 * 500 self.parameters = {} self.table = None self.is_block = False self.session_id = None self.session = None self.storage_engine = None def _run(self, cpn_input: ComponentInputProtocol): self.parameters = cpn_input.parameters LOGGER.info(self.parameters) self.parameters["role"] = cpn_input.roles["role"] self.parameters["local"] = cpn_input.roles["local"] storage_engine = self.parameters["storage_engine"].upper() storage_address = self.parameters["storage_address"] # if not set storage, use job storage as default if not storage_engine: storage_engine = cpn_input.job_parameters.storage_engine self.storage_engine = storage_engine if not storage_address: storage_address = cpn_input.job_parameters.engines_address[ EngineType.STORAGE ] job_id = self.task_version_id.split("_")[0] if not os.path.isabs(self.parameters.get("file", "")): self.parameters["file"] = os.path.join( get_fate_flow_directory(), self.parameters["file"] ) if not os.path.exists(self.parameters["file"]): raise Exception( "%s is not exist, please check the configure" % (self.parameters["file"]) ) if not os.path.getsize(self.parameters["file"]): raise Exception("%s is an empty file" % 
(self.parameters["file"])) name, namespace = self.parameters.get("name"), self.parameters.get("namespace") _namespace, _table_name = self.generate_table_name(self.parameters["file"]) if namespace is None: namespace = _namespace if name is None: name = _table_name read_head = self.parameters["head"] if read_head == 0: head = False elif read_head == 1: head = True else: raise Exception("'head' in conf.json should be 0 or 1") partitions = self.parameters["partition"] if partitions <= 0 or partitions >= self.MAX_PARTITIONS: raise Exception( "Error number of partition, it should between %d and %d" % (0, self.MAX_PARTITIONS) ) self.session_id = job_utils.generate_session_id( self.tracker.task_id, self.tracker.task_version, self.tracker.role, self.tracker.party_id, ) sess = Session.get_global() self.session = sess if self.parameters.get("destroy", False): table = sess.get_table(namespace=namespace, name=name) if table: LOGGER.info( f"destroy table name: {name} namespace: {namespace} engine: {table.engine}" ) try: table.destroy() except Exception as e: LOGGER.error(e) else: LOGGER.info( f"can not found table name: {name} namespace: {namespace}, pass destroy" ) address_dict = storage_address.copy() storage_session = sess.storage( storage_engine=storage_engine, options=self.parameters.get("options") ) upload_address = {} if storage_engine in {StorageEngine.EGGROLL, StorageEngine.STANDALONE}: upload_address = { "name": name, "namespace": namespace, "storage_type": EggRollStoreType.ROLLPAIR_LMDB, } elif storage_engine in {StorageEngine.MYSQL, StorageEngine.HIVE}: if not address_dict.get("db") or not address_dict.get("name"): upload_address = {"db": namespace, "name": name} elif storage_engine in {StorageEngine.PATH}: upload_address = {"path": self.parameters["file"]} elif storage_engine in {StorageEngine.HDFS}: upload_address = { "path": default_input_fs_path( name=name, namespace=namespace, prefix=address_dict.get("path_prefix"), ) } elif storage_engine in {StorageEngine.LOCALFS}: upload_address = { "path": default_input_fs_path( name=name, namespace=namespace, storage_engine=storage_engine ) } else: raise RuntimeError(f"can not support this storage engine: {storage_engine}") address_dict.update(upload_address) LOGGER.info(f"upload to {storage_engine} storage, address: {address_dict}") address = storage.StorageTableMeta.create_address( storage_engine=storage_engine, address_dict=address_dict ) self.parameters["partitions"] = partitions self.parameters["name"] = name self.table = storage_session.create_table(address=address, origin=StorageTableOrigin.UPLOAD, **self.parameters) if storage_engine not in [StorageEngine.PATH]: data_table_count = self.save_data_table(job_id, name, namespace, storage_engine, head) else: data_table_count = self.get_data_table_count( self.parameters["file"], name, namespace ) self.table.meta.update_metas(in_serialized=True) DataTableTracker.create_table_tracker( table_name=name, table_namespace=namespace, entity_info={"job_id": job_id, "have_parent": False}, ) LOGGER.info("------------load data finish!-----------------") # rm tmp file try: if "{}/fate_upload_tmp".format(job_id) in self.parameters["file"]: LOGGER.info("remove tmp upload file") LOGGER.info(os.path.dirname(self.parameters["file"])) shutil.rmtree(os.path.dirname(self.parameters["file"])) except: LOGGER.info("remove tmp file failed") LOGGER.info("file: {}".format(self.parameters["file"])) LOGGER.info("total data_count: {}".format(data_table_count)) LOGGER.info("table name: {}, table namespace: {}".format(name, 
namespace))

    def save_data_table(self, job_id, dst_table_name, dst_table_namespace, storage_engine, head=True):
        input_file = self.parameters["file"]
        input_feature_count = self.get_count(input_file)
        data_head, file_list, table_list = self.split_file(input_file, head, input_feature_count,
                                                           dst_table_name, dst_table_namespace, storage_engine)
        if len(file_list) == 1:
            self.upload_file(input_file, head, job_id, input_feature_count)
        else:
            self.upload_file_block(file_list, data_head, table_list)
        table_count = self.table.count()
        self.table.meta.update_metas(
            count=table_count,
            partitions=self.parameters["partition"],
            extend_sid=self.parameters["extend_sid"],
        )
        self.save_meta(
            dst_table_namespace=dst_table_namespace,
            dst_table_name=dst_table_name,
            table_count=table_count,
        )
        return table_count

    def get_count(self, input_file):
        with open(input_file, "r", encoding="utf-8") as fp:
            count = 0
            for line in fp:
                count += 1
        return count

    def split_file(self, file_path, head, input_feature_count, table_name, namespace, storage_engine):
        data_head = None
        file_list = []
        table_list = []
        block_size = self.parameters.get("block_size", 1)
        if storage_engine not in {StorageEngine.EGGROLL, StorageEngine.STANDALONE} or block_size == 1:
            return data_head, [file_path], None
        if isinstance(block_size, int) and block_size > 1:
            block_line = int(input_feature_count / self.parameters.get("block_size", 5)) + 1
        else:
            raise ValueError(f"block size value error:{block_size}")
        LOGGER.info('start to split file {}'.format(file_path))
        file_dir, name = os.path.split(file_path)
        partno = 0
        name_uuid = uuid.uuid1().hex
        with open(file_path, 'r') as stream:
            if head:
                data_head = stream.readline()
            while True:
                part_file_name = os.path.join(file_dir, name + '_' + str(partno))
                LOGGER.debug('write start %s' % part_file_name)
                part_stream = open(part_file_name, 'w')
                read_count = 0
                while read_count < block_line:
                    read_content = stream.readline()
                    if read_content:
                        part_stream.write(read_content)
                    else:
                        break
                    read_count += 1
                part_stream.close()
                if read_count > 0:
                    file_list.append(part_file_name)
                    table_list.append({"name": table_name + '_' + name_uuid + str(partno), "namespace": namespace})
                if (read_count < block_line):
                    break
                partno += 1
        LOGGER.debug('finish split file {}: {}, {}, {}'.format(file_path, data_head, file_list, table_list))
        return data_head, file_list, table_list

    def upload_file(self, input_file, head, job_id=None, input_feature_count=None, table=None, without_block=True):
        if not table:
            table = self.table
        with open(input_file, "r") as fin:
            lines_count = 0
            if head is True:
                data_head = fin.readline()
                input_feature_count -= 1
                self.update_table_meta(data_head)
            n = 0
            fate_uuid = uuid.uuid1().hex
            get_line = self.get_line()
            while True:
                data = list()
                lines = fin.readlines(JobDefaultConfig.upload_max_bytes)
                line_index = 0
                if lines:
                    # self.append_data_line(lines, data, n)
                    for line in lines:
                        values = line.rstrip().split(self.parameters["id_delimiter"])
                        k, v = get_line(
                            values=values,
                            line_index=line_index,
                            extend_sid=self.parameters["extend_sid"],
                            auto_increasing_sid=self.parameters["auto_increasing_sid"],
                            id_delimiter=self.parameters["id_delimiter"],
                            fate_uuid=fate_uuid,
                        )
                        data.append((k, v))
                        line_index += 1
                    if without_block:
                        lines_count += len(data)
                        save_progress = lines_count / input_feature_count * 100 // 1
                        job_info = {
                            "progress": save_progress,
                            "job_id": job_id,
                            "role": self.parameters["local"]["role"],
                            "party_id": self.parameters["local"]["party_id"],
                        }
                        ControllerClient.update_job(job_info=job_info)
                    table.put_all(data)
                    if n == 0 and without_block:
                        table.meta.update_metas(part_of_data=data)
                else:
                    return
                n += 1

    def upload_file_block(self, file_list, data_head, table_list):
        if data_head:
            self.update_table_meta(data_head)
        upload_process = []
        for block_index, block_file in enumerate(file_list):
            task_dir = os.path.join(job_utils.get_job_directory(job_id=self.tracker.job_id), self.tracker.role,
                                    str(self.tracker.party_id), self.tracker.component_name, 'upload')
            os.makedirs(task_dir, exist_ok=True)
            process_cmd = [
                sys.executable or 'python3',
                sys.modules[upload_utils.UploadFile.__module__].__file__,
                '--session_id', self.session_id,
                '--storage', self.storage_engine,
                '--file', block_file,
                '--namespace', table_list[block_index].get("namespace"),
                '--name', table_list[block_index].get("name"),
                '--partitions', self.parameters.get('partition')
            ]
            LOGGER.info(process_cmd)
            job_log_dir = os.path.join(job_utils.get_job_log_directory(job_id=self.tracker.job_id),
                                       self.tracker.role, str(self.tracker.party_id))
            task_log_dir = os.path.join(job_log_dir, self.tracker.component_name, f'block_{block_index}')
            p = process_utils.run_subprocess(job_id=self.tracker.job_id, config_dir=task_dir,
                                             process_cmd=process_cmd, log_dir=task_log_dir)
            upload_process.append(p)
        self.check_upload_process(upload_process)
        self.union_table(table_list)

    def union_table(self, table_list):
        combined_table = self.get_computing_table(self.table.name, self.table.namespace)
        for table_info in table_list:
            table = self.get_computing_table(table_info.get("name"), table_info.get("namespace"))
            combined_table = combined_table.union(table)
        LOGGER.info(combined_table.count())
        session.Session.persistent(computing_table=combined_table,
                                    namespace=self.table.namespace,
                                    name=self.table.name,
                                    schema={},
                                    engine=self.table.engine,
                                    engine_address=self.table.address.__dict__,
                                    token=None)

    def get_computing_table(self, name, namespace, schema=None):
        storage_table_meta = storage.StorageTableMeta(name=name, namespace=namespace)
        computing_table = session.get_computing_session().load(
            storage_table_meta.get_address(),
            schema=schema if schema else storage_table_meta.get_schema(),
            partitions=self.parameters.get("partitions"))
        return computing_table

    def check_upload_process(self, upload_process):
        while True:
            for p in upload_process:
                LOGGER.info(f"pid {p.pid} poll status: {p.poll()}")
                if p.poll() != None:
                    if p.poll() != 0:
                        raise Exception(p.stderr)
                    upload_process.remove(p)
            LOGGER.info(f"running pid:{[p.pid for p in upload_process]}")
            time.sleep(5)
            if not len(upload_process):
                break

    def update_table_meta(self, data_head):
        _, meta = self.table.meta.update_metas(
            schema=data_utils.get_header_schema(
                header_line=data_head,
                id_delimiter=self.parameters["id_delimiter"],
                extend_sid=self.parameters["extend_sid"],
            ),
            auto_increasing_sid=self.parameters["auto_increasing_sid"],
            extend_sid=self.parameters["extend_sid"],
        )
        self.table.meta = meta

    def get_line(self):
        if not self.parameters["extend_sid"]:
            line = data_utils.get_data_line
        elif not self.parameters["auto_increasing_sid"]:
            line = data_utils.get_sid_data_line
        else:
            line = data_utils.get_auto_increasing_sid_data_line
        return line

    def generate_table_name(self, input_file_path):
        str_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
        file_name = input_file_path.split(".")[0]
        file_name = file_name.split("/")[-1]
        return file_name, str_time

    def save_meta(self, dst_table_namespace, dst_table_name, table_count):
        self.tracker.log_output_data_info(
            data_name="upload",
            table_namespace=dst_table_namespace,
            table_name=dst_table_name,
        )
        self.tracker.log_metric_data(
            metric_namespace="upload",
            metric_name="data_access",
            metrics=[Metric("count", table_count)],
        )
        self.tracker.set_metric_meta(
            metric_namespace="upload",
            metric_name="data_access",
            metric_meta=MetricMeta(name="upload", metric_type="UPLOAD"),
        )

    def get_data_table_count(self, path, name, namespace):
        count = path_utils.get_data_table_count(path)
        self.save_meta(
            dst_table_namespace=namespace, dst_table_name=name, table_count=count
        )
        self.table.meta.update_metas(count=count)
        return count
[ "fate_flow.utils.process_utils.run_subprocess", "fate_flow.scheduling_apps.client.ControllerClient.update_job", "fate_flow.components._base.ComponentMeta", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.utils.base_utils.get_fate_flow_directory", "fate_flow.utils.job_utils.get_job_directory", "fate_flow.entity.MetricMeta", "fate_flow.utils.data_utils.get_header_schema", "fate_flow.entity.Metric", "fate_flow.utils.job_utils.generate_session_id", "fate_flow.manager.data_manager.DataTableTracker.create_table_tracker" ]
[((1510, 1525), 'fate_arch.common.log.getLogger', 'log.getLogger', ([], {}), '()\n', (1523, 1525), False, 'from fate_arch.common import EngineType, file_utils, log, path_utils\n'), ((1545, 1568), 'fate_flow.components._base.ComponentMeta', 'ComponentMeta', (['"""Upload"""'], {}), "('Upload')\n", (1558, 1568), False, 'from fate_flow.components._base import BaseParam, ComponentBase, ComponentMeta, ComponentInputProtocol\n'), ((5046, 5171), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', (['self.tracker.task_id', 'self.tracker.task_version', 'self.tracker.role', 'self.tracker.party_id'], {}), '(self.tracker.task_id, self.tracker.\n task_version, self.tracker.role, self.tracker.party_id)\n', (5075, 5171), False, 'from fate_flow.utils import data_utils, job_utils, process_utils, session_utils, upload_utils\n'), ((5241, 5261), 'fate_arch.session.Session.get_global', 'Session.get_global', ([], {}), '()\n', (5259, 5261), False, 'from fate_arch.session import Session\n'), ((7489, 7590), 'fate_arch.storage.StorageTableMeta.create_address', 'storage.StorageTableMeta.create_address', ([], {'storage_engine': 'storage_engine', 'address_dict': 'address_dict'}), '(storage_engine=storage_engine,\n address_dict=address_dict)\n', (7528, 7590), False, 'from fate_arch import storage, session\n'), ((8181, 8321), 'fate_flow.manager.data_manager.DataTableTracker.create_table_tracker', 'DataTableTracker.create_table_tracker', ([], {'table_name': 'name', 'table_namespace': 'namespace', 'entity_info': "{'job_id': job_id, 'have_parent': False}"}), "(table_name=name, table_namespace=\n namespace, entity_info={'job_id': job_id, 'have_parent': False})\n", (8218, 8321), False, 'from fate_flow.manager.data_manager import DataTableTracker\n'), ((10960, 10984), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (10973, 10984), False, 'import os\n'), ((16449, 16664), 'fate_arch.session.Session.persistent', 'session.Session.persistent', ([], {'computing_table': 'combined_table', 'namespace': 'self.table.namespace', 'name': 'self.table.name', 'schema': '{}', 'engine': 'self.table.engine', 'engine_address': 'self.table.address.__dict__', 'token': 'None'}), '(computing_table=combined_table, namespace=self.\n table.namespace, name=self.table.name, schema={}, engine=self.table.\n engine, engine_address=self.table.address.__dict__, token=None)\n', (16475, 16664), False, 'from fate_arch import storage, session\n'), ((16960, 17016), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'name', 'namespace': 'namespace'}), '(name=name, namespace=namespace)\n', (16984, 17016), False, 'from fate_arch import storage, session\n'), ((19552, 19589), 'fate_arch.common.path_utils.get_data_table_count', 'path_utils.get_data_table_count', (['path'], {}), '(path)\n', (19583, 19589), False, 'from fate_arch.common import EngineType, file_utils, log, path_utils\n'), ((3896, 3935), 'os.path.exists', 'os.path.exists', (["self.parameters['file']"], {}), "(self.parameters['file'])\n", (3910, 3935), False, 'import os\n'), ((4101, 4141), 'os.path.getsize', 'os.path.getsize', (["self.parameters['file']"], {}), "(self.parameters['file'])\n", (4116, 4141), False, 'import os\n'), ((11024, 11036), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (11034, 11036), False, 'import uuid\n'), ((14896, 14932), 'os.makedirs', 'os.makedirs', (['task_dir'], {'exist_ok': '(True)'}), '(task_dir, exist_ok=True)\n', (14907, 14932), False, 'import os\n'), ((15682, 15760), 'os.path.join', 
'os.path.join', (['job_log_dir', 'self.tracker.component_name', 'f"""block_{block_index}"""'], {}), "(job_log_dir, self.tracker.component_name, f'block_{block_index}')\n", (15694, 15760), False, 'import os\n'), ((15777, 15906), 'fate_flow.utils.process_utils.run_subprocess', 'process_utils.run_subprocess', ([], {'job_id': 'self.tracker.job_id', 'config_dir': 'task_dir', 'process_cmd': 'process_cmd', 'log_dir': 'task_log_dir'}), '(job_id=self.tracker.job_id, config_dir=\n task_dir, process_cmd=process_cmd, log_dir=task_log_dir)\n', (15805, 15906), False, 'from fate_flow.utils import data_utils, job_utils, process_utils, session_utils, upload_utils\n'), ((17723, 17736), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (17733, 17736), False, 'import time\n'), ((18702, 18718), 'time.localtime', 'time.localtime', ([], {}), '()\n', (18716, 18718), False, 'import time\n'), ((3816, 3841), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3839, 3841), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((12638, 12650), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (12648, 12650), False, 'import uuid\n'), ((14633, 14688), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', ([], {'job_id': 'self.tracker.job_id'}), '(job_id=self.tracker.job_id)\n', (14660, 14688), False, 'from fate_flow.utils import data_utils, job_utils, process_utils, session_utils, upload_utils\n'), ((15508, 15567), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'self.tracker.job_id'}), '(job_id=self.tracker.job_id)\n', (15539, 15567), False, 'from fate_flow.utils import data_utils, job_utils, process_utils, session_utils, upload_utils\n'), ((17043, 17074), 'fate_arch.session.get_computing_session', 'session.get_computing_session', ([], {}), '()\n', (17072, 17074), False, 'from fate_arch import storage, session\n'), ((17911, 18055), 'fate_flow.utils.data_utils.get_header_schema', 'data_utils.get_header_schema', ([], {'header_line': 'data_head', 'id_delimiter': "self.parameters['id_delimiter']", 'extend_sid': "self.parameters['extend_sid']"}), "(header_line=data_head, id_delimiter=self.\n parameters['id_delimiter'], extend_sid=self.parameters['extend_sid'])\n", (17939, 18055), False, 'from fate_flow.utils import data_utils, job_utils, process_utils, session_utils, upload_utils\n'), ((19417, 19464), 'fate_flow.entity.MetricMeta', 'MetricMeta', ([], {'name': '"""upload"""', 'metric_type': '"""UPLOAD"""'}), "(name='upload', metric_type='UPLOAD')\n", (19427, 19464), False, 'from fate_flow.entity import Metric, MetricMeta\n'), ((8630, 8670), 'os.path.dirname', 'os.path.dirname', (["self.parameters['file']"], {}), "(self.parameters['file'])\n", (8645, 8670), False, 'import os\n'), ((8702, 8742), 'os.path.dirname', 'os.path.dirname', (["self.parameters['file']"], {}), "(self.parameters['file'])\n", (8717, 8742), False, 'import os\n'), ((19236, 19264), 'fate_flow.entity.Metric', 'Metric', (['"""count"""', 'table_count'], {}), "('count', table_count)\n", (19242, 19264), False, 'from fate_flow.entity import Metric, MetricMeta\n'), ((14097, 14143), 'fate_flow.scheduling_apps.client.ControllerClient.update_job', 'ControllerClient.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (14124, 14143), False, 'from fate_flow.scheduling_apps.client import ControllerClient\n'), ((7063, 7152), 'fate_arch.common.data_utils.default_input_fs_path', 'default_input_fs_path', ([], 
{'name': 'name', 'namespace': 'namespace', 'storage_engine': 'storage_engine'}), '(name=name, namespace=namespace, storage_engine=\n storage_engine)\n', (7084, 7152), False, 'from fate_arch.common.data_utils import default_input_fs_path\n')]
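The Upload component above only block-splits its input when the storage engine is EGGROLL or STANDALONE and block_size > 1. The snippet below is a minimal standalone sketch of the same "lines per block" arithmetic used by split_file; the function name, defaults and paths are illustrative, not part of FATE.

import os


def split_into_blocks(file_path, blocks=5, has_head=True):
    # Count data lines (excluding an optional header), as get_count/split_file do.
    with open(file_path, "r", encoding="utf-8") as fp:
        total = sum(1 for _ in fp) - (1 if has_head else 0)
    block_line = int(total / blocks) + 1  # same arithmetic as split_file's block_line
    file_dir, name = os.path.split(file_path)
    part_files = []
    with open(file_path, "r", encoding="utf-8") as stream:
        head = stream.readline() if has_head else None
        partno = 0
        while True:
            # Read up to block_line lines for this block; stop at end of file.
            lines = [line for line in (stream.readline() for _ in range(block_line)) if line]
            if not lines:
                break
            part_path = os.path.join(file_dir, f"{name}_{partno}")
            with open(part_path, "w", encoding="utf-8") as part:
                part.writelines(lines)
            part_files.append(part_path)
            if len(lines) < block_line:
                break
            partno += 1
    return head, part_files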
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_arch import storage from fate_flow.entity.types import RunParameters from fate_flow.operation import JobSaver, Tracker from fate_flow.operation.task_executor import TaskExecutor from fate_flow.utils.api_utils import get_json_result from fate_flow.utils import detect_utils, job_utils, schedule_utils from fate_flow.settings import stat_logger from flask import Flask, request manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/add', methods=['post']) def table_add(): request_data = request.json detect_utils.check_config(request_data, required_arguments=["engine", "address", "namespace", "name", ("head", (0, 1)), "id_delimiter"]) address_dict = request_data.get('address') engine = request_data.get('engine') name = request_data.get('name') namespace = request_data.get('namespace') address = storage.StorageTableMeta.create_address(storage_engine=engine, address_dict=address_dict) in_serialized = request_data.get("in_serialized", 1 if engine in {storage.StorageEngine.STANDALONE, storage.StorageEngine.EGGROLL} else 0) destroy = (int(request_data.get("drop", 0)) == 1) data_table_meta = storage.StorageTableMeta(name=name, namespace=namespace) if data_table_meta: if destroy: data_table_meta.destroy_metas() else: return get_json_result(retcode=100, retmsg='The data table already exists.' 'If you still want to continue uploading, please add the parameter -drop.' 
'1 means to add again after deleting the table') with storage.Session.build(storage_engine=engine, options=request_data.get("options")) as storage_session: storage_session.create_table(address=address, name=name, namespace=namespace, partitions=request_data.get('partitions', None), hava_head=request_data.get("head"), id_delimiter=request_data.get("id_delimiter"), in_serialized=in_serialized) return get_json_result(data={"table_name": name, "namespace": namespace}) @manager.route('/delete', methods=['post']) def table_delete(): request_data = request.json table_name = request_data.get('table_name') namespace = request_data.get('namespace') data = None with storage.Session.build(name=table_name, namespace=namespace) as storage_session: table = storage_session.get_table() if table: table.destroy() data = {'table_name': table_name, 'namespace': namespace} if data: return get_json_result(data=data) return get_json_result(retcode=101, retmsg='no find table') @manager.route('/list', methods=['post']) def get_job_table_list(): detect_utils.check_config(config=request.json, required_arguments=['job_id', 'role', 'party_id']) jobs = JobSaver.query_job(**request.json) if jobs: job = jobs[0] tables = get_job_all_table(job) return get_json_result(data=tables) else: return get_json_result(retcode=101, retmsg='no find job') @manager.route('/<table_func>', methods=['post']) def table_api(table_func): config = request.json if table_func == 'table_info': table_key_count = 0 table_partition = None table_schema = None table_name, namespace = config.get("name") or config.get("table_name"), config.get("namespace") table_meta = storage.StorageTableMeta(name=table_name, namespace=namespace) if table_meta: table_key_count = table_meta.get_count() table_partition = table_meta.get_partitions() table_schema = table_meta.get_schema() exist = 1 else: exist = 0 return get_json_result(data={"table_name": table_name, "namespace": namespace, "exist": exist, "count": table_key_count, "partition": table_partition, "schema": table_schema}) else: return get_json_result() def get_job_all_table(job): dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl, runtime_conf=job.f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf ) _, hierarchical_structure = dsl_parser.get_dsl_hierarchical_structure() component_table = {} component_output_tables = Tracker.query_output_data_infos(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id) for component_name_list in hierarchical_structure: for component_name in component_name_list: component_table[component_name] = {} component_input_table = get_component_input_table(dsl_parser, job, component_name) component_table[component_name]['input'] = component_input_table component_table[component_name]['output'] = {} for output_table in component_output_tables: if output_table.f_component_name == component_name: component_table[component_name]['output'][output_table.f_data_name] = \ {'name': output_table.f_table_name, 'namespace': output_table.f_table_namespace} return component_table def get_component_input_table(dsl_parser, job, component_name): component = dsl_parser.get_component_info(component_name=component_name) if 'reader' in component_name: component_parameters = component.get_role_parameters() return component_parameters[job.f_role][0]['ReaderParam'] task_input_dsl = component.get_input() job_args_on_party = TaskExecutor.get_job_args_on_party(dsl_parser=dsl_parser, job_runtime_conf=job.f_runtime_conf, role=job.f_role, party_id=job.f_party_id) config = 
job_utils.get_job_parameters(job.f_job_id, job.f_role, job.f_party_id) task_parameters = RunParameters(**config) job_parameters = task_parameters component_input_table = TaskExecutor.get_task_run_args(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id, task_id=None, task_version=None, job_args=job_args_on_party, job_parameters=job_parameters, task_parameters=task_parameters, input_dsl=task_input_dsl, get_input_table=True ) return component_input_table
[ "fate_flow.utils.job_utils.get_job_parameters", "fate_flow.entity.types.RunParameters", "fate_flow.utils.detect_utils.check_config", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.operation.Tracker.query_output_data_infos", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.api_utils.get_json_result", "fate_flow.operation.task_executor.TaskExecutor.get_job_args_on_party", "fate_flow.operation.task_executor.TaskExecutor.get_task_run_args", "fate_flow.operation.JobSaver.query_job" ]
[((1013, 1028), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1018, 1028), False, 'from flask import Flask, request\n'), ((1092, 1116), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1113, 1116), False, 'from fate_flow.settings import stat_logger\n'), ((1268, 1408), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['request_data'], {'required_arguments': "['engine', 'address', 'namespace', 'name', ('head', (0, 1)), 'id_delimiter']"}), "(request_data, required_arguments=['engine',\n 'address', 'namespace', 'name', ('head', (0, 1)), 'id_delimiter'])\n", (1293, 1408), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils\n'), ((1588, 1682), 'fate_arch.storage.StorageTableMeta.create_address', 'storage.StorageTableMeta.create_address', ([], {'storage_engine': 'engine', 'address_dict': 'address_dict'}), '(storage_engine=engine, address_dict\n =address_dict)\n', (1627, 1682), False, 'from fate_arch import storage\n'), ((1897, 1953), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'name', 'namespace': 'namespace'}), '(name=name, namespace=namespace)\n', (1921, 1953), False, 'from fate_arch import storage\n'), ((2793, 2859), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'table_name': name, 'namespace': namespace}"}), "(data={'table_name': name, 'namespace': namespace})\n", (2808, 2859), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3383, 3435), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no find table"""'}), "(retcode=101, retmsg='no find table')\n", (3398, 3435), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3510, 3611), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', ([], {'config': 'request.json', 'required_arguments': "['job_id', 'role', 'party_id']"}), "(config=request.json, required_arguments=['job_id',\n 'role', 'party_id'])\n", (3535, 3611), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils\n'), ((3619, 3653), 'fate_flow.operation.JobSaver.query_job', 'JobSaver.query_job', ([], {}), '(**request.json)\n', (3637, 3653), False, 'from fate_flow.operation import JobSaver, Tracker\n'), ((4966, 5097), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf)\n', (4999, 5097), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils\n'), ((5378, 5476), 'fate_flow.operation.Tracker.query_output_data_infos', 'Tracker.query_output_data_infos', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(job_id=job.f_job_id, role=job.f_role,\n party_id=job.f_party_id)\n', (5409, 5476), False, 'from fate_flow.operation import JobSaver, Tracker\n'), ((6644, 6785), 'fate_flow.operation.task_executor.TaskExecutor.get_job_args_on_party', 'TaskExecutor.get_job_args_on_party', ([], {'dsl_parser': 'dsl_parser', 'job_runtime_conf': 'job.f_runtime_conf', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(dsl_parser=dsl_parser, job_runtime_conf=\n job.f_runtime_conf, role=job.f_role, party_id=job.f_party_id)\n', (6678, 6785), False, 'from fate_flow.operation.task_executor import 
TaskExecutor\n'), ((6912, 6982), 'fate_flow.utils.job_utils.get_job_parameters', 'job_utils.get_job_parameters', (['job.f_job_id', 'job.f_role', 'job.f_party_id'], {}), '(job.f_job_id, job.f_role, job.f_party_id)\n', (6940, 6982), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils\n'), ((7005, 7028), 'fate_flow.entity.types.RunParameters', 'RunParameters', ([], {}), '(**config)\n', (7018, 7028), False, 'from fate_flow.entity.types import RunParameters\n'), ((7094, 7374), 'fate_flow.operation.task_executor.TaskExecutor.get_task_run_args', 'TaskExecutor.get_task_run_args', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id', 'task_id': 'None', 'task_version': 'None', 'job_args': 'job_args_on_party', 'job_parameters': 'job_parameters', 'task_parameters': 'task_parameters', 'input_dsl': 'task_input_dsl', 'get_input_table': '(True)'}), '(job_id=job.f_job_id, role=job.f_role,\n party_id=job.f_party_id, task_id=None, task_version=None, job_args=\n job_args_on_party, job_parameters=job_parameters, task_parameters=\n task_parameters, input_dsl=task_input_dsl, get_input_table=True)\n', (7124, 7374), False, 'from fate_flow.operation.task_executor import TaskExecutor\n'), ((3077, 3136), 'fate_arch.storage.Session.build', 'storage.Session.build', ([], {'name': 'table_name', 'namespace': 'namespace'}), '(name=table_name, namespace=namespace)\n', (3098, 3136), False, 'from fate_arch import storage\n'), ((3345, 3371), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (3360, 3371), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3744, 3772), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'tables'}), '(data=tables)\n', (3759, 3772), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3798, 3848), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no find job"""'}), "(retcode=101, retmsg='no find job')\n", (3813, 3848), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((4201, 4263), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'table_name', 'namespace': 'namespace'}), '(name=table_name, namespace=namespace)\n', (4225, 4263), False, 'from fate_arch import storage\n'), ((4522, 4698), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'table_name': table_name, 'namespace': namespace, 'exist': exist, 'count':\n table_key_count, 'partition': table_partition, 'schema': table_schema}"}), "(data={'table_name': table_name, 'namespace': namespace,\n 'exist': exist, 'count': table_key_count, 'partition': table_partition,\n 'schema': table_schema})\n", (4537, 4698), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((4901, 4918), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (4916, 4918), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2075, 2271), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': '"""The data table already exists.If you still want to continue uploading, please add the parameter -drop.1 means to add again after deleting the table"""'}), "(retcode=100, retmsg=\n 'The data table already exists.If you still want to continue uploading, please add the parameter -drop.1 means to add again after deleting the table'\n )\n", (2090, 2271), False, 'from fate_flow.utils.api_utils 
import get_json_result\n')]
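The module above registers table-management routes (/add, /delete, /list, /<table_func>) on a Flask app. A hypothetical client call might look like the following; the host, port and "/v1/table" prefix are assumptions about a local FATE Flow deployment, and the payload values are illustrative only.

import requests

BASE = "http://127.0.0.1:9380/v1/table"   # assumed local FATE Flow address

add_conf = {
    "engine": "EGGROLL",
    "address": {"name": "breast_hetero_guest", "namespace": "experiment"},
    "namespace": "experiment",
    "name": "breast_hetero_guest",
    "head": 1,
    "id_delimiter": ",",
    "drop": 1,
}
print(requests.post(f"{BASE}/add", json=add_conf).json())
print(requests.post(f"{BASE}/delete",
                    json={"table_name": "breast_hetero_guest",
                          "namespace": "experiment"}).json())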
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import io import os import tarfile from flask import Flask, request, send_file from arch.api.utils.core import base64_decode from arch.api.utils.core import json_loads from fate_flow.driver.job_controller import JobController from fate_flow.driver.task_scheduler import TaskScheduler from fate_flow.settings import stat_logger from fate_flow.utils import job_utils from fate_flow.utils.api_utils import get_json_result manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) # User interface @manager.route('/submit', methods=['POST']) def submit_job(): job_id, job_dsl_path, job_runtime_conf_path, model_info, board_url = JobController.submit_job(request.json) return get_json_result(job_id=job_id, data={'job_dsl_path': job_dsl_path, 'job_runtime_conf_path': job_runtime_conf_path, 'model_info': model_info, 'board_url': board_url }) @manager.route('/stop', methods=['POST']) def stop_job(): TaskScheduler.stop_job(job_id=request.json.get('job_id', '')) return get_json_result(retcode=0, retmsg='success') @manager.route('/query', methods=['POST']) def query_job(): jobs = job_utils.query_job(**request.json) if not jobs: return get_json_result(retcode=101, retmsg='find job failed') return get_json_result(retcode=0, retmsg='success', data=[job.to_json() for job in jobs]) @manager.route('/config', methods=['POST']) def job_config(): jobs = job_utils.query_job(**request.json) if not jobs: return get_json_result(retcode=101, retmsg='find job failed') else: job = jobs[0] response_data = dict() response_data['job_id'] = job.f_job_id response_data['dsl'] = json_loads(job.f_dsl) response_data['runtime_conf'] = json_loads(job.f_runtime_conf) response_data['train_runtime_conf'] = json_loads(job.f_train_runtime_conf) response_data['model_info'] = {'model_id': response_data['runtime_conf']['job_parameters']['model_id'], 'model_version': response_data['runtime_conf']['job_parameters'][ 'model_version']} return get_json_result(retcode=0, retmsg='success', data=response_data) @manager.route('/log', methods=['get']) def job_log(): job_id = request.json.get('job_id', '') memory_file = io.BytesIO() tar = tarfile.open(fileobj=memory_file, mode='w:gz') job_log_dir = job_utils.get_job_log_directory(job_id=job_id) for root, dir, files in os.walk(job_log_dir): for file in files: full_path = os.path.join(root, file) rel_path = os.path.relpath(full_path, job_log_dir) tar.add(full_path, rel_path) tar.close() memory_file.seek(0) return send_file(memory_file, attachment_filename='job_{}_log.tar.gz'.format(job_id), as_attachment=True) @manager.route('/task/query', methods=['POST']) def query_task(): tasks = job_utils.query_task(**request.json) if not tasks: return get_json_result(retcode=101, retmsg='find task failed') return get_json_result(retcode=0, retmsg='success', data=[task.to_json() for task in tasks]) # Scheduling interface 
@manager.route('/<job_id>/<role>/<party_id>/create', methods=['POST']) def create_job(job_id, role, party_id): JobController.update_job_status(job_id=job_id, role=role, party_id=int(party_id), job_info=request.json, create=True) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<role>/<party_id>/status', methods=['POST']) def job_status(job_id, role, party_id): JobController.update_job_status(job_id=job_id, role=role, party_id=int(party_id), job_info=request.json, create=False) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<role>/<party_id>/<model_id>/<model_version>/save/pipeline', methods=['POST']) def save_pipeline(job_id, role, party_id, model_id, model_version): JobController.save_pipeline(job_id=job_id, role=role, party_id=party_id, model_id=base64_decode(model_id), model_version=base64_decode(model_version)) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<role>/<party_id>/kill', methods=['POST']) def kill_job(job_id, role, party_id): JobController.kill_job(job_id=job_id, role=role, party_id=int(party_id), job_initiator=request.json.get('job_initiator', {})) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<role>/<party_id>/clean', methods=['POST']) def clean(job_id, role, party_id): JobController.clean_job(job_id=job_id, role=role, party_id=party_id) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<component_name>/<task_id>/<role>/<party_id>/run', methods=['POST']) def run_task(job_id, component_name, task_id, role, party_id): TaskScheduler.start_task(job_id, component_name, task_id, role, party_id, request.json) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<component_name>/<task_id>/<role>/<party_id>/status', methods=['POST']) def task_status(job_id, component_name, task_id, role, party_id): JobController.update_task_status(job_id, component_name, task_id, role, party_id, request.json) return get_json_result(retcode=0, retmsg='success')
[ "fate_flow.utils.job_utils.query_job", "fate_flow.driver.job_controller.JobController.clean_job", "fate_flow.driver.job_controller.JobController.submit_job", "fate_flow.utils.job_utils.query_task", "fate_flow.driver.job_controller.JobController.update_task_status", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.api_utils.get_json_result", "fate_flow.driver.task_scheduler.TaskScheduler.start_task", "fate_flow.utils.job_utils.get_job_log_directory" ]
[((1048, 1063), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1053, 1063), False, 'from flask import Flask, request, send_file\n'), ((1127, 1151), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1148, 1151), False, 'from fate_flow.settings import stat_logger\n'), ((1361, 1399), 'fate_flow.driver.job_controller.JobController.submit_job', 'JobController.submit_job', (['request.json'], {}), '(request.json)\n', (1385, 1399), False, 'from fate_flow.driver.job_controller import JobController\n'), ((1411, 1584), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': 'job_id', 'data': "{'job_dsl_path': job_dsl_path, 'job_runtime_conf_path':\n job_runtime_conf_path, 'model_info': model_info, 'board_url': board_url}"}), "(job_id=job_id, data={'job_dsl_path': job_dsl_path,\n 'job_runtime_conf_path': job_runtime_conf_path, 'model_info':\n model_info, 'board_url': board_url})\n", (1426, 1584), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1907, 1951), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (1922, 1951), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2025, 2060), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {}), '(**request.json)\n', (2044, 2060), False, 'from fate_flow.utils import job_utils\n'), ((2317, 2352), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {}), '(**request.json)\n', (2336, 2352), False, 'from fate_flow.utils import job_utils\n'), ((3185, 3215), 'flask.request.json.get', 'request.json.get', (['"""job_id"""', '""""""'], {}), "('job_id', '')\n", (3201, 3215), False, 'from flask import Flask, request, send_file\n'), ((3234, 3246), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3244, 3246), False, 'import io\n'), ((3257, 3303), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'memory_file', 'mode': '"""w:gz"""'}), "(fileobj=memory_file, mode='w:gz')\n", (3269, 3303), False, 'import tarfile\n'), ((3322, 3368), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (3353, 3368), False, 'from fate_flow.utils import job_utils\n'), ((3397, 3417), 'os.walk', 'os.walk', (['job_log_dir'], {}), '(job_log_dir)\n', (3404, 3417), False, 'import os\n'), ((3829, 3865), 'fate_flow.utils.job_utils.query_task', 'job_utils.query_task', ([], {}), '(**request.json)\n', (3849, 3865), False, 'from fate_flow.utils import job_utils\n'), ((4357, 4401), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (4372, 4401), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((4685, 4729), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (4700, 4729), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((5071, 5115), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (5086, 5115), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((5393, 5437), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (5408, 5437), False, 'from 
fate_flow.utils.api_utils import get_json_result\n'), ((5549, 5617), 'fate_flow.driver.job_controller.JobController.clean_job', 'JobController.clean_job', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (5572, 5617), False, 'from fate_flow.driver.job_controller import JobController\n'), ((5629, 5673), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (5644, 5673), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((5838, 5929), 'fate_flow.driver.task_scheduler.TaskScheduler.start_task', 'TaskScheduler.start_task', (['job_id', 'component_name', 'task_id', 'role', 'party_id', 'request.json'], {}), '(job_id, component_name, task_id, role, party_id,\n request.json)\n', (5862, 5929), False, 'from fate_flow.driver.task_scheduler import TaskScheduler\n'), ((5937, 5981), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (5952, 5981), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((6152, 6251), 'fate_flow.driver.job_controller.JobController.update_task_status', 'JobController.update_task_status', (['job_id', 'component_name', 'task_id', 'role', 'party_id', 'request.json'], {}), '(job_id, component_name, task_id, role,\n party_id, request.json)\n', (6184, 6251), False, 'from fate_flow.driver.job_controller import JobController\n'), ((6259, 6303), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (6274, 6303), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2093, 2147), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find job failed"""'}), "(retcode=101, retmsg='find job failed')\n", (2108, 2147), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2385, 2439), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find job failed"""'}), "(retcode=101, retmsg='find job failed')\n", (2400, 2439), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2581, 2602), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_dsl'], {}), '(job.f_dsl)\n', (2591, 2602), False, 'from arch.api.utils.core import json_loads\n'), ((2643, 2673), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_runtime_conf'], {}), '(job.f_runtime_conf)\n', (2653, 2673), False, 'from arch.api.utils.core import json_loads\n'), ((2720, 2756), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_train_runtime_conf'], {}), '(job.f_train_runtime_conf)\n', (2730, 2756), False, 'from arch.api.utils.core import json_loads\n'), ((3050, 3114), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'response_data'}), "(retcode=0, retmsg='success', data=response_data)\n", (3065, 3114), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3899, 3954), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find task failed"""'}), "(retcode=101, retmsg='find task failed')\n", (3914, 3954), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1864, 1894), 'flask.request.json.get', 'request.json.get', (['"""job_id"""', '""""""'], 
{}), "('job_id', '')\n", (1880, 1894), False, 'from flask import Flask, request, send_file\n'), ((3470, 3494), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (3482, 3494), False, 'import os\n'), ((3518, 3557), 'os.path.relpath', 'os.path.relpath', (['full_path', 'job_log_dir'], {}), '(full_path, job_log_dir)\n', (3533, 3557), False, 'import os\n'), ((4991, 5014), 'arch.api.utils.core.base64_decode', 'base64_decode', (['model_id'], {}), '(model_id)\n', (5004, 5014), False, 'from arch.api.utils.core import base64_decode\n'), ((5030, 5058), 'arch.api.utils.core.base64_decode', 'base64_decode', (['model_version'], {}), '(model_version)\n', (5043, 5058), False, 'from arch.api.utils.core import base64_decode\n'), ((5343, 5380), 'flask.request.json.get', 'request.json.get', (['"""job_initiator"""', '{}'], {}), "('job_initiator', {})\n", (5359, 5380), False, 'from flask import Flask, request, send_file\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np from fate_arch.session import computing_session as session from fate_flow.entity.metric import Metric from fate_flow.entity.metric import MetricMeta from federatedml.evaluation.metrics import clustering_metric from federatedml.framework.hetero.procedure import table_aggregator from federatedml.param.hetero_kmeans_param import KmeansParam from federatedml.unsupervised_learning.kmeans.kmeans_model_base import BaseKmeansModel from federatedml.util import LOGGER from federatedml.util import consts from federatedml.framework.weights import NumpyWeights class HeteroKmeansArbiter(BaseKmeansModel): def __init__(self): super(HeteroKmeansArbiter, self).__init__() self.model_param = KmeansParam() # self.dist_aggregator = secure_sum_aggregator.Server(enable_secure_aggregate=False) # self.cluster_dist_aggregator = secure_sum_aggregator.Server(enable_secure_aggregate=False) self.DBI = 0 self.aggregator = table_aggregator.Server(enable_secure_aggregate=True) def callback_dbi(self, iter_num, dbi): metric_meta = MetricMeta(name='train', metric_type="DBI", extra_metas={ "unit_name": "iters", }) self.callback_meta(metric_name='DBI', metric_namespace='train', metric_meta=metric_meta) self.callback_metric(metric_name='DBI', metric_namespace='train', metric_data=[Metric(iter_num, dbi)]) def sum_in_cluster(self, iterator): sum_result = dict() for k, v in iterator: if v[1] not in sum_result: sum_result[v[1]] = np.sqrt(v[0][v[1]]) else: sum_result[v[1]] += np.sqrt(v[0][v[1]]) return sum_result def cal_ave_dist(self, dist_cluster_table, cluster_result): dist_centroid_dist_table = dist_cluster_table.applyPartitions(self.sum_in_cluster).reduce(self.sum_dict) cluster_count = cluster_result.applyPartitions(self.count).reduce(self.sum_dict) cal_ave_dist_list = [] for key in cluster_count.keys(): count = cluster_count[key] cal_ave_dist_list.append([key, count, dist_centroid_dist_table[key] / count]) return cal_ave_dist_list @staticmethod def max_radius(iterator): radius_result = dict() for k, v in iterator: if v[0] not in radius_result: radius_result[v[0]] = v[1] elif v[1] >= radius_result[v[0]]: radius_result[v[0]] = v[1] return radius_result @staticmethod def get_max_radius(v1, v2): rs = {} for k1 in v1.keys() | v2.keys(): rs[k1] = max(v1.get(k1, 0), v2.get(k1, 0)) return rs def cal_dbi(self, dist_sum, cluster_result, suffix): dist_cluster_table = dist_sum.join(cluster_result, lambda v1, v2: [v1, v2]) dist_table = self.cal_ave_dist(dist_cluster_table, cluster_result) # ave dist in each cluster if len(dist_table) == 1: raise ValueError('Only one class detected. 
DBI calculation error') cluster_dist = self.aggregator.sum_model(suffix=(suffix,)) cluster_avg_intra_dist = [] for i in range(len(dist_table)): cluster_avg_intra_dist.append(dist_table[i][2]) self.DBI = clustering_metric.DaviesBouldinIndex.compute(self, cluster_avg_intra_dist, list(cluster_dist._weights)) self.callback_dbi(suffix - 1, self.DBI) def fit(self, data_instances=None, validate_data=None): LOGGER.info("Enter hetero Kmeans arbiter fit") last_cluster_result = None while self.n_iter_ < self.max_iter: dist_sum = self.aggregator.aggregate_tables(suffix=(self.n_iter_,)) if last_cluster_result is not None: self.cal_dbi(dist_sum, last_cluster_result, self.n_iter_) cluster_result = dist_sum.mapValues(lambda v: np.argmin(v)) self.aggregator.send_aggregated_tables(cluster_result, suffix=(self.n_iter_,)) tol1 = self.transfer_variable.guest_tol.get(idx=0, suffix=(self.n_iter_,)) tol2 = self.transfer_variable.host_tol.get(idx=0, suffix=(self.n_iter_,)) tol_final = tol1 + tol2 self.is_converged = True if tol_final < self.tol else False LOGGER.debug(f"iter: {self.n_iter_}, tol_final: {tol_final}, tol: {self.tol}," f" is_converge: {self.is_converged}") self.transfer_variable.arbiter_tol.remote(self.is_converged, role=consts.HOST, idx=-1, suffix=(self.n_iter_,)) self.transfer_variable.arbiter_tol.remote(self.is_converged, role=consts.GUEST, idx=0, suffix=(self.n_iter_,)) last_cluster_result = cluster_result self.n_iter_ += 1 if self.is_converged: break # calculate finall round dbi dist_sum = self.aggregator.aggregate_tables(suffix=(self.n_iter_,)) cluster_result = dist_sum.mapValues(lambda v: np.argmin(v)) self.aggregator.send_aggregated_tables(cluster_result, suffix=(self.n_iter_,)) self.cal_dbi(dist_sum, last_cluster_result, self.n_iter_) dist_sum_dbi = self.aggregator.aggregate_tables(suffix=(self.n_iter_ + 1,)) self.aggregator.send_aggregated_tables(cluster_result, suffix=(self.n_iter_ + 1,)) self.cal_dbi(dist_sum_dbi, cluster_result, self.n_iter_ + 1) def predict(self, data_instances=None): LOGGER.info("Start predict ...") res_dict = self.aggregator.aggregate_tables(suffix='predict') cluster_result = res_dict.mapValues(lambda v: np.argmin(v)) cluster_dist_result = res_dict.mapValues(lambda v: min(v)) self.aggregator.send_aggregated_tables(cluster_result, suffix='predict') res_dict_dbi = self.aggregator.aggregate_tables(suffix='predict_dbi') self.aggregator.send_aggregated_tables(cluster_result, suffix='predict_dbi') dist_cluster_table = res_dict.join(cluster_result, lambda v1, v2: [v1, v2]) dist_cluster_table_dbi = res_dict_dbi.join(cluster_result, lambda v1, v2: [v1, v2]) dist_table = self.cal_ave_dist(dist_cluster_table, cluster_result) # ave dist in each cluster dist_table_dbi = self.cal_ave_dist(dist_cluster_table_dbi, cluster_result) # if len(dist_table) == 1: # raise ValueError('Only one class detected. DBI calculation error') cluster_dist = self.aggregator.sum_model(suffix='predict') dist_cluster_table_out = cluster_result.join(cluster_dist_result, lambda v1, v2: [int(v1), float(v2)]) cluster_max_radius = dist_cluster_table_out.applyPartitions(self.max_radius).reduce(self.get_max_radius) result = [] for i in range(len(dist_table)): c_key = dist_table[i][0] result.append(tuple( [int(c_key), [dist_table[i][1], dist_table_dbi[i][2], cluster_max_radius[c_key], list(cluster_dist._weights)]])) predict_result1 = session.parallelize(result, partition=res_dict.partitions, include_key=True) predict_result2 = dist_cluster_table_out return predict_result1, predict_result2
[ "fate_flow.entity.metric.Metric", "fate_flow.entity.metric.MetricMeta" ]
[((1340, 1353), 'federatedml.param.hetero_kmeans_param.KmeansParam', 'KmeansParam', ([], {}), '()\n', (1351, 1353), False, 'from federatedml.param.hetero_kmeans_param import KmeansParam\n'), ((1595, 1648), 'federatedml.framework.hetero.procedure.table_aggregator.Server', 'table_aggregator.Server', ([], {'enable_secure_aggregate': '(True)'}), '(enable_secure_aggregate=True)\n', (1618, 1648), False, 'from federatedml.framework.hetero.procedure import table_aggregator\n'), ((1715, 1794), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""train"""', 'metric_type': '"""DBI"""', 'extra_metas': "{'unit_name': 'iters'}"}), "(name='train', metric_type='DBI', extra_metas={'unit_name': 'iters'})\n", (1725, 1794), False, 'from fate_flow.entity.metric import MetricMeta\n'), ((4354, 4400), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""Enter hetero Kmeans arbiter fit"""'], {}), "('Enter hetero Kmeans arbiter fit')\n", (4365, 4400), False, 'from federatedml.util import LOGGER\n'), ((6402, 6434), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""Start predict ..."""'], {}), "('Start predict ...')\n", (6413, 6434), False, 'from federatedml.util import LOGGER\n'), ((7957, 8033), 'fate_arch.session.computing_session.parallelize', 'session.parallelize', (['result'], {'partition': 'res_dict.partitions', 'include_key': '(True)'}), '(result, partition=res_dict.partitions, include_key=True)\n', (7976, 8033), True, 'from fate_arch.session import computing_session as session\n'), ((5138, 5260), 'federatedml.util.LOGGER.debug', 'LOGGER.debug', (['f"""iter: {self.n_iter_}, tol_final: {tol_final}, tol: {self.tol}, is_converge: {self.is_converged}"""'], {}), "(\n f'iter: {self.n_iter_}, tol_final: {tol_final}, tol: {self.tol}, is_converge: {self.is_converged}'\n )\n", (5150, 5260), False, 'from federatedml.util import LOGGER\n'), ((2374, 2393), 'numpy.sqrt', 'np.sqrt', (['v[0][v[1]]'], {}), '(v[0][v[1]])\n', (2381, 2393), True, 'import numpy as np\n'), ((2448, 2467), 'numpy.sqrt', 'np.sqrt', (['v[0][v[1]]'], {}), '(v[0][v[1]])\n', (2455, 2467), True, 'import numpy as np\n'), ((5938, 5950), 'numpy.argmin', 'np.argmin', (['v'], {}), '(v)\n', (5947, 5950), True, 'import numpy as np\n'), ((6559, 6571), 'numpy.argmin', 'np.argmin', (['v'], {}), '(v)\n', (6568, 6571), True, 'import numpy as np\n'), ((2177, 2198), 'fate_flow.entity.metric.Metric', 'Metric', (['iter_num', 'dbi'], {}), '(iter_num, dbi)\n', (2183, 2198), False, 'from fate_flow.entity.metric import Metric\n'), ((4740, 4752), 'numpy.argmin', 'np.argmin', (['v'], {}), '(v)\n', (4749, 4752), True, 'import numpy as np\n')]
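In the arbiter code above, each sample is assigned to the centroid with the smallest aggregated distance (np.argmin over the summed distance table), and convergence is declared when the combined guest and host tolerances fall below self.tol. A tiny self-contained numpy illustration of those two steps, with synthetic numbers:

import numpy as np

dist_sum = np.array([[0.9, 0.2, 0.5],   # one row per sample,
                     [0.1, 0.7, 0.8],   # one column per centroid
                     [0.4, 0.3, 0.6]])
cluster_result = dist_sum.argmin(axis=1)   # -> array([1, 0, 1])
print(cluster_result)

tol1, tol2, tol = 0.003, 0.002, 0.01       # guest tol + host tol vs. threshold
is_converged = (tol1 + tol2) < tol
print(is_converged)                        # True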
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from flask import request

from fate_flow.db.runtime_config import RuntimeConfig
from fate_flow.utils.api_utils import get_json_result
from fate_flow.settings import API_VERSION
from fate_flow.db.service_registry import ServiceRegistry
from fate_flow.db.config_manager import ConfigManager
from fate_flow.db.job_default_config import JobDefaultConfig
from fate_flow.entity import RetCode


@manager.route('/version/get', methods=['POST'])
def get_fate_version_info():
    module = request.json.get('module', None)
    if module:
        version = {module: RuntimeConfig.get_env(module)}
    else:
        version = RuntimeConfig.get_all_env()
    version["API"] = API_VERSION
    return get_json_result(data=version)


@manager.route('/service/get', methods=['POST'])
def get_service_registry():
    return get_json_result(data=ServiceRegistry.get_all())


@manager.route('/service/<service_name>/register', methods=['POST'])
def register_service(service_name: str):
    ServiceRegistry.register(service_name.upper(), request.json)
    if ServiceRegistry.get(service_name) is not None:
        return get_json_result()
    else:
        return get_json_result(retcode=RetCode.OPERATING_ERROR)


@manager.route('/service/<service_name>/get', methods=['POST'])
def get_service(service_name: str):
    return get_json_result(data=ServiceRegistry.get(service_name.upper()))


@manager.route('/reload', methods=['POST'])
def reload():
    config = ConfigManager.load()
    return get_json_result(data=config)


@manager.route('/config/job/default', methods=['POST'])
def job_default_config():
    return get_json_result(data=JobDefaultConfig.get_all())
[ "fate_flow.db.runtime_config.RuntimeConfig.get_env", "fate_flow.db.job_default_config.JobDefaultConfig.get_all", "fate_flow.db.config_manager.ConfigManager.load", "fate_flow.db.service_registry.ServiceRegistry.get", "fate_flow.utils.api_utils.get_json_result", "fate_flow.db.service_registry.ServiceRegistry.get_all", "fate_flow.db.runtime_config.RuntimeConfig.get_all_env" ]
[((1096, 1128), 'flask.request.json.get', 'request.json.get', (['"""module"""', 'None'], {}), "('module', None)\n", (1112, 1128), False, 'from flask import request\n'), ((1302, 1331), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'version'}), '(data=version)\n', (1317, 1331), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2058, 2078), 'fate_flow.db.config_manager.ConfigManager.load', 'ConfigManager.load', ([], {}), '()\n', (2076, 2078), False, 'from fate_flow.db.config_manager import ConfigManager\n'), ((2090, 2118), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'config'}), '(data=config)\n', (2105, 2118), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1230, 1257), 'fate_flow.db.runtime_config.RuntimeConfig.get_all_env', 'RuntimeConfig.get_all_env', ([], {}), '()\n', (1255, 1257), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((1654, 1687), 'fate_flow.db.service_registry.ServiceRegistry.get', 'ServiceRegistry.get', (['service_name'], {}), '(service_name)\n', (1673, 1687), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((1716, 1733), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (1731, 1733), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1759, 1807), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'RetCode.OPERATING_ERROR'}), '(retcode=RetCode.OPERATING_ERROR)\n', (1774, 1807), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1171, 1200), 'fate_flow.db.runtime_config.RuntimeConfig.get_env', 'RuntimeConfig.get_env', (['module'], {}), '(module)\n', (1192, 1200), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((1443, 1468), 'fate_flow.db.service_registry.ServiceRegistry.get_all', 'ServiceRegistry.get_all', ([], {}), '()\n', (1466, 1468), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((2235, 2261), 'fate_flow.db.job_default_config.JobDefaultConfig.get_all', 'JobDefaultConfig.get_all', ([], {}), '()\n', (2259, 2261), False, 'from fate_flow.db.job_default_config import JobDefaultConfig\n')]
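A hypothetical way to exercise the version/service/config routes above from a client; the host, port and "/v1" prefix are assumptions about a local FATE Flow server, not something stated in this file.

import requests

BASE = "http://127.0.0.1:9380/v1"          # assumed local FATE Flow address

print(requests.post(f"{BASE}/version/get", json={"module": "FATE"}).json())
print(requests.post(f"{BASE}/service/get", json={}).json())
print(requests.post(f"{BASE}/config/job/default", json={}).json())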
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import time from fate_arch.common.base_utils import current_timestamp from fate_flow.controller.engine_adapt import build_engine from fate_flow.db.db_models import DB, Job, DependenciesStorageMeta from fate_arch.session import Session from fate_flow.utils.log_utils import detect_logger from fate_flow.manager.dependence_manager import DependenceManager from fate_flow.scheduler.federated_scheduler import FederatedScheduler from fate_flow.entity.run_status import JobStatus, TaskStatus, EndStatus from fate_flow.settings import SESSION_VALID_PERIOD from fate_flow.utils import cron, job_utils, process_utils from fate_flow.db.runtime_config import RuntimeConfig from fate_flow.operation.job_saver import JobSaver from fate_flow.manager.resource_manager import ResourceManager from fate_arch.common import EngineType class Detector(cron.Cron): def run_do(self): self.detect_running_task() self.detect_running_job() self.detect_resource_record() self.detect_expired_session() self.detect_dependence_upload_record() @classmethod def detect_running_task(cls): detect_logger().info('start to detect running task..') count = 0 try: running_tasks = JobSaver.query_task(party_status=TaskStatus.RUNNING, only_latest=False) stop_job_ids = set() for task in running_tasks: if not task.f_engine_conf and task.f_run_ip != RuntimeConfig.JOB_SERVER_HOST and not task.f_run_on_this_party: continue count += 1 try: process_exist = build_engine(task.f_engine_conf.get("computing_engine")).is_alive(task) if not process_exist: msg = f"task {task.f_task_id} {task.f_task_version} on {task.f_role} {task.f_party_id}" detect_logger(job_id=task.f_job_id).info(f"{msg} with {task.f_party_status} process {task.f_run_pid} does not exist") time.sleep(3) _tasks = JobSaver.query_task(task_id=task.f_task_id, task_version=task.f_task_version, role=task.f_role, party_id=task.f_party_id) if _tasks: if _tasks[0].f_party_status == TaskStatus.RUNNING: stop_job_ids.add(task.f_job_id) detect_logger(task.f_job_id).info(f"{msg} party status has been checked twice, try to stop job") else: detect_logger(task.f_job_id).info(f"{msg} party status has changed to {_tasks[0].f_party_status}, may be stopped by task_controller.stop_task, pass stop job again") else: detect_logger(task.f_job_id).warning(f"{msg} can not found on db") except Exception as e: detect_logger(job_id=task.f_job_id).exception(e) if stop_job_ids: detect_logger().info('start to stop jobs: {}'.format(stop_job_ids)) stop_jobs = set() for job_id in stop_job_ids: jobs = JobSaver.query_job(job_id=job_id) if jobs: stop_jobs.add(jobs[0]) cls.request_stop_jobs(jobs=stop_jobs, stop_msg="task executor process abort", stop_status=JobStatus.FAILED) except Exception as e: detect_logger().exception(e) finally: detect_logger().info(f"finish detect {count} running task") @classmethod def detect_running_job(cls): detect_logger().info('start detect running job') try: running_jobs = 
JobSaver.query_job(status=JobStatus.RUNNING, is_initiator=True) stop_jobs = set() for job in running_jobs: try: if job_utils.check_job_is_timeout(job): stop_jobs.add(job) except Exception as e: detect_logger(job_id=job.f_job_id).exception(e) cls.request_stop_jobs(jobs=stop_jobs, stop_msg="running timeout", stop_status=JobStatus.TIMEOUT) except Exception as e: detect_logger().exception(e) finally: detect_logger().info('finish detect running job') @classmethod @DB.connection_context() def detect_resource_record(cls): detect_logger().info('start detect resource recycle') try: filter_status = EndStatus.status_list() filter_status.append(JobStatus.WAITING) jobs = Job.select().where(Job.f_resource_in_use == True, current_timestamp() - Job.f_apply_resource_time > 10 * 60 * 1000, Job.f_status << filter_status) stop_jobs = set() for job in jobs: if job.f_status == JobStatus.WAITING: stop_jobs.add(job) else: try: detect_logger(job_id=job.f_job_id).info(f"start to return job {job.f_job_id} on {job.f_role} {job.f_party_id} resource") flag = ResourceManager.return_job_resource(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id) if flag: detect_logger(job_id=job.f_job_id).info(f"return job {job.f_job_id} on {job.f_role} {job.f_party_id} resource successfully") else: detect_logger(job_id=job.f_job_id).info(f"return job {job.f_job_id} on {job.f_role} {job.f_party_id} resource failed") except Exception as e: detect_logger(job_id=job.f_job_id).exception(e) cls.request_stop_jobs(jobs=stop_jobs, stop_msg="start timeout", stop_status=JobStatus.TIMEOUT) except Exception as e: detect_logger().exception(e) finally: detect_logger().info('finish detect resource recycle') @classmethod @DB.connection_context() def detect_dependence_upload_record(cls): detect_logger().info('start detect dependence upload process') try: upload_process_list = DependenciesStorageMeta.select().where(DependenciesStorageMeta.f_upload_status==True) for dependence in upload_process_list: if int(dependence.f_pid): is_alive = process_utils.check_process(pid=int(dependence.f_pid)) if not is_alive: try: DependenceManager.kill_upload_process(version=dependence.f_version, storage_engine=dependence.f_storage_engine, dependence_type=dependence.f_type) except Exception as e: detect_logger().exception(e) except Exception as e: detect_logger().exception(e) finally: detect_logger().info('finish detect dependence upload process') @classmethod def detect_expired_session(cls): ttl = SESSION_VALID_PERIOD detect_logger().info(f'start detect expired session by ttl {ttl/1000} s') try: session_records = Session.query_sessions(create_time=[None, current_timestamp() - ttl]) manager_session_id_list = [] for session_record in session_records: manager_session_id = session_record.f_manager_session_id if manager_session_id not in manager_session_id: continue manager_session_id_list.append(manager_session_id) detect_logger().info(f'start destroy session {manager_session_id}') try: sess = Session(session_id=manager_session_id, options={"logger": detect_logger()}) sess.destroy_all_sessions() except Exception as e: detect_logger().error(f'stop session {manager_session_id} error', e) finally: detect_logger().info(f'stop session {manager_session_id} successfully') except Exception as e: detect_logger().error('detect expired session error', e) finally: detect_logger().info('finish detect expired session') @classmethod def request_stop_jobs(cls, jobs: [Job], stop_msg, stop_status): if not len(jobs): return detect_logger().info(f"have {len(jobs)} should be stopped, 
because of {stop_msg}") for job in jobs: try: detect_logger(job_id=job.f_job_id).info(f"detector request start to stop job {job.f_job_id}, because of {stop_msg}") FederatedScheduler.request_stop_job(job=job, stop_status=stop_status) detect_logger(job_id=job.f_job_id).info(f"detector request stop job {job.f_job_id} successfully") except Exception as e: detect_logger(job_id=job.f_job_id).exception(e)
[ "fate_flow.manager.dependence_manager.DependenceManager.kill_upload_process", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.request_stop_job", "fate_flow.db.db_models.DependenciesStorageMeta.select", "fate_flow.utils.job_utils.check_job_is_timeout", "fate_flow.utils.log_utils.detect_logger", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.manager.resource_manager.ResourceManager.return_job_resource", "fate_flow.db.db_models.DB.connection_context", "fate_flow.db.db_models.Job.select", "fate_flow.entity.run_status.EndStatus.status_list", "fate_flow.operation.job_saver.JobSaver.query_task" ]
[((4949, 4972), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (4970, 4972), False, 'from fate_flow.db.db_models import DB, Job, DependenciesStorageMeta\n'), ((6593, 6616), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (6614, 6616), False, 'from fate_flow.db.db_models import DB, Job, DependenciesStorageMeta\n'), ((1851, 1922), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'party_status': 'TaskStatus.RUNNING', 'only_latest': '(False)'}), '(party_status=TaskStatus.RUNNING, only_latest=False)\n', (1870, 1922), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((4304, 4367), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'status': 'JobStatus.RUNNING', 'is_initiator': '(True)'}), '(status=JobStatus.RUNNING, is_initiator=True)\n', (4322, 4367), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((5113, 5136), 'fate_flow.entity.run_status.EndStatus.status_list', 'EndStatus.status_list', ([], {}), '()\n', (5134, 5136), False, 'from fate_flow.entity.run_status import JobStatus, TaskStatus, EndStatus\n'), ((1737, 1752), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (1750, 1752), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((3773, 3806), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (3791, 3806), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((4215, 4230), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (4228, 4230), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((5018, 5033), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (5031, 5033), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((6671, 6686), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (6684, 6686), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((7786, 7801), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (7799, 7801), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((9122, 9137), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (9135, 9137), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((9396, 9465), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.request_stop_job', 'FederatedScheduler.request_stop_job', ([], {'job': 'job', 'stop_status': 'stop_status'}), '(job=job, stop_status=stop_status)\n', (9431, 9465), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((4096, 4111), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (4109, 4111), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((4479, 4514), 'fate_flow.utils.job_utils.check_job_is_timeout', 'job_utils.check_job_is_timeout', (['job'], {}), '(job)\n', (4509, 4514), False, 'from fate_flow.utils import cron, job_utils, process_utils\n'), ((4876, 4891), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (4889, 4891), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((5208, 5220), 'fate_flow.db.db_models.Job.select', 'Job.select', ([], {}), '()\n', (5218, 5220), False, 'from fate_flow.db.db_models import DB, Job, DependenciesStorageMeta\n'), ((6515, 6530), 
'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (6528, 6530), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((6781, 6813), 'fate_flow.db.db_models.DependenciesStorageMeta.select', 'DependenciesStorageMeta.select', ([], {}), '()\n', (6811, 6813), False, 'from fate_flow.db.db_models import DB, Job, DependenciesStorageMeta\n'), ((7624, 7639), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (7637, 7639), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((8929, 8944), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (8942, 8944), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((2627, 2640), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2637, 2640), False, 'import time\n'), ((2674, 2800), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'task_id': 'task.f_task_id', 'task_version': 'task.f_task_version', 'role': 'task.f_role', 'party_id': 'task.f_party_id'}), '(task_id=task.f_task_id, task_version=task.\n f_task_version, role=task.f_role, party_id=task.f_party_id)\n', (2693, 2800), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((3612, 3627), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (3625, 3627), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((4038, 4053), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (4051, 4053), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((4818, 4833), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (4831, 4833), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((5258, 5277), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (5275, 5277), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((5730, 5832), 'fate_flow.manager.resource_manager.ResourceManager.return_job_resource', 'ResourceManager.return_job_resource', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(job_id=job.f_job_id, role=job.f_role,\n party_id=job.f_party_id)\n', (5765, 5832), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((6457, 6472), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (6470, 6472), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((7566, 7581), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (7579, 7581), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((8315, 8330), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (8328, 8330), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((8843, 8858), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (8856, 8858), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((9263, 9297), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (9276, 9297), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((9482, 9516), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (9495, 9516), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((7140, 7295), 'fate_flow.manager.dependence_manager.DependenceManager.kill_upload_process', 
'DependenceManager.kill_upload_process', ([], {'version': 'dependence.f_version', 'storage_engine': 'dependence.f_storage_engine', 'dependence_type': 'dependence.f_type'}), '(version=dependence.f_version,\n storage_engine=dependence.f_storage_engine, dependence_type=dependence.\n f_type)\n', (7177, 7295), False, 'from fate_flow.manager.dependence_manager import DependenceManager\n'), ((7945, 7964), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (7962, 7964), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((8728, 8743), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (8741, 8743), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((9631, 9665), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (9644, 9665), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((2485, 2520), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'task.f_job_id'}), '(job_id=task.f_job_id)\n', (2498, 2520), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((3518, 3553), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'task.f_job_id'}), '(job_id=task.f_job_id)\n', (3531, 3553), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((4618, 4652), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (4631, 4652), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((5578, 5612), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (5591, 5612), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((8489, 8504), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (8502, 8504), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((8614, 8629), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (8627, 8629), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((3392, 3420), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (3405, 3420), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((5890, 5924), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (5903, 5924), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((6073, 6107), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (6086, 6107), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((6259, 6293), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (6272, 6293), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((3006, 3034), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (3019, 3034), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((3169, 3197), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (3182, 3197), False, 'from fate_flow.utils.log_utils import detect_logger\n'), ((7494, 7509), 'fate_flow.utils.log_utils.detect_logger', 'detect_logger', ([], {}), '()\n', (7507, 7509), False, 'from 
fate_flow.utils.log_utils import detect_logger\n')]
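The extract_api entries above pair character spans with dotted call names pulled from the flattened source text. Below is a minimal sketch of how similar (span, dotted-name) pairs can be recovered with Python's standard ast module; it only approximates the first two fields of the tuples above, and the remaining fields come from the upstream extraction tool, so they are not reproduced here.

import ast

def extract_attribute_calls(source: str):
    # Yield ((col_start, col_end), dotted_name) for attribute-style calls,
    # e.g. JobSaver.query_job(...) -> "JobSaver.query_job".
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            parts = []
            cur = node.func
            while isinstance(cur, ast.Attribute):
                parts.append(cur.attr)
                cur = cur.value
            if isinstance(cur, ast.Name):
                parts.append(cur.id)
            yield (node.col_offset, node.end_col_offset), ".".join(reversed(parts))

for span, name in extract_attribute_calls("jobs = JobSaver.query_job(job_id=job_id)"):
    print(span, name)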
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sys from fate_arch.common.log import schedule_logger from fate_flow.controller.engine_operation.base import BaseEngine from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.entity.types import KillProcessStatusCode, TaskStatus from fate_flow.operation.task_executor import TaskExecutor from fate_flow.utils import job_utils class SparkEngine(BaseEngine): @staticmethod def run(job_id, component_name, task_id, task_version, role, party_id, task_parameters_path, run_parameters, task_info, **kwargs): if "SPARK_HOME" not in os.environ: raise EnvironmentError("SPARK_HOME not found") spark_home = os.environ["SPARK_HOME"] # additional configs spark_submit_config = run_parameters.spark_run deploy_mode = spark_submit_config.get("deploy-mode", "client") if deploy_mode not in ["client"]: raise ValueError(f"deploy mode {deploy_mode} not supported") spark_submit_cmd = os.path.join(spark_home, "bin/spark-submit") process_cmd = [spark_submit_cmd, f'--name={task_id}#{role}'] for k, v in spark_submit_config.items(): if k != "conf": process_cmd.append(f'--{k}={v}') if "conf" in spark_submit_config: for ck, cv in spark_submit_config["conf"].items(): process_cmd.append(f'--conf') process_cmd.append(f'{ck}={cv}') process_cmd.extend([ sys.modules[TaskExecutor.__module__].__file__, '-j', job_id, '-n', component_name, '-t', task_id, '-v', task_version, '-r', role, '-p', party_id, '-c', task_parameters_path, '--run_ip', RuntimeConfig.JOB_SERVER_HOST, '--job_server', '{}:{}'.format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT), ]) task_log_dir = os.path.join(job_utils.get_job_log_directory(job_id=job_id), role, party_id, component_name) task_job_dir = os.path.join(job_utils.get_job_directory(job_id=job_id), role, party_id, component_name) schedule_logger(job_id).info( 'job {} task {} {} on {} {} executor subprocess is ready'.format(job_id, task_id, task_version, role, party_id)) task_dir = os.path.dirname(task_parameters_path) p = job_utils.run_subprocess(job_id=job_id, config_dir=task_dir, process_cmd=process_cmd, log_dir=task_log_dir, job_dir=task_job_dir) task_info["run_pid"] = p.pid return p @staticmethod def kill(task): kill_status_code = job_utils.kill_task_executor_process(task) # session stop if kill_status_code == KillProcessStatusCode.KILLED or task.f_status not in {TaskStatus.WAITING}: job_utils.start_session_stop(task) @staticmethod def is_alive(task): return job_utils.check_job_process(int(task.f_run_pid))
[ "fate_flow.utils.job_utils.kill_task_executor_process", "fate_flow.utils.job_utils.get_job_directory", "fate_flow.utils.job_utils.run_subprocess", "fate_flow.utils.job_utils.start_session_stop", "fate_flow.utils.job_utils.get_job_log_directory" ]
[((1623, 1667), 'os.path.join', 'os.path.join', (['spark_home', '"""bin/spark-submit"""'], {}), "(spark_home, 'bin/spark-submit')\n", (1635, 1667), False, 'import os\n'), ((3015, 3052), 'os.path.dirname', 'os.path.dirname', (['task_parameters_path'], {}), '(task_parameters_path)\n', (3030, 3052), False, 'import os\n'), ((3065, 3199), 'fate_flow.utils.job_utils.run_subprocess', 'job_utils.run_subprocess', ([], {'job_id': 'job_id', 'config_dir': 'task_dir', 'process_cmd': 'process_cmd', 'log_dir': 'task_log_dir', 'job_dir': 'task_job_dir'}), '(job_id=job_id, config_dir=task_dir, process_cmd=\n process_cmd, log_dir=task_log_dir, job_dir=task_job_dir)\n', (3089, 3199), False, 'from fate_flow.utils import job_utils\n'), ((3352, 3394), 'fate_flow.utils.job_utils.kill_task_executor_process', 'job_utils.kill_task_executor_process', (['task'], {}), '(task)\n', (3388, 3394), False, 'from fate_flow.utils import job_utils\n'), ((2564, 2610), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (2595, 2610), False, 'from fate_flow.utils import job_utils\n'), ((2680, 2722), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (2707, 2722), False, 'from fate_flow.utils import job_utils\n'), ((3536, 3570), 'fate_flow.utils.job_utils.start_session_stop', 'job_utils.start_session_stop', (['task'], {}), '(task)\n', (3564, 3570), False, 'from fate_flow.utils import job_utils\n'), ((2764, 2787), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2779, 2787), False, 'from fate_arch.common.log import schedule_logger\n')]
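SparkEngine.run above assembles a spark-submit command line from the job's spark_run configuration, treating the "conf" key specially. A standalone sketch of that assembly logic follows; the configuration values are placeholders, while the flag layout mirrors the loop in the code above.

import os

def build_spark_submit_cmd(spark_home, task_id, role, spark_run_conf):
    # Mirrors the command construction in SparkEngine.run: plain keys become
    # --key=value flags, the "conf" dict becomes repeated --conf key=value pairs.
    cmd = [os.path.join(spark_home, "bin/spark-submit"), f"--name={task_id}#{role}"]
    for k, v in spark_run_conf.items():
        if k == "conf":
            for ck, cv in v.items():
                cmd.extend(["--conf", f"{ck}={cv}"])
        else:
            cmd.append(f"--{k}={v}")
    return cmd

print(build_spark_submit_cmd(
    "/opt/spark", "202201011200_task_0", "guest",
    {"num-executors": 2, "conf": {"spark.executor.memory": "4g"}},
))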
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_arch import storage from fate_arch.common.base_utils import current_timestamp from fate_flow.db.db_models import DB, Job from fate_arch.storage import StorageSessionBase from fate_arch.common.log import detect_logger from fate_flow.scheduler import FederatedScheduler from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus from fate_flow.utils import cron, job_utils from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.operation import JobSaver from fate_flow.manager import ResourceManager class Detector(cron.Cron): def run_do(self): self.detect_running_task() self.detect_running_job() self.detect_resource_record() self.detect_expired_session() @classmethod def detect_running_task(cls): detect_logger().info('start to detect running task..') count = 0 try: running_tasks = JobSaver.query_task(party_status=TaskStatus.RUNNING, run_on_this_party=True, run_ip=RuntimeConfig.JOB_SERVER_HOST, only_latest=False) stop_job_ids = set() for task in running_tasks: count += 1 try: process_exist = job_utils.check_job_process(int(task.f_run_pid)) if not process_exist: detect_logger(job_id=task.f_job_id).info( 'job {} task {} {} on {} {} process {} does not exist'.format( task.f_job_id, task.f_task_id, task.f_task_version, task.f_role, task.f_party_id, task.f_run_pid)) stop_job_ids.add(task.f_job_id) except Exception as e: detect_logger(job_id=task.f_job_id).exception(e) if stop_job_ids: detect_logger().info('start to stop jobs: {}'.format(stop_job_ids)) stop_jobs = set() for job_id in stop_job_ids: jobs = JobSaver.query_job(job_id=job_id) if jobs: stop_jobs.add(jobs[0]) cls.request_stop_jobs(jobs=stop_jobs, stop_msg="task executor process abort", stop_status=JobStatus.CANCELED) except Exception as e: detect_logger().exception(e) finally: detect_logger().info(f"finish detect {count} running task") @classmethod def detect_running_job(cls): detect_logger().info('start detect running job') try: running_jobs = JobSaver.query_job(status=JobStatus.RUNNING, is_initiator=True) stop_jobs = set() for job in running_jobs: try: if job_utils.check_job_is_timeout(job): stop_jobs.add(job) except Exception as e: detect_logger(job_id=job.f_job_id).exception(e) cls.request_stop_jobs(jobs=stop_jobs, stop_msg="running timeout", stop_status=JobStatus.TIMEOUT) except Exception as e: detect_logger().exception(e) finally: detect_logger().info('finish detect running job') @classmethod @DB.connection_context() def detect_resource_record(cls): detect_logger().info('start detect resource recycle') try: filter_status = EndStatus.status_list() filter_status.append(JobStatus.WAITING) jobs = Job.select().where(Job.f_resource_in_use == True, current_timestamp() - Job.f_apply_resource_time > 10 * 60 * 1000, Job.f_status << filter_status) stop_jobs = set() for job in jobs: if job.f_status == JobStatus.WAITING: stop_jobs.add(job) else: try: detect_logger(job_id=job.f_job_id).info(f"start to return job 
{job.f_job_id} on {job.f_role} {job.f_party_id} resource") flag = ResourceManager.return_job_resource(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id) if flag: detect_logger(job_id=job.f_job_id).info(f"return job {job.f_job_id} on {job.f_role} {job.f_party_id} resource successfully") else: detect_logger(job_id=job.f_job_id).info(f"return job {job.f_job_id} on {job.f_role} {job.f_party_id} resource failed") except Exception as e: detect_logger(job_id=job.f_job_id).exception(e) cls.request_stop_jobs(jobs=stop_jobs, stop_msg="start timeout", stop_status=JobStatus.TIMEOUT) except Exception as e: detect_logger().exception(e) finally: detect_logger().info('finish detect resource recycle') @classmethod def detect_expired_session(cls): detect_logger().info('start detect expired session') sessions_record = StorageSessionBase.query_expired_sessions_record(ttl=5 * 60 * 60 * 1000) for session_record in sessions_record: detect_logger().info('start stop session id {}'.format(session_record.f_session_id)) session = storage.Session.build(session_id=session_record.f_session_id, storage_engine=session_record.f_engine_name) session.destroy_session() detect_logger().info('session id {} success'.format(session_record.f_session_id)) @classmethod def request_stop_jobs(cls, jobs: [Job], stop_msg, stop_status): if not len(jobs): return detect_logger().info(f"have {len(jobs)} should be stopped, because of {stop_msg}") for job in jobs: try: detect_logger(job_id=job.f_job_id).info(f"detector request start to stop job {job.f_job_id}, because of {stop_msg}") FederatedScheduler.request_stop_job(job=job, stop_status=stop_status) detect_logger(job_id=job.f_job_id).info(f"detector request stop job {job.f_job_id} successfully") except Exception as e: detect_logger(job_id=job.f_job_id).exception(e)
[ "fate_flow.scheduler.FederatedScheduler.request_stop_job", "fate_flow.utils.job_utils.check_job_is_timeout", "fate_flow.db.db_models.DB.connection_context", "fate_flow.operation.JobSaver.query_task", "fate_flow.db.db_models.Job.select", "fate_flow.manager.ResourceManager.return_job_resource", "fate_flow.operation.JobSaver.query_job", "fate_flow.entity.types.EndStatus.status_list" ]
[((3926, 3949), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (3947, 3949), False, 'from fate_flow.db.db_models import DB, Job\n'), ((5689, 5761), 'fate_arch.storage.StorageSessionBase.query_expired_sessions_record', 'StorageSessionBase.query_expired_sessions_record', ([], {'ttl': '(5 * 60 * 60 * 1000)'}), '(ttl=5 * 60 * 60 * 1000)\n', (5737, 5761), False, 'from fate_arch.storage import StorageSessionBase\n'), ((1521, 1658), 'fate_flow.operation.JobSaver.query_task', 'JobSaver.query_task', ([], {'party_status': 'TaskStatus.RUNNING', 'run_on_this_party': '(True)', 'run_ip': 'RuntimeConfig.JOB_SERVER_HOST', 'only_latest': '(False)'}), '(party_status=TaskStatus.RUNNING, run_on_this_party=True,\n run_ip=RuntimeConfig.JOB_SERVER_HOST, only_latest=False)\n', (1540, 1658), False, 'from fate_flow.operation import JobSaver\n'), ((3281, 3344), 'fate_flow.operation.JobSaver.query_job', 'JobSaver.query_job', ([], {'status': 'JobStatus.RUNNING', 'is_initiator': '(True)'}), '(status=JobStatus.RUNNING, is_initiator=True)\n', (3299, 3344), False, 'from fate_flow.operation import JobSaver\n'), ((4090, 4113), 'fate_flow.entity.types.EndStatus.status_list', 'EndStatus.status_list', ([], {}), '()\n', (4111, 4113), False, 'from fate_flow.entity.types import JobStatus, TaskStatus, EndStatus\n'), ((5928, 6038), 'fate_arch.storage.Session.build', 'storage.Session.build', ([], {'session_id': 'session_record.f_session_id', 'storage_engine': 'session_record.f_engine_name'}), '(session_id=session_record.f_session_id,\n storage_engine=session_record.f_engine_name)\n', (5949, 6038), False, 'from fate_arch import storage\n'), ((1407, 1422), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (1420, 1422), False, 'from fate_arch.common.log import detect_logger\n'), ((2748, 2781), 'fate_flow.operation.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (2766, 2781), False, 'from fate_flow.operation import JobSaver\n'), ((3192, 3207), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (3205, 3207), False, 'from fate_arch.common.log import detect_logger\n'), ((3995, 4010), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (4008, 4010), False, 'from fate_arch.common.log import detect_logger\n'), ((5610, 5625), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (5623, 5625), False, 'from fate_arch.common.log import detect_logger\n'), ((6306, 6321), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (6319, 6321), False, 'from fate_arch.common.log import detect_logger\n'), ((6580, 6649), 'fate_flow.scheduler.FederatedScheduler.request_stop_job', 'FederatedScheduler.request_stop_job', ([], {'job': 'job', 'stop_status': 'stop_status'}), '(job=job, stop_status=stop_status)\n', (6615, 6649), False, 'from fate_flow.scheduler import FederatedScheduler\n'), ((3073, 3088), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (3086, 3088), False, 'from fate_arch.common.log import detect_logger\n'), ((3456, 3491), 'fate_flow.utils.job_utils.check_job_is_timeout', 'job_utils.check_job_is_timeout', (['job'], {}), '(job)\n', (3486, 3491), False, 'from fate_flow.utils import cron, job_utils\n'), ((3853, 3868), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (3866, 3868), False, 'from fate_arch.common.log import detect_logger\n'), ((4185, 4197), 'fate_flow.db.db_models.Job.select', 'Job.select', ([], {}), 
'()\n', (4195, 4197), False, 'from fate_flow.db.db_models import DB, Job\n'), ((5492, 5507), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (5505, 5507), False, 'from fate_arch.common.log import detect_logger\n'), ((5821, 5836), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (5834, 5836), False, 'from fate_arch.common.log import detect_logger\n'), ((6085, 6100), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (6098, 6100), False, 'from fate_arch.common.log import detect_logger\n'), ((2587, 2602), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (2600, 2602), False, 'from fate_arch.common.log import detect_logger\n'), ((3015, 3030), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (3028, 3030), False, 'from fate_arch.common.log import detect_logger\n'), ((3795, 3810), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (3808, 3810), False, 'from fate_arch.common.log import detect_logger\n'), ((4235, 4254), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (4252, 4254), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((4707, 4809), 'fate_flow.manager.ResourceManager.return_job_resource', 'ResourceManager.return_job_resource', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(job_id=job.f_job_id, role=job.f_role,\n party_id=job.f_party_id)\n', (4742, 4809), False, 'from fate_flow.manager import ResourceManager\n'), ((5434, 5449), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {}), '()\n', (5447, 5449), False, 'from fate_arch.common.log import detect_logger\n'), ((6447, 6481), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (6460, 6481), False, 'from fate_arch.common.log import detect_logger\n'), ((6666, 6700), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (6679, 6700), False, 'from fate_arch.common.log import detect_logger\n'), ((6815, 6849), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (6828, 6849), False, 'from fate_arch.common.log import detect_logger\n'), ((1926, 1961), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'task.f_job_id'}), '(job_id=task.f_job_id)\n', (1939, 1961), False, 'from fate_arch.common.log import detect_logger\n'), ((2493, 2528), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'task.f_job_id'}), '(job_id=task.f_job_id)\n', (2506, 2528), False, 'from fate_arch.common.log import detect_logger\n'), ((3595, 3629), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (3608, 3629), False, 'from fate_arch.common.log import detect_logger\n'), ((4555, 4589), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (4568, 4589), False, 'from fate_arch.common.log import detect_logger\n'), ((4867, 4901), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (4880, 4901), False, 'from fate_arch.common.log import detect_logger\n'), ((5050, 5084), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (5063, 5084), False, 'from 
fate_arch.common.log import detect_logger\n'), ((5236, 5270), 'fate_arch.common.log.detect_logger', 'detect_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (5249, 5270), False, 'from fate_arch.common.log import detect_logger\n')]
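detect_running_job above delegates the timeout decision to job_utils.check_job_is_timeout. A minimal sketch of such an elapsed-time check follows; the millisecond units and the start-time/timeout arguments are assumptions made for illustration, not the actual Job model fields.

import time

def is_job_timeout(start_time_ms: int, timeout_ms: int) -> bool:
    # True when the job has been running longer than its configured timeout.
    elapsed_ms = int(time.time() * 1000) - start_time_ms
    return elapsed_ms > timeout_ms

# A job started 3 hours ago with a 2-hour timeout should be flagged.
started = int(time.time() * 1000) - 3 * 3600 * 1000
print(is_job_timeout(started, 2 * 3600 * 1000))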
# # Copyright 2021 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import io import uuid import kfserving from kfserving.api import creds_utils from kubernetes import client from fate_flow.settings import stat_logger from .model_storage import get_model_storage, ModelStorageType from .model_storage.minio import MinIOModelStorage MINIO_K8S_SECRET_NAME = "fate-homo-serving-minio-secret" STORAGE_URI_KEY = "storage_uri" ANNOTATION_PREFIX = "fate.fedai.org/" ANNOTATION_SERVICE_UUID = ANNOTATION_PREFIX + "uuid" ANNOTATION_FATE_MODEL_ID = ANNOTATION_PREFIX + "model_id" ANNOTATION_FATE_MODEL_VERSION = ANNOTATION_PREFIX + "model_version" class KFServingDeployer(object): """Class representing a KFServing service deployer """ def __init__(self, party_model_id, model_version, model_object, service_id, namespace=None, config_file_content=None, replace=False, skip_create_storage_secret=False, model_storage_type=ModelStorageType.MINIO, model_storage_parameters=None): """ :param party_model_id: the model id with party info used to identify the model :param model_version: the model version :param model_object: the converted model object :param service_id: name of the service :param config_file_content: the content of a config file that will be used to connect to the kubernetes cluster. :param namespace: the kubernetes namespace this service belongs to. :param replace: whether to replace the running service, defaults to False. :param skip_create_storage_secret: whether or not to skip setting up MinIO credentials for KFServing storage-initializer container, defaults to False. :param model_storage_type: type of the underlying model storage defaults to ModelStorageType.MINIO. :param model_storage_parameters: a dict containing extra arguments to initialize the model storage instance, defaults to {}. see the doc of model storage classes for the available parameters. """ self.party_model_id = party_model_id self.model_version = model_version self.model_object = model_object self.service_id = service_id if model_storage_parameters is None: model_storage_parameters = {} self.model_storage = get_model_storage(storage_type=model_storage_type, sub_path=party_model_id + "/" + model_version + "/" + service_id, **model_storage_parameters) self.storage_uri = None self.isvc = None # this should also set up kubernetes.client config config_file = None if config_file_content: config_file = io.StringIO(config_file_content) self.kfserving_client = kfserving.KFServingClient(config_file) self.namespace = namespace if namespace else kfserving.utils.get_default_target_namespace() self.replace = replace self.skip_create_storage_secret = skip_create_storage_secret stat_logger.debug("Initialized KFServingDeployer with client config: {}".format(config_file)) def prepare_model(self): """ Prepare the model to be used by KFServing system. Calls into each deployer implementation to serialize the model object and uploads the related files to the target model storage. 
:return: the uri to fetch the uploaed/prepared model """ if not self.storage_uri: self.storage_uri = self.model_storage.save(self._do_prepare_model()) stat_logger.info("Prepared model with uri: {}".format(self.storage_uri)) return self.storage_uri def _do_prepare_model(self): raise NotImplementedError("_do_prepare_storage method not implemented") def deploy(self): """ Deploy a KFServing InferenceService from a model object :return: the InferenceService object as a dict """ if self.status() and not self.replace: raise RuntimeError("serving service {} already exists".format(self.service_id)) if self.isvc is None: stat_logger.info("Preparing model storage and InferenceService spec...") self.prepare_model() self.prepare_isvc() if self.isvc.metadata.annotations is None: self.isvc.metadata.annotations = {} # add a different annotation to force replace self.isvc.metadata.annotations[ANNOTATION_SERVICE_UUID] = str(uuid.uuid4()) self.isvc.metadata.annotations[ANNOTATION_FATE_MODEL_ID] = self.party_model_id self.isvc.metadata.annotations[ANNOTATION_FATE_MODEL_VERSION] = self.model_version if isinstance(self.model_storage, MinIOModelStorage) and not self.skip_create_storage_secret: self.prepare_sa_secret() if self.status() is None: stat_logger.info("Creating InferenceService {}...".format(self.service_id)) created_isvc = self.kfserving_client.create(self.isvc, namespace=self.namespace) else: stat_logger.info("Replacing InferenceService {}...".format(self.service_id)) self.isvc.metadata.resource_version = None created_isvc = self.kfserving_client.replace(self.service_id, self.isvc, namespace=self.namespace) return created_isvc def prepare_isvc(self): """ Generate an InferenceService spec to be applied into KFServing :return: the spec object """ if self.isvc is None: self.isvc = kfserving.V1beta1InferenceService( api_version=kfserving.constants.KFSERVING_V1BETA1, kind=kfserving.constants.KFSERVING_KIND, metadata=client.V1ObjectMeta(name=self.service_id), spec=kfserving.V1beta1InferenceServiceSpec( predictor=kfserving.V1beta1PredictorSpec())) self._do_prepare_predictor() if self.namespace: self.isvc.metadata.namespace = self.namespace stat_logger.info("InferenceService spec ready") stat_logger.debug(self.isvc) return self.isvc def _do_prepare_predictor(self): raise NotImplementedError("_do_prepare_predictor method not implemented") def destroy(self): """ Delete the InferenceService """ if self.status() is not None: self.kfserving_client.delete(self.service_id, namespace=self.namespace) stat_logger.info("InferenceService {} is deleted".format(self.service_id)) def status(self): try: return self.kfserving_client.get(self.service_id, namespace=self.namespace) except RuntimeError as e: if "Reason: Not Found" in str(e): return None def wait(self, timeout=120): """Wait until the service becomes ready Internally calls KFServing API to retrieve the status :param timeout: seconds to wait :return: the InferenceService dict """ return self.kfserving_client.get(self.service_id, namespace=self.namespace, watch=True, timeout_seconds=timeout) def prepare_sa_secret(self): """ Prepare the secret to be used by the service account for the KFServing service. KFServing needs a service account to find the credential to download files from MINIO/S3 storage. It must contain a secret resource with the credential embedded. We can prepare one use kubernetes API here. 
""" secrets = client.CoreV1Api().list_namespaced_secret(self.namespace) secret_names = [secret.metadata.name for secret in secrets.items] annotations = { "serving.kubeflow.org/s3-endpoint": self.model_storage.endpoint, "serving.kubeflow.org/s3-usehttps": "1" if self.model_storage.secure else "0" } secret = client.V1Secret(metadata=client.V1ObjectMeta(name=MINIO_K8S_SECRET_NAME, annotations=annotations), type="Opaque", string_data={ 'AWS_ACCESS_KEY_ID': self.model_storage.access_key, 'AWS_SECRET_ACCESS_KEY': self.model_storage.secret_key }) if MINIO_K8S_SECRET_NAME not in secret_names: client.CoreV1Api().create_namespaced_secret(self.namespace, secret) else: client.CoreV1Api().patch_namespaced_secret(MINIO_K8S_SECRET_NAME, self.namespace, secret) sa_name = self.isvc.spec.predictor.service_account_name \ if (self.isvc and isinstance(self.isvc, kfserving.V1beta1InferenceService) and self.isvc.spec.predictor.service_account_name) \ else "default" creds_utils.set_service_account(self.namespace, sa_name, MINIO_K8S_SECRET_NAME)
[ "fate_flow.settings.stat_logger.info", "fate_flow.settings.stat_logger.debug" ]
[((3739, 3777), 'kfserving.KFServingClient', 'kfserving.KFServingClient', (['config_file'], {}), '(config_file)\n', (3764, 3777), False, 'import kfserving\n'), ((7059, 7106), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""InferenceService spec ready"""'], {}), "('InferenceService spec ready')\n", (7075, 7106), False, 'from fate_flow.settings import stat_logger\n'), ((7115, 7143), 'fate_flow.settings.stat_logger.debug', 'stat_logger.debug', (['self.isvc'], {}), '(self.isvc)\n', (7132, 7143), False, 'from fate_flow.settings import stat_logger\n'), ((10041, 10120), 'kfserving.api.creds_utils.set_service_account', 'creds_utils.set_service_account', (['self.namespace', 'sa_name', 'MINIO_K8S_SECRET_NAME'], {}), '(self.namespace, sa_name, MINIO_K8S_SECRET_NAME)\n', (10072, 10120), False, 'from kfserving.api import creds_utils\n'), ((3674, 3706), 'io.StringIO', 'io.StringIO', (['config_file_content'], {}), '(config_file_content)\n', (3685, 3706), False, 'import io\n'), ((3831, 3877), 'kfserving.utils.get_default_target_namespace', 'kfserving.utils.get_default_target_namespace', ([], {}), '()\n', (3875, 3877), False, 'import kfserving\n'), ((5090, 5162), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""Preparing model storage and InferenceService spec..."""'], {}), "('Preparing model storage and InferenceService spec...')\n", (5106, 5162), False, 'from fate_flow.settings import stat_logger\n'), ((5451, 5463), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5461, 5463), False, 'import uuid\n'), ((8694, 8712), 'kubernetes.client.CoreV1Api', 'client.CoreV1Api', ([], {}), '()\n', (8710, 8712), False, 'from kubernetes import client\n'), ((9069, 9141), 'kubernetes.client.V1ObjectMeta', 'client.V1ObjectMeta', ([], {'name': 'MINIO_K8S_SECRET_NAME', 'annotations': 'annotations'}), '(name=MINIO_K8S_SECRET_NAME, annotations=annotations)\n', (9088, 9141), False, 'from kubernetes import client\n'), ((6749, 6790), 'kubernetes.client.V1ObjectMeta', 'client.V1ObjectMeta', ([], {'name': 'self.service_id'}), '(name=self.service_id)\n', (6768, 6790), False, 'from kubernetes import client\n'), ((9583, 9601), 'kubernetes.client.CoreV1Api', 'client.CoreV1Api', ([], {}), '()\n', (9599, 9601), False, 'from kubernetes import client\n'), ((9677, 9695), 'kubernetes.client.CoreV1Api', 'client.CoreV1Api', ([], {}), '()\n', (9693, 9695), False, 'from kubernetes import client\n'), ((6882, 6914), 'kfserving.V1beta1PredictorSpec', 'kfserving.V1beta1PredictorSpec', ([], {}), '()\n', (6912, 6914), False, 'import kfserving\n')]
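prepare_sa_secret above builds a Kubernetes secret carrying MinIO credentials plus the serving.kubeflow.org S3 annotations, then binds it to the predictor's service account. The same payload is sketched below as plain dictionaries so its shape is visible without a cluster; the endpoint and credential values are placeholders, and the real object is constructed with kubernetes.client types rather than raw dicts.

# Sketch of the secret manifest assembled in prepare_sa_secret (placeholder values).
secret_manifest = {
    "apiVersion": "v1",
    "kind": "Secret",
    "type": "Opaque",
    "metadata": {
        "name": "fate-homo-serving-minio-secret",
        "annotations": {
            "serving.kubeflow.org/s3-endpoint": "minio.example.com:9000",
            "serving.kubeflow.org/s3-usehttps": "0",
        },
    },
    "stringData": {
        "AWS_ACCESS_KEY_ID": "<access-key>",
        "AWS_SECRET_ACCESS_KEY": "<secret-key>",
    },
}
print(secret_manifest["metadata"]["name"])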
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from flask import Flask, request from arch.api.utils.core import base64_decode from fate_flow.driver.job_controller import JobController from fate_flow.driver.task_scheduler import TaskScheduler from fate_flow.settings import stat_logger from fate_flow.utils.api_utils import get_json_result from fate_flow.utils.authentication_utils import request_authority_certification manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/<job_id>/<role>/<party_id>/create', methods=['POST']) @request_authority_certification def create_job(job_id, role, party_id): JobController.update_job_status(job_id=job_id, role=role, party_id=int(party_id), job_info=request.json, create=True) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<role>/<party_id>/status', methods=['POST']) def job_status(job_id, role, party_id): JobController.update_job_status(job_id=job_id, role=role, party_id=int(party_id), job_info=request.json, create=False) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<role>/<party_id>/<model_id>/<model_version>/save/pipeline', methods=['POST']) @request_authority_certification def save_pipeline(job_id, role, party_id, model_id, model_version): JobController.save_pipeline(job_id=job_id, role=role, party_id=party_id, model_id=base64_decode(model_id), model_version=base64_decode(model_version)) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<role>/<party_id>/kill', methods=['POST']) def kill_job(job_id, role, party_id): JobController.kill_job(job_id=job_id, role=role, party_id=int(party_id), job_initiator=request.json.get('job_initiator', {}), timeout=request.json.get('timeout', False), component_name=request.json.get('component_name', '') ) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<role>/<party_id>/cancel', methods=['POST']) def cancel_job(job_id, role, party_id): res = JobController.cancel_job(job_id=job_id, role=role, party_id=int(party_id), job_initiator=request.json.get('job_initiator', {})) if res: return get_json_result(retcode=0, retmsg='cancel job success') return get_json_result(retcode=101, retmsg='cancel job failed') @manager.route('/<job_id>/<role>/<party_id>/<roles>/<party_ids>/clean', methods=['POST']) @request_authority_certification def clean(job_id, role, party_id, roles, party_ids): JobController.clean_job(job_id=job_id, role=role, party_id=party_id, roles=roles, party_ids=party_ids) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<component_name>/<task_id>/<role>/<party_id>/run', methods=['POST']) @request_authority_certification def run_task(job_id, component_name, task_id, role, party_id): TaskScheduler.start_task(job_id, component_name, task_id, role, party_id, request.json) return get_json_result(retcode=0, 
retmsg='success') @manager.route('/<job_id>/<component_name>/<task_id>/<role>/<party_id>/status', methods=['POST']) def task_status(job_id, component_name, task_id, role, party_id): JobController.update_task_status(job_id, component_name, task_id, role, party_id, request.json) return get_json_result(retcode=0, retmsg='success')
[ "fate_flow.driver.job_controller.JobController.clean_job", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.api_utils.get_json_result", "fate_flow.driver.task_scheduler.TaskScheduler.start_task", "fate_flow.driver.job_controller.JobController.update_task_status" ]
[((1002, 1017), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1007, 1017), False, 'from flask import Flask, request\n'), ((1081, 1105), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1102, 1105), False, 'from fate_flow.settings import stat_logger\n'), ((1476, 1520), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (1491, 1520), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1804, 1848), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (1819, 1848), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2255, 2299), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (2270, 2299), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2758, 2802), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (2773, 2802), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3183, 3239), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""cancel job failed"""'}), "(retcode=101, retmsg='cancel job failed')\n", (3198, 3239), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3422, 3529), 'fate_flow.driver.job_controller.JobController.clean_job', 'JobController.clean_job', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'roles': 'roles', 'party_ids': 'party_ids'}), '(job_id=job_id, role=role, party_id=party_id, roles=\n roles, party_ids=party_ids)\n', (3445, 3529), False, 'from fate_flow.driver.job_controller import JobController\n'), ((3536, 3580), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (3551, 3580), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3778, 3869), 'fate_flow.driver.task_scheduler.TaskScheduler.start_task', 'TaskScheduler.start_task', (['job_id', 'component_name', 'task_id', 'role', 'party_id', 'request.json'], {}), '(job_id, component_name, task_id, role, party_id,\n request.json)\n', (3802, 3869), False, 'from fate_flow.driver.task_scheduler import TaskScheduler\n'), ((3877, 3921), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (3892, 3921), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((4092, 4191), 'fate_flow.driver.job_controller.JobController.update_task_status', 'JobController.update_task_status', (['job_id', 'component_name', 'task_id', 'role', 'party_id', 'request.json'], {}), '(job_id, component_name, task_id, role,\n party_id, request.json)\n', (4124, 4191), False, 'from fate_flow.driver.job_controller import JobController\n'), ((4199, 4243), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (4214, 4243), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((3116, 3171), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""cancel job success"""'}), 
"(retcode=0, retmsg='cancel job success')\n", (3131, 3171), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2143, 2166), 'arch.api.utils.core.base64_decode', 'base64_decode', (['model_id'], {}), '(model_id)\n', (2156, 2166), False, 'from arch.api.utils.core import base64_decode\n'), ((2214, 2242), 'arch.api.utils.core.base64_decode', 'base64_decode', (['model_version'], {}), '(model_version)\n', (2227, 2242), False, 'from arch.api.utils.core import base64_decode\n'), ((2527, 2564), 'flask.request.json.get', 'request.json.get', (['"""job_initiator"""', '{}'], {}), "('job_initiator', {})\n", (2543, 2564), False, 'from flask import Flask, request\n'), ((2601, 2635), 'flask.request.json.get', 'request.json.get', (['"""timeout"""', '(False)'], {}), "('timeout', False)\n", (2617, 2635), False, 'from flask import Flask, request\n'), ((2679, 2717), 'flask.request.json.get', 'request.json.get', (['"""component_name"""', '""""""'], {}), "('component_name', '')\n", (2695, 2717), False, 'from flask import Flask, request\n'), ((3050, 3087), 'flask.request.json.get', 'request.json.get', (['"""job_initiator"""', '{}'], {}), "('job_initiator', {})\n", (3066, 3087), False, 'from flask import Flask, request\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import signal import sys import time import traceback import logging import grpc from grpc._cython import cygrpc from werkzeug.serving import run_simple # be sure to import environment variable before importing fate_arch from fate_flow import set_env from fate_arch.common import file_utils from fate_flow.utils.base_utils import get_fate_flow_directory from fate_flow.utils.proto_compatibility import proxy_pb2_grpc from fate_flow.apps import app from fate_flow.db.db_models import init_database_tables as init_flow_db from fate_arch.metastore.db_models import init_database_tables as init_arch_db from fate_flow.detection.detector import Detector from fate_flow.scheduler.dag_scheduler import DAGScheduler from fate_flow.db.runtime_config import RuntimeConfig from fate_flow.entity.types import ProcessRole from fate_flow.settings import (HOST, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, GRPC_SERVER_MAX_WORKERS, stat_logger, detect_logger, access_logger, database_logger) from fate_flow.utils.authentication_utils import PrivilegeAuth from fate_flow.utils.grpc_utils import UnaryService from fate_flow.db.db_services import service_db from fate_flow.utils.xthread import ThreadPoolExecutor from fate_flow.utils.log_utils import schedule_logger from fate_arch.common.versions import get_versions from fate_flow.db.config_manager import ConfigManager from fate_flow.db.component_registry import ComponentRegistry from fate_flow.manager.provider_manager import ProviderManager if __name__ == '__main__': stat_logger.info(f"project base: {file_utils.get_project_base_directory()}, fate base: {file_utils.get_fate_directory()}, fate flow base: {get_fate_flow_directory()}") # init # signal.signal(signal.SIGTERM, job_utils.cleaning) # signal.signal(signal.SIGCHLD, process_utils.wait_child_process) # init db init_flow_db() init_arch_db() # init runtime config import argparse parser = argparse.ArgumentParser() parser.add_argument('--version', default=False, help="fate flow version", action='store_true') parser.add_argument('--debug', default=False, help="debug mode", action='store_true') args = parser.parse_args() if args.version: print(get_versions()) sys.exit(0) # todo: add a general init steps? 
RuntimeConfig.DEBUG = args.debug if RuntimeConfig.DEBUG: stat_logger.info("run on debug mode") ConfigManager.load() RuntimeConfig.init_env() RuntimeConfig.init_config(JOB_SERVER_HOST=HOST, HTTP_PORT=HTTP_PORT) RuntimeConfig.set_process_role(ProcessRole.DRIVER) RuntimeConfig.set_service_db(service_db()) RuntimeConfig.SERVICE_DB.register_flow() RuntimeConfig.SERVICE_DB.register_models() ComponentRegistry.load() default_algorithm_provider = ProviderManager.register_default_providers() RuntimeConfig.set_component_provider(default_algorithm_provider) ComponentRegistry.load() PrivilegeAuth.init() Detector(interval=5 * 1000, logger=detect_logger).start() DAGScheduler(interval=2 * 1000, logger=schedule_logger()).start() peewee_logger = logging.getLogger('peewee') peewee_logger.propagate = False # fate_arch.common.log.ROpenHandler peewee_logger.addHandler(database_logger.handlers[0]) peewee_logger.setLevel(database_logger.level) thread_pool_executor = ThreadPoolExecutor(max_workers=GRPC_SERVER_MAX_WORKERS) stat_logger.info(f"start grpc server thread pool by {thread_pool_executor._max_workers} max workers") server = grpc.server(thread_pool=thread_pool_executor, options=[(cygrpc.ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.max_receive_message_length, -1)]) proxy_pb2_grpc.add_DataTransferServiceServicer_to_server(UnaryService(), server) server.add_insecure_port("{}:{}".format(HOST, GRPC_PORT)) server.start() stat_logger.info("FATE Flow grpc server start successfully") # start http server try: stat_logger.info("FATE Flow http server start...") werkzeug_logger = logging.getLogger("werkzeug") for h in access_logger.handlers: werkzeug_logger.addHandler(h) run_simple(hostname=HOST, port=HTTP_PORT, application=app, threaded=True, use_reloader=RuntimeConfig.DEBUG, use_debugger=RuntimeConfig.DEBUG) except OSError as e: traceback.print_exc() os.kill(os.getpid(), signal.SIGKILL) except Exception as e: traceback.print_exc() os.kill(os.getpid(), signal.SIGKILL) try: while True: time.sleep(_ONE_DAY_IN_SECONDS) except KeyboardInterrupt: server.stop(0) sys.exit(0)
[ "fate_flow.utils.log_utils.schedule_logger", "fate_flow.db.db_services.service_db", "fate_flow.db.db_models.init_database_tables", "fate_flow.utils.grpc_utils.UnaryService", "fate_flow.settings.stat_logger.info", "fate_flow.db.runtime_config.RuntimeConfig.set_process_role", "fate_flow.db.runtime_config.RuntimeConfig.SERVICE_DB.register_models", "fate_flow.utils.base_utils.get_fate_flow_directory", "fate_flow.manager.provider_manager.ProviderManager.register_default_providers", "fate_flow.utils.xthread.ThreadPoolExecutor", "fate_flow.db.runtime_config.RuntimeConfig.init_config", "fate_flow.detection.detector.Detector", "fate_flow.db.runtime_config.RuntimeConfig.set_component_provider", "fate_flow.db.config_manager.ConfigManager.load", "fate_flow.db.component_registry.ComponentRegistry.load", "fate_flow.utils.authentication_utils.PrivilegeAuth.init", "fate_flow.db.runtime_config.RuntimeConfig.SERVICE_DB.register_flow", "fate_flow.db.runtime_config.RuntimeConfig.init_env" ]
[((2492, 2506), 'fate_flow.db.db_models.init_database_tables', 'init_flow_db', ([], {}), '()\n', (2504, 2506), True, 'from fate_flow.db.db_models import init_database_tables as init_flow_db\n'), ((2511, 2525), 'fate_arch.metastore.db_models.init_database_tables', 'init_arch_db', ([], {}), '()\n', (2523, 2525), True, 'from fate_arch.metastore.db_models import init_database_tables as init_arch_db\n'), ((2585, 2610), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2608, 2610), False, 'import argparse\n'), ((3055, 3075), 'fate_flow.db.config_manager.ConfigManager.load', 'ConfigManager.load', ([], {}), '()\n', (3073, 3075), False, 'from fate_flow.db.config_manager import ConfigManager\n'), ((3080, 3104), 'fate_flow.db.runtime_config.RuntimeConfig.init_env', 'RuntimeConfig.init_env', ([], {}), '()\n', (3102, 3104), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((3109, 3177), 'fate_flow.db.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'JOB_SERVER_HOST': 'HOST', 'HTTP_PORT': 'HTTP_PORT'}), '(JOB_SERVER_HOST=HOST, HTTP_PORT=HTTP_PORT)\n', (3134, 3177), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((3182, 3232), 'fate_flow.db.runtime_config.RuntimeConfig.set_process_role', 'RuntimeConfig.set_process_role', (['ProcessRole.DRIVER'], {}), '(ProcessRole.DRIVER)\n', (3212, 3232), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((3285, 3325), 'fate_flow.db.runtime_config.RuntimeConfig.SERVICE_DB.register_flow', 'RuntimeConfig.SERVICE_DB.register_flow', ([], {}), '()\n', (3323, 3325), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((3330, 3372), 'fate_flow.db.runtime_config.RuntimeConfig.SERVICE_DB.register_models', 'RuntimeConfig.SERVICE_DB.register_models', ([], {}), '()\n', (3370, 3372), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((3378, 3402), 'fate_flow.db.component_registry.ComponentRegistry.load', 'ComponentRegistry.load', ([], {}), '()\n', (3400, 3402), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((3436, 3480), 'fate_flow.manager.provider_manager.ProviderManager.register_default_providers', 'ProviderManager.register_default_providers', ([], {}), '()\n', (3478, 3480), False, 'from fate_flow.manager.provider_manager import ProviderManager\n'), ((3485, 3549), 'fate_flow.db.runtime_config.RuntimeConfig.set_component_provider', 'RuntimeConfig.set_component_provider', (['default_algorithm_provider'], {}), '(default_algorithm_provider)\n', (3521, 3549), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((3554, 3578), 'fate_flow.db.component_registry.ComponentRegistry.load', 'ComponentRegistry.load', ([], {}), '()\n', (3576, 3578), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((3583, 3603), 'fate_flow.utils.authentication_utils.PrivilegeAuth.init', 'PrivilegeAuth.init', ([], {}), '()\n', (3601, 3603), False, 'from fate_flow.utils.authentication_utils import PrivilegeAuth\n'), ((3757, 3784), 'logging.getLogger', 'logging.getLogger', (['"""peewee"""'], {}), "('peewee')\n", (3774, 3784), False, 'import logging\n'), ((3997, 4052), 'fate_flow.utils.xthread.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'GRPC_SERVER_MAX_WORKERS'}), '(max_workers=GRPC_SERVER_MAX_WORKERS)\n', (4015, 4052), False, 'from fate_flow.utils.xthread import ThreadPoolExecutor\n'), ((4057, 4168), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""start grpc server thread 
pool by {thread_pool_executor._max_workers} max workers"""'], {}), "(\n f'start grpc server thread pool by {thread_pool_executor._max_workers} max workers'\n )\n", (4073, 4168), False, 'from fate_flow.settings import HOST, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, GRPC_SERVER_MAX_WORKERS, stat_logger, detect_logger, access_logger, database_logger\n'), ((4172, 4344), 'grpc.server', 'grpc.server', ([], {'thread_pool': 'thread_pool_executor', 'options': '[(cygrpc.ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.\n max_receive_message_length, -1)]'}), '(thread_pool=thread_pool_executor, options=[(cygrpc.\n ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.\n max_receive_message_length, -1)])\n', (4183, 4344), False, 'import grpc\n'), ((4565, 4625), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""FATE Flow grpc server start successfully"""'], {}), "('FATE Flow grpc server start successfully')\n", (4581, 4625), False, 'from fate_flow.settings import HOST, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, GRPC_SERVER_MAX_WORKERS, stat_logger, detect_logger, access_logger, database_logger\n'), ((2890, 2901), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2898, 2901), False, 'import sys\n'), ((3013, 3050), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""run on debug mode"""'], {}), "('run on debug mode')\n", (3029, 3050), False, 'from fate_flow.settings import HOST, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, GRPC_SERVER_MAX_WORKERS, stat_logger, detect_logger, access_logger, database_logger\n'), ((3267, 3279), 'fate_flow.db.db_services.service_db', 'service_db', ([], {}), '()\n', (3277, 3279), False, 'from fate_flow.db.db_services import service_db\n'), ((4456, 4470), 'fate_flow.utils.grpc_utils.UnaryService', 'UnaryService', ([], {}), '()\n', (4468, 4470), False, 'from fate_flow.utils.grpc_utils import UnaryService\n'), ((4668, 4718), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""FATE Flow http server start..."""'], {}), "('FATE Flow http server start...')\n", (4684, 4718), False, 'from fate_flow.settings import HOST, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, GRPC_SERVER_MAX_WORKERS, stat_logger, detect_logger, access_logger, database_logger\n'), ((4745, 4774), 'logging.getLogger', 'logging.getLogger', (['"""werkzeug"""'], {}), "('werkzeug')\n", (4762, 4774), False, 'import logging\n'), ((4866, 5011), 'werkzeug.serving.run_simple', 'run_simple', ([], {'hostname': 'HOST', 'port': 'HTTP_PORT', 'application': 'app', 'threaded': '(True)', 'use_reloader': 'RuntimeConfig.DEBUG', 'use_debugger': 'RuntimeConfig.DEBUG'}), '(hostname=HOST, port=HTTP_PORT, application=app, threaded=True,\n use_reloader=RuntimeConfig.DEBUG, use_debugger=RuntimeConfig.DEBUG)\n', (4876, 5011), False, 'from werkzeug.serving import run_simple\n'), ((2866, 2880), 'fate_arch.common.versions.get_versions', 'get_versions', ([], {}), '()\n', (2878, 2880), False, 'from fate_arch.common.versions import get_versions\n'), ((3608, 3657), 'fate_flow.detection.detector.Detector', 'Detector', ([], {'interval': '(5 * 1000)', 'logger': 'detect_logger'}), '(interval=5 * 1000, logger=detect_logger)\n', (3616, 3657), False, 'from fate_flow.detection.detector import Detector\n'), ((5041, 5062), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5060, 5062), False, 'import traceback\n'), ((5143, 5164), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5162, 5164), False, 'import traceback\n'), ((5252, 5283), 'time.sleep', 'time.sleep', 
(['_ONE_DAY_IN_SECONDS'], {}), '(_ONE_DAY_IN_SECONDS)\n', (5262, 5283), False, 'import time\n'), ((5345, 5356), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5353, 5356), False, 'import sys\n'), ((2203, 2242), 'fate_arch.common.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (2240, 2242), False, 'from fate_arch.common import file_utils\n'), ((2257, 2288), 'fate_arch.common.file_utils.get_fate_directory', 'file_utils.get_fate_directory', ([], {}), '()\n', (2286, 2288), False, 'from fate_arch.common import file_utils\n'), ((2308, 2333), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (2331, 2333), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((5079, 5090), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5088, 5090), False, 'import os\n'), ((5181, 5192), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5190, 5192), False, 'import os\n'), ((3709, 3726), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', ([], {}), '()\n', (3724, 3726), False, 'from fate_flow.utils.log_utils import schedule_logger\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from fate_flow.utils.base_utils import jprint from fate_flow.controller.job_controller import JobController from fate_flow.utils import job_utils class TestJobController(unittest.TestCase): def test_gen_updated_parameters(self): job_id = "202110211127411105150" initiator_role = "guest" initiator_party_id = 9999 input_job_parameters = { "common": { "auto_retries": 1, "auto_retry_delay": 1 } } input_job_parameters = {} input_component_parameters = { "common": { "hetero_lr_0": { "alpha": 0.02 } }, "role": { "guest": { "0": { "reader_0": { "table": {"name": "breast_hetero_guest", "namespace": "unitest_experiment"} }, "homo_nn_0":{ "with_label": True, "output_format": "dense" }, } }, "host": { "1": { "dataio_0":{ "with_label": True, "output_format": "dense" }, "evaluation_0": { "need_run": True } } } } } job_configuration = job_utils.get_job_configuration(job_id=job_id, role=initiator_role, party_id=initiator_party_id) origin_job_parameters = job_configuration.runtime_conf["job_parameters"] origin_component_parameters = job_configuration.runtime_conf["component_parameters"] updated_job_parameters, updated_component_parameters, updated_components = JobController.gen_updated_parameters( job_id=job_id, initiator_role=initiator_role, initiator_party_id=initiator_party_id, input_job_parameters=input_job_parameters, input_component_parameters=input_component_parameters) jprint(updated_job_parameters) jprint(updated_component_parameters) self.assertTrue(check(input_component_parameters, updated_component_parameters)[0]) # todo: add check with origin parameters and add dsl parser check def check(inputs, result): # todo: return check keys chain if type(result) != type(inputs): return False, "type not match" elif isinstance(inputs, dict): for k, v in inputs.items(): if k not in result: return False, f"no such {k} key" if isinstance(v, (dict, list)): return check(v, result[k]) else: if result[k] != v: return False, f"{k} value not match" else: return True, "match" elif isinstance(inputs, list): return result == inputs else: raise Exception(f"not support type {type(inputs)}") if __name__ == '__main__': unittest.main()
[ "fate_flow.controller.job_controller.JobController.gen_updated_parameters", "fate_flow.utils.base_utils.jprint", "fate_flow.utils.job_utils.get_job_configuration" ]
[((3931, 3946), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3944, 3946), False, 'import unittest\n'), ((2202, 2302), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role,\n party_id=initiator_party_id)\n', (2233, 2302), False, 'from fate_flow.utils import job_utils\n'), ((2677, 2910), 'fate_flow.controller.job_controller.JobController.gen_updated_parameters', 'JobController.gen_updated_parameters', ([], {'job_id': 'job_id', 'initiator_role': 'initiator_role', 'initiator_party_id': 'initiator_party_id', 'input_job_parameters': 'input_job_parameters', 'input_component_parameters': 'input_component_parameters'}), '(job_id=job_id, initiator_role=\n initiator_role, initiator_party_id=initiator_party_id,\n input_job_parameters=input_job_parameters, input_component_parameters=\n input_component_parameters)\n', (2713, 2910), False, 'from fate_flow.controller.job_controller import JobController\n'), ((2966, 2996), 'fate_flow.utils.base_utils.jprint', 'jprint', (['updated_job_parameters'], {}), '(updated_job_parameters)\n', (2972, 2996), False, 'from fate_flow.utils.base_utils import jprint\n'), ((3005, 3041), 'fate_flow.utils.base_utils.jprint', 'jprint', (['updated_component_parameters'], {}), '(updated_component_parameters)\n', (3011, 3041), False, 'from fate_flow.utils.base_utils import jprint\n')]
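The check() helper in the test file above returns as soon as it has looked inside the first nested key (its own todo notes the missing "check keys chain"). A hedged sketch of a fuller recursive containment check — the helper name and error messages are illustrative, not part of FATE Flow:

def contains(expected, actual, path="<root>"):
    # Every key/value present in `expected` must also appear in `actual`;
    # extra keys in `actual` are ignored, mirroring the intent of check().
    if isinstance(expected, dict):
        if not isinstance(actual, dict):
            return False, f"{path}: type mismatch"
        for k, v in expected.items():
            if k not in actual:
                return False, f"{path}.{k}: missing key"
            ok, reason = contains(v, actual[k], f"{path}.{k}")
            if not ok:
                return False, reason
        return True, "match"
    if expected == actual:
        return True, "match"
    return False, f"{path}: value mismatch"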
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json import sys import time from fate_flow.utils.log_utils import audit_logger, schedule_logger from fate_flow.utils.grpc_utils import wrap_grpc_packet, gen_routing_metadata from fate_flow.utils.proto_compatibility import proxy_pb2_grpc import grpc def get_command_federation_channel(host, port): print(f"connect {host}:{port}") channel = grpc.insecure_channel('{}:{}'.format(host, port)) stub = proxy_pb2_grpc.DataTransferServiceStub(channel) return channel, stub def remote_api(host, port, job_id, method, endpoint, src_party_id, dest_party_id, src_role, json_body, api_version="v1", overall_timeout=30*1000, try_times=3): endpoint = f"/{api_version}{endpoint}" json_body['src_role'] = src_role json_body['src_party_id'] = src_party_id _packet = wrap_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id, overall_timeout=overall_timeout) print(_packet) _routing_metadata = gen_routing_metadata(src_party_id=src_party_id, dest_party_id=dest_party_id) exception = None for t in range(try_times): try: channel, stub = get_command_federation_channel(host, port) _return, _call = stub.unaryCall.with_call(_packet, metadata=_routing_metadata, timeout=(overall_timeout/1000)) audit_logger(job_id).info("grpc api response: {}".format(_return)) channel.close() response = json.loads(_return.body.value) return response except Exception as e: exception = e schedule_logger(job_id).warning(f"remote request {endpoint} error, sleep and try again") time.sleep(2 * (t+1)) else: tips = 'Please check rollSite and fateflow network connectivity' raise Exception('{}rpc request error: {}'.format(tips, exception)) host = sys.argv[1] port = int(sys.argv[2]) src_role = sys.argv[3] src_party_id = sys.argv[4] dest_party_id = sys.argv[5] response = remote_api(host, port, "test_job_command", "POST", "/version/get", src_party_id, dest_party_id, src_role, {"src_role": src_role, "src_party_id": src_party_id}) print(response)
[ "fate_flow.utils.log_utils.schedule_logger", "fate_flow.utils.log_utils.audit_logger", "fate_flow.utils.proto_compatibility.proxy_pb2_grpc.DataTransferServiceStub", "fate_flow.utils.grpc_utils.gen_routing_metadata", "fate_flow.utils.grpc_utils.wrap_grpc_packet" ]
[((1035, 1082), 'fate_flow.utils.proto_compatibility.proxy_pb2_grpc.DataTransferServiceStub', 'proxy_pb2_grpc.DataTransferServiceStub', (['channel'], {}), '(channel)\n', (1073, 1082), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2_grpc\n'), ((1424, 1543), 'fate_flow.utils.grpc_utils.wrap_grpc_packet', 'wrap_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id', 'job_id'], {'overall_timeout': 'overall_timeout'}), '(json_body, method, endpoint, src_party_id, dest_party_id,\n job_id, overall_timeout=overall_timeout)\n', (1440, 1543), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, gen_routing_metadata\n'), ((1614, 1690), 'fate_flow.utils.grpc_utils.gen_routing_metadata', 'gen_routing_metadata', ([], {'src_party_id': 'src_party_id', 'dest_party_id': 'dest_party_id'}), '(src_party_id=src_party_id, dest_party_id=dest_party_id)\n', (1634, 1690), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, gen_routing_metadata\n'), ((2080, 2110), 'json.loads', 'json.loads', (['_return.body.value'], {}), '(_return.body.value)\n', (2090, 2110), False, 'import json\n'), ((2309, 2332), 'time.sleep', 'time.sleep', (['(2 * (t + 1))'], {}), '(2 * (t + 1))\n', (2319, 2332), False, 'import time\n'), ((1962, 1982), 'fate_flow.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (1974, 1982), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n'), ((2208, 2231), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2223, 2231), False, 'from fate_flow.utils.log_utils import audit_logger, schedule_logger\n')]
import os import time import unittest from unittest.mock import patch from kazoo.client import KazooClient from kazoo.exceptions import NodeExistsError, NoNodeError from fate_flow.db import db_services from fate_flow.errors.error_services import * from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel from fate_flow import settings model_download_url = 'http://127.0.0.1:9380/v1/model/transfer/arbiter-10000_guest-9999_host-10000_model/202105060929263278441' escaped_model_download_url = '/FATE-SERVICES/flow/online/transfer/providers/http%3A%2F%2F12192.168.127.12%3A9380%2Fv1%2Fmodel%2Ftransfer%2Farbiter-10000_guest-9999_host-10000_model%2F202105060929263278441' class TestZooKeeperDB(unittest.TestCase): def setUp(self): # required environment: ZOOKEEPER_HOSTS # optional environment: ZOOKEEPER_USERNAME, ZOOKEEPER_PASSWORD config = { 'hosts': os.environ['ZOOKEEPER_HOSTS'].split(','), 'use_acl': False, } username = os.environ.get('ZOOKEEPER_USERNAME') password = os.environ.get('ZOOKEEPER_PASSWORD') if username and password: config.update({ 'use_acl': True, 'username': username, 'password': password, }) with patch.object(db_services.ServiceRegistry, 'USE_REGISTRY', 'ZooKeeper'), \ patch.object(db_services.ServiceRegistry, 'ZOOKEEPER', config): self.service_db = db_services.service_db() def test_services_db(self): self.assertEqual(type(self.service_db), db_services.ZooKeeperDB) self.assertNotEqual(type(self.service_db), db_services.FallbackDB) self.assertEqual(type(self.service_db.client), KazooClient) def test_zookeeper_not_configured(self): with patch.object(db_services.ServiceRegistry, 'USE_REGISTRY', True), \ patch.object(db_services.ServiceRegistry, 'ZOOKEEPER', {'hosts': None}), \ self.assertRaisesRegex(ZooKeeperNotConfigured, ZooKeeperNotConfigured.message): db_services.service_db() def test_missing_zookeeper_username_or_password(self): with patch.object(db_services.ServiceRegistry, 'USE_REGISTRY', True), \ patch.object(db_services.ServiceRegistry, 'ZOOKEEPER', { 'hosts': ['127.0.0.1:2281'], 'use_acl': True, }), self.assertRaisesRegex( MissingZooKeeperUsernameOrPassword, MissingZooKeeperUsernameOrPassword.message): db_services.service_db() def test_get_znode_path(self): self.assertEqual(self.service_db._get_znode_path('fateflow', model_download_url), escaped_model_download_url) def test_crud(self): self.service_db._insert('fateflow', model_download_url) self.assertIn(model_download_url, self.service_db.get_urls('fateflow')) self.service_db._delete('fateflow', model_download_url) self.assertNotIn(model_download_url, self.service_db.get_urls('fateflow')) def test_insert_exists_node(self): self.service_db._delete('servings', 'http://foo/bar') self.service_db._insert('servings', 'http://foo/bar') with self.assertRaises(NodeExistsError): self.service_db.client.create(self.service_db._get_znode_path('servings', 'http://foo/bar'), makepath=True) self.service_db._insert('servings', 'http://foo/bar') self.service_db._delete('servings', 'http://foo/bar') def test_delete_not_exists_node(self): self.service_db._delete('servings', 'http://foo/bar') with self.assertRaises(NoNodeError): self.service_db.client.delete(self.service_db._get_znode_path('servings', 'http://foo/bar')) self.service_db._delete('servings', 'http://foo/bar') def test_connection_closed(self): self.service_db._insert('fateflow', model_download_url) self.assertIn(model_download_url, self.service_db.get_urls('fateflow')) self.service_db.client.stop() self.service_db.client.start() self.assertNotIn(model_download_url, 
self.service_db.get_urls('fateflow')) def test_register_models(self): try: os.remove(DB.database) except FileNotFoundError: pass MLModel.create_table() for x in range(1, 101): job_id = str(time.time()) model = MLModel( f_role='host', f_party_id='100', f_job_id=job_id, f_model_id=f'foobar#{x}', f_model_version=job_id, f_initiator_role='host', f_work_mode=0 ) model.save(force_insert=True) self.assertEqual(db_services.models_group_by_party_model_id_and_model_version().count(), 100) with patch.object(self.service_db, '_insert') as insert: self.service_db.register_models() self.assertEqual(insert.call_count, 100) with patch.object(self.service_db, '_delete') as delete: self.service_db.unregister_models() self.assertEqual(delete.call_count, 100) os.remove(DB.database) class TestFallbackDB(unittest.TestCase): def setUp(self): with patch.object(db_services.ServiceRegistry, 'USE_REGISTRY', False): self.service_db = db_services.service_db() def test_get_urls(self): self.assertEqual(self.service_db._get_urls('fateflow'), ['http://127.0.0.1:9380/v1/model/transfer']) self.assertEqual(self.service_db._get_urls('servings'), ['http://127.0.0.1:8000']) def test_crud(self): self.service_db._insert('fateflow', model_download_url) self.assertNotIn(model_download_url, self.service_db.get_urls('fateflow')) self.service_db._delete('fateflow', model_download_url) self.assertNotIn(model_download_url, self.service_db.get_urls('fateflow')) def test_get_model_download_url(self): self.assertEqual(db_services.get_model_download_url('foo-111#bar-222', '20210616'), 'http://127.0.0.1:9380/v1/model/transfer/foo-111_bar-222/20210616') def test_not_supported_service(self): with self.assertRaisesRegex(ServiceNotSupported, 'The service foobar is not supported'): self.service_db.get_urls('foobar') if __name__ == '__main__': unittest.main()
[ "fate_flow.db.db_services.service_db", "fate_flow.db.db_services.get_model_download_url", "fate_flow.db.db_services.models_group_by_party_model_id_and_model_version", "fate_flow.db.db_models.MachineLearningModelInfo.create_table", "fate_flow.db.db_models.MachineLearningModelInfo" ]
[((6336, 6351), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6349, 6351), False, 'import unittest\n'), ((1017, 1053), 'os.environ.get', 'os.environ.get', (['"""ZOOKEEPER_USERNAME"""'], {}), "('ZOOKEEPER_USERNAME')\n", (1031, 1053), False, 'import os\n'), ((1073, 1109), 'os.environ.get', 'os.environ.get', (['"""ZOOKEEPER_PASSWORD"""'], {}), "('ZOOKEEPER_PASSWORD')\n", (1087, 1109), False, 'import os\n'), ((4315, 4337), 'fate_flow.db.db_models.MachineLearningModelInfo.create_table', 'MLModel.create_table', ([], {}), '()\n', (4335, 4337), True, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((5114, 5136), 'os.remove', 'os.remove', (['DB.database'], {}), '(DB.database)\n', (5123, 5136), False, 'import os\n'), ((1310, 1380), 'unittest.mock.patch.object', 'patch.object', (['db_services.ServiceRegistry', '"""USE_REGISTRY"""', '"""ZooKeeper"""'], {}), "(db_services.ServiceRegistry, 'USE_REGISTRY', 'ZooKeeper')\n", (1322, 1380), False, 'from unittest.mock import patch\n'), ((1400, 1462), 'unittest.mock.patch.object', 'patch.object', (['db_services.ServiceRegistry', '"""ZOOKEEPER"""', 'config'], {}), "(db_services.ServiceRegistry, 'ZOOKEEPER', config)\n", (1412, 1462), False, 'from unittest.mock import patch\n'), ((1494, 1518), 'fate_flow.db.db_services.service_db', 'db_services.service_db', ([], {}), '()\n', (1516, 1518), False, 'from fate_flow.db import db_services\n'), ((1827, 1890), 'unittest.mock.patch.object', 'patch.object', (['db_services.ServiceRegistry', '"""USE_REGISTRY"""', '(True)'], {}), "(db_services.ServiceRegistry, 'USE_REGISTRY', True)\n", (1839, 1890), False, 'from unittest.mock import patch\n'), ((1906, 1977), 'unittest.mock.patch.object', 'patch.object', (['db_services.ServiceRegistry', '"""ZOOKEEPER"""', "{'hosts': None}"], {}), "(db_services.ServiceRegistry, 'ZOOKEEPER', {'hosts': None})\n", (1918, 1977), False, 'from unittest.mock import patch\n'), ((2089, 2113), 'fate_flow.db.db_services.service_db', 'db_services.service_db', ([], {}), '()\n', (2111, 2113), False, 'from fate_flow.db import db_services\n'), ((2187, 2250), 'unittest.mock.patch.object', 'patch.object', (['db_services.ServiceRegistry', '"""USE_REGISTRY"""', '(True)'], {}), "(db_services.ServiceRegistry, 'USE_REGISTRY', True)\n", (2199, 2250), False, 'from unittest.mock import patch\n'), ((2266, 2373), 'unittest.mock.patch.object', 'patch.object', (['db_services.ServiceRegistry', '"""ZOOKEEPER"""', "{'hosts': ['127.0.0.1:2281'], 'use_acl': True}"], {}), "(db_services.ServiceRegistry, 'ZOOKEEPER', {'hosts': [\n '127.0.0.1:2281'], 'use_acl': True})\n", (2278, 2373), False, 'from unittest.mock import patch\n'), ((2550, 2574), 'fate_flow.db.db_services.service_db', 'db_services.service_db', ([], {}), '()\n', (2572, 2574), False, 'from fate_flow.db import db_services\n'), ((4232, 4254), 'os.remove', 'os.remove', (['DB.database'], {}), '(DB.database)\n', (4241, 4254), False, 'import os\n'), ((4428, 4584), 'fate_flow.db.db_models.MachineLearningModelInfo', 'MLModel', ([], {'f_role': '"""host"""', 'f_party_id': '"""100"""', 'f_job_id': 'job_id', 'f_model_id': 'f"""foobar#{x}"""', 'f_model_version': 'job_id', 'f_initiator_role': '"""host"""', 'f_work_mode': '(0)'}), "(f_role='host', f_party_id='100', f_job_id=job_id, f_model_id=\n f'foobar#{x}', f_model_version=job_id, f_initiator_role='host',\n f_work_mode=0)\n", (4435, 4584), True, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((4796, 4836), 'unittest.mock.patch.object', 'patch.object', 
(['self.service_db', '"""_insert"""'], {}), "(self.service_db, '_insert')\n", (4808, 4836), False, 'from unittest.mock import patch\n'), ((4956, 4996), 'unittest.mock.patch.object', 'patch.object', (['self.service_db', '"""_delete"""'], {}), "(self.service_db, '_delete')\n", (4968, 4996), False, 'from unittest.mock import patch\n'), ((5215, 5279), 'unittest.mock.patch.object', 'patch.object', (['db_services.ServiceRegistry', '"""USE_REGISTRY"""', '(False)'], {}), "(db_services.ServiceRegistry, 'USE_REGISTRY', False)\n", (5227, 5279), False, 'from unittest.mock import patch\n'), ((5311, 5335), 'fate_flow.db.db_services.service_db', 'db_services.service_db', ([], {}), '()\n', (5333, 5335), False, 'from fate_flow.db import db_services\n'), ((5956, 6021), 'fate_flow.db.db_services.get_model_download_url', 'db_services.get_model_download_url', (['"""foo-111#bar-222"""', '"""20210616"""'], {}), "('foo-111#bar-222', '20210616')\n", (5990, 6021), False, 'from fate_flow.db import db_services\n'), ((4395, 4406), 'time.time', 'time.time', ([], {}), '()\n', (4404, 4406), False, 'import time\n'), ((4705, 4767), 'fate_flow.db.db_services.models_group_by_party_model_id_and_model_version', 'db_services.models_group_by_party_model_id_and_model_version', ([], {}), '()\n', (4765, 4767), False, 'from fate_flow.db import db_services\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil from fate_arch.common.base_utils import json_loads, json_dumps from fate_flow.settings import stat_logger from fate_flow.pipelined_model.pipelined_model import PipelinedModel from fate_flow.model.checkpoint import CheckpointManager from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter from fate_flow.utils.model_utils import (gen_party_model_id, check_before_deploy, compare_version, gather_model_info_data, save_model_info) from fate_flow.utils.schedule_utils import get_dsl_parser_by_version from fate_flow.operation.job_saver import JobSaver def deploy(config_data): model_id = config_data.get('model_id') model_version = config_data.get('model_version') local_role = config_data.get('local').get('role') local_party_id = config_data.get('local').get('party_id') child_model_version = config_data.get('child_model_version') components_checkpoint = config_data.get('components_checkpoint', {}) warning_msg = "" try: party_model_id = gen_party_model_id(model_id=model_id, role=local_role, party_id=local_party_id) model = PipelinedModel(model_id=party_model_id, model_version=model_version) model_data = model.collect_models(in_bytes=True) if "pipeline.pipeline:Pipeline" not in model_data: raise Exception("Can not found pipeline file in model.") # check if the model could be executed the deploy process (parent/child) if not check_before_deploy(model): raise Exception('Child model could not be deployed.') # copy proto content from parent model and generate a child model deploy_model = PipelinedModel(model_id=party_model_id, model_version=child_model_version) shutil.copytree(src=model.model_path, dst=deploy_model.model_path, ignore=lambda src, names: {'checkpoint'} if src == model.model_path else {}) pipeline_model = deploy_model.read_pipeline_model() train_runtime_conf = json_loads(pipeline_model.train_runtime_conf) runtime_conf_on_party = json_loads(pipeline_model.runtime_conf_on_party) dsl_version = train_runtime_conf.get("dsl_version", "1") parser = get_dsl_parser_by_version(dsl_version) train_dsl = json_loads(pipeline_model.train_dsl) parent_predict_dsl = json_loads(pipeline_model.inference_dsl) if config_data.get('dsl') or config_data.get('predict_dsl'): inference_dsl = config_data.get('dsl') if config_data.get('dsl') else config_data.get('predict_dsl') if not isinstance(inference_dsl, dict): inference_dsl = json_loads(inference_dsl) else: if config_data.get('cpn_list', None): cpn_list = config_data.pop('cpn_list') else: cpn_list = list(train_dsl.get('components', {}).keys()) if int(dsl_version) == 1: # convert v1 dsl to v2 dsl inference_dsl, warning_msg = parser.convert_dsl_v1_to_v2(parent_predict_dsl) else: parser = get_dsl_parser_by_version(dsl_version) inference_dsl = parser.deploy_component(cpn_list, train_dsl) # convert v1 conf to v2 conf if int(dsl_version) == 1: components = parser.get_components_light_weight(inference_dsl) from fate_flow.db.component_registry import 
ComponentRegistry job_providers = parser.get_job_providers(dsl=inference_dsl, provider_detail=ComponentRegistry.REGISTRY) cpn_role_parameters = dict() for cpn in components: cpn_name = cpn.get_name() role_params = parser.parse_component_role_parameters(component=cpn_name, dsl=inference_dsl, runtime_conf=train_runtime_conf, provider_detail=ComponentRegistry.REGISTRY, provider_name=job_providers[cpn_name]["provider"]["name"], provider_version=job_providers[cpn_name]["provider"]["version"]) cpn_role_parameters[cpn_name] = role_params train_runtime_conf = parser.convert_conf_v1_to_v2(train_runtime_conf, cpn_role_parameters) adapter = JobRuntimeConfigAdapter(train_runtime_conf) train_runtime_conf = adapter.update_model_id_version(model_version=deploy_model.model_version) pipeline_model.model_version = child_model_version pipeline_model.train_runtime_conf = json_dumps(train_runtime_conf, byte=True) # save inference dsl into child model file parser = get_dsl_parser_by_version(2) parser.verify_dsl(inference_dsl, "predict") inference_dsl = JobSaver.fill_job_inference_dsl(job_id=model_version, role=local_role, party_id=local_party_id, dsl_parser=parser, origin_inference_dsl=inference_dsl) pipeline_model.inference_dsl = json_dumps(inference_dsl, byte=True) if compare_version(pipeline_model.fate_version, '1.5.0') == 'gt': pipeline_model.parent_info = json_dumps({'parent_model_id': model_id, 'parent_model_version': model_version}, byte=True) pipeline_model.parent = False runtime_conf_on_party['job_parameters']['model_version'] = child_model_version pipeline_model.runtime_conf_on_party = json_dumps(runtime_conf_on_party, byte=True) # save model file deploy_model.save_pipeline(pipeline_model) shutil.copyfile(os.path.join(deploy_model.model_path, "pipeline.pb"), os.path.join(deploy_model.model_path, "variables", "data", "pipeline", "pipeline", "Pipeline")) model_info = gather_model_info_data(deploy_model) model_info['job_id'] = model_info['f_model_version'] model_info['size'] = deploy_model.calculate_model_file_size() model_info['role'] = local_role model_info['party_id'] = local_party_id model_info['parent'] = False if model_info.get('f_inference_dsl') else True if compare_version(model_info['f_fate_version'], '1.5.0') == 'eq': model_info['roles'] = model_info.get('f_train_runtime_conf', {}).get('role', {}) model_info['initiator_role'] = model_info.get('f_train_runtime_conf', {}).get('initiator', {}).get('role') model_info['initiator_party_id'] = model_info.get('f_train_runtime_conf', {}).get('initiator', {}).get('party_id') save_model_info(model_info) for component_name, component in train_dsl.get('components', {}).items(): step_index = components_checkpoint.get(component_name, {}).get('step_index') step_name = components_checkpoint.get(component_name, {}).get('step_name') if step_index is not None: step_index = int(step_index) step_name = None elif step_name is None: continue checkpoint_manager = CheckpointManager( role=local_role, party_id=local_party_id, model_id=model_id, model_version=model_version, component_name=component_name, mkdir=False, ) checkpoint_manager.load_checkpoints_from_disk() if checkpoint_manager.latest_checkpoint is not None: checkpoint_manager.deploy( child_model_version, component['output']['model'][0] if component.get('output', {}).get('model') else 'default', step_index, step_name, ) except Exception as e: stat_logger.exception(e) return 100, f"deploy model of role {local_role} {local_party_id} failed, details: {str(e)}" else: msg = f"deploy model of role {local_role} 
{local_party_id} success" if warning_msg: msg = msg + f", warning: {warning_msg}" return 0, msg
[ "fate_flow.utils.model_utils.compare_version", "fate_flow.utils.schedule_utils.get_dsl_parser_by_version", "fate_flow.utils.config_adapter.JobRuntimeConfigAdapter", "fate_flow.utils.model_utils.save_model_info", "fate_flow.model.checkpoint.CheckpointManager", "fate_flow.pipelined_model.pipelined_model.PipelinedModel", "fate_flow.utils.model_utils.gen_party_model_id", "fate_flow.operation.job_saver.JobSaver.fill_job_inference_dsl", "fate_flow.utils.model_utils.check_before_deploy", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.model_utils.gather_model_info_data" ]
[((1675, 1754), 'fate_flow.utils.model_utils.gen_party_model_id', 'gen_party_model_id', ([], {'model_id': 'model_id', 'role': 'local_role', 'party_id': 'local_party_id'}), '(model_id=model_id, role=local_role, party_id=local_party_id)\n', (1693, 1754), False, 'from fate_flow.utils.model_utils import gen_party_model_id, check_before_deploy, compare_version, gather_model_info_data, save_model_info\n'), ((1771, 1839), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'PipelinedModel', ([], {'model_id': 'party_model_id', 'model_version': 'model_version'}), '(model_id=party_model_id, model_version=model_version)\n', (1785, 1839), False, 'from fate_flow.pipelined_model.pipelined_model import PipelinedModel\n'), ((2314, 2388), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'PipelinedModel', ([], {'model_id': 'party_model_id', 'model_version': 'child_model_version'}), '(model_id=party_model_id, model_version=child_model_version)\n', (2328, 2388), False, 'from fate_flow.pipelined_model.pipelined_model import PipelinedModel\n'), ((2397, 2545), 'shutil.copytree', 'shutil.copytree', ([], {'src': 'model.model_path', 'dst': 'deploy_model.model_path', 'ignore': "(lambda src, names: {'checkpoint'} if src == model.model_path else {})"}), "(src=model.model_path, dst=deploy_model.model_path, ignore=\n lambda src, names: {'checkpoint'} if src == model.model_path else {})\n", (2412, 2545), False, 'import shutil\n'), ((2655, 2700), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline_model.train_runtime_conf'], {}), '(pipeline_model.train_runtime_conf)\n', (2665, 2700), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((2733, 2781), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline_model.runtime_conf_on_party'], {}), '(pipeline_model.runtime_conf_on_party)\n', (2743, 2781), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((2865, 2903), 'fate_flow.utils.schedule_utils.get_dsl_parser_by_version', 'get_dsl_parser_by_version', (['dsl_version'], {}), '(dsl_version)\n', (2890, 2903), False, 'from fate_flow.utils.schedule_utils import get_dsl_parser_by_version\n'), ((2924, 2960), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline_model.train_dsl'], {}), '(pipeline_model.train_dsl)\n', (2934, 2960), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((2990, 3030), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline_model.inference_dsl'], {}), '(pipeline_model.inference_dsl)\n', (3000, 3030), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((5158, 5201), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['train_runtime_conf'], {}), '(train_runtime_conf)\n', (5181, 5201), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((5408, 5449), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['train_runtime_conf'], {'byte': '(True)'}), '(train_runtime_conf, byte=True)\n', (5418, 5449), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((5520, 5548), 'fate_flow.utils.schedule_utils.get_dsl_parser_by_version', 'get_dsl_parser_by_version', (['(2)'], {}), '(2)\n', (5545, 5548), False, 'from fate_flow.utils.schedule_utils import get_dsl_parser_by_version\n'), ((5625, 5784), 'fate_flow.operation.job_saver.JobSaver.fill_job_inference_dsl', 'JobSaver.fill_job_inference_dsl', ([], {'job_id': 'model_version', 'role': 'local_role', 'party_id': 
'local_party_id', 'dsl_parser': 'parser', 'origin_inference_dsl': 'inference_dsl'}), '(job_id=model_version, role=local_role,\n party_id=local_party_id, dsl_parser=parser, origin_inference_dsl=\n inference_dsl)\n', (5656, 5784), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((5815, 5851), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['inference_dsl'], {'byte': '(True)'}), '(inference_dsl, byte=True)\n', (5825, 5851), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((6587, 6623), 'fate_flow.utils.model_utils.gather_model_info_data', 'gather_model_info_data', (['deploy_model'], {}), '(deploy_model)\n', (6609, 6623), False, 'from fate_flow.utils.model_utils import gen_party_model_id, check_before_deploy, compare_version, gather_model_info_data, save_model_info\n'), ((7349, 7376), 'fate_flow.utils.model_utils.save_model_info', 'save_model_info', (['model_info'], {}), '(model_info)\n', (7364, 7376), False, 'from fate_flow.utils.model_utils import gen_party_model_id, check_before_deploy, compare_version, gather_model_info_data, save_model_info\n'), ((2122, 2148), 'fate_flow.utils.model_utils.check_before_deploy', 'check_before_deploy', (['model'], {}), '(model)\n', (2141, 2148), False, 'from fate_flow.utils.model_utils import gen_party_model_id, check_before_deploy, compare_version, gather_model_info_data, save_model_info\n'), ((5864, 5917), 'fate_flow.utils.model_utils.compare_version', 'compare_version', (['pipeline_model.fate_version', '"""1.5.0"""'], {}), "(pipeline_model.fate_version, '1.5.0')\n", (5879, 5917), False, 'from fate_flow.utils.model_utils import gen_party_model_id, check_before_deploy, compare_version, gather_model_info_data, save_model_info\n'), ((5968, 6063), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (["{'parent_model_id': model_id, 'parent_model_version': model_version}"], {'byte': '(True)'}), "({'parent_model_id': model_id, 'parent_model_version':\n model_version}, byte=True)\n", (5978, 6063), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((6244, 6288), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['runtime_conf_on_party'], {'byte': '(True)'}), '(runtime_conf_on_party, byte=True)\n', (6254, 6288), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((6391, 6443), 'os.path.join', 'os.path.join', (['deploy_model.model_path', '"""pipeline.pb"""'], {}), "(deploy_model.model_path, 'pipeline.pb')\n", (6403, 6443), False, 'import os\n'), ((6469, 6567), 'os.path.join', 'os.path.join', (['deploy_model.model_path', '"""variables"""', '"""data"""', '"""pipeline"""', '"""pipeline"""', '"""Pipeline"""'], {}), "(deploy_model.model_path, 'variables', 'data', 'pipeline',\n 'pipeline', 'Pipeline')\n", (6481, 6567), False, 'import os\n'), ((6938, 6992), 'fate_flow.utils.model_utils.compare_version', 'compare_version', (["model_info['f_fate_version']", '"""1.5.0"""'], {}), "(model_info['f_fate_version'], '1.5.0')\n", (6953, 6992), False, 'from fate_flow.utils.model_utils import gen_party_model_id, check_before_deploy, compare_version, gather_model_info_data, save_model_info\n'), ((7848, 8008), 'fate_flow.model.checkpoint.CheckpointManager', 'CheckpointManager', ([], {'role': 'local_role', 'party_id': 'local_party_id', 'model_id': 'model_id', 'model_version': 'model_version', 'component_name': 'component_name', 'mkdir': '(False)'}), '(role=local_role, party_id=local_party_id, model_id=\n model_id, model_version=model_version, component_name=component_name,\n 
mkdir=False)\n', (7865, 8008), False, 'from fate_flow.model.checkpoint import CheckpointManager\n'), ((8536, 8560), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (8557, 8560), False, 'from fate_flow.settings import stat_logger\n'), ((3298, 3323), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['inference_dsl'], {}), '(inference_dsl)\n', (3308, 3323), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((3750, 3788), 'fate_flow.utils.schedule_utils.get_dsl_parser_by_version', 'get_dsl_parser_by_version', (['dsl_version'], {}), '(dsl_version)\n', (3775, 3788), False, 'from fate_flow.utils.schedule_utils import get_dsl_parser_by_version\n')]
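deploy() above pulls everything it needs out of a plain config_data dict. A hedged example of the expected shape — all values below are made up; only the key names are taken from the reads inside the function:

# Illustrative deploy request body; values are placeholders.
config_data = {
    "model_id": "example_model_id",                  # parent model id
    "model_version": "202105060929263278441",        # parent model version (a job id)
    "local": {"role": "guest", "party_id": 9999},    # which party is deploying
    "child_model_version": "202105061000000000000",  # version assigned to the deployed child model
    # optional: pin a checkpoint per component, by step_index or step_name
    "components_checkpoint": {"hetero_lr_0": {"step_index": 5}},
    # optional: either hand in a predict dsl directly ("dsl"/"predict_dsl"),
    # or restrict deployment to a subset of the trained components
    "cpn_list": ["reader_0", "dataio_0", "hetero_lr_0"],
}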
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np from arch.api import federation from arch.api.utils import log_utils from fate_flow.entity.metric import MetricMeta, Metric from federatedml.logistic_regression.hetero_logistic_regression.hetero_lr_base import HeteroLRBase from federatedml.optim.federated_aggregator import HeteroFederatedAggregator from federatedml.util import consts LOGGER = log_utils.getLogger() class HeteroLRArbiter(HeteroLRBase): def __init__(self): # LogisticParamChecker.check_param(logistic_params) super(HeteroLRArbiter, self).__init__() self.role = consts.ARBITER # attribute self.pre_loss = None self.batch_num = None def perform_subtasks(self, **training_info): """ performs any tasks that the arbiter is responsible for. This 'perform_subtasks' function serves as a handler on conducting any task that the arbiter is responsible for. For example, for the 'perform_subtasks' function of 'HeteroDNNLRArbiter' class located in 'hetero_dnn_lr_arbiter.py', it performs some works related to updating/training local neural networks of guest or host. For this particular class (i.e., 'HeteroLRArbiter') that serves as a base arbiter class for neural-networks-based hetero-logistic-regression model, the 'perform_subtasks' function will do nothing. In other words, no subtask is performed by this arbiter. 
:param training_info: a dictionary holding training information """ pass def run(self, component_parameters=None, args=None): self._init_runtime_parameters(component_parameters) if self.need_cv: LOGGER.info("Task is cross validation.") self.cross_validation(None) return if self.need_one_vs_rest: LOGGER.info("Task is one_vs_rest fit") if not "model" in args: self.one_vs_rest_fit() elif not "model" in args: LOGGER.info("Task is fit") self.set_flowid('train') self.fit() else: LOGGER.info("Task is transform") def fit(self, data_instances=None): """ Train lr model of role arbiter Parameters ---------- data_instances: DTable of Instance, input data """ LOGGER.info("Enter hetero_lr_arbiter fit") if data_instances: # self.header = data_instance.schema.get('header') self.header = self.get_header(data_instances) else: self.header = [] # Generate encrypt keys self.encrypt_operator.generate_key(self.key_length) public_key = self.encrypt_operator.get_public_key() public_key = public_key LOGGER.info("public_key:{}".format(public_key)) federation.remote(public_key, name=self.transfer_variable.paillier_pubkey.name, tag=self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey), role=consts.HOST, idx=0) LOGGER.info("remote public_key to host") federation.remote(public_key, name=self.transfer_variable.paillier_pubkey.name, tag=self.transfer_variable.generate_transferid(self.transfer_variable.paillier_pubkey), role=consts.GUEST, idx=0) LOGGER.info("remote public_key to guest") batch_info = federation.get(name=self.transfer_variable.batch_info.name, tag=self.transfer_variable.generate_transferid(self.transfer_variable.batch_info), idx=0) LOGGER.info("Get batch_info from guest:{}".format(batch_info)) self.batch_num = batch_info["batch_num"] is_stop = False self.n_iter_ = 0 while self.n_iter_ < self.max_iter: LOGGER.info("iter:{}".format(self.n_iter_)) batch_index = 0 iter_loss = 0 while batch_index < self.batch_num: LOGGER.info("batch:{}".format(batch_index)) host_gradient = federation.get(name=self.transfer_variable.host_gradient.name, tag=self.transfer_variable.generate_transferid( self.transfer_variable.host_gradient, self.n_iter_, batch_index), idx=0) LOGGER.info("Get host_gradient from Host") guest_gradient = federation.get(name=self.transfer_variable.guest_gradient.name, tag=self.transfer_variable.generate_transferid( self.transfer_variable.guest_gradient, self.n_iter_, batch_index), idx=0) LOGGER.info("Get guest_gradient from Guest") # aggregate gradient host_gradient, guest_gradient = np.array(host_gradient), np.array(guest_gradient) gradient = np.hstack((host_gradient, guest_gradient)) # decrypt gradient for i in range(gradient.shape[0]): gradient[i] = self.encrypt_operator.decrypt(gradient[i]) # optimization optim_gradient = self.optimizer.apply_gradients(gradient) # separate optim_gradient according gradient size of Host and Guest separate_optim_gradient = HeteroFederatedAggregator.separate(optim_gradient, [host_gradient.shape[0], guest_gradient.shape[0]]) host_optim_gradient = separate_optim_gradient[0] guest_optim_gradient = separate_optim_gradient[1] federation.remote(host_optim_gradient, name=self.transfer_variable.host_optim_gradient.name, tag=self.transfer_variable.generate_transferid( self.transfer_variable.host_optim_gradient, self.n_iter_, batch_index), role=consts.HOST, idx=0) LOGGER.info("Remote host_optim_gradient to Host") federation.remote(guest_optim_gradient, name=self.transfer_variable.guest_optim_gradient.name, tag=self.transfer_variable.generate_transferid( 
self.transfer_variable.guest_optim_gradient, self.n_iter_, batch_index), role=consts.GUEST, idx=0) LOGGER.info("Remote guest_optim_gradient to Guest") training_info = {"iteration": self.n_iter_, "batch_index": batch_index} self.perform_subtasks(**training_info) loss = federation.get(name=self.transfer_variable.loss.name, tag=self.transfer_variable.generate_transferid( self.transfer_variable.loss, self.n_iter_, batch_index), idx=0) de_loss = self.encrypt_operator.decrypt(loss) iter_loss += de_loss # LOGGER.info("Get loss from guest:{}".format(de_loss)) batch_index += 1 # if converge loss = iter_loss / self.batch_num LOGGER.info("iter loss:{}".format(loss)) if not self.need_one_vs_rest: metric_meta = MetricMeta(name='train', metric_type="LOSS", extra_metas={ "unit_name": "iters" }) metric_name = 'loss' self.callback_meta(metric_name=metric_name, metric_namespace='train', metric_meta=metric_meta) self.callback_metric(metric_name=metric_name, metric_namespace='train', metric_data=[Metric(self.n_iter_, float(loss))]) if self.converge_func.is_converge(loss): is_stop = True federation.remote(is_stop, name=self.transfer_variable.is_stopped.name, tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_stopped, self.n_iter_, batch_index), role=consts.HOST, idx=0) LOGGER.info("Remote is_stop to host:{}".format(is_stop)) federation.remote(is_stop, name=self.transfer_variable.is_stopped.name, tag=self.transfer_variable.generate_transferid(self.transfer_variable.is_stopped, self.n_iter_, batch_index), role=consts.GUEST, idx=0) LOGGER.info("Remote is_stop to guest:{}".format(is_stop)) self.n_iter_ += 1 if is_stop: LOGGER.info("Model is converged, iter:{}".format(self.n_iter_)) break LOGGER.info("Reach max iter {} or converge, train model finish!".format(self.max_iter))
[ "fate_flow.entity.metric.MetricMeta" ]
[((983, 1004), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (1002, 1004), False, 'from arch.api.utils import log_utils\n'), ((5839, 5881), 'numpy.hstack', 'np.hstack', (['(host_gradient, guest_gradient)'], {}), '((host_gradient, guest_gradient))\n', (5848, 5881), True, 'import numpy as np\n'), ((6277, 6382), 'federatedml.optim.federated_aggregator.HeteroFederatedAggregator.separate', 'HeteroFederatedAggregator.separate', (['optim_gradient', '[host_gradient.shape[0], guest_gradient.shape[0]]'], {}), '(optim_gradient, [host_gradient.shape[0],\n guest_gradient.shape[0]])\n', (6311, 6382), False, 'from federatedml.optim.federated_aggregator import HeteroFederatedAggregator\n'), ((8670, 8755), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""train"""', 'metric_type': '"""LOSS"""', 'extra_metas': "{'unit_name': 'iters'}"}), "(name='train', metric_type='LOSS', extra_metas={'unit_name': 'iters'}\n )\n", (8680, 8755), False, 'from fate_flow.entity.metric import MetricMeta, Metric\n'), ((5762, 5785), 'numpy.array', 'np.array', (['host_gradient'], {}), '(host_gradient)\n', (5770, 5785), True, 'import numpy as np\n'), ((5787, 5811), 'numpy.array', 'np.array', (['guest_gradient'], {}), '(guest_gradient)\n', (5795, 5811), True, 'import numpy as np\n')]
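The arbiter's per-batch aggregation above amounts to: stack the host and guest gradient vectors, decrypt element-wise, run the optimizer, then split the result back by the original lengths so each party gets its own share. A minimal numpy illustration of that stack-and-separate step (just the idea, not FATE's HeteroFederatedAggregator):

import numpy as np

host_gradient = np.array([0.10, -0.20, 0.05])   # pretend: 3 host-side coefficients
guest_gradient = np.array([0.30, -0.10])        # pretend: 2 guest-side coefficients

gradient = np.hstack((host_gradient, guest_gradient))   # aggregate into one vector
optim_gradient = 0.9 * gradient                          # stand-in for optimizer.apply_gradients

# Separate by the original sizes, like HeteroFederatedAggregator.separate(optim_gradient, [3, 2]).
sizes = [host_gradient.shape[0], guest_gradient.shape[0]]
host_optim_gradient, guest_optim_gradient = np.split(optim_gradient, np.cumsum(sizes)[:-1])
assert host_optim_gradient.shape == (3,) and guest_optim_gradient.shape == (2,)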
# # Copyright 2021 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import functools import json from base64 import b64encode from hmac import HMAC from time import time from urllib.parse import quote, urlencode from uuid import uuid1 import requests from fate_arch.common.base_utils import CustomJSONEncoder from fate_flow.settings import HTTP_APP_KEY, HTTP_SECRET_KEY requests.models.complexjson.dumps = functools.partial(json.dumps, cls=CustomJSONEncoder) def request(**kwargs): sess = requests.Session() stream = kwargs.pop('stream', sess.stream) prepped = requests.Request(**kwargs).prepare() if HTTP_APP_KEY and HTTP_SECRET_KEY: timestamp = str(round(time() * 1000)) nonce = str(uuid1()) signature = b64encode(HMAC(HTTP_SECRET_KEY.encode('ascii'), b'\n'.join([ timestamp.encode('ascii'), nonce.encode('ascii'), HTTP_APP_KEY.encode('ascii'), prepped.path_url.encode('ascii'), prepped.body if kwargs.get('json') else b'', urlencode(sorted(kwargs['data'].items()), quote_via=quote, safe='-._~').encode('ascii') if kwargs.get('data') and isinstance(kwargs['data'], dict) else b'', ]), 'sha1').digest()).decode('ascii') prepped.headers.update({ 'TIMESTAMP': timestamp, 'NONCE': nonce, 'APP_KEY': HTTP_APP_KEY, 'SIGNATURE': signature, }) return sess.send(prepped, stream=stream)
[ "fate_flow.settings.HTTP_APP_KEY.encode", "fate_flow.settings.HTTP_SECRET_KEY.encode" ]
[((958, 1010), 'functools.partial', 'functools.partial', (['json.dumps'], {'cls': 'CustomJSONEncoder'}), '(json.dumps, cls=CustomJSONEncoder)\n', (975, 1010), False, 'import functools\n'), ((1047, 1065), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1063, 1065), False, 'import requests\n'), ((1127, 1153), 'requests.Request', 'requests.Request', ([], {}), '(**kwargs)\n', (1143, 1153), False, 'import requests\n'), ((1272, 1279), 'uuid.uuid1', 'uuid1', ([], {}), '()\n', (1277, 1279), False, 'from uuid import uuid1\n'), ((1236, 1242), 'time.time', 'time', ([], {}), '()\n', (1240, 1242), False, 'from time import time\n'), ((1316, 1347), 'fate_flow.settings.HTTP_SECRET_KEY.encode', 'HTTP_SECRET_KEY.encode', (['"""ascii"""'], {}), "('ascii')\n", (1338, 1347), False, 'from fate_flow.settings import HTTP_APP_KEY, HTTP_SECRET_KEY\n'), ((1448, 1476), 'fate_flow.settings.HTTP_APP_KEY.encode', 'HTTP_APP_KEY.encode', (['"""ascii"""'], {}), "('ascii')\n", (1467, 1476), False, 'from fate_flow.settings import HTTP_APP_KEY, HTTP_SECRET_KEY\n')]
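The request() wrapper above signs each call by joining timestamp, nonce, app key, request path, JSON body and form data with newlines, HMAC-SHA1-ing that with the secret key, and base64-encoding the digest into a SIGNATURE header. A hedged sketch of the matching check on the receiving side — the helper name and argument shapes are assumptions, not FATE Flow's actual server code:

import hmac
from base64 import b64encode


def verify_signature(headers, path_url, raw_body, secret_key, app_key):
    # Rebuild the string the client signed (JSON request: body bytes, empty form-data slot)
    # and compare digests in constant time.
    expected = b64encode(hmac.new(secret_key.encode('ascii'), b'\n'.join([
        headers['TIMESTAMP'].encode('ascii'),
        headers['NONCE'].encode('ascii'),
        app_key.encode('ascii'),
        path_url.encode('ascii'),
        raw_body or b'',
        b'',
    ]), 'sha1').digest()).decode('ascii')
    return hmac.compare_digest(expected, headers['SIGNATURE'])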
import unittest from unittest.mock import patch import os import io import shutil import hashlib import concurrent.futures from pathlib import Path from copy import deepcopy from zipfile import ZipFile from ruamel import yaml from fate_flow.pipelined_model.pipelined_model import PipelinedModel from fate_flow.settings import TEMP_DIRECTORY with open(Path(__file__).parent / 'define_meta.yaml', encoding='utf8') as _f: data_define_meta = yaml.safe_load(_f) args_update_component_meta = [ 'dataio_0', 'DataIO', 'dataio', { 'DataIOMeta': 'DataIOMeta', 'DataIOParam': 'DataIOParam', }, ] class TestPipelinedModel(unittest.TestCase): def setUp(self): shutil.rmtree(TEMP_DIRECTORY, True) self.pipelined_model = PipelinedModel('foobar', 'v1') shutil.rmtree(self.pipelined_model.model_path, True) self.pipelined_model.create_pipelined_model() with open(self.pipelined_model.define_meta_path, 'w', encoding='utf8') as f: yaml.dump(data_define_meta, f) def tearDown(self): shutil.rmtree(TEMP_DIRECTORY, True) shutil.rmtree(self.pipelined_model.model_path, True) def test_write_read_file_same_time(self): fw = open(self.pipelined_model.define_meta_path, 'r+', encoding='utf8') self.assertEqual(yaml.safe_load(fw), data_define_meta) fw.seek(0) fw.write('foobar') with open(self.pipelined_model.define_meta_path, encoding='utf8') as fr: self.assertEqual(yaml.safe_load(fr), data_define_meta) fw.truncate() with open(self.pipelined_model.define_meta_path, encoding='utf8') as fr: self.assertEqual(fr.read(), 'foobar') fw.seek(0) fw.write('abc') fw.close() with open(self.pipelined_model.define_meta_path, encoding='utf8') as fr: self.assertEqual(fr.read(), 'abcbar') def test_update_component_meta_with_changes(self): with patch('ruamel.yaml.dump', side_effect=yaml.dump) as yaml_dump: self.pipelined_model.update_component_meta( 'dataio_0', 'DataIO_v0', 'dataio', { 'DataIOMeta': 'DataIOMeta_v0', 'DataIOParam': 'DataIOParam_v0', } ) yaml_dump.assert_called_once() with open(self.pipelined_model.define_meta_path, encoding='utf8') as tmp: define_index = yaml.safe_load(tmp) _data = deepcopy(data_define_meta) _data['component_define']['dataio_0']['module_name'] = 'DataIO_v0' _data['model_proto']['dataio_0']['dataio'] = { 'DataIOMeta': 'DataIOMeta_v0', 'DataIOParam': 'DataIOParam_v0', } self.assertEqual(define_index, _data) def test_update_component_meta_without_changes(self): with open(self.pipelined_model.define_meta_path, 'w', encoding='utf8') as f: yaml.dump(data_define_meta, f, Dumper=yaml.RoundTripDumper) with patch('ruamel.yaml.dump', side_effect=yaml.dump) as yaml_dump: self.pipelined_model.update_component_meta(*args_update_component_meta) yaml_dump.assert_not_called() with open(self.pipelined_model.define_meta_path, encoding='utf8') as tmp: define_index = yaml.safe_load(tmp) self.assertEqual(define_index, data_define_meta) def test_update_component_meta_multi_thread(self): with patch('ruamel.yaml.safe_load', side_effect=yaml.safe_load) as yaml_load, \ patch('ruamel.yaml.dump', side_effect=yaml.dump) as yaml_dump, \ concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor: for _ in range(100): executor.submit(self.pipelined_model.update_component_meta, *args_update_component_meta) self.assertEqual(yaml_load.call_count, 100) self.assertEqual(yaml_dump.call_count, 0) with open(self.pipelined_model.define_meta_path, encoding='utf8') as tmp: define_index = yaml.safe_load(tmp) self.assertEqual(define_index, data_define_meta) def test_update_component_meta_empty_file(self): open(self.pipelined_model.define_meta_path, 
'w').close() with self.assertRaisesRegex(ValueError, 'Invalid meta file'): self.pipelined_model.update_component_meta(*args_update_component_meta) def test_packaging_model(self): archive_file_path = self.pipelined_model.packaging_model() self.assertEqual(archive_file_path, self.pipelined_model.archive_model_file_path) self.assertTrue(Path(archive_file_path).is_file()) self.assertTrue(Path(archive_file_path + '.sha1').is_file()) with ZipFile(archive_file_path) as z: with io.TextIOWrapper(z.open('define/define_meta.yaml'), encoding='utf8') as f: define_index = yaml.safe_load(f) self.assertEqual(define_index, data_define_meta) with open(archive_file_path, 'rb') as f, open(archive_file_path + '.sha1', encoding='utf8') as g: sha1 = hashlib.sha1(f.read()).hexdigest() sha1_orig = g.read().strip() self.assertEqual(sha1, sha1_orig) def test_packaging_model_not_exists(self): shutil.rmtree(self.pipelined_model.model_path, True) with self.assertRaisesRegex(FileNotFoundError, 'Can not found foobar v1 model local cache'): self.pipelined_model.packaging_model() def test_unpack_model(self): archive_file_path = self.pipelined_model.packaging_model() self.assertTrue(Path(archive_file_path + '.sha1').is_file()) shutil.rmtree(self.pipelined_model.model_path, True) self.assertFalse(Path(self.pipelined_model.model_path).exists()) self.pipelined_model.unpack_model(archive_file_path) with open(self.pipelined_model.define_meta_path, encoding='utf8') as tmp: define_index = yaml.safe_load(tmp) self.assertEqual(define_index, data_define_meta) def test_unpack_model_local_cache_exists(self): archive_file_path = self.pipelined_model.packaging_model() with self.assertRaisesRegex(FileExistsError, 'Model foobar v1 local cache already existed'): self.pipelined_model.unpack_model(archive_file_path) def test_unpack_model_no_hash_file(self): archive_file_path = self.pipelined_model.packaging_model() Path(archive_file_path + '.sha1').unlink() self.assertFalse(Path(archive_file_path + '.sha1').exists()) shutil.rmtree(self.pipelined_model.model_path, True) self.assertFalse(os.path.exists(self.pipelined_model.model_path)) self.pipelined_model.unpack_model(archive_file_path) with open(self.pipelined_model.define_meta_path, encoding='utf8') as tmp: define_index = yaml.safe_load(tmp) self.assertEqual(define_index, data_define_meta) def test_unpack_model_hash_not_match(self): archive_file_path = self.pipelined_model.packaging_model() self.assertTrue(Path(archive_file_path + '.sha1').is_file()) with open(archive_file_path + '.sha1', 'w', encoding='utf8') as f: f.write('abc123') shutil.rmtree(self.pipelined_model.model_path, True) self.assertFalse(Path(self.pipelined_model.model_path).exists()) with self.assertRaisesRegex(ValueError, 'Hash not match.'): self.pipelined_model.unpack_model(archive_file_path) if __name__ == '__main__': unittest.main()
[ "fate_flow.pipelined_model.pipelined_model.PipelinedModel" ]
[((447, 465), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['_f'], {}), '(_f)\n', (461, 465), False, 'from ruamel import yaml\n'), ((7513, 7528), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7526, 7528), False, 'import unittest\n'), ((707, 742), 'shutil.rmtree', 'shutil.rmtree', (['TEMP_DIRECTORY', '(True)'], {}), '(TEMP_DIRECTORY, True)\n', (720, 742), False, 'import shutil\n'), ((775, 805), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'PipelinedModel', (['"""foobar"""', '"""v1"""'], {}), "('foobar', 'v1')\n", (789, 805), False, 'from fate_flow.pipelined_model.pipelined_model import PipelinedModel\n'), ((814, 866), 'shutil.rmtree', 'shutil.rmtree', (['self.pipelined_model.model_path', '(True)'], {}), '(self.pipelined_model.model_path, True)\n', (827, 866), False, 'import shutil\n'), ((1083, 1118), 'shutil.rmtree', 'shutil.rmtree', (['TEMP_DIRECTORY', '(True)'], {}), '(TEMP_DIRECTORY, True)\n', (1096, 1118), False, 'import shutil\n'), ((1127, 1179), 'shutil.rmtree', 'shutil.rmtree', (['self.pipelined_model.model_path', '(True)'], {}), '(self.pipelined_model.model_path, True)\n', (1140, 1179), False, 'import shutil\n'), ((2478, 2504), 'copy.deepcopy', 'deepcopy', (['data_define_meta'], {}), '(data_define_meta)\n', (2486, 2504), False, 'from copy import deepcopy\n'), ((5258, 5310), 'shutil.rmtree', 'shutil.rmtree', (['self.pipelined_model.model_path', '(True)'], {}), '(self.pipelined_model.model_path, True)\n', (5271, 5310), False, 'import shutil\n'), ((5642, 5694), 'shutil.rmtree', 'shutil.rmtree', (['self.pipelined_model.model_path', '(True)'], {}), '(self.pipelined_model.model_path, True)\n', (5655, 5694), False, 'import shutil\n'), ((6546, 6598), 'shutil.rmtree', 'shutil.rmtree', (['self.pipelined_model.model_path', '(True)'], {}), '(self.pipelined_model.model_path, True)\n', (6559, 6598), False, 'import shutil\n'), ((7220, 7272), 'shutil.rmtree', 'shutil.rmtree', (['self.pipelined_model.model_path', '(True)'], {}), '(self.pipelined_model.model_path, True)\n', (7233, 7272), False, 'import shutil\n'), ((1019, 1049), 'ruamel.yaml.dump', 'yaml.dump', (['data_define_meta', 'f'], {}), '(data_define_meta, f)\n', (1028, 1049), False, 'from ruamel import yaml\n'), ((1332, 1350), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['fw'], {}), '(fw)\n', (1346, 1350), False, 'from ruamel import yaml\n'), ((1984, 2032), 'unittest.mock.patch', 'patch', (['"""ruamel.yaml.dump"""'], {'side_effect': 'yaml.dump'}), "('ruamel.yaml.dump', side_effect=yaml.dump)\n", (1989, 2032), False, 'from unittest.mock import patch\n'), ((2441, 2460), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['tmp'], {}), '(tmp)\n', (2455, 2460), False, 'from ruamel import yaml\n'), ((2936, 2995), 'ruamel.yaml.dump', 'yaml.dump', (['data_define_meta', 'f'], {'Dumper': 'yaml.RoundTripDumper'}), '(data_define_meta, f, Dumper=yaml.RoundTripDumper)\n', (2945, 2995), False, 'from ruamel import yaml\n'), ((3010, 3058), 'unittest.mock.patch', 'patch', (['"""ruamel.yaml.dump"""'], {'side_effect': 'yaml.dump'}), "('ruamel.yaml.dump', side_effect=yaml.dump)\n", (3015, 3058), False, 'from unittest.mock import patch\n'), ((3305, 3324), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['tmp'], {}), '(tmp)\n', (3319, 3324), False, 'from ruamel import yaml\n'), ((3451, 3509), 'unittest.mock.patch', 'patch', (['"""ruamel.yaml.safe_load"""'], {'side_effect': 'yaml.safe_load'}), "('ruamel.yaml.safe_load', side_effect=yaml.safe_load)\n", (3456, 3509), False, 'from unittest.mock import patch\n'), ((3542, 3590), 'unittest.mock.patch', 'patch', 
(['"""ruamel.yaml.dump"""'], {'side_effect': 'yaml.dump'}), "('ruamel.yaml.dump', side_effect=yaml.dump)\n", (3547, 3590), False, 'from unittest.mock import patch\n'), ((4041, 4060), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['tmp'], {}), '(tmp)\n', (4055, 4060), False, 'from ruamel import yaml\n'), ((4727, 4753), 'zipfile.ZipFile', 'ZipFile', (['archive_file_path'], {}), '(archive_file_path)\n', (4734, 4753), False, 'from zipfile import ZipFile\n'), ((5939, 5958), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['tmp'], {}), '(tmp)\n', (5953, 5958), False, 'from ruamel import yaml\n'), ((6624, 6671), 'os.path.exists', 'os.path.exists', (['self.pipelined_model.model_path'], {}), '(self.pipelined_model.model_path)\n', (6638, 6671), False, 'import os\n'), ((6844, 6863), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['tmp'], {}), '(tmp)\n', (6858, 6863), False, 'from ruamel import yaml\n'), ((356, 370), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (360, 370), False, 'from pathlib import Path\n'), ((1527, 1545), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['fr'], {}), '(fr)\n', (1541, 1545), False, 'from ruamel import yaml\n'), ((4883, 4900), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (4897, 4900), False, 'from ruamel import yaml\n'), ((6425, 6458), 'pathlib.Path', 'Path', (["(archive_file_path + '.sha1')"], {}), "(archive_file_path + '.sha1')\n", (6429, 6458), False, 'from pathlib import Path\n'), ((4609, 4632), 'pathlib.Path', 'Path', (['archive_file_path'], {}), '(archive_file_path)\n', (4613, 4632), False, 'from pathlib import Path\n'), ((4668, 4701), 'pathlib.Path', 'Path', (["(archive_file_path + '.sha1')"], {}), "(archive_file_path + '.sha1')\n", (4672, 4701), False, 'from pathlib import Path\n'), ((5588, 5621), 'pathlib.Path', 'Path', (["(archive_file_path + '.sha1')"], {}), "(archive_file_path + '.sha1')\n", (5592, 5621), False, 'from pathlib import Path\n'), ((5720, 5757), 'pathlib.Path', 'Path', (['self.pipelined_model.model_path'], {}), '(self.pipelined_model.model_path)\n', (5724, 5757), False, 'from pathlib import Path\n'), ((6493, 6526), 'pathlib.Path', 'Path', (["(archive_file_path + '.sha1')"], {}), "(archive_file_path + '.sha1')\n", (6497, 6526), False, 'from pathlib import Path\n'), ((7061, 7094), 'pathlib.Path', 'Path', (["(archive_file_path + '.sha1')"], {}), "(archive_file_path + '.sha1')\n", (7065, 7094), False, 'from pathlib import Path\n'), ((7298, 7335), 'pathlib.Path', 'Path', (['self.pipelined_model.model_path'], {}), '(self.pipelined_model.model_path)\n', (7302, 7335), False, 'from pathlib import Path\n')]
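The relationship between the `apis` list above and the `extract_api` tuples is implicit: each tuple records a call's character span, its fully qualified name, the attribute expression actually called, its argument sources, and the import statement it resolves through. The sketch below shows one way the project-level `apis` column could be recovered from such tuples by prefix filtering; the helper name, the `'fate_flow.'` prefix, and the simplified two-field entries are illustrative assumptions, not part of the dataset.

# Illustrative sketch (assumption): derive an `apis`-style list from
# extract_api tuples by keeping qualified names under the target package.
def project_apis(extract_api_entries, prefix="fate_flow."):
    apis = []
    for entry in extract_api_entries:
        qualified_name = entry[1]  # second field carries the dotted call name
        if qualified_name.startswith(prefix) and qualified_name not in apis:
            apis.append(qualified_name)
    return apis


# Hypothetical, simplified entries: only the span and the qualified name are
# kept here; the real tuples above carry several more fields.
entries = [
    ((775, 805), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel'),
    ((447, 465), 'ruamel.yaml.safe_load'),
]
print(project_apis(entries))
# ['fate_flow.pipelined_model.pipelined_model.PipelinedModel']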
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import datetime
import inspect
import os
import sys

import __main__
from peewee import (CharField, IntegerField, BigIntegerField,
                    TextField, CompositeKey, BigAutoField, BooleanField)
from playhouse.apsw_ext import APSWDatabase
from playhouse.pool import PooledMySQLDatabase

from fate_arch.common import log
from fate_arch.common.base_utils import current_timestamp
from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField
from fate_arch.common import WorkMode
from fate_flow.settings import DATABASE, WORK_MODE, stat_logger
from fate_flow.entity.runtime_config import RuntimeConfig

LOGGER = log.getLogger()


def singleton(cls, *args, **kw):
    instances = {}

    def _singleton():
        key = str(cls) + str(os.getpid())
        if key not in instances:
            instances[key] = cls(*args, **kw)
        return instances[key]

    return _singleton


@singleton
class BaseDataBase(object):
    def __init__(self):
        database_config = DATABASE.copy()
        db_name = database_config.pop("name")
        if WORK_MODE == WorkMode.STANDALONE:
            self.database_connection = APSWDatabase('fate_flow_sqlite.db')
            RuntimeConfig.init_config(USE_LOCAL_DATABASE=True)
            stat_logger.info('init sqlite database on standalone mode successfully')
        elif WORK_MODE == WorkMode.CLUSTER:
            self.database_connection = PooledMySQLDatabase(db_name, **database_config)
            stat_logger.info('init mysql database on cluster mode successfully')
            RuntimeConfig.init_config(USE_LOCAL_DATABASE=False)
        else:
            raise Exception('can not init database')


MAIN_FILE_PATH = os.path.realpath(__main__.__file__)
if MAIN_FILE_PATH.endswith('fate_flow_server.py') or \
        MAIN_FILE_PATH.endswith('task_executor.py') or \
        MAIN_FILE_PATH.find("/unittest/__main__.py"):
    DB = BaseDataBase().database_connection
else:
    # Initialize the database only when the server is started.
    DB = None


def close_connection():
    try:
        if DB:
            DB.close()
    except Exception as e:
        LOGGER.exception(e)


class DataBaseModel(BaseModel):
    class Meta:
        database = DB


@DB.connection_context()
def init_database_tables():
    members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    table_objs = []
    for name, obj in members:
        if obj != DataBaseModel and issubclass(obj, DataBaseModel):
            table_objs.append(obj)
    DB.create_tables(table_objs)


def fill_db_model_object(model_object, human_model_dict):
    for k, v in human_model_dict.items():
        attr_name = 'f_%s' % k
        if hasattr(model_object.__class__, attr_name):
            setattr(model_object, attr_name, v)
    return model_object


class Job(DataBaseModel):
    # multi-party common configuration
    f_job_id = CharField(max_length=25)
    f_name = CharField(max_length=500, null=True, default='')
    f_description = TextField(null=True, default='')
    f_tag = CharField(max_length=50, null=True, index=True, default='')
    f_dsl = JSONField()
    f_runtime_conf = JSONField()
    f_train_runtime_conf = JSONField(null=True)
    f_roles = JSONField()
    f_work_mode = IntegerField()
    f_initiator_role = CharField(max_length=50, index=True)
    f_initiator_party_id = CharField(max_length=50, index=True, default=-1)
    f_status = CharField(max_length=50)
    # this party configuration
    f_role = CharField(max_length=50, index=True)
    f_party_id = CharField(max_length=10, index=True)
    f_is_initiator = BooleanField(null=True, index=True, default=False)
    f_progress = IntegerField(null=True, default=0)

    f_engine_name = CharField(max_length=50, null=True, index=True)
    f_engine_type = CharField(max_length=10, null=True, index=True)
    f_cores = IntegerField(index=True, default=0)
    f_memory = IntegerField(index=True, default=0)  # MB
    f_remaining_cores = IntegerField(index=True, default=0)
    f_remaining_memory = IntegerField(index=True, default=0)  # MB
    f_resource_in_use = BooleanField(index=True, default=False)
    f_apply_resource_time = BigIntegerField(null=True)
    f_return_resource_time = BigIntegerField(null=True)

    f_create_time = BigIntegerField()
    f_update_time = BigIntegerField(null=True)
    f_start_time = BigIntegerField(null=True)
    f_end_time = BigIntegerField(null=True)
    f_elapsed = BigIntegerField(null=True)

    class Meta:
        db_table = "t_job"
        primary_key = CompositeKey('f_job_id', 'f_role', 'f_party_id')


class Task(DataBaseModel):
    # multi-party common configuration
    f_job_id = CharField(max_length=25)
    f_component_name = TextField()
    f_task_id = CharField(max_length=100)
    f_task_version = BigIntegerField()
    f_initiator_role = CharField(max_length=50, index=True)
    f_initiator_party_id = CharField(max_length=50, index=True, default=-1)
    f_federated_mode = CharField(max_length=10, index=True)
    f_federated_status_collect_type = CharField(max_length=10, index=True)
    f_status = CharField(max_length=50)
    # this party configuration
    f_role = CharField(max_length=50, index=True)
    f_party_id = CharField(max_length=10, index=True)
    f_run_on_this_party = BooleanField(null=True, index=True, default=False)
    f_run_ip = CharField(max_length=100, null=True)
    f_run_pid = IntegerField(null=True)
    f_party_status = CharField(max_length=50)

    f_create_time = BigIntegerField()
    f_update_time = BigIntegerField(null=True)
    f_start_time = BigIntegerField(null=True)
    f_end_time = BigIntegerField(null=True)
    f_elapsed = BigIntegerField(null=True)

    class Meta:
        db_table = "t_task"
        primary_key = CompositeKey('f_job_id', 'f_task_id', 'f_task_version', 'f_role', 'f_party_id')


class TrackingMetric(DataBaseModel):
    _mapper = {}

    @classmethod
    def model(cls, table_index=None, date=None):
        if not table_index:
            table_index = date.strftime(
                '%Y%m%d') if date else datetime.datetime.now().strftime(
                '%Y%m%d')
        class_name = 'TrackingMetric_%s' % table_index

        ModelClass = TrackingMetric._mapper.get(class_name, None)
        if ModelClass is None:
            class Meta:
                db_table = '%s_%s' % ('t_tracking_metric', table_index)

            attrs = {'__module__': cls.__module__, 'Meta': Meta}
            ModelClass = type("%s_%s" % (cls.__name__, table_index), (cls,), attrs)
            TrackingMetric._mapper[class_name] = ModelClass
        return ModelClass()

    f_id = BigAutoField(primary_key=True)
    f_job_id = CharField(max_length=25)
    f_component_name = TextField()
    f_task_id = CharField(max_length=100, null=True)
    f_task_version = BigIntegerField(null=True)
    f_role = CharField(max_length=50, index=True)
    f_party_id = CharField(max_length=10, index=True)
    f_metric_namespace = CharField(max_length=180, index=True)
    f_metric_name = CharField(max_length=180, index=True)
    f_key = CharField(max_length=200)
    f_value = TextField()
    f_type = IntegerField(index=True)  # 0 is data, 1 is meta
    f_create_time = BigIntegerField()
    f_update_time = BigIntegerField(null=True)


class TrackingOutputDataInfo(DataBaseModel):
    _mapper = {}

    @classmethod
    def model(cls, table_index=None, date=None):
        if not table_index:
            table_index = date.strftime(
                '%Y%m%d') if date else datetime.datetime.now().strftime(
                '%Y%m%d')
        class_name = 'TrackingOutputDataInfo_%s' % table_index

        ModelClass = TrackingOutputDataInfo._mapper.get(class_name, None)
        if ModelClass is None:
            class Meta:
                db_table = '%s_%s' % ('t_tracking_output_data_info', table_index)
                primary_key = CompositeKey('f_job_id', 'f_task_id', 'f_task_version', 'f_data_name', 'f_role', 'f_party_id')

            attrs = {'__module__': cls.__module__, 'Meta': Meta}
            ModelClass = type("%s_%s" % (cls.__name__, table_index), (cls,), attrs)
            TrackingOutputDataInfo._mapper[class_name] = ModelClass
        return ModelClass()

    # multi-party common configuration
    f_job_id = CharField(max_length=25)
    f_component_name = TextField()
    f_task_id = CharField(max_length=100, null=True)
    f_task_version = BigIntegerField(null=True)
    f_data_name = CharField(max_length=30)
    # this party configuration
    f_role = CharField(max_length=50, index=True)
    f_party_id = CharField(max_length=10, index=True)
    f_table_name = CharField(max_length=500, null=True)
    f_table_namespace = CharField(max_length=500, null=True)
    f_create_time = BigIntegerField()
    f_update_time = BigIntegerField(null=True)
    f_description = TextField(null=True, default='')


class MachineLearningModelInfo(DataBaseModel):
    f_id = BigAutoField(primary_key=True)
    f_role = CharField(max_length=50, index=True)
    f_party_id = CharField(max_length=10, index=True)
    f_roles = JSONField()
    f_job_id = CharField(max_length=25)
    f_model_id = CharField(max_length=100, index=True)
    f_model_version = CharField(max_length=100, index=True)
    f_loaded_times = IntegerField(default=0)
    f_size = BigIntegerField(default=0)
    f_create_time = BigIntegerField(default=0)
    f_update_time = BigIntegerField(default=0)
    f_description = TextField(null=True, default='')
    f_initiator_role = CharField(max_length=50, index=True)
    f_initiator_party_id = CharField(max_length=50, index=True, default=-1)
    f_runtime_conf = JSONField()
    f_work_mode = IntegerField()
    f_dsl = JSONField()
    f_train_runtime_conf = JSONField(default={})
    f_imported = IntegerField(default=0)
    f_job_status = CharField(max_length=50)

    class Meta:
        db_table = "t_machine_learning_model_info"


class ModelTag(DataBaseModel):
    f_id = BigAutoField(primary_key=True)
    f_m_id = BigIntegerField(null=False)
    f_t_id = BigIntegerField(null=False)

    class Meta:
        db_table = "t_model_tag"


class Tag(DataBaseModel):
    f_id = BigAutoField(primary_key=True)
    f_name = CharField(max_length=100, index=True, unique=True)
    f_desc = TextField(null=True)
    f_create_time = BigIntegerField(default=current_timestamp())
    f_update_time = BigIntegerField(default=current_timestamp())

    class Meta:
        db_table = "t_tags"


class ComponentSummary(DataBaseModel):
    _mapper = {}

    @classmethod
    def model(cls, table_index=None, date=None):
        if not table_index:
            table_index = date.strftime(
                '%Y%m%d') if date else datetime.datetime.now().strftime(
                '%Y%m%d')
        class_name = 'ComponentSummary_%s' % table_index

        ModelClass = TrackingMetric._mapper.get(class_name, None)
        if ModelClass is None:
            class Meta:
                db_table = '%s_%s' % ('t_component_summary', table_index)

            attrs = {'__module__': cls.__module__, 'Meta': Meta}
            ModelClass = type("%s_%s" % (cls.__name__, table_index), (cls,), attrs)
            ComponentSummary._mapper[class_name] = ModelClass
        return ModelClass()

    f_id = BigAutoField(primary_key=True)
    f_job_id = CharField(max_length=25)
    f_role = CharField(max_length=25, index=True)
    f_party_id = CharField(max_length=10, index=True)
    f_component_name = TextField()
    f_task_id = CharField(max_length=50, null=True)
    f_task_version = CharField(max_length=50, null=True)
    f_summary = LongTextField()
    f_create_time = BigIntegerField(default=0)
    f_update_time = BigIntegerField(default=0)


class ModelOperationLog(DataBaseModel):
    f_operation_type = CharField(max_length=20, null=False, index=True)
    f_operation_status = CharField(max_length=20, null=True, index=True)
    f_initiator_role = CharField(max_length=50, index=True, null=True)
    f_initiator_party_id = CharField(max_length=10, index=True, null=True)
    f_request_ip = CharField(max_length=20, null=True)
    f_model_id = CharField(max_length=100, index=True)
    f_model_version = CharField(max_length=100, index=True)
    f_create_time = BigIntegerField(default=current_timestamp())
    f_update_time = BigIntegerField(default=current_timestamp())

    class Meta:
        db_table = "t_model_operation_log"


class EngineRegistry(DataBaseModel):
    f_engine_name = CharField(max_length=50, index=True)
    f_engine_type = CharField(max_length=10, index=True)
    f_engine_address = JSONField()
    f_cores = IntegerField(index=True)
    f_memory = IntegerField(index=True)  # MB
    f_remaining_cores = IntegerField(index=True)
    f_remaining_memory = IntegerField(index=True)  # MB
    f_nodes = IntegerField(index=True)
    f_create_time = BigIntegerField()
    f_update_time = BigIntegerField(null=True)

    class Meta:
        db_table = "t_engine_registry"
        primary_key = CompositeKey('f_engine_name', 'f_engine_type')


class DBQueue(DataBaseModel):
    f_job_id = CharField(max_length=25, primary_key=True)
    f_job_status = CharField(max_length=50, index=True)
    f_initiator_role = CharField(max_length=50, index=True)
    f_initiator_party_id = CharField(max_length=50, index=True, default=-1)
    f_create_time = BigIntegerField()
    f_update_time = BigIntegerField(null=True)
    f_tag = CharField(max_length=50, null=True, index=True, default='')

    class Meta:
        db_table = "t_queue"
[ "fate_flow.settings.DATABASE.copy", "fate_flow.entity.runtime_config.RuntimeConfig.init_config", "fate_flow.settings.stat_logger.info" ]
[((1261, 1276), 'fate_arch.common.log.getLogger', 'log.getLogger', ([], {}), '()\n', (1274, 1276), False, 'from fate_arch.common import log\n'), ((2311, 2346), 'os.path.realpath', 'os.path.realpath', (['__main__.__file__'], {}), '(__main__.__file__)\n', (2327, 2346), False, 'import os\n'), ((2909, 2967), 'inspect.getmembers', 'inspect.getmembers', (['sys.modules[__name__]', 'inspect.isclass'], {}), '(sys.modules[__name__], inspect.isclass)\n', (2927, 2967), False, 'import inspect\n'), ((3496, 3520), 'peewee.CharField', 'CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (3505, 3520), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((3534, 3582), 'peewee.CharField', 'CharField', ([], {'max_length': '(500)', 'null': '(True)', 'default': '""""""'}), "(max_length=500, null=True, default='')\n", (3543, 3582), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((3603, 3635), 'peewee.TextField', 'TextField', ([], {'null': '(True)', 'default': '""""""'}), "(null=True, default='')\n", (3612, 3635), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((3648, 3707), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'null': '(True)', 'index': '(True)', 'default': '""""""'}), "(max_length=50, null=True, index=True, default='')\n", (3657, 3707), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((3720, 3731), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {}), '()\n', (3729, 3731), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((3753, 3764), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {}), '()\n', (3762, 3764), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((3792, 3812), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {'null': '(True)'}), '(null=True)\n', (3801, 3812), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((3827, 3838), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {}), '()\n', (3836, 3838), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((3857, 3871), 'peewee.IntegerField', 'IntegerField', ([], {}), '()\n', (3869, 3871), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((3895, 3931), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (3904, 3931), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((3959, 4007), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)', 'default': '(-1)'}), '(max_length=50, index=True, default=-1)\n', (3968, 4007), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4023, 4047), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (4032, 4047), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4092, 4128), 
'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (4101, 4128), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4146, 4182), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (4155, 4182), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4204, 4254), 'peewee.BooleanField', 'BooleanField', ([], {'null': '(True)', 'index': '(True)', 'default': '(False)'}), '(null=True, index=True, default=False)\n', (4216, 4254), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4272, 4306), 'peewee.IntegerField', 'IntegerField', ([], {'null': '(True)', 'default': '(0)'}), '(null=True, default=0)\n', (4284, 4306), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4328, 4375), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'null': '(True)', 'index': '(True)'}), '(max_length=50, null=True, index=True)\n', (4337, 4375), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4396, 4443), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'null': '(True)', 'index': '(True)'}), '(max_length=10, null=True, index=True)\n', (4405, 4443), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4458, 4493), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)', 'default': '(0)'}), '(index=True, default=0)\n', (4470, 4493), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4509, 4544), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)', 'default': '(0)'}), '(index=True, default=0)\n', (4521, 4544), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4575, 4610), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)', 'default': '(0)'}), '(index=True, default=0)\n', (4587, 4610), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4636, 4671), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)', 'default': '(0)'}), '(index=True, default=0)\n', (4648, 4671), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4702, 4741), 'peewee.BooleanField', 'BooleanField', ([], {'index': '(True)', 'default': '(False)'}), '(index=True, default=False)\n', (4714, 4741), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4770, 4796), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (4785, 4796), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4826, 4852), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (4841, 4852), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4874, 4891), 
'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (4889, 4891), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4912, 4938), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (4927, 4938), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((4958, 4984), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (4973, 4984), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5002, 5028), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (5017, 5028), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5045, 5071), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (5060, 5071), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5270, 5294), 'peewee.CharField', 'CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (5279, 5294), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5318, 5329), 'peewee.TextField', 'TextField', ([], {}), '()\n', (5327, 5329), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5346, 5371), 'peewee.CharField', 'CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (5355, 5371), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5393, 5410), 'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (5408, 5410), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5434, 5470), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (5443, 5470), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5498, 5546), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)', 'default': '(-1)'}), '(max_length=50, index=True, default=-1)\n', (5507, 5546), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5570, 5606), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (5579, 5606), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5645, 5681), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (5654, 5681), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5697, 5721), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (5706, 5721), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5766, 5802), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, 
index=True)\n', (5775, 5802), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5820, 5856), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (5829, 5856), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5883, 5933), 'peewee.BooleanField', 'BooleanField', ([], {'null': '(True)', 'index': '(True)', 'default': '(False)'}), '(null=True, index=True, default=False)\n', (5895, 5933), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((5949, 5985), 'peewee.CharField', 'CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (5958, 5985), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6002, 6025), 'peewee.IntegerField', 'IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (6014, 6025), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6047, 6071), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (6056, 6071), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6092, 6109), 'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (6107, 6109), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6130, 6156), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (6145, 6156), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6176, 6202), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (6191, 6202), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6220, 6246), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (6235, 6246), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6263, 6289), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (6278, 6289), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7257, 7287), 'peewee.BigAutoField', 'BigAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (7269, 7287), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7303, 7327), 'peewee.CharField', 'CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (7312, 7327), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7351, 7362), 'peewee.TextField', 'TextField', ([], {}), '()\n', (7360, 7362), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7379, 7415), 'peewee.CharField', 'CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (7388, 7415), False, 'from peewee import CharField, 
IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7437, 7463), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (7452, 7463), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7477, 7513), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (7486, 7513), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7531, 7567), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (7540, 7567), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7593, 7630), 'peewee.CharField', 'CharField', ([], {'max_length': '(180)', 'index': '(True)'}), '(max_length=180, index=True)\n', (7602, 7630), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7651, 7688), 'peewee.CharField', 'CharField', ([], {'max_length': '(180)', 'index': '(True)'}), '(max_length=180, index=True)\n', (7660, 7688), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7701, 7726), 'peewee.CharField', 'CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (7710, 7726), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7741, 7752), 'peewee.TextField', 'TextField', ([], {}), '()\n', (7750, 7752), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7766, 7790), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)'}), '(index=True)\n', (7778, 7790), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7835, 7852), 'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (7850, 7852), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((7873, 7899), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (7888, 7899), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((8930, 8954), 'peewee.CharField', 'CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (8939, 8954), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((8978, 8989), 'peewee.TextField', 'TextField', ([], {}), '()\n', (8987, 8989), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9006, 9042), 'peewee.CharField', 'CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (9015, 9042), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9064, 9090), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (9079, 9090), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9109, 9133), 'peewee.CharField', 
'CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (9118, 9133), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9178, 9214), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (9187, 9214), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9232, 9268), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (9241, 9268), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9288, 9324), 'peewee.CharField', 'CharField', ([], {'max_length': '(500)', 'null': '(True)'}), '(max_length=500, null=True)\n', (9297, 9324), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9349, 9385), 'peewee.CharField', 'CharField', ([], {'max_length': '(500)', 'null': '(True)'}), '(max_length=500, null=True)\n', (9358, 9385), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9406, 9423), 'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (9421, 9423), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9444, 9470), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (9459, 9470), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9491, 9523), 'peewee.TextField', 'TextField', ([], {'null': '(True)', 'default': '""""""'}), "(null=True, default='')\n", (9500, 9523), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9584, 9614), 'peewee.BigAutoField', 'BigAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (9596, 9614), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9628, 9664), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (9637, 9664), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9682, 9718), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (9691, 9718), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9733, 9744), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {}), '()\n', (9742, 9744), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((9760, 9784), 'peewee.CharField', 'CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (9769, 9784), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9802, 9839), 'peewee.CharField', 'CharField', ([], {'max_length': '(100)', 'index': '(True)'}), '(max_length=100, index=True)\n', (9811, 9839), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9862, 9899), 
'peewee.CharField', 'CharField', ([], {'max_length': '(100)', 'index': '(True)'}), '(max_length=100, index=True)\n', (9871, 9899), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9921, 9944), 'peewee.IntegerField', 'IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (9933, 9944), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((9958, 9984), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (9973, 9984), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10005, 10031), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (10020, 10031), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10052, 10078), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (10067, 10078), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10099, 10131), 'peewee.TextField', 'TextField', ([], {'null': '(True)', 'default': '""""""'}), "(null=True, default='')\n", (10108, 10131), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10155, 10191), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (10164, 10191), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10219, 10267), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)', 'default': '(-1)'}), '(max_length=50, index=True, default=-1)\n', (10228, 10267), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10289, 10300), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {}), '()\n', (10298, 10300), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((10319, 10333), 'peewee.IntegerField', 'IntegerField', ([], {}), '()\n', (10331, 10333), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10346, 10357), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {}), '()\n', (10355, 10357), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((10385, 10406), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {'default': '{}'}), '(default={})\n', (10394, 10406), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((10424, 10447), 'peewee.IntegerField', 'IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (10436, 10447), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10467, 10491), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (10476, 10491), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10604, 10634), 'peewee.BigAutoField', 'BigAutoField', ([], {'primary_key': '(True)'}), 
'(primary_key=True)\n', (10616, 10634), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10648, 10675), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(False)'}), '(null=False)\n', (10663, 10675), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10689, 10716), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(False)'}), '(null=False)\n', (10704, 10716), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10806, 10836), 'peewee.BigAutoField', 'BigAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (10818, 10836), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10850, 10900), 'peewee.CharField', 'CharField', ([], {'max_length': '(100)', 'index': '(True)', 'unique': '(True)'}), '(max_length=100, index=True, unique=True)\n', (10859, 10900), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((10914, 10934), 'peewee.TextField', 'TextField', ([], {'null': '(True)'}), '(null=True)\n', (10923, 10934), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((11908, 11938), 'peewee.BigAutoField', 'BigAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (11920, 11938), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((11954, 11978), 'peewee.CharField', 'CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (11963, 11978), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((11992, 12028), 'peewee.CharField', 'CharField', ([], {'max_length': '(25)', 'index': '(True)'}), '(max_length=25, index=True)\n', (12001, 12028), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12046, 12082), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (12055, 12082), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12106, 12117), 'peewee.TextField', 'TextField', ([], {}), '()\n', (12115, 12117), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12134, 12169), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'null': '(True)'}), '(max_length=50, null=True)\n', (12143, 12169), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12191, 12226), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'null': '(True)'}), '(max_length=50, null=True)\n', (12200, 12226), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12243, 12258), 'fate_arch.storage.metastore.base_model.LongTextField', 'LongTextField', ([], {}), '()\n', (12256, 12258), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((12279, 12305), 'peewee.BigIntegerField', 'BigIntegerField', ([], 
{'default': '(0)'}), '(default=0)\n', (12294, 12305), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12326, 12352), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (12341, 12352), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12418, 12466), 'peewee.CharField', 'CharField', ([], {'max_length': '(20)', 'null': '(False)', 'index': '(True)'}), '(max_length=20, null=False, index=True)\n', (12427, 12466), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12492, 12539), 'peewee.CharField', 'CharField', ([], {'max_length': '(20)', 'null': '(True)', 'index': '(True)'}), '(max_length=20, null=True, index=True)\n', (12501, 12539), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12563, 12610), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)', 'null': '(True)'}), '(max_length=50, index=True, null=True)\n', (12572, 12610), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12638, 12685), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)', 'null': '(True)'}), '(max_length=10, index=True, null=True)\n', (12647, 12685), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12705, 12740), 'peewee.CharField', 'CharField', ([], {'max_length': '(20)', 'null': '(True)'}), '(max_length=20, null=True)\n', (12714, 12740), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12758, 12795), 'peewee.CharField', 'CharField', ([], {'max_length': '(100)', 'index': '(True)'}), '(max_length=100, index=True)\n', (12767, 12795), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((12818, 12855), 'peewee.CharField', 'CharField', ([], {'max_length': '(100)', 'index': '(True)'}), '(max_length=100, index=True)\n', (12827, 12855), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13105, 13141), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (13114, 13141), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13162, 13198), 'peewee.CharField', 'CharField', ([], {'max_length': '(10)', 'index': '(True)'}), '(max_length=10, index=True)\n', (13171, 13198), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13222, 13233), 'fate_arch.storage.metastore.base_model.JSONField', 'JSONField', ([], {}), '()\n', (13231, 13233), False, 'from fate_arch.storage.metastore.base_model import JSONField, BaseModel, LongTextField\n'), ((13248, 13272), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)'}), '(index=True)\n', (13260, 13272), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13288, 13312), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)'}), 
'(index=True)\n', (13300, 13312), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13343, 13367), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)'}), '(index=True)\n', (13355, 13367), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13393, 13417), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)'}), '(index=True)\n', (13405, 13417), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13437, 13461), 'peewee.IntegerField', 'IntegerField', ([], {'index': '(True)'}), '(index=True)\n', (13449, 13461), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13482, 13499), 'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (13497, 13499), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13520, 13546), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (13535, 13546), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13719, 13761), 'peewee.CharField', 'CharField', ([], {'max_length': '(25)', 'primary_key': '(True)'}), '(max_length=25, primary_key=True)\n', (13728, 13761), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13781, 13817), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (13790, 13817), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13841, 13877), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)'}), '(max_length=50, index=True)\n', (13850, 13877), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13905, 13953), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'index': '(True)', 'default': '(-1)'}), '(max_length=50, index=True, default=-1)\n', (13914, 13953), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13974, 13991), 'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (13989, 13991), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((14012, 14038), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (14027, 14038), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((14051, 14110), 'peewee.CharField', 'CharField', ([], {'max_length': '(50)', 'null': '(True)', 'index': '(True)', 'default': '""""""'}), "(max_length=50, null=True, index=True, default='')\n", (14060, 14110), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((1619, 1634), 'fate_flow.settings.DATABASE.copy', 'DATABASE.copy', ([], {}), '()\n', (1632, 1634), False, 'from fate_flow.settings import DATABASE, WORK_MODE, stat_logger\n'), ((5138, 5186), 'peewee.CompositeKey', 'CompositeKey', (['"""f_job_id"""', 
'"""f_role"""', '"""f_party_id"""'], {}), "('f_job_id', 'f_role', 'f_party_id')\n", (5150, 5186), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6357, 6436), 'peewee.CompositeKey', 'CompositeKey', (['"""f_job_id"""', '"""f_task_id"""', '"""f_task_version"""', '"""f_role"""', '"""f_party_id"""'], {}), "('f_job_id', 'f_task_id', 'f_task_version', 'f_role', 'f_party_id')\n", (6369, 6436), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((13625, 13671), 'peewee.CompositeKey', 'CompositeKey', (['"""f_engine_name"""', '"""f_engine_type"""'], {}), "('f_engine_name', 'f_engine_type')\n", (13637, 13671), False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((1765, 1800), 'playhouse.apsw_ext.APSWDatabase', 'APSWDatabase', (['"""fate_flow_sqlite.db"""'], {}), "('fate_flow_sqlite.db')\n", (1777, 1800), False, 'from playhouse.apsw_ext import APSWDatabase\n'), ((1813, 1863), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'USE_LOCAL_DATABASE': '(True)'}), '(USE_LOCAL_DATABASE=True)\n', (1838, 1863), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((1876, 1948), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""init sqlite database on standalone mode successfully"""'], {}), "('init sqlite database on standalone mode successfully')\n", (1892, 1948), False, 'from fate_flow.settings import DATABASE, WORK_MODE, stat_logger\n'), ((10979, 10998), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (10996, 10998), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((11044, 11063), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (11061, 11063), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((12900, 12919), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (12917, 12919), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((12965, 12984), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (12982, 12984), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((1383, 1394), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1392, 1394), False, 'import os\n'), ((2032, 2079), 'playhouse.pool.PooledMySQLDatabase', 'PooledMySQLDatabase', (['db_name'], {}), '(db_name, **database_config)\n', (2051, 2079), False, 'from playhouse.pool import PooledMySQLDatabase\n'), ((2092, 2160), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""init mysql database on cluster mode successfully"""'], {}), "('init mysql database on cluster mode successfully')\n", (2108, 2160), False, 'from fate_flow.settings import DATABASE, WORK_MODE, stat_logger\n'), ((2173, 2224), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'USE_LOCAL_DATABASE': '(False)'}), '(USE_LOCAL_DATABASE=False)\n', (2198, 2224), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((8504, 8602), 'peewee.CompositeKey', 'CompositeKey', (['"""f_job_id"""', '"""f_task_id"""', '"""f_task_version"""', '"""f_data_name"""', '"""f_role"""', '"""f_party_id"""'], {}), "('f_job_id', 'f_task_id', 'f_task_version', 'f_data_name',\n 'f_role', 'f_party_id')\n", (8516, 8602), 
False, 'from peewee import CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField, BooleanField\n'), ((6668, 6691), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6689, 6691), False, 'import datetime\n'), ((8139, 8162), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8160, 8162), False, 'import datetime\n'), ((11343, 11366), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11364, 11366), False, 'import datetime\n')]
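The tuples above record peewee field constructors (CharField, IntegerField, BigIntegerField) and CompositeKey declarations extracted from FATE Flow's database model definitions, alongside the APSWDatabase/PooledMySQLDatabase initialisation calls. As a minimal, self-contained illustration of how such constructors are normally composed (the table and field names below are invented for illustration and not taken from the record), a peewee model built from the same calls looks like this:

from peewee import (BigIntegerField, CharField, CompositeKey, IntegerField,
                    Model, SqliteDatabase)

# In-memory database purely for this sketch; FATE Flow switches between
# APSWDatabase (standalone) and PooledMySQLDatabase (cluster) as recorded above.
db = SqliteDatabase(":memory:")


class ExampleTaskRecord(Model):
    # Field choices mirror the recorded constructor calls; the names are invented.
    f_job_id = CharField(max_length=25)
    f_role = CharField(max_length=50, index=True)
    f_party_id = IntegerField(index=True)
    f_create_time = BigIntegerField()
    f_update_time = BigIntegerField(null=True)

    class Meta:
        database = db
        # Composite primary key, as in the recorded CompositeKey(...) calls.
        primary_key = CompositeKey("f_job_id", "f_role", "f_party_id")


db.create_tables([ExampleTaskRecord])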
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import importlib import inspect import os import shutil import base64 from ruamel import yaml from os.path import join, getsize from fate_arch.common import file_utils from fate_arch.protobuf.python import default_empty_fill_pb2 from fate_flow.settings import stat_logger, TEMP_DIRECTORY class PipelinedModel(object): def __init__(self, model_id, model_version): """ Support operations on FATE PipelinedModels TODO: add lock :param model_id: the model id stored at the local party. :param model_version: the model version. """ self.model_id = model_id self.model_version = model_version self.model_path = os.path.join(file_utils.get_project_base_directory(), "model_local_cache", model_id, model_version) self.define_proto_path = os.path.join(self.model_path, "define", "proto") self.define_meta_path = os.path.join(self.model_path, "define", "define_meta.yaml") self.variables_index_path = os.path.join(self.model_path, "variables", "index") self.variables_data_path = os.path.join(self.model_path, "variables", "data") self.default_archive_format = "zip" def create_pipelined_model(self): if os.path.exists(self.model_path): raise Exception("Model creation failed because it has already been created, model cache path is {}".format( self.model_path )) else: os.makedirs(self.model_path, exist_ok=False) for path in [self.variables_index_path, self.variables_data_path]: os.makedirs(path, exist_ok=False) shutil.copytree(os.path.join(file_utils.get_python_base_directory(), "federatedml", "protobuf", "proto"), self.define_proto_path) with open(self.define_meta_path, "w", encoding="utf-8") as fw: yaml.dump({"describe": "This is the model definition meta"}, fw, Dumper=yaml.RoundTripDumper) def save_component_model(self, component_name, component_module_name, model_alias, model_buffers): model_proto_index = {} component_model_storage_path = os.path.join(self.variables_data_path, component_name, model_alias) os.makedirs(component_model_storage_path, exist_ok=True) for model_name, buffer_object in model_buffers.items(): storage_path = os.path.join(component_model_storage_path, model_name) buffer_object_serialized_string = buffer_object.SerializeToString() if not buffer_object_serialized_string: fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage() fill_message.flag = 'set' buffer_object_serialized_string = fill_message.SerializeToString() with open(storage_path, "wb") as fw: fw.write(buffer_object_serialized_string) model_proto_index[model_name] = type(buffer_object).__name__ # index of model name and proto buffer class name stat_logger.info("Save {} {} {} buffer".format(component_name, model_alias, model_name)) self.update_component_meta(component_name=component_name, component_module_name=component_module_name, model_alias=model_alias, model_proto_index=model_proto_index) stat_logger.info("Save {} {} successfully".format(component_name, model_alias)) def read_component_model(self, component_name, model_alias): 
component_model_storage_path = os.path.join(self.variables_data_path, component_name, model_alias) model_proto_index = self.get_model_proto_index(component_name=component_name, model_alias=model_alias) model_buffers = {} for model_name, buffer_name in model_proto_index.items(): with open(os.path.join(component_model_storage_path, model_name), "rb") as fr: buffer_object_serialized_string = fr.read() model_buffers[model_name] = self.parse_proto_object(buffer_name=buffer_name, buffer_object_serialized_string=buffer_object_serialized_string) return model_buffers def collect_models(self, in_bytes=False, b64encode=True): model_buffers = {} with open(self.define_meta_path, "r", encoding="utf-8") as fr: define_index = yaml.safe_load(fr) for component_name in define_index.get("model_proto", {}).keys(): for model_alias, model_proto_index in define_index["model_proto"][component_name].items(): component_model_storage_path = os.path.join(self.variables_data_path, component_name, model_alias) for model_name, buffer_name in model_proto_index.items(): with open(os.path.join(component_model_storage_path, model_name), "rb") as fr: buffer_object_serialized_string = fr.read() if not in_bytes: model_buffers[model_name] = self.parse_proto_object(buffer_name=buffer_name, buffer_object_serialized_string=buffer_object_serialized_string) else: if b64encode: buffer_object_serialized_string = base64.b64encode(buffer_object_serialized_string).decode() model_buffers["{}.{}:{}".format(component_name, model_alias, model_name)] = buffer_object_serialized_string return model_buffers def set_model_path(self): self.model_path = os.path.join(file_utils.get_project_base_directory(), "model_local_cache", self.model_id, self.model_version) def exists(self): return os.path.exists(self.model_path) def save_pipeline(self, pipelined_buffer_object): buffer_object_serialized_string = pipelined_buffer_object.SerializeToString() if not buffer_object_serialized_string: fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage() fill_message.flag = 'set' buffer_object_serialized_string = fill_message.SerializeToString() with open(os.path.join(self.model_path, "pipeline.pb"), "wb") as fw: fw.write(buffer_object_serialized_string) def packaging_model(self): if not self.exists(): raise Exception("Can not found {} {} model local cache".format(self.model_id, self.model_version)) archive_file_path = shutil.make_archive(base_name=self.archive_model_base_path(), format=self.default_archive_format, root_dir=self.model_path) stat_logger.info("Make model {} {} archive on {} successfully".format(self.model_id, self.model_version, archive_file_path)) return archive_file_path def unpack_model(self, archive_file_path: str): if os.path.exists(self.model_path): raise Exception("Model {} {} local cache already existed".format(self.model_id, self.model_version)) shutil.unpack_archive(archive_file_path, self.model_path) stat_logger.info("Unpack model archive to {}".format(self.model_path)) def update_component_meta(self, component_name, component_module_name, model_alias, model_proto_index): """ update meta info yaml TODO: with lock :param component_name: :param component_module_name: :param model_alias: :param model_proto_index: :return: """ with open(self.define_meta_path, "r", encoding="utf-8") as fr: define_index = yaml.safe_load(fr) with open(self.define_meta_path, "w", encoding="utf-8") as fw: define_index["component_define"] = define_index.get("component_define", {}) define_index["component_define"][component_name] = 
define_index["component_define"].get(component_name, {}) define_index["component_define"][component_name].update({"module_name": component_module_name}) define_index["model_proto"] = define_index.get("model_proto", {}) define_index["model_proto"][component_name] = define_index["model_proto"].get(component_name, {}) define_index["model_proto"][component_name][model_alias] = define_index["model_proto"][component_name].get(model_alias, {}) define_index["model_proto"][component_name][model_alias].update(model_proto_index) yaml.dump(define_index, fw, Dumper=yaml.RoundTripDumper) def get_model_proto_index(self, component_name, model_alias): with open(self.define_meta_path, "r", encoding="utf-8") as fr: define_index = yaml.safe_load(fr) return define_index.get("model_proto", {}).get(component_name, {}).get(model_alias, {}) def get_component_define(self, component_name=None): with open(self.define_meta_path, "r", encoding="utf-8") as fr: define_index = yaml.safe_load(fr) if component_name: return define_index.get("component_define", {}).get(component_name, {}) else: return define_index.get("component_define", {}) def parse_proto_object(self, buffer_name, buffer_object_serialized_string): try: buffer_object = self.get_proto_buffer_class(buffer_name)() except Exception as e: stat_logger.exception("Can not restore proto buffer object", e) raise e try: buffer_object.ParseFromString(buffer_object_serialized_string) stat_logger.info('parse {} proto object normal'.format(type(buffer_object).__name__)) return buffer_object except Exception as e1: try: fill_message = default_empty_fill_pb2.DefaultEmptyFillMessage() fill_message.ParseFromString(buffer_object_serialized_string) buffer_object.ParseFromString(bytes()) stat_logger.info('parse {} proto object with default values'.format(type(buffer_object).__name__)) return buffer_object except Exception as e2: stat_logger.exception(e2) raise e1 @classmethod def get_proto_buffer_class(cls, buffer_name): package_path = os.path.join(file_utils.get_python_base_directory(), 'federatedml', 'protobuf', 'generated') package_python_path = 'federatedml.protobuf.generated' for f in os.listdir(package_path): if f.startswith('.'): continue try: proto_module = importlib.import_module(package_python_path + '.' + f.rstrip('.py')) for name, obj in inspect.getmembers(proto_module): if inspect.isclass(obj) and name == buffer_name: return obj except Exception as e: stat_logger.warning(e) else: return None def archive_model_base_path(self): return os.path.join(TEMP_DIRECTORY, "{}_{}".format(self.model_id, self.model_version)) def archive_model_file_path(self): return "{}.{}".format(self.archive_model_base_path(), self.default_archive_format) def calculate_model_file_size(self): size = 0 for root, dirs, files in os.walk(self.model_path): size += sum([getsize(join(root, name)) for name in files]) return round(size/1024)
[ "fate_flow.settings.stat_logger.warning", "fate_flow.settings.stat_logger.exception" ]
[((1433, 1481), 'os.path.join', 'os.path.join', (['self.model_path', '"""define"""', '"""proto"""'], {}), "(self.model_path, 'define', 'proto')\n", (1445, 1481), False, 'import os\n'), ((1514, 1573), 'os.path.join', 'os.path.join', (['self.model_path', '"""define"""', '"""define_meta.yaml"""'], {}), "(self.model_path, 'define', 'define_meta.yaml')\n", (1526, 1573), False, 'import os\n'), ((1610, 1661), 'os.path.join', 'os.path.join', (['self.model_path', '"""variables"""', '"""index"""'], {}), "(self.model_path, 'variables', 'index')\n", (1622, 1661), False, 'import os\n'), ((1697, 1747), 'os.path.join', 'os.path.join', (['self.model_path', '"""variables"""', '"""data"""'], {}), "(self.model_path, 'variables', 'data')\n", (1709, 1747), False, 'import os\n'), ((1842, 1873), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (1856, 1873), False, 'import os\n'), ((2723, 2790), 'os.path.join', 'os.path.join', (['self.variables_data_path', 'component_name', 'model_alias'], {}), '(self.variables_data_path, component_name, model_alias)\n', (2735, 2790), False, 'import os\n'), ((2799, 2855), 'os.makedirs', 'os.makedirs', (['component_model_storage_path'], {'exist_ok': '(True)'}), '(component_model_storage_path, exist_ok=True)\n', (2810, 2855), False, 'import os\n'), ((4143, 4210), 'os.path.join', 'os.path.join', (['self.variables_data_path', 'component_name', 'model_alias'], {}), '(self.variables_data_path, component_name, model_alias)\n', (4155, 4210), False, 'import os\n'), ((6565, 6596), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (6579, 6596), False, 'import os\n'), ((7821, 7852), 'os.path.exists', 'os.path.exists', (['self.model_path'], {}), '(self.model_path)\n', (7835, 7852), False, 'import os\n'), ((7975, 8032), 'shutil.unpack_archive', 'shutil.unpack_archive', (['archive_file_path', 'self.model_path'], {}), '(archive_file_path, self.model_path)\n', (7996, 8032), False, 'import shutil\n'), ((11391, 11415), 'os.listdir', 'os.listdir', (['package_path'], {}), '(package_path)\n', (11401, 11415), False, 'import os\n'), ((12234, 12258), 'os.walk', 'os.walk', (['self.model_path'], {}), '(self.model_path)\n', (12241, 12258), False, 'import os\n'), ((1313, 1352), 'fate_arch.common.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (1350, 1352), False, 'from fate_arch.common import file_utils\n'), ((2068, 2112), 'os.makedirs', 'os.makedirs', (['self.model_path'], {'exist_ok': '(False)'}), '(self.model_path, exist_ok=False)\n', (2079, 2112), False, 'import os\n'), ((2200, 2233), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(False)'}), '(path, exist_ok=False)\n', (2211, 2233), False, 'import os\n'), ((2455, 2553), 'ruamel.yaml.dump', 'yaml.dump', (["{'describe': 'This is the model definition meta'}", 'fw'], {'Dumper': 'yaml.RoundTripDumper'}), "({'describe': 'This is the model definition meta'}, fw, Dumper=\n yaml.RoundTripDumper)\n", (2464, 2553), False, 'from ruamel import yaml\n'), ((2947, 3001), 'os.path.join', 'os.path.join', (['component_model_storage_path', 'model_name'], {}), '(component_model_storage_path, model_name)\n', (2959, 3001), False, 'import os\n'), ((5064, 5082), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['fr'], {}), '(fr)\n', (5078, 5082), False, 'from ruamel import yaml\n'), ((6391, 6430), 'fate_arch.common.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (6428, 6430), False, 'from 
fate_arch.common import file_utils\n'), ((6813, 6861), 'fate_arch.protobuf.python.default_empty_fill_pb2.DefaultEmptyFillMessage', 'default_empty_fill_pb2.DefaultEmptyFillMessage', ([], {}), '()\n', (6859, 6861), False, 'from fate_arch.protobuf.python import default_empty_fill_pb2\n'), ((8545, 8563), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['fr'], {}), '(fr)\n', (8559, 8563), False, 'from ruamel import yaml\n'), ((9382, 9438), 'ruamel.yaml.dump', 'yaml.dump', (['define_index', 'fw'], {'Dumper': 'yaml.RoundTripDumper'}), '(define_index, fw, Dumper=yaml.RoundTripDumper)\n', (9391, 9438), False, 'from ruamel import yaml\n'), ((9604, 9622), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['fr'], {}), '(fr)\n', (9618, 9622), False, 'from ruamel import yaml\n'), ((9879, 9897), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['fr'], {}), '(fr)\n', (9893, 9897), False, 'from ruamel import yaml\n'), ((11231, 11269), 'fate_arch.common.file_utils.get_python_base_directory', 'file_utils.get_python_base_directory', ([], {}), '()\n', (11267, 11269), False, 'from fate_arch.common import file_utils\n'), ((2271, 2309), 'fate_arch.common.file_utils.get_python_base_directory', 'file_utils.get_python_base_directory', ([], {}), '()\n', (2307, 2309), False, 'from fate_arch.common import file_utils\n'), ((3165, 3213), 'fate_arch.protobuf.python.default_empty_fill_pb2.DefaultEmptyFillMessage', 'default_empty_fill_pb2.DefaultEmptyFillMessage', ([], {}), '()\n', (3211, 3213), False, 'from fate_arch.protobuf.python import default_empty_fill_pb2\n'), ((6997, 7041), 'os.path.join', 'os.path.join', (['self.model_path', '"""pipeline.pb"""'], {}), "(self.model_path, 'pipeline.pb')\n", (7009, 7041), False, 'import os\n'), ((10307, 10370), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['"""Can not restore proto buffer object"""', 'e'], {}), "('Can not restore proto buffer object', e)\n", (10328, 10370), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((11626, 11658), 'inspect.getmembers', 'inspect.getmembers', (['proto_module'], {}), '(proto_module)\n', (11644, 11658), False, 'import inspect\n'), ((4492, 4546), 'os.path.join', 'os.path.join', (['component_model_storage_path', 'model_name'], {}), '(component_model_storage_path, model_name)\n', (4504, 4546), False, 'import os\n'), ((5319, 5386), 'os.path.join', 'os.path.join', (['self.variables_data_path', 'component_name', 'model_alias'], {}), '(self.variables_data_path, component_name, model_alias)\n', (5331, 5386), False, 'import os\n'), ((10690, 10738), 'fate_arch.protobuf.python.default_empty_fill_pb2.DefaultEmptyFillMessage', 'default_empty_fill_pb2.DefaultEmptyFillMessage', ([], {}), '()\n', (10736, 10738), False, 'from fate_arch.protobuf.python import default_empty_fill_pb2\n'), ((11815, 11837), 'fate_flow.settings.stat_logger.warning', 'stat_logger.warning', (['e'], {}), '(e)\n', (11834, 11837), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((11076, 11101), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e2'], {}), '(e2)\n', (11097, 11101), False, 'from fate_flow.settings import stat_logger, TEMP_DIRECTORY\n'), ((11683, 11703), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (11698, 11703), False, 'import inspect\n'), ((12293, 12309), 'os.path.join', 'join', (['root', 'name'], {}), '(root, name)\n', (12297, 12309), False, 'from os.path import join, getsize\n'), ((5499, 5553), 'os.path.join', 'os.path.join', (['component_model_storage_path', 'model_name'], 
{}), '(component_model_storage_path, model_name)\n', (5511, 5553), False, 'import os\n'), ((6093, 6142), 'base64.b64encode', 'base64.b64encode', (['buffer_object_serialized_string'], {}), '(buffer_object_serialized_string)\n', (6109, 6142), False, 'import base64\n')]
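One detail worth calling out in the PipelinedModel record above is get_proto_buffer_class, which resolves a protobuf message class by name by walking every module under federatedml.protobuf.generated with importlib and inspect. The same lookup pattern, reduced to a standard-library sketch that scans an arbitrary package, is shown below; the json package and JSONDecoder are stand-ins chosen only so the snippet runs anywhere, and pkgutil replaces the original os.listdir/rstrip('.py') filename handling.

import importlib
import inspect
import pkgutil


def find_class(package_name: str, class_name: str):
    """Return the first class with the given name found in any module of the package."""
    package = importlib.import_module(package_name)
    for module_info in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(f"{package_name}.{module_info.name}")
        for name, obj in inspect.getmembers(module, inspect.isclass):
            if name == class_name:
                return obj
    return None


# Stand-in demonstration: locate json.decoder.JSONDecoder by scanning the json package.
print(find_class("json", "JSONDecoder"))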
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from flask import Flask, request from fate_flow.settings import stat_logger from fate_flow.utils.api_utils import get_json_result from fate_flow.utils.authentication_utils import modify_permission, PrivilegeAuth manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/grant/privilege', methods=['post']) def grant_permission(): modify_permission(request.json) return get_json_result(retcode=0, retmsg='success') @manager.route('/delete/privilege', methods=['post']) def delete_permission(): modify_permission(request.json, delete=True) return get_json_result(retcode=0, retmsg='success') @manager.route('/query/privilege', methods=['post']) def query_privilege(): privilege_dict = PrivilegeAuth.get_permission_config(request.json.get('src_party_id', "*"), request.json.get('src_role', "*")) data = {'src_party_id': request.json.get('src_party_id', "*"), 'role': request.json.get('src_role', "*"), 'privilege_role': privilege_dict.get('privilege_role', []), 'privilege_command': privilege_dict.get('privilege_command', []), 'privilege_component': privilege_dict.get('privilege_component', [])} if request.json.get("src_user") and request.json.get("dest_user"): data = { "src_user": request.json.get("src_user"), "dest_user": request.json.get("dest_user"), "privilege_dataset": privilege_dict.get('privilege_dataset', {}).get(request.json.get("src_user"), {}).get(request.json.get("dest_user"), []) } return get_json_result(retcode=0, retmsg='success', data=data)
[ "fate_flow.settings.stat_logger.exception", "fate_flow.utils.api_utils.get_json_result", "fate_flow.utils.authentication_utils.modify_permission" ]
[((840, 855), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (845, 855), False, 'from flask import Flask, request\n'), ((919, 943), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (940, 943), False, 'from fate_flow.settings import stat_logger\n'), ((1082, 1113), 'fate_flow.utils.authentication_utils.modify_permission', 'modify_permission', (['request.json'], {}), '(request.json)\n', (1099, 1113), False, 'from fate_flow.utils.authentication_utils import modify_permission, PrivilegeAuth\n'), ((1125, 1169), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (1140, 1169), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1255, 1299), 'fate_flow.utils.authentication_utils.modify_permission', 'modify_permission', (['request.json'], {'delete': '(True)'}), '(request.json, delete=True)\n', (1272, 1299), False, 'from fate_flow.utils.authentication_utils import modify_permission, PrivilegeAuth\n'), ((1311, 1355), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (1326, 1355), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2292, 2347), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'data'}), "(retcode=0, retmsg='success', data=data)\n", (2307, 2347), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1491, 1528), 'flask.request.json.get', 'request.json.get', (['"""src_party_id"""', '"""*"""'], {}), "('src_party_id', '*')\n", (1507, 1528), False, 'from flask import Flask, request\n'), ((1530, 1563), 'flask.request.json.get', 'request.json.get', (['"""src_role"""', '"""*"""'], {}), "('src_role', '*')\n", (1546, 1563), False, 'from flask import Flask, request\n'), ((1593, 1630), 'flask.request.json.get', 'request.json.get', (['"""src_party_id"""', '"""*"""'], {}), "('src_party_id', '*')\n", (1609, 1630), False, 'from flask import Flask, request\n'), ((1652, 1685), 'flask.request.json.get', 'request.json.get', (['"""src_role"""', '"""*"""'], {}), "('src_role', '*')\n", (1668, 1685), False, 'from flask import Flask, request\n'), ((1926, 1954), 'flask.request.json.get', 'request.json.get', (['"""src_user"""'], {}), "('src_user')\n", (1942, 1954), False, 'from flask import Flask, request\n'), ((1959, 1988), 'flask.request.json.get', 'request.json.get', (['"""dest_user"""'], {}), "('dest_user')\n", (1975, 1988), False, 'from flask import Flask, request\n'), ((2031, 2059), 'flask.request.json.get', 'request.json.get', (['"""src_user"""'], {}), "('src_user')\n", (2047, 2059), False, 'from flask import Flask, request\n'), ((2086, 2115), 'flask.request.json.get', 'request.json.get', (['"""dest_user"""'], {}), "('dest_user')\n", (2102, 2115), False, 'from flask import Flask, request\n'), ((2236, 2265), 'flask.request.json.get', 'request.json.get', (['"""dest_user"""'], {}), "('dest_user')\n", (2252, 2265), False, 'from flask import Flask, request\n'), ((2198, 2226), 'flask.request.json.get', 'request.json.get', (['"""src_user"""'], {}), "('src_user')\n", (2214, 2226), False, 'from flask import Flask, request\n')]
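The record above registers three POST routes on the module's Flask app (manager): /grant/privilege, /delete/privilege and /query/privilege, all driven by the request's JSON body. A hedged client-side sketch of calling the query endpoint follows; the host, port and /v1/permission URL prefix are assumptions about how the app is mounted in a running FATE Flow server and will differ per deployment.

import requests

# Assumed server address and URL prefix; adjust to your FATE Flow deployment.
BASE_URL = "http://127.0.0.1:9380/v1/permission"

response = requests.post(
    f"{BASE_URL}/query/privilege",
    json={"src_party_id": "9999", "src_role": "guest"},
)
# Expected shape per get_json_result above: {"retcode": 0, "retmsg": "success", "data": {...}}
print(response.json())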
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import importlib import os import traceback from fate_arch.common import file_utils, log, EngineType, profile from fate_arch.common.base_utils import current_timestamp, timestamp_to_date from fate_arch.common.log import schedule_logger, getLogger from fate_arch import session from fate_flow.entity.types import TaskStatus, ProcessRole, RunParameters from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.operation import Tracker from fate_arch import storage from fate_flow.utils import job_utils, schedule_utils from fate_flow.scheduling_apps.client import ControllerClient from fate_flow.scheduling_apps.client import TrackerClient from fate_flow.db.db_models import TrackingOutputDataInfo, fill_db_model_object from fate_arch.computing import ComputingEngine LOGGER = getLogger() class TaskExecutor(object): REPORT_TO_DRIVER_FIELDS = ["run_ip", "run_pid", "party_status", "update_time", "end_time", "elapsed"] @classmethod def run_task(cls): task_info = {} try: parser = argparse.ArgumentParser() parser.add_argument('-j', '--job_id', required=True, type=str, help="job id") parser.add_argument('-n', '--component_name', required=True, type=str, help="component name") parser.add_argument('-t', '--task_id', required=True, type=str, help="task id") parser.add_argument('-v', '--task_version', required=True, type=int, help="task version") parser.add_argument('-r', '--role', required=True, type=str, help="role") parser.add_argument('-p', '--party_id', required=True, type=int, help="party id") parser.add_argument('-c', '--config', required=True, type=str, help="task parameters") parser.add_argument('--run_ip', help="run ip", type=str) parser.add_argument('--job_server', help="job server", type=str) args = parser.parse_args() schedule_logger(args.job_id).info('enter task process') schedule_logger(args.job_id).info(args) # init function args if args.job_server: RuntimeConfig.init_config(JOB_SERVER_HOST=args.job_server.split(':')[0], HTTP_PORT=args.job_server.split(':')[1]) RuntimeConfig.set_process_role(ProcessRole.EXECUTOR) job_id = args.job_id component_name = args.component_name task_id = args.task_id task_version = args.task_version role = args.role party_id = args.party_id executor_pid = os.getpid() task_info.update({ "job_id": job_id, "component_name": component_name, "task_id": task_id, "task_version": task_version, "role": role, "party_id": party_id, "run_ip": args.run_ip, "run_pid": executor_pid }) start_time = current_timestamp() job_conf = job_utils.get_job_conf(job_id, role) job_dsl = job_conf["job_dsl_path"] job_runtime_conf = job_conf["job_runtime_conf_path"] dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job_dsl, runtime_conf=job_runtime_conf, train_runtime_conf=job_conf["train_runtime_conf_path"], pipeline_dsl=job_conf["pipeline_dsl_path"] ) party_index = job_runtime_conf["role"][role].index(party_id) job_args_on_party = TaskExecutor.get_job_args_on_party(dsl_parser, 
job_runtime_conf, role, party_id) component = dsl_parser.get_component_info(component_name=component_name) component_parameters = component.get_role_parameters() component_parameters_on_party = component_parameters[role][ party_index] if role in component_parameters else {} module_name = component.get_module() task_input_dsl = component.get_input() task_output_dsl = component.get_output() component_parameters_on_party['output_data_name'] = task_output_dsl.get('data') task_parameters = RunParameters(**file_utils.load_json_conf(args.config)) job_parameters = task_parameters if job_parameters.assistant_role: TaskExecutor.monkey_patch() except Exception as e: traceback.print_exc() schedule_logger().exception(e) task_info["party_status"] = TaskStatus.FAILED return try: job_log_dir = os.path.join(job_utils.get_job_log_directory(job_id=job_id), role, str(party_id)) task_log_dir = os.path.join(job_log_dir, component_name) log.LoggerFactory.set_directory(directory=task_log_dir, parent_log_dir=job_log_dir, append_to_parent_log=True, force=True) tracker = Tracker(job_id=job_id, role=role, party_id=party_id, component_name=component_name, task_id=task_id, task_version=task_version, model_id=job_parameters.model_id, model_version=job_parameters.model_version, component_module_name=module_name, job_parameters=job_parameters) tracker_client = TrackerClient(job_id=job_id, role=role, party_id=party_id, component_name=component_name, task_id=task_id, task_version=task_version, model_id=job_parameters.model_id, model_version=job_parameters.model_version, component_module_name=module_name, job_parameters=job_parameters) run_class_paths = component_parameters_on_party.get('CodePath').split('/') run_class_package = '.'.join(run_class_paths[:-2]) + '.' + run_class_paths[-2].replace('.py', '') run_class_name = run_class_paths[-1] task_info["party_status"] = TaskStatus.RUNNING cls.report_task_update_to_driver(task_info=task_info) # init environment, process is shared globally RuntimeConfig.init_config(WORK_MODE=job_parameters.work_mode, COMPUTING_ENGINE=job_parameters.computing_engine, FEDERATION_ENGINE=job_parameters.federation_engine, FEDERATED_MODE=job_parameters.federated_mode) if RuntimeConfig.COMPUTING_ENGINE == ComputingEngine.EGGROLL: session_options = task_parameters.eggroll_run.copy() else: session_options = {} sess = session.Session(computing_type=job_parameters.computing_engine, federation_type=job_parameters.federation_engine) computing_session_id = job_utils.generate_session_id(task_id, task_version, role, party_id) sess.init_computing(computing_session_id=computing_session_id, options=session_options) federation_session_id = job_utils.generate_task_version_id(task_id, task_version) component_parameters_on_party["job_parameters"] = job_parameters.to_dict() sess.init_federation(federation_session_id=federation_session_id, runtime_conf=component_parameters_on_party, service_conf=job_parameters.engines_address.get(EngineType.FEDERATION, {})) sess.as_default() schedule_logger().info('Run {} {} {} {} {} task'.format(job_id, component_name, task_id, role, party_id)) schedule_logger().info("Component parameters on party {}".format(component_parameters_on_party)) schedule_logger().info("Task input dsl {}".format(task_input_dsl)) task_run_args = cls.get_task_run_args(job_id=job_id, role=role, party_id=party_id, task_id=task_id, task_version=task_version, job_args=job_args_on_party, job_parameters=job_parameters, task_parameters=task_parameters, input_dsl=task_input_dsl, ) if module_name in {"Upload", 
"Download", "Reader", "Writer"}: task_run_args["job_parameters"] = job_parameters run_object = getattr(importlib.import_module(run_class_package), run_class_name)() run_object.set_tracker(tracker=tracker_client) run_object.set_task_version_id(task_version_id=job_utils.generate_task_version_id(task_id, task_version)) # add profile logs profile.profile_start() run_object.run(component_parameters_on_party, task_run_args) profile.profile_ends() output_data = run_object.save_data() if not isinstance(output_data, list): output_data = [output_data] for index in range(0, len(output_data)): data_name = task_output_dsl.get('data')[index] if task_output_dsl.get('data') else '{}'.format(index) persistent_table_namespace, persistent_table_name = tracker.save_output_data( computing_table=output_data[index], output_storage_engine=job_parameters.storage_engine, output_storage_address=job_parameters.engines_address.get(EngineType.STORAGE, {})) if persistent_table_namespace and persistent_table_name: tracker.log_output_data_info(data_name=data_name, table_namespace=persistent_table_namespace, table_name=persistent_table_name) output_model = run_object.export_model() # There is only one model output at the current dsl version. tracker.save_output_model(output_model, task_output_dsl['model'][0] if task_output_dsl.get('model') else 'default') task_info["party_status"] = TaskStatus.SUCCESS except Exception as e: task_info["party_status"] = TaskStatus.FAILED schedule_logger().exception(e) finally: try: task_info["end_time"] = current_timestamp() task_info["elapsed"] = task_info["end_time"] - start_time cls.report_task_update_to_driver(task_info=task_info) except Exception as e: task_info["party_status"] = TaskStatus.FAILED traceback.print_exc() schedule_logger().exception(e) schedule_logger().info( 'task {} {} {} start time: {}'.format(task_id, role, party_id, timestamp_to_date(start_time))) schedule_logger().info( 'task {} {} {} end time: {}'.format(task_id, role, party_id, timestamp_to_date(task_info["end_time"]))) schedule_logger().info( 'task {} {} {} takes {}s'.format(task_id, role, party_id, int(task_info["elapsed"]) / 1000)) schedule_logger().info( 'Finish {} {} {} {} {} {} task {}'.format(job_id, component_name, task_id, task_version, role, party_id, task_info["party_status"])) print('Finish {} {} {} {} {} {} task {}'.format(job_id, component_name, task_id, task_version, role, party_id, task_info["party_status"])) return task_info @classmethod def get_job_args_on_party(cls, dsl_parser, job_runtime_conf, role, party_id): party_index = job_runtime_conf["role"][role].index(int(party_id)) job_args = dsl_parser.get_args_input() job_args_on_party = job_args[role][party_index].get('args') if role in job_args else {} return job_args_on_party @classmethod def get_task_run_args(cls, job_id, role, party_id, task_id, task_version, job_args, job_parameters: RunParameters, task_parameters: RunParameters, input_dsl, filter_type=None, filter_attr=None, get_input_table=False): task_run_args = {} input_table = {} if 'idmapping' in role: return {} for input_type, input_detail in input_dsl.items(): if filter_type and input_type not in filter_type: continue if input_type == 'data': this_type_args = task_run_args[input_type] = task_run_args.get(input_type, {}) for data_type, data_list in input_detail.items(): data_dict = {} for data_key in data_list: data_key_item = data_key.split('.') data_dict[data_key_item[0]] = {data_type: []} for data_key in data_list: data_key_item = data_key.split('.') search_component_name, 
search_data_name = data_key_item[0], data_key_item[1] storage_table_meta = None if search_component_name == 'args': if job_args.get('data', {}).get(search_data_name).get('namespace', '') and job_args.get( 'data', {}).get(search_data_name).get('name', ''): storage_table_meta = storage.StorageTableMeta(name=job_args['data'][search_data_name]['name'], namespace=job_args['data'][search_data_name]['namespace']) else: tracker_client = TrackerClient(job_id=job_id, role=role, party_id=party_id, component_name=search_component_name) upstream_output_table_infos_json = tracker_client.get_output_data_info( data_name=search_data_name) if upstream_output_table_infos_json: tracker = Tracker(job_id=job_id, role=role, party_id=party_id, component_name=search_component_name) upstream_output_table_infos = [] for _ in upstream_output_table_infos_json: upstream_output_table_infos.append(fill_db_model_object( Tracker.get_dynamic_db_model(TrackingOutputDataInfo, job_id)(), _)) output_tables_meta = tracker.get_output_data_table(output_data_infos=upstream_output_table_infos) if output_tables_meta: storage_table_meta = output_tables_meta.get(search_data_name, None) args_from_component = this_type_args[search_component_name] = this_type_args.get( search_component_name, {}) if get_input_table and storage_table_meta: input_table[data_key] = {'namespace': storage_table_meta.get_namespace(), 'name': storage_table_meta.get_name()} computing_table = None elif storage_table_meta: LOGGER.info(f"load computing table use {task_parameters.computing_partitions}") computing_table = session.get_latest_opened().computing.load( storage_table_meta.get_address(), schema=storage_table_meta.get_schema(), partitions=task_parameters.computing_partitions) else: computing_table = None if not computing_table or not filter_attr or not filter_attr.get("data", None): data_dict[search_component_name][data_type].append(computing_table) args_from_component[data_type] = data_dict[search_component_name][data_type] else: args_from_component[data_type] = dict( [(a, getattr(computing_table, "get_{}".format(a))()) for a in filter_attr["data"]]) elif input_type in ['model', 'isometric_model']: this_type_args = task_run_args[input_type] = task_run_args.get(input_type, {}) for dsl_model_key in input_detail: dsl_model_key_items = dsl_model_key.split('.') if len(dsl_model_key_items) == 2: search_component_name, search_model_alias = dsl_model_key_items[0], dsl_model_key_items[1] elif len(dsl_model_key_items) == 3 and dsl_model_key_items[0] == 'pipeline': search_component_name, search_model_alias = dsl_model_key_items[1], dsl_model_key_items[2] else: raise Exception('get input {} failed'.format(input_type)) models = Tracker(job_id=job_id, role=role, party_id=party_id, component_name=search_component_name, model_id=job_parameters.model_id, model_version=job_parameters.model_version).get_output_model( model_alias=search_model_alias) this_type_args[search_component_name] = models if get_input_table: return input_table return task_run_args @classmethod def report_task_update_to_driver(cls, task_info): """ Report task update to FATEFlow Server :param task_info: :return: """ schedule_logger().info("report task {} {} {} {} to driver".format( task_info["task_id"], task_info["task_version"], task_info["role"], task_info["party_id"], )) ControllerClient.report_task(task_info=task_info) @classmethod def monkey_patch(cls): package_name = "monkey_patch" package_path = os.path.join(file_utils.get_python_base_directory(), "fate_flow", package_name) if not 
os.path.exists(package_path): return for f in os.listdir(package_path): f_path = os.path.join(file_utils.get_python_base_directory(), "fate_flow", package_name, f) if not os.path.isdir(f_path) or "__pycache__" in f_path: continue patch_module = importlib.import_module("fate_flow." + package_name + '.' + f + '.monkey_patch') patch_module.patch_all() if __name__ == '__main__': task_info = TaskExecutor.run_task() TaskExecutor.report_task_update_to_driver(task_info=task_info)
[ "fate_flow.utils.job_utils.get_job_conf", "fate_flow.entity.runtime_config.RuntimeConfig.init_config", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.operation.Tracker", "fate_flow.utils.job_utils.generate_task_version_id", "fate_flow.operation.Tracker.get_dynamic_db_model", "fate_flow.scheduling_apps.client.TrackerClient", "fate_flow.scheduling_apps.client.ControllerClient.report_task", "fate_flow.utils.job_utils.generate_session_id", "fate_flow.entity.runtime_config.RuntimeConfig.set_process_role" ]
[((1424, 1435), 'fate_arch.common.log.getLogger', 'getLogger', ([], {}), '()\n', (1433, 1435), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((19376, 19425), 'fate_flow.scheduling_apps.client.ControllerClient.report_task', 'ControllerClient.report_task', ([], {'task_info': 'task_info'}), '(task_info=task_info)\n', (19404, 19425), False, 'from fate_flow.scheduling_apps.client import ControllerClient\n'), ((19693, 19717), 'os.listdir', 'os.listdir', (['package_path'], {}), '(package_path)\n', (19703, 19717), False, 'import os\n'), ((1670, 1695), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1693, 1695), False, 'import argparse\n'), ((3263, 3274), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3272, 3274), False, 'import os\n'), ((3659, 3678), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (3676, 3678), False, 'from fate_arch.common.base_utils import current_timestamp, timestamp_to_date\n'), ((3702, 3738), 'fate_flow.utils.job_utils.get_job_conf', 'job_utils.get_job_conf', (['job_id', 'role'], {}), '(job_id, role)\n', (3724, 3738), False, 'from fate_flow.utils import job_utils, schedule_utils\n'), ((3876, 4063), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job_dsl', 'runtime_conf': 'job_runtime_conf', 'train_runtime_conf': "job_conf['train_runtime_conf_path']", 'pipeline_dsl': "job_conf['pipeline_dsl_path']"}), "(dsl=job_dsl, runtime_conf=\n job_runtime_conf, train_runtime_conf=job_conf['train_runtime_conf_path'\n ], pipeline_dsl=job_conf['pipeline_dsl_path'])\n", (3909, 4063), False, 'from fate_flow.utils import job_utils, schedule_utils\n'), ((5569, 5610), 'os.path.join', 'os.path.join', (['job_log_dir', 'component_name'], {}), '(job_log_dir, component_name)\n', (5581, 5610), False, 'import os\n'), ((5623, 5750), 'fate_arch.common.log.LoggerFactory.set_directory', 'log.LoggerFactory.set_directory', ([], {'directory': 'task_log_dir', 'parent_log_dir': 'job_log_dir', 'append_to_parent_log': '(True)', 'force': '(True)'}), '(directory=task_log_dir, parent_log_dir=\n job_log_dir, append_to_parent_log=True, force=True)\n', (5654, 5750), False, 'from fate_arch.common import file_utils, log, EngineType, profile\n'), ((5813, 6098), 'fate_flow.operation.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'component_name': 'component_name', 'task_id': 'task_id', 'task_version': 'task_version', 'model_id': 'job_parameters.model_id', 'model_version': 'job_parameters.model_version', 'component_module_name': 'module_name', 'job_parameters': 'job_parameters'}), '(job_id=job_id, role=role, party_id=party_id, component_name=\n component_name, task_id=task_id, task_version=task_version, model_id=\n job_parameters.model_id, model_version=job_parameters.model_version,\n component_module_name=module_name, job_parameters=job_parameters)\n', (5820, 6098), False, 'from fate_flow.operation import Tracker\n'), ((6294, 6585), 'fate_flow.scheduling_apps.client.TrackerClient', 'TrackerClient', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'component_name': 'component_name', 'task_id': 'task_id', 'task_version': 'task_version', 'model_id': 'job_parameters.model_id', 'model_version': 'job_parameters.model_version', 'component_module_name': 'module_name', 'job_parameters': 'job_parameters'}), '(job_id=job_id, role=role, party_id=party_id, component_name=\n component_name, task_id=task_id, task_version=task_version, 
model_id=\n job_parameters.model_id, model_version=job_parameters.model_version,\n component_module_name=module_name, job_parameters=job_parameters)\n', (6307, 6585), False, 'from fate_flow.scheduling_apps.client import TrackerClient\n'), ((7316, 7539), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'WORK_MODE': 'job_parameters.work_mode', 'COMPUTING_ENGINE': 'job_parameters.computing_engine', 'FEDERATION_ENGINE': 'job_parameters.federation_engine', 'FEDERATED_MODE': 'job_parameters.federated_mode'}), '(WORK_MODE=job_parameters.work_mode,\n COMPUTING_ENGINE=job_parameters.computing_engine, FEDERATION_ENGINE=\n job_parameters.federation_engine, FEDERATED_MODE=job_parameters.\n federated_mode)\n', (7341, 7539), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((7859, 7976), 'fate_arch.session.Session', 'session.Session', ([], {'computing_type': 'job_parameters.computing_engine', 'federation_type': 'job_parameters.federation_engine'}), '(computing_type=job_parameters.computing_engine,\n federation_type=job_parameters.federation_engine)\n', (7874, 7976), False, 'from fate_arch import session\n'), ((8008, 8076), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', (['task_id', 'task_version', 'role', 'party_id'], {}), '(task_id, task_version, role, party_id)\n', (8037, 8076), False, 'from fate_flow.utils import job_utils, schedule_utils\n'), ((8213, 8270), 'fate_flow.utils.job_utils.generate_task_version_id', 'job_utils.generate_task_version_id', (['task_id', 'task_version'], {}), '(task_id, task_version)\n', (8247, 8270), False, 'from fate_flow.utils import job_utils, schedule_utils\n'), ((10022, 10045), 'fate_arch.common.profile.profile_start', 'profile.profile_start', ([], {}), '()\n', (10043, 10045), False, 'from fate_arch.common import file_utils, log, EngineType, profile\n'), ((10131, 10153), 'fate_arch.common.profile.profile_ends', 'profile.profile_ends', ([], {}), '()\n', (10151, 10153), False, 'from fate_arch.common import file_utils, log, EngineType, profile\n'), ((19545, 19583), 'fate_arch.common.file_utils.get_python_base_directory', 'file_utils.get_python_base_directory', ([], {}), '()\n', (19581, 19583), False, 'from fate_arch.common import file_utils, log, EngineType, profile\n'), ((19627, 19655), 'os.path.exists', 'os.path.exists', (['package_path'], {}), '(package_path)\n', (19641, 19655), False, 'import os\n'), ((19944, 20029), 'importlib.import_module', 'importlib.import_module', (["('fate_flow.' + package_name + '.' + f + '.monkey_patch')"], {}), "('fate_flow.' + package_name + '.' 
+ f + '.monkey_patch'\n )\n", (19967, 20029), False, 'import importlib\n'), ((2955, 3007), 'fate_flow.entity.runtime_config.RuntimeConfig.set_process_role', 'RuntimeConfig.set_process_role', (['ProcessRole.EXECUTOR'], {}), '(ProcessRole.EXECUTOR)\n', (2985, 3007), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((5279, 5300), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5298, 5300), False, 'import traceback\n'), ((5473, 5519), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (5504, 5519), False, 'from fate_flow.utils import job_utils, schedule_utils\n'), ((11670, 11689), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (11687, 11689), False, 'from fate_arch.common.base_utils import current_timestamp, timestamp_to_date\n'), ((12024, 12041), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12039, 12041), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((12123, 12152), 'fate_arch.common.base_utils.timestamp_to_date', 'timestamp_to_date', (['start_time'], {}), '(start_time)\n', (12140, 12152), False, 'from fate_arch.common.base_utils import current_timestamp, timestamp_to_date\n'), ((12163, 12180), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12178, 12180), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((12260, 12300), 'fate_arch.common.base_utils.timestamp_to_date', 'timestamp_to_date', (["task_info['end_time']"], {}), "(task_info['end_time'])\n", (12277, 12300), False, 'from fate_arch.common.base_utils import current_timestamp, timestamp_to_date\n'), ((12311, 12328), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12326, 12328), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((12448, 12465), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12463, 12465), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((19151, 19168), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (19166, 19168), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((19753, 19791), 'fate_arch.common.file_utils.get_python_base_directory', 'file_utils.get_python_base_directory', ([], {}), '()\n', (19789, 19791), False, 'from fate_arch.common import file_utils, log, EngineType, profile\n'), ((2594, 2622), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['args.job_id'], {}), '(args.job_id)\n', (2609, 2622), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((2662, 2690), 'fate_arch.common.log.schedule_logger', 'schedule_logger', (['args.job_id'], {}), '(args.job_id)\n', (2677, 2690), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((5061, 5099), 'fate_arch.common.file_utils.load_json_conf', 'file_utils.load_json_conf', (['args.config'], {}), '(args.config)\n', (5086, 5099), False, 'from fate_arch.common import file_utils, log, EngineType, profile\n'), ((8665, 8682), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (8680, 8682), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((8783, 8800), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (8798, 8800), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((8892, 8909), 
'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (8907, 8909), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((9740, 9782), 'importlib.import_module', 'importlib.import_module', (['run_class_package'], {}), '(run_class_package)\n', (9763, 9782), False, 'import importlib\n'), ((9920, 9977), 'fate_flow.utils.job_utils.generate_task_version_id', 'job_utils.generate_task_version_id', (['task_id', 'task_version'], {}), '(task_id, task_version)\n', (9954, 9977), False, 'from fate_flow.utils import job_utils, schedule_utils\n'), ((11947, 11968), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11966, 11968), False, 'import traceback\n'), ((19842, 19863), 'os.path.isdir', 'os.path.isdir', (['f_path'], {}), '(f_path)\n', (19855, 19863), False, 'import os\n'), ((5313, 5330), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (5328, 5330), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((11565, 11582), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (11580, 11582), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((11985, 12002), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (12000, 12002), False, 'from fate_arch.common.log import schedule_logger, getLogger\n'), ((14961, 15062), 'fate_flow.scheduling_apps.client.TrackerClient', 'TrackerClient', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'component_name': 'search_component_name'}), '(job_id=job_id, role=role, party_id=party_id, component_name=\n search_component_name)\n', (14974, 15062), False, 'from fate_flow.scheduling_apps.client import TrackerClient\n'), ((14753, 14889), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': "job_args['data'][search_data_name]['name']", 'namespace': "job_args['data'][search_data_name]['namespace']"}), "(name=job_args['data'][search_data_name]['name'],\n namespace=job_args['data'][search_data_name]['namespace'])\n", (14777, 14889), False, 'from fate_arch import storage\n'), ((15384, 15479), 'fate_flow.operation.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'component_name': 'search_component_name'}), '(job_id=job_id, role=role, party_id=party_id, component_name=\n search_component_name)\n', (15391, 15479), False, 'from fate_flow.operation import Tracker\n'), ((18486, 18664), 'fate_flow.operation.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'component_name': 'search_component_name', 'model_id': 'job_parameters.model_id', 'model_version': 'job_parameters.model_version'}), '(job_id=job_id, role=role, party_id=party_id, component_name=\n search_component_name, model_id=job_parameters.model_id, model_version=\n job_parameters.model_version)\n', (18493, 18664), False, 'from fate_flow.operation import Tracker\n'), ((16831, 16858), 'fate_arch.session.get_latest_opened', 'session.get_latest_opened', ([], {}), '()\n', (16856, 16858), False, 'from fate_arch import session\n'), ((15798, 15858), 'fate_flow.operation.Tracker.get_dynamic_db_model', 'Tracker.get_dynamic_db_model', (['TrackingOutputDataInfo', 'job_id'], {}), '(TrackingOutputDataInfo, job_id)\n', (15826, 15858), False, 'from fate_flow.operation import Tracker\n')]
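TaskExecutor.run_task in the record above is meant to be launched as a separate process and reads everything it needs from argparse flags. Below is a sketch of the argv a scheduler might assemble for it; only the flag names (-j, -n, -t, -v, -r, -p, -c, --run_ip, --job_server) come from the record, while the entry-module path and every concrete value are invented placeholders.

import sys

cmd = [
    sys.executable, "-m", "fate_flow.operation.task_executor",  # assumed entry module
    "-j", "202101011200001234567",            # job id (made up)
    "-n", "dataio_0",                         # component name (made up)
    "-t", "202101011200001234567_dataio_0",   # task id (made up)
    "-v", "0",                                # task version
    "-r", "guest",                            # role
    "-p", "9999",                             # party id
    "-c", "/tmp/task_parameters.json",        # task parameters file (made up)
    "--run_ip", "127.0.0.1",
    "--job_server", "127.0.0.1:9380",
]
# subprocess.Popen(cmd) would spawn it; the real scheduler also redirects
# stdout/stderr into the task log directory.
print(" ".join(cmd))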
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_arch.common import FederatedCommunicationType from fate_flow.entity.types import TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, FederatedSchedulingStatusCode, RetCode from fate_flow.utils import job_utils from fate_flow.scheduler.federated_scheduler import FederatedScheduler from fate_flow.operation.job_saver import JobSaver from fate_arch.common.log import schedule_logger from fate_flow.manager.resource_manager import ResourceManager class TaskScheduler(object): @classmethod def schedule(cls, job, dsl_parser, canceled=False): schedule_logger(job_id=job.f_job_id).info("scheduling job {} tasks".format(job.f_job_id)) initiator_tasks_group = JobSaver.get_tasks_asc(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id) waiting_tasks = [] for initiator_task in initiator_tasks_group.values(): # collect all party task party status if job.f_runtime_conf_on_party["job_parameters"]["federated_status_collect_type"] == FederatedCommunicationType.PULL: cls.collect_task_of_all_party(job=job, initiator_task=initiator_task) new_task_status = cls.federated_task_status(job_id=initiator_task.f_job_id, task_id=initiator_task.f_task_id, task_version=initiator_task.f_task_version) task_status_have_update = False if new_task_status != initiator_task.f_status: task_status_have_update = True initiator_task.f_status = new_task_status FederatedScheduler.sync_task_status(job=job, task=initiator_task) if initiator_task.f_status == TaskStatus.WAITING: waiting_tasks.append(initiator_task) elif task_status_have_update and EndStatus.contains(initiator_task.f_status): FederatedScheduler.stop_task(job=job, task=initiator_task, stop_status=initiator_task.f_status) scheduling_status_code = SchedulingStatusCode.NO_NEXT if not canceled: for waiting_task in waiting_tasks: for component in dsl_parser.get_upstream_dependent_components(component_name=waiting_task.f_component_name): dependent_task = initiator_tasks_group[ JobSaver.task_key(task_id=job_utils.generate_task_id(job_id=job.f_job_id, component_name=component.get_name()), role=job.f_role, party_id=job.f_party_id ) ] if dependent_task.f_status != TaskStatus.SUCCESS: # can not start task break else: # all upstream dependent tasks have been successful, can start this task scheduling_status_code = SchedulingStatusCode.HAVE_NEXT status_code = cls.start_task(job=job, task=waiting_task) if status_code == SchedulingStatusCode.NO_RESOURCE: # wait for the next round of scheduling schedule_logger(job_id=job.f_job_id).info(f"job {waiting_task.f_job_id} task {waiting_task.f_task_id} can not apply resource, wait for the next round of scheduling") break elif status_code == SchedulingStatusCode.FAILED: scheduling_status_code = SchedulingStatusCode.FAILED waiting_task.f_status = StatusSet.FAILED FederatedScheduler.sync_task_status(job, waiting_task) break else: schedule_logger(job_id=job.f_job_id).info("have cancel signal, pass start job {} tasks".format(job.f_job_id)) 
schedule_logger(job_id=job.f_job_id).info("finish scheduling job {} tasks".format(job.f_job_id)) return scheduling_status_code, initiator_tasks_group.values() @classmethod def start_task(cls, job, task): schedule_logger(job_id=task.f_job_id).info("try to start job {} task {} {} on {} {}".format(task.f_job_id, task.f_task_id, task.f_task_version, task.f_role, task.f_party_id)) apply_status = ResourceManager.apply_for_task_resource(task_info=task.to_human_model_dict(only_primary_with=["status"])) if not apply_status: return SchedulingStatusCode.NO_RESOURCE task.f_status = TaskStatus.RUNNING update_status = JobSaver.update_task_status(task_info=task.to_human_model_dict(only_primary_with=["status"])) if not update_status: # Another scheduler scheduling the task schedule_logger(job_id=task.f_job_id).info("job {} task {} {} start on another scheduler".format(task.f_job_id, task.f_task_id, task.f_task_version)) # Rollback task.f_status = TaskStatus.WAITING ResourceManager.return_task_resource(task_info=task.to_human_model_dict(only_primary_with=["status"])) return SchedulingStatusCode.PASS schedule_logger(job_id=task.f_job_id).info("start job {} task {} {} on {} {}".format(task.f_job_id, task.f_task_id, task.f_task_version, task.f_role, task.f_party_id)) FederatedScheduler.sync_task_status(job=job, task=task) status_code, response = FederatedScheduler.start_task(job=job, task=task) if status_code == FederatedSchedulingStatusCode.SUCCESS: return SchedulingStatusCode.SUCCESS else: return SchedulingStatusCode.FAILED @classmethod def collect_task_of_all_party(cls, job, initiator_task, set_status=None): tasks_on_all_party = JobSaver.query_task(task_id=initiator_task.f_task_id, task_version=initiator_task.f_task_version) tasks_status_on_all = set([task.f_status for task in tasks_on_all_party]) if not len(tasks_status_on_all) > 1 and not TaskStatus.RUNNING in tasks_status_on_all: return status, federated_response = FederatedScheduler.collect_task(job=job, task=initiator_task) if status != FederatedSchedulingStatusCode.SUCCESS: schedule_logger(job_id=job.f_job_id).warning(f"collect task {initiator_task.f_task_id} {initiator_task.f_task_version} on {initiator_task.f_role} {initiator_task.f_party_id} failed") for _role in federated_response.keys(): for _party_id, party_response in federated_response[_role].items(): if party_response["retcode"] == RetCode.SUCCESS: JobSaver.update_task_status(task_info=party_response["data"]) JobSaver.update_task(task_info=party_response["data"]) elif party_response["retcode"] == RetCode.FEDERATED_ERROR and set_status: tmp_task_info = { "job_id": initiator_task.f_job_id, "task_id": initiator_task.f_task_id, "task_version": initiator_task.f_task_version, "role": _role, "party_id": _party_id, "party_status": TaskStatus.RUNNING } JobSaver.update_task_status(task_info=tmp_task_info) tmp_task_info["party_status"] = set_status JobSaver.update_task_status(task_info=tmp_task_info) @classmethod def federated_task_status(cls, job_id, task_id, task_version): tasks_on_all_party = JobSaver.query_task(task_id=task_id, task_version=task_version) status_flag = 0 # idmapping role status can only be ignored if all non-idmapping roles success for task in tasks_on_all_party: if 'idmapping' not in task.f_role and task.f_party_status != TaskStatus.SUCCESS: status_flag = 1 break if status_flag: tasks_party_status = [task.f_party_status for task in tasks_on_all_party] else: tasks_party_status = [task.f_party_status for task in tasks_on_all_party if 'idmapping' not in task.f_role] status = 
cls.calculate_multi_party_task_status(tasks_party_status) schedule_logger(job_id=job_id).info("job {} task {} {} status is {}, calculate by task party status list: {}".format(job_id, task_id, task_version, status, tasks_party_status)) return status @classmethod def calculate_multi_party_task_status(cls, tasks_party_status): # 1. all waiting # 2. have end status, should be interrupted # 3. have running # 4. waiting + success # 5. all the same end status tmp_status_set = set(tasks_party_status) if len(tmp_status_set) == 1: # 1 and 5 return tmp_status_set.pop() else: # 2 for status in sorted(EndStatus.status_list(), key=lambda s: StatusSet.get_level(status=s), reverse=True): if status == TaskStatus.SUCCESS: continue if status in tmp_status_set: return status # 3 if TaskStatus.RUNNING in tmp_status_set or TaskStatus.SUCCESS in tmp_status_set: return StatusSet.RUNNING raise Exception("Calculate task status failed: {}".format(tasks_party_status))
[ "fate_flow.scheduler.federated_scheduler.FederatedScheduler.start_task", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.stop_task", "fate_flow.entity.types.EndStatus.status_list", "fate_flow.entity.types.EndStatus.contains", "fate_flow.operation.job_saver.JobSaver.update_task", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_task_status", "fate_flow.operation.job_saver.JobSaver.update_task_status", "fate_flow.entity.types.StatusSet.get_level", "fate_flow.scheduler.federated_scheduler.FederatedScheduler.collect_task", "fate_flow.operation.job_saver.JobSaver.get_tasks_asc", "fate_flow.operation.job_saver.JobSaver.query_task" ]
[((1308, 1398), 'fate_flow.operation.job_saver.JobSaver.get_tasks_asc', 'JobSaver.get_tasks_asc', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(job_id=job.f_job_id, role=job.f_role, party_id=job.\n f_party_id)\n', (1330, 1398), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((5836, 5891), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_task_status', 'FederatedScheduler.sync_task_status', ([], {'job': 'job', 'task': 'task'}), '(job=job, task=task)\n', (5871, 5891), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((5924, 5973), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.start_task', 'FederatedScheduler.start_task', ([], {'job': 'job', 'task': 'task'}), '(job=job, task=task)\n', (5953, 5973), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((6273, 6375), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'task_id': 'initiator_task.f_task_id', 'task_version': 'initiator_task.f_task_version'}), '(task_id=initiator_task.f_task_id, task_version=\n initiator_task.f_task_version)\n', (6292, 6375), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((6604, 6665), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.collect_task', 'FederatedScheduler.collect_task', ([], {'job': 'job', 'task': 'initiator_task'}), '(job=job, task=initiator_task)\n', (6635, 6665), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((8080, 8143), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'task_id': 'task_id', 'task_version': 'task_version'}), '(task_id=task_id, task_version=task_version)\n', (8099, 8143), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((1186, 1222), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (1201, 1222), False, 'from fate_arch.common.log import schedule_logger\n'), ((2139, 2204), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_task_status', 'FederatedScheduler.sync_task_status', ([], {'job': 'job', 'task': 'initiator_task'}), '(job=job, task=initiator_task)\n', (2174, 2204), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((4403, 4439), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (4418, 4439), False, 'from fate_arch.common.log import schedule_logger\n'), ((4632, 4669), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'task.f_job_id'}), '(job_id=task.f_job_id)\n', (4647, 4669), False, 'from fate_arch.common.log import schedule_logger\n'), ((5660, 5697), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'task.f_job_id'}), '(job_id=task.f_job_id)\n', (5675, 5697), False, 'from fate_arch.common.log import schedule_logger\n'), ((8769, 8799), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (8784, 8799), False, 'from fate_arch.common.log import schedule_logger\n'), ((9436, 9459), 'fate_flow.entity.types.EndStatus.status_list', 'EndStatus.status_list', ([], {}), '()\n', (9457, 9459), False, 'from fate_flow.entity.types import TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, FederatedSchedulingStatusCode, RetCode\n'), ((2366, 2409), 'fate_flow.entity.types.EndStatus.contains', 
'EndStatus.contains', (['initiator_task.f_status'], {}), '(initiator_task.f_status)\n', (2384, 2409), False, 'from fate_flow.entity.types import TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, FederatedSchedulingStatusCode, RetCode\n'), ((2427, 2527), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.stop_task', 'FederatedScheduler.stop_task', ([], {'job': 'job', 'task': 'initiator_task', 'stop_status': 'initiator_task.f_status'}), '(job=job, task=initiator_task, stop_status=\n initiator_task.f_status)\n', (2455, 2527), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((4285, 4321), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (4300, 4321), False, 'from fate_arch.common.log import schedule_logger\n'), ((5272, 5309), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'task.f_job_id'}), '(job_id=task.f_job_id)\n', (5287, 5309), False, 'from fate_arch.common.log import schedule_logger\n'), ((6738, 6774), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (6753, 6774), False, 'from fate_arch.common.log import schedule_logger\n'), ((7134, 7195), 'fate_flow.operation.job_saver.JobSaver.update_task_status', 'JobSaver.update_task_status', ([], {'task_info': "party_response['data']"}), "(task_info=party_response['data'])\n", (7161, 7195), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((7216, 7270), 'fate_flow.operation.job_saver.JobSaver.update_task', 'JobSaver.update_task', ([], {'task_info': "party_response['data']"}), "(task_info=party_response['data'])\n", (7236, 7270), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((7777, 7829), 'fate_flow.operation.job_saver.JobSaver.update_task_status', 'JobSaver.update_task_status', ([], {'task_info': 'tmp_task_info'}), '(task_info=tmp_task_info)\n', (7804, 7829), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((7913, 7965), 'fate_flow.operation.job_saver.JobSaver.update_task_status', 'JobSaver.update_task_status', ([], {'task_info': 'tmp_task_info'}), '(task_info=tmp_task_info)\n', (7940, 7965), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((9475, 9504), 'fate_flow.entity.types.StatusSet.get_level', 'StatusSet.get_level', ([], {'status': 's'}), '(status=s)\n', (9494, 9504), False, 'from fate_flow.entity.types import TaskStatus, EndStatus, StatusSet, SchedulingStatusCode, FederatedSchedulingStatusCode, RetCode\n'), ((4174, 4228), 'fate_flow.scheduler.federated_scheduler.FederatedScheduler.sync_task_status', 'FederatedScheduler.sync_task_status', (['job', 'waiting_task'], {}), '(job, waiting_task)\n', (4209, 4228), False, 'from fate_flow.scheduler.federated_scheduler import FederatedScheduler\n'), ((3743, 3779), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (3758, 3779), False, 'from fate_arch.common.log import schedule_logger\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_flow.utils.log_utils import getLogger from fate_flow.controller.task_controller import TaskController from fate_flow.entity import ComponentProvider from fate_flow.manager.provider_manager import ProviderManager from fate_flow.utils import schedule_utils from fate_flow.worker.base_worker import BaseWorker from fate_flow.utils.log_utils import start_log, successful_log LOGGER = getLogger() class TaskInitializer(BaseWorker): def _run(self): result = {} dsl_parser = schedule_utils.get_job_dsl_parser(dsl=self.args.dsl, runtime_conf=self.args.runtime_conf, train_runtime_conf=self.args.train_runtime_conf, pipeline_dsl=self.args.pipeline_dsl) provider = ComponentProvider(**self.args.config["provider"]) common_task_info = self.args.config["common_task_info"] log_msg = f"initialize the components: {self.args.config['components']}" LOGGER.info(start_log(log_msg, role=self.args.role, party_id=self.args.party_id)) for component_name in self.args.config["components"]: result[component_name] = {} task_info = {} task_info.update(common_task_info) parameters, user_specified_parameters = ProviderManager.get_component_parameters(dsl_parser=dsl_parser, component_name=component_name, role=self.args.role, party_id=self.args.party_id, provider=provider) if parameters: task_info = {} task_info.update(common_task_info) task_info["component_name"] = component_name task_info["component_module"] = parameters["module"] task_info["provider_info"] = provider.to_dict() task_info["component_parameters"] = parameters TaskController.create_task(role=self.args.role, party_id=self.args.party_id, run_on_this_party=common_task_info["run_on_this_party"], task_info=task_info) result[component_name]["need_run"] = True else: # The party does not need to run, pass result[component_name]["need_run"] = False LOGGER.info(successful_log(log_msg, role=self.args.role, party_id=self.args.party_id)) return result if __name__ == "__main__": TaskInitializer().run()
[ "fate_flow.utils.log_utils.successful_log", "fate_flow.controller.task_controller.TaskController.create_task", "fate_flow.manager.provider_manager.ProviderManager.get_component_parameters", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.utils.log_utils.getLogger", "fate_flow.utils.log_utils.start_log", "fate_flow.entity.ComponentProvider" ]
[((1007, 1018), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (1016, 1018), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((1117, 1301), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'self.args.dsl', 'runtime_conf': 'self.args.runtime_conf', 'train_runtime_conf': 'self.args.train_runtime_conf', 'pipeline_dsl': 'self.args.pipeline_dsl'}), '(dsl=self.args.dsl, runtime_conf=self.args\n .runtime_conf, train_runtime_conf=self.args.train_runtime_conf,\n pipeline_dsl=self.args.pipeline_dsl)\n', (1150, 1301), False, 'from fate_flow.utils import schedule_utils\n'), ((1478, 1527), 'fate_flow.entity.ComponentProvider', 'ComponentProvider', ([], {}), "(**self.args.config['provider'])\n", (1495, 1527), False, 'from fate_flow.entity import ComponentProvider\n'), ((1693, 1761), 'fate_flow.utils.log_utils.start_log', 'start_log', (['log_msg'], {'role': 'self.args.role', 'party_id': 'self.args.party_id'}), '(log_msg, role=self.args.role, party_id=self.args.party_id)\n', (1702, 1761), False, 'from fate_flow.utils.log_utils import start_log, successful_log\n'), ((1992, 2164), 'fate_flow.manager.provider_manager.ProviderManager.get_component_parameters', 'ProviderManager.get_component_parameters', ([], {'dsl_parser': 'dsl_parser', 'component_name': 'component_name', 'role': 'self.args.role', 'party_id': 'self.args.party_id', 'provider': 'provider'}), '(dsl_parser=dsl_parser,\n component_name=component_name, role=self.args.role, party_id=self.args.\n party_id, provider=provider)\n', (2032, 2164), False, 'from fate_flow.manager.provider_manager import ProviderManager\n'), ((3361, 3434), 'fate_flow.utils.log_utils.successful_log', 'successful_log', (['log_msg'], {'role': 'self.args.role', 'party_id': 'self.args.party_id'}), '(log_msg, role=self.args.role, party_id=self.args.party_id)\n', (3375, 3434), False, 'from fate_flow.utils.log_utils import start_log, successful_log\n'), ((2910, 3073), 'fate_flow.controller.task_controller.TaskController.create_task', 'TaskController.create_task', ([], {'role': 'self.args.role', 'party_id': 'self.args.party_id', 'run_on_this_party': "common_task_info['run_on_this_party']", 'task_info': 'task_info'}), "(role=self.args.role, party_id=self.args.party_id,\n run_on_this_party=common_task_info['run_on_this_party'], task_info=\n task_info)\n", (2936, 3073), False, 'from fate_flow.controller.task_controller import TaskController\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import inspect import os import sys import __main__ from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey from playhouse.pool import PooledMySQLDatabase from playhouse.apsw_ext import APSWDatabase from arch.api.utils import log_utils from arch.api.utils.core_utils import current_timestamp from fate_flow.entity.constant_config import WorkMode from fate_flow.settings import DATABASE, USE_LOCAL_DATABASE, WORK_MODE, stat_logger from fate_flow.entity.runtime_config import RuntimeConfig LOGGER = log_utils.getLogger() def singleton(cls, *args, **kw): instances = {} def _singleton(): key = str(cls) + str(os.getpid()) if key not in instances: instances[key] = cls(*args, **kw) return instances[key] return _singleton @singleton class BaseDataBase(object): def __init__(self): database_config = DATABASE.copy() db_name = database_config.pop("name") if WORK_MODE == WorkMode.STANDALONE: if USE_LOCAL_DATABASE: self.database_connection = APSWDatabase('fate_flow_sqlite.db') RuntimeConfig.init_config(USE_LOCAL_DATABASE=True) stat_logger.info('init sqlite database on standalone mode successfully') else: self.database_connection = PooledMySQLDatabase(db_name, **database_config) stat_logger.info('init mysql database on standalone mode successfully') RuntimeConfig.init_config(USE_LOCAL_DATABASE=False) elif WORK_MODE == WorkMode.CLUSTER: self.database_connection = PooledMySQLDatabase(db_name, **database_config) stat_logger.info('init mysql database on cluster mode successfully') RuntimeConfig.init_config(USE_LOCAL_DATABASE=False) else: raise Exception('can not init database') DB = BaseDataBase().database_connection def close_connection(db_connection): try: if db_connection: db_connection.close() except Exception as e: LOGGER.exception(e) class DataBaseModel(Model): class Meta: database = DB def to_json(self): return self.__dict__['__data__'] def save(self, *args, **kwargs): if hasattr(self, "f_update_time"): self.f_update_time = current_timestamp() super(DataBaseModel, self).save(*args, **kwargs) def init_database_tables(): with DB.connection_context(): members = inspect.getmembers(sys.modules[__name__], inspect.isclass) table_objs = [] for name, obj in members: if obj != DataBaseModel and issubclass(obj, DataBaseModel): table_objs.append(obj) DB.create_tables(table_objs) DEFAULT_TAG = 'Za' class IdLibraryCacheInfo(DataBaseModel): f_party_id = CharField(max_length=32) f_id_type = CharField(max_length=16) f_encrypt_type = CharField(max_length=16) f_tag = CharField(max_length=16, default=DEFAULT_TAG) f_namespcae = CharField(max_length=128) f_version = CharField(max_length=128) f_rsa_key_n = CharField(max_length=512) f_rsa_key_d = CharField(max_length=512) f_rsa_key_e = CharField(max_length=32) f_create_time = BigIntegerField() f_update_time = BigIntegerField(null=True) f_description = TextField(null=True, default='') class Meta: db_table = "t_id_library_cache_infos" primary_key = 
CompositeKey('f_party_id', 'f_id_type', 'f_encrypt_type', 'f_tag', 'f_namespcae', 'f_version')
[ "fate_flow.entity.runtime_config.RuntimeConfig.init_config", "fate_flow.settings.stat_logger.info", "fate_flow.settings.DATABASE.copy" ]
[((1152, 1173), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (1171, 1173), False, 'from arch.api.utils import log_utils\n'), ((3456, 3480), 'peewee.CharField', 'CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (3465, 3480), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3497, 3521), 'peewee.CharField', 'CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (3506, 3521), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3543, 3567), 'peewee.CharField', 'CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (3552, 3567), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3580, 3625), 'peewee.CharField', 'CharField', ([], {'max_length': '(16)', 'default': 'DEFAULT_TAG'}), '(max_length=16, default=DEFAULT_TAG)\n', (3589, 3625), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3644, 3669), 'peewee.CharField', 'CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (3653, 3669), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3686, 3711), 'peewee.CharField', 'CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (3695, 3711), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3730, 3755), 'peewee.CharField', 'CharField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (3739, 3755), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3774, 3799), 'peewee.CharField', 'CharField', ([], {'max_length': '(512)'}), '(max_length=512)\n', (3783, 3799), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3818, 3842), 'peewee.CharField', 'CharField', ([], {'max_length': '(32)'}), '(max_length=32)\n', (3827, 3842), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3863, 3880), 'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (3878, 3880), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3901, 3927), 'peewee.BigIntegerField', 'BigIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (3916, 3927), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((3948, 3980), 'peewee.TextField', 'TextField', ([], {'null': '(True)', 'default': '""""""'}), "(null=True, default='')\n", (3957, 3980), False, 'from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((1516, 1531), 'fate_flow.settings.DATABASE.copy', 'DATABASE.copy', ([], {}), '()\n', (1529, 1531), False, 'from fate_flow.settings import DATABASE, USE_LOCAL_DATABASE, WORK_MODE, stat_logger\n'), ((3110, 3168), 'inspect.getmembers', 'inspect.getmembers', (['sys.modules[__name__]', 'inspect.isclass'], {}), '(sys.modules[__name__], inspect.isclass)\n', (3128, 3168), False, 'import inspect\n'), ((4066, 4164), 'peewee.CompositeKey', 'CompositeKey', (['"""f_party_id"""', '"""f_id_type"""', '"""f_encrypt_type"""', '"""f_tag"""', '"""f_namespcae"""', '"""f_version"""'], {}), "('f_party_id', 'f_id_type', 'f_encrypt_type', 'f_tag',\n 'f_namespcae', 'f_version')\n", (4078, 4164), False, 'from peewee import Model, 
CharField, IntegerField, BigIntegerField, TextField, CompositeKey\n'), ((2951, 2970), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (2968, 2970), False, 'from arch.api.utils.core_utils import current_timestamp\n'), ((1280, 1291), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1289, 1291), False, 'import os\n'), ((1701, 1736), 'playhouse.apsw_ext.APSWDatabase', 'APSWDatabase', (['"""fate_flow_sqlite.db"""'], {}), "('fate_flow_sqlite.db')\n", (1713, 1736), False, 'from playhouse.apsw_ext import APSWDatabase\n'), ((1753, 1803), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'USE_LOCAL_DATABASE': '(True)'}), '(USE_LOCAL_DATABASE=True)\n', (1778, 1803), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((1820, 1892), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""init sqlite database on standalone mode successfully"""'], {}), "('init sqlite database on standalone mode successfully')\n", (1836, 1892), False, 'from fate_flow.settings import DATABASE, USE_LOCAL_DATABASE, WORK_MODE, stat_logger\n'), ((1954, 2001), 'playhouse.pool.PooledMySQLDatabase', 'PooledMySQLDatabase', (['db_name'], {}), '(db_name, **database_config)\n', (1973, 2001), False, 'from playhouse.pool import PooledMySQLDatabase\n'), ((2018, 2089), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""init mysql database on standalone mode successfully"""'], {}), "('init mysql database on standalone mode successfully')\n", (2034, 2089), False, 'from fate_flow.settings import DATABASE, USE_LOCAL_DATABASE, WORK_MODE, stat_logger\n'), ((2106, 2157), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'USE_LOCAL_DATABASE': '(False)'}), '(USE_LOCAL_DATABASE=False)\n', (2131, 2157), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((2241, 2288), 'playhouse.pool.PooledMySQLDatabase', 'PooledMySQLDatabase', (['db_name'], {}), '(db_name, **database_config)\n', (2260, 2288), False, 'from playhouse.pool import PooledMySQLDatabase\n'), ((2301, 2369), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""init mysql database on cluster mode successfully"""'], {}), "('init mysql database on cluster mode successfully')\n", (2317, 2369), False, 'from fate_flow.settings import DATABASE, USE_LOCAL_DATABASE, WORK_MODE, stat_logger\n'), ((2382, 2433), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'USE_LOCAL_DATABASE': '(False)'}), '(USE_LOCAL_DATABASE=False)\n', (2407, 2433), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import traceback from fate_flow.controller.job_controller import JobController from fate_flow.entity.run_status import JobInheritanceStatus from fate_flow.operation.job_saver import JobSaver from fate_flow.utils.log_utils import getLogger from fate_flow.worker.base_worker import BaseWorker LOGGER = getLogger() class JobInherit(BaseWorker): def _run(self): job = JobSaver.query_job(job_id=self.args.job_id, role=self.args.role, party_id=self.args.party_id)[0] try: JobController.job_reload(job) except Exception as e: traceback.print_exc() JobSaver.update_job(job_info={"job_id": job.f_job_id, "role": job.f_role, "party_id": job.f_party_id, "inheritance_status": JobInheritanceStatus.FAILED}) LOGGER.exception(e) if __name__ == '__main__': JobInherit().run()
[ "fate_flow.operation.job_saver.JobSaver.update_job", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.utils.log_utils.getLogger", "fate_flow.controller.job_controller.JobController.job_reload" ]
[((918, 929), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (927, 929), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((996, 1094), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'self.args.job_id', 'role': 'self.args.role', 'party_id': 'self.args.party_id'}), '(job_id=self.args.job_id, role=self.args.role, party_id=\n self.args.party_id)\n', (1014, 1094), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((1118, 1147), 'fate_flow.controller.job_controller.JobController.job_reload', 'JobController.job_reload', (['job'], {}), '(job)\n', (1142, 1147), False, 'from fate_flow.controller.job_controller import JobController\n'), ((1191, 1212), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1210, 1212), False, 'import traceback\n'), ((1225, 1387), 'fate_flow.operation.job_saver.JobSaver.update_job', 'JobSaver.update_job', ([], {'job_info': "{'job_id': job.f_job_id, 'role': job.f_role, 'party_id': job.f_party_id,\n 'inheritance_status': JobInheritanceStatus.FAILED}"}), "(job_info={'job_id': job.f_job_id, 'role': job.f_role,\n 'party_id': job.f_party_id, 'inheritance_status': JobInheritanceStatus.\n FAILED})\n", (1244, 1387), False, 'from fate_flow.operation.job_saver import JobSaver\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_flow.utils.api_utils import get_json_result from fate_flow.settings import stat_logger from flask import Flask, request import json, requests from fate_flow.settings import API_VERSION, HTTP_PORT fate_flow_server_host = 'http://127.0.0.1:{}/{}'.format(HTTP_PORT, API_VERSION) manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) # Look up the target user # Example parameters: '{"job_id":"20211030114602298164131", "role":"guest", "party_id":10000, "component_name":"hetero_svd_0"}' @manager.route('/target/user/count', methods=['POST']) def count_target_user(): base_request_data = request.json headers = { "Content-Type": "application/json; charset=UTF-8" } response = requests.post('{}/tracking/component/output/data'.format(fate_flow_server_host), data=base_request_data, headers=headers) # print(json.loads(response.text)) return json.loads(response.text) return 0 if __name__ == '__main__': # Test the custom API defined above base_request_data = '{"job_id":"20211030114602298164131", "role":"guest", "party_id":10000, "component_name":"hetero_svd_0"}' headers = { "Content-Type": "application/json; charset=UTF-8" } response = requests.post('{}/fate/target/user/count'.format(fate_flow_server_host), data=base_request_data, headers=headers)
[ "fate_flow.settings.stat_logger.exception" ]
[((914, 929), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (919, 929), False, 'from flask import Flask, request\n'), ((992, 1016), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1013, 1016), False, 'from fate_flow.settings import stat_logger\n'), ((1610, 1635), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1620, 1635), False, 'import json, requests\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_arch.common.base_utils import json_dumps from fate_flow.utils.log_utils import getLogger from fate_flow.db.component_registry import ComponentRegistry from fate_flow.entity import ComponentProvider from fate_flow.settings import stat_logger from fate_flow.worker.base_worker import BaseWorker LOGGER = getLogger() class ProviderRegistrar(BaseWorker): def _run(self): provider = ComponentProvider(**self.args.config.get("provider")) support_components = ComponentRegistry.register_provider(provider) ComponentRegistry.register_components(provider, support_components) ComponentRegistry.dump() stat_logger.info(json_dumps(ComponentRegistry.REGISTRY, indent=4)) if __name__ == '__main__': ProviderRegistrar().run()
[ "fate_flow.utils.log_utils.getLogger", "fate_flow.db.component_registry.ComponentRegistry.register_provider", "fate_flow.db.component_registry.ComponentRegistry.register_components", "fate_flow.db.component_registry.ComponentRegistry.dump" ]
[((929, 940), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (938, 940), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((1102, 1147), 'fate_flow.db.component_registry.ComponentRegistry.register_provider', 'ComponentRegistry.register_provider', (['provider'], {}), '(provider)\n', (1137, 1147), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((1156, 1223), 'fate_flow.db.component_registry.ComponentRegistry.register_components', 'ComponentRegistry.register_components', (['provider', 'support_components'], {}), '(provider, support_components)\n', (1193, 1223), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((1232, 1256), 'fate_flow.db.component_registry.ComponentRegistry.dump', 'ComponentRegistry.dump', ([], {}), '()\n', (1254, 1256), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((1282, 1330), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['ComponentRegistry.REGISTRY'], {'indent': '(4)'}), '(ComponentRegistry.REGISTRY, indent=4)\n', (1292, 1330), False, 'from fate_arch.common.base_utils import json_dumps\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import List from arch.api import session, WorkMode from arch.api.base.table import Table from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64 from arch.api.utils.log_utils import schedule_logger from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView from fate_flow.entity.constant_config import JobStatus, TaskStatus from fate_flow.entity.metric import Metric, MetricMeta from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.manager.model_manager import pipelined_model from fate_flow.settings import API_VERSION, MAX_CONCURRENT_JOB_RUN_HOST from fate_flow.utils import job_utils, api_utils, model_utils, session_utils class Tracking(object): METRIC_DATA_PARTITION = 48 METRIC_LIST_PARTITION = 48 JOB_VIEW_PARTITION = 8 def __init__(self, job_id: str, role: str, party_id: int, model_id: str = None, model_version: str = None, component_name: str = None, component_module_name: str = None, task_id: str = None): self.job_id = job_id self.role = role self.party_id = party_id self.component_name = component_name if component_name else 'pipeline' self.module_name = component_module_name if component_module_name else 'Pipeline' self.task_id = task_id if task_id else job_utils.generate_task_id(job_id=self.job_id, component_name=self.component_name) self.table_namespace = '_'.join( ['fate_flow', 'tracking', 'data', self.job_id, self.role, str(self.party_id), self.component_name]) self.job_table_namespace = '_'.join( ['fate_flow', 'tracking', 'data', self.job_id, self.role, str(self.party_id)]) self.model_id = model_id self.party_model_id = model_utils.gen_party_model_id(model_id=model_id, role=role, party_id=party_id) self.model_version = model_version self.pipelined_model = None if self.party_model_id and self.model_version: self.pipelined_model = pipelined_model.PipelinedModel(model_id=self.party_model_id, model_version=self.model_version) def log_job_metric_data(self, metric_namespace: str, metric_name: str, metrics: List[Metric]): self.save_metric_data_remote(metric_namespace=metric_namespace, metric_name=metric_name, metrics=metrics, job_level=True) def log_metric_data(self, metric_namespace: str, metric_name: str, metrics: List[Metric]): self.save_metric_data_remote(metric_namespace=metric_namespace, metric_name=metric_name, metrics=metrics, job_level=False) def save_metric_data_remote(self, metric_namespace: str, metric_name: str, metrics: List[Metric], job_level=False): # TODO: In the next version will be moved to tracking api module on arch/api package schedule_logger(self.job_id).info( 'request save job {} component {} on {} {} {} {} metric data'.format(self.job_id, self.component_name, self.role, self.party_id, metric_namespace, metric_name)) request_body = dict() request_body['metric_namespace'] = metric_namespace request_body['metric_name'] = metric_name request_body['metrics'] = [serialize_b64(metric, to_str=True) for 
metric in metrics] request_body['job_level'] = job_level response = api_utils.local_api(job_id=self.job_id, method='POST', endpoint='/{}/tracking/{}/{}/{}/{}/{}/metric_data/save'.format( API_VERSION, self.job_id, self.component_name, self.task_id, self.role, self.party_id), json_body=request_body) return response['retcode'] == 0 def save_metric_data(self, metric_namespace: str, metric_name: str, metrics: List[Metric], job_level=False): schedule_logger(self.job_id).info( 'save job {} component {} on {} {} {} {} metric data'.format(self.job_id, self.component_name, self.role, self.party_id, metric_namespace, metric_name)) kv = [] for metric in metrics: kv.append((metric.key, metric.value)) self.insert_data_to_db(metric_namespace, metric_name, 1, kv, job_level) def get_job_metric_data(self, metric_namespace: str, metric_name: str): return self.read_metric_data(metric_namespace=metric_namespace, metric_name=metric_name, job_level=True) def get_metric_data(self, metric_namespace: str, metric_name: str): return self.read_metric_data(metric_namespace=metric_namespace, metric_name=metric_name, job_level=False) def read_metric_data(self, metric_namespace: str, metric_name: str, job_level=False): with DB.connection_context(): metrics = [] for k, v in self.read_data_from_db(metric_namespace, metric_name, 1, job_level): metrics.append(Metric(key=k, value=v)) return metrics def set_metric_meta(self, metric_namespace: str, metric_name: str, metric_meta: MetricMeta, job_level: bool = False): self.save_metric_meta_remote(metric_namespace=metric_namespace, metric_name=metric_name, metric_meta=metric_meta, job_level=job_level) def save_metric_meta_remote(self, metric_namespace: str, metric_name: str, metric_meta: MetricMeta, job_level: bool = False): # TODO: In the next version will be moved to tracking api module on arch/api package schedule_logger(self.job_id).info( 'request save job {} component {} on {} {} {} {} metric meta'.format(self.job_id, self.component_name, self.role, self.party_id, metric_namespace, metric_name)) request_body = dict() request_body['metric_namespace'] = metric_namespace request_body['metric_name'] = metric_name request_body['metric_meta'] = serialize_b64(metric_meta, to_str=True) request_body['job_level'] = job_level response = api_utils.local_api(job_id=self.job_id, method='POST', endpoint='/{}/tracking/{}/{}/{}/{}/{}/metric_meta/save'.format( API_VERSION, self.job_id, self.component_name, self.task_id, self.role, self.party_id), json_body=request_body) return response['retcode'] == 0 def save_metric_meta(self, metric_namespace: str, metric_name: str, metric_meta: MetricMeta, job_level: bool = False): schedule_logger(self.job_id).info( 'save job {} component {} on {} {} {} {} metric meta'.format(self.job_id, self.component_name, self.role, self.party_id, metric_namespace, metric_name)) self.insert_data_to_db(metric_namespace, metric_name, 0, metric_meta.to_dict().items(), job_level) def get_metric_meta(self, metric_namespace: str, metric_name: str, job_level: bool = False): with DB.connection_context(): kv = dict() for k, v in self.read_data_from_db(metric_namespace, metric_name, 0, job_level): kv[k] = v return MetricMeta(name=kv.get('name'), metric_type=kv.get('metric_type'), extra_metas=kv) def get_metric_list(self, job_level: bool = False): with DB.connection_context(): metrics = dict() query_sql = 'select distinct f_metric_namespace, f_metric_name from t_tracking_metric_{} where ' \ 'f_job_id = "{}" and f_component_name = "{}" and f_role = "{}" and f_party_id = "{}" ' \ 
'and f_task_id = "{}"'.format( self.get_table_index(), self.job_id, self.component_name if not job_level else 'dag', self.role, self.party_id, self.task_id) cursor = DB.execute_sql(query_sql) for row in cursor.fetchall(): metrics[row[0]] = metrics.get(row[0], []) metrics[row[0]].append(row[1]) return metrics def log_job_view(self, view_data: dict): self.insert_data_to_db('job', 'job_view', 2, view_data.items(), job_level=True) def get_job_view(self): with DB.connection_context(): view_data = {} for k, v in self.read_data_from_db('job', 'job_view', 2, job_level=True): view_data[k] = v return view_data @session_utils.session_detect() def save_output_data_table(self, data_table: Table, data_name: str = 'component'): """ Save component output data, will run in the task executor process :param data_table: :param data_name: :return: """ if data_table: persistent_table_namespace, persistent_table_name = 'output_data_{}'.format( self.task_id), data_table.get_name() schedule_logger(self.job_id).info( 'persisting the component output temporary table: {} {} to {} {}'.format(data_table.get_namespace(), data_table.get_name(), persistent_table_namespace, persistent_table_name)) persistent_table = data_table.save_as( namespace=persistent_table_namespace, name=persistent_table_name) persistent_table_metas = {} persistent_table_metas.update(data_table.get_metas()) persistent_table_metas["schema"] = data_table.schema session.save_data_table_meta( persistent_table_metas, data_table_namespace=persistent_table.get_namespace(), data_table_name=persistent_table.get_name()) data_table_info = { data_name: {'name': persistent_table.get_name(), 'namespace': persistent_table.get_namespace()}} else: data_table_info = {} session.save_data( data_table_info.items(), name=Tracking.output_table_name('data'), namespace=self.table_namespace, partition=48) self.save_data_view(self.role, self.party_id, data_info={'f_table_name': persistent_table._name if data_table else '', 'f_table_namespace': persistent_table._namespace if data_table else '', 'f_partition': persistent_table._partitions if data_table else None, 'f_table_count_actual': data_table.count() if data_table else 0}, mark=True) @session_utils.session_detect() def get_output_data_table(self, data_name: str = 'component'): """ Get component output data table, will run in the task executor process :param data_name: :return: """ output_data_info_table = session.table(name=Tracking.output_table_name('data'), namespace=self.table_namespace) data_table_info = output_data_info_table.get(data_name) if data_table_info: data_table = session.table(name=data_table_info.get('name', ''), namespace=data_table_info.get('namespace', '')) data_table_meta = data_table.get_metas() if data_table_meta.get('schema', None): data_table.schema = data_table_meta['schema'] return data_table else: return None def init_pipelined_model(self): self.pipelined_model.create_pipelined_model() def save_output_model(self, model_buffers: dict, model_alias: str): if model_buffers: self.pipelined_model.save_component_model(component_name=self.component_name, component_module_name=self.module_name, model_alias=model_alias, model_buffers=model_buffers) self.save_data_view(self.role, self.party_id, data_info={'f_party_model_id': self.party_model_id, 'f_model_version': self.model_version}) def get_output_model(self, model_alias): model_buffers = self.pipelined_model.read_component_model(component_name=self.component_name, model_alias=model_alias) return model_buffers def collect_model(self): model_buffers = 
self.pipelined_model.collect_models() return model_buffers def save_pipeline(self, pipelined_buffer_object): self.save_output_model({'Pipeline': pipelined_buffer_object}, 'pipeline') self.pipelined_model.save_pipeline(pipelined_buffer_object=pipelined_buffer_object) def get_component_define(self): return self.pipelined_model.get_component_define(component_name=self.component_name) def insert_data_to_db(self, metric_namespace: str, metric_name: str, data_type: int, kv, job_level=False): with DB.connection_context(): try: tracking_metric = TrackingMetric.model(table_index=self.job_id) tracking_metric.f_job_id = self.job_id tracking_metric.f_component_name = self.component_name if not job_level else 'dag' tracking_metric.f_task_id = self.task_id tracking_metric.f_role = self.role tracking_metric.f_party_id = self.party_id tracking_metric.f_metric_namespace = metric_namespace tracking_metric.f_metric_name = metric_name tracking_metric.f_type = data_type default_db_source = tracking_metric.to_json() tracking_metric_data_source = [] for k, v in kv: db_source = default_db_source.copy() db_source['f_key'] = serialize_b64(k) db_source['f_value'] = serialize_b64(v) db_source['f_create_time'] = current_timestamp() tracking_metric_data_source.append(db_source) self.bulk_insert_model_data(TrackingMetric.model(table_index=self.get_table_index()), tracking_metric_data_source) except Exception as e: schedule_logger(self.job_id).exception(e) def bulk_insert_model_data(self, model, data_source): with DB.connection_context(): try: DB.create_tables([model]) batch_size = 50 if RuntimeConfig.USE_LOCAL_DATABASE else 1000 for i in range(0, len(data_source), batch_size): with DB.atomic(): model.insert_many(data_source[i:i+batch_size]).execute() return len(data_source) except Exception as e: schedule_logger(self.job_id).exception(e) return 0 def read_data_from_db(self, metric_namespace: str, metric_name: str, data_type, job_level=False): with DB.connection_context(): metrics = [] try: query_sql = 'select f_key, f_value from t_tracking_metric_{} where ' \ 'f_job_id = "{}" and f_component_name = "{}" and f_role = "{}" and f_party_id = "{}"' \ 'and f_task_id = "{}" and f_metric_namespace = "{}" and f_metric_name= "{}" and f_type="{}" order by f_id'.format( self.get_table_index(), self.job_id, self.component_name if not job_level else 'dag', self.role, self.party_id, self.task_id, metric_namespace, metric_name, data_type) cursor = DB.execute_sql(query_sql) for row in cursor.fetchall(): yield deserialize_b64(row[0]), deserialize_b64(row[1]) except Exception as e: schedule_logger(self.job_id).exception(e) return metrics def save_job_info(self, role, party_id, job_info, create=False): with DB.connection_context(): schedule_logger(self.job_id).info('save {} {} job: {}'.format(role, party_id, job_info)) jobs = Job.select().where(Job.f_job_id == self.job_id, Job.f_role == role, Job.f_party_id == party_id) is_insert = True if jobs: job = jobs[0] is_insert = False if job.f_status == JobStatus.TIMEOUT: return None elif create: job = Job() job.f_create_time = current_timestamp() else: return None job.f_job_id = self.job_id job.f_role = role job.f_party_id = party_id if 'f_status' in job_info: if job.f_status in [JobStatus.COMPLETE, JobStatus.FAILED]: # Termination status cannot be updated # TODO: return if (job_info['f_status'] in [JobStatus.FAILED, JobStatus.TIMEOUT]) and (not job.f_end_time): if not job.f_start_time: return job_info['f_end_time'] = current_timestamp() job_info['f_elapsed'] = job_info['f_end_time'] - 
job.f_start_time job_info['f_update_time'] = current_timestamp() if (job_info['f_status'] in [JobStatus.FAILED, JobStatus.TIMEOUT, JobStatus.CANCELED, JobStatus.COMPLETE]): job_info['f_tag'] = 'job_end' if job.f_status == JobStatus.CANCELED: job_info.pop('f_status') update_fields = [] for k, v in job_info.items(): try: if k in ['f_job_id', 'f_role', 'f_party_id'] or v == getattr(Job, k).default: continue setattr(job, k, v) update_fields.append(getattr(Job, k)) except: pass if is_insert: job.save(force_insert=True) else: job.save(only=update_fields) def save_task(self, role, party_id, task_info): with DB.connection_context(): tasks = Task.select().where(Task.f_job_id == self.job_id, Task.f_component_name == self.component_name, Task.f_task_id == self.task_id, Task.f_role == role, Task.f_party_id == party_id) is_insert = True if tasks: task = tasks[0] is_insert = False else: task = Task() task.f_create_time = current_timestamp() task.f_job_id = self.job_id task.f_component_name = self.component_name task.f_task_id = self.task_id task.f_role = role task.f_party_id = party_id if 'f_status' in task_info: if task.f_status in [TaskStatus.COMPLETE, TaskStatus.FAILED]: # Termination status cannot be updated # TODO: pass for k, v in task_info.items(): try: if k in ['f_job_id', 'f_component_name', 'f_task_id', 'f_role', 'f_party_id'] or v == getattr(Task, k).default: continue except: pass setattr(task, k, v) if is_insert: task.save(force_insert=True) else: task.save() return task def save_data_view(self, role, party_id, data_info, mark=False): with DB.connection_context(): data_views = DataView.select().where(DataView.f_job_id == self.job_id, DataView.f_component_name == self.component_name, DataView.f_task_id == self.task_id, DataView.f_role == role, DataView.f_party_id == party_id) is_insert = True if mark and self.component_name == "upload_0": return if data_views: data_view = data_views[0] is_insert = False else: data_view = DataView() data_view.f_create_time = current_timestamp() data_view.f_job_id = self.job_id data_view.f_component_name = self.component_name data_view.f_task_id = self.task_id data_view.f_role = role data_view.f_party_id = party_id data_view.f_update_time = current_timestamp() for k, v in data_info.items(): if k in ['f_job_id', 'f_component_name', 'f_task_id', 'f_role', 'f_party_id'] or v == getattr( DataView, k).default: continue setattr(data_view, k, v) if is_insert: data_view.save(force_insert=True) else: data_view.save() return data_view @session_utils.session_detect() def clean_task(self, roles, party_ids): schedule_logger(self.job_id).info('clean task {} on {} {}'.format(self.task_id, self.role, self.party_id)) try: for role in roles.split(','): for party_id in party_ids.split(','): # clean up temporary tables namespace_clean = job_utils.generate_session_id(task_id=self.task_id, role=role, party_id=party_id) session.clean_tables(namespace=namespace_clean, regex_string='*') schedule_logger(self.job_id).info('clean table by namespace {} on {} {} done'.format(namespace_clean, self.role, self.party_id)) # clean up the last tables of the federation namespace_clean = self.task_id session.clean_tables(namespace=namespace_clean, regex_string='*') schedule_logger(self.job_id).info('clean table by namespace {} on {} {} done'.format(namespace_clean, self.role, self.party_id)) except Exception as e: schedule_logger(self.job_id).exception(e) schedule_logger(self.job_id).info('clean task {} on {} {} done'.format(self.task_id, self.role, self.party_id)) def get_table_namespace(self, 
job_level: bool = False): return self.table_namespace if not job_level else self.job_table_namespace def get_table_index(self): return self.job_id[:8] @staticmethod def metric_table_name(metric_namespace: str, metric_name: str): return '_'.join(['metric', metric_namespace, metric_name]) @staticmethod def metric_list_table_name(): return '_'.join(['metric', 'list']) @staticmethod def output_table_name(output_type: str): return '_'.join(['output', output_type]) @staticmethod def job_view_table_name(): return '_'.join(['job', 'view'])
[ "fate_flow.db.db_models.Job", "fate_flow.utils.session_utils.session_detect", "fate_flow.db.db_models.DB.connection_context", "fate_flow.utils.job_utils.generate_session_id", "fate_flow.db.db_models.DB.create_tables", "fate_flow.db.db_models.Task", "fate_flow.db.db_models.DataView", "fate_flow.db.db_models.Job.select", "fate_flow.entity.metric.Metric", "fate_flow.db.db_models.DB.atomic", "fate_flow.db.db_models.DataView.select", "fate_flow.manager.model_manager.pipelined_model.PipelinedModel", "fate_flow.utils.model_utils.gen_party_model_id", "fate_flow.db.db_models.TrackingMetric.model", "fate_flow.db.db_models.Task.select", "fate_flow.utils.job_utils.generate_task_id", "fate_flow.db.db_models.DB.execute_sql" ]
[((10421, 10451), 'fate_flow.utils.session_utils.session_detect', 'session_utils.session_detect', ([], {}), '()\n', (10449, 10451), False, 'from fate_flow.utils import job_utils, api_utils, model_utils, session_utils\n'), ((12799, 12829), 'fate_flow.utils.session_utils.session_detect', 'session_utils.session_detect', ([], {}), '()\n', (12827, 12829), False, 'from fate_flow.utils import job_utils, api_utils, model_utils, session_utils\n'), ((23793, 23823), 'fate_flow.utils.session_utils.session_detect', 'session_utils.session_detect', ([], {}), '()\n', (23821, 23823), False, 'from fate_flow.utils import job_utils, api_utils, model_utils, session_utils\n'), ((2532, 2611), 'fate_flow.utils.model_utils.gen_party_model_id', 'model_utils.gen_party_model_id', ([], {'model_id': 'model_id', 'role': 'role', 'party_id': 'party_id'}), '(model_id=model_id, role=role, party_id=party_id)\n', (2562, 2611), False, 'from fate_flow.utils import job_utils, api_utils, model_utils, session_utils\n'), ((7588, 7627), 'arch.api.utils.core_utils.serialize_b64', 'serialize_b64', (['metric_meta'], {'to_str': '(True)'}), '(metric_meta, to_str=True)\n', (7601, 7627), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((2023, 2110), 'fate_flow.utils.job_utils.generate_task_id', 'job_utils.generate_task_id', ([], {'job_id': 'self.job_id', 'component_name': 'self.component_name'}), '(job_id=self.job_id, component_name=self.\n component_name)\n', (2049, 2110), False, 'from fate_flow.utils import job_utils, api_utils, model_utils, session_utils\n'), ((2781, 2880), 'fate_flow.manager.model_manager.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': 'self.party_model_id', 'model_version': 'self.model_version'}), '(model_id=self.party_model_id, model_version=\n self.model_version)\n', (2811, 2880), False, 'from fate_flow.manager.model_manager import pipelined_model\n'), ((4321, 4355), 'arch.api.utils.core_utils.serialize_b64', 'serialize_b64', (['metric'], {'to_str': '(True)'}), '(metric, to_str=True)\n', (4334, 4355), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((6143, 6166), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (6164, 6166), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((8987, 9010), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (9008, 9010), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((9327, 9350), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (9348, 9350), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((9839, 9864), 'fate_flow.db.db_models.DB.execute_sql', 'DB.execute_sql', (['query_sql'], {}), '(query_sql)\n', (9853, 9864), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((10215, 10238), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (10236, 10238), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((15335, 15358), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (15356, 15358), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((16752, 16775), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', 
([], {}), '()\n', (16773, 16775), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((17372, 17395), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (17393, 17395), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((18368, 18391), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (18389, 18391), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((20594, 20617), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (20615, 20617), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((22289, 22312), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (22310, 22312), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((23341, 23360), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (23358, 23360), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((3695, 3723), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (3710, 3723), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((5212, 5240), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (5227, 5240), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((6959, 6987), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (6974, 6987), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((8496, 8524), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (8511, 8524), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((15411, 15456), 'fate_flow.db.db_models.TrackingMetric.model', 'TrackingMetric.model', ([], {'table_index': 'self.job_id'}), '(table_index=self.job_id)\n', (15431, 15456), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((16810, 16835), 'fate_flow.db.db_models.DB.create_tables', 'DB.create_tables', (['[model]'], {}), '([model])\n', (16826, 16835), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((18018, 18043), 'fate_flow.db.db_models.DB.execute_sql', 'DB.execute_sql', (['query_sql'], {}), '(query_sql)\n', (18032, 18043), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((21135, 21141), 'fate_flow.db.db_models.Task', 'Task', ([], {}), '()\n', (21139, 21141), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((21179, 21198), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (21196, 21198), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((22997, 23007), 'fate_flow.db.db_models.DataView', 'DataView', ([], {}), '()\n', (23005, 23007), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((23050, 23069), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (23067, 23069), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((23876, 23904), 'arch.api.utils.log_utils.schedule_logger', 
'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (23891, 23904), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((25664, 25692), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (25679, 25692), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((6317, 6339), 'fate_flow.entity.metric.Metric', 'Metric', ([], {'key': 'k', 'value': 'v'}), '(key=k, value=v)\n', (6323, 6339), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n'), ((10884, 10912), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (10899, 10912), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((16200, 16216), 'arch.api.utils.core_utils.serialize_b64', 'serialize_b64', (['k'], {}), '(k)\n', (16213, 16216), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((16260, 16276), 'arch.api.utils.core_utils.serialize_b64', 'serialize_b64', (['v'], {}), '(v)\n', (16273, 16276), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((16326, 16345), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (16343, 16345), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((18405, 18433), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (18420, 18433), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((18513, 18525), 'fate_flow.db.db_models.Job.select', 'Job.select', ([], {}), '()\n', (18523, 18525), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((18856, 18861), 'fate_flow.db.db_models.Job', 'Job', ([], {}), '()\n', (18859, 18861), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((18898, 18917), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (18915, 18917), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((19529, 19548), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (19546, 19548), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((19683, 19702), 'arch.api.utils.core_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (19700, 19702), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((20639, 20652), 'fate_flow.db.db_models.Task.select', 'Task.select', ([], {}), '()\n', (20650, 20652), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((22339, 22356), 'fate_flow.db.db_models.DataView.select', 'DataView.select', ([], {}), '()\n', (22354, 22356), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((24326, 24412), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', ([], {'task_id': 'self.task_id', 'role': 'role', 'party_id': 'party_id'}), '(task_id=self.task_id, role=role, party_id=\n party_id)\n', (24355, 24412), False, 'from fate_flow.utils import job_utils, api_utils, model_utils, session_utils\n'), ((24564, 24629), 'arch.api.session.clean_tables', 'session.clean_tables', ([], {'namespace': 'namespace_clean', 'regex_string': '"""*"""'}), "(namespace=namespace_clean, regex_string='*')\n", 
(24584, 24629), False, 'from arch.api import session, WorkMode\n'), ((25125, 25190), 'arch.api.session.clean_tables', 'session.clean_tables', ([], {'namespace': 'namespace_clean', 'regex_string': '"""*"""'}), "(namespace=namespace_clean, regex_string='*')\n", (25145, 25190), False, 'from arch.api import session, WorkMode\n'), ((17004, 17015), 'fate_flow.db.db_models.DB.atomic', 'DB.atomic', ([], {}), '()\n', (17013, 17015), False, 'from fate_flow.db.db_models import DB, Job, Task, TrackingMetric, DataView\n'), ((25614, 25642), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (25629, 25642), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((16638, 16666), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (16653, 16666), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((17189, 17217), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (17204, 17217), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((18116, 18139), 'arch.api.utils.core_utils.deserialize_b64', 'deserialize_b64', (['row[0]'], {}), '(row[0])\n', (18131, 18139), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((18141, 18164), 'arch.api.utils.core_utils.deserialize_b64', 'deserialize_b64', (['row[1]'], {}), '(row[1])\n', (18156, 18164), False, 'from arch.api.utils.core_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((18216, 18244), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (18231, 18244), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((24650, 24678), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (24665, 24678), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((25211, 25239), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (25226, 25239), False, 'from arch.api.utils.log_utils import schedule_logger\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil from flask import Flask, request, send_file from fate_flow.settings import stat_logger, API_VERSION, MODEL_STORE_ADDRESS, TEMP_DIRECTORY from fate_flow.driver.job_controller import JobController from fate_flow.manager.model_manager import publish_model from fate_flow.manager.model_manager import pipelined_model from fate_flow.utils.api_utils import get_json_result, federated_api from fate_flow.utils.job_utils import generate_job_id, runtime_conf_basic from fate_flow.utils.service_utils import ServiceUtils from fate_flow.utils.detect_utils import check_config from fate_flow.utils.model_utils import gen_party_model_id from fate_flow.entity.constant_config import ModelOperation manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/load', methods=['POST']) def load_model(): request_config = request.json _job_id = generate_job_id() initiator_party_id = request_config['initiator']['party_id'] initiator_role = request_config['initiator']['role'] publish_model.generate_publish_model_info(request_config) load_status = True load_status_info = {} load_status_msg = 'success' load_status_info['detail'] = {} for role_name, role_partys in request_config.get("role").items(): if role_name == 'arbiter': continue load_status_info[role_name] = load_status_info.get(role_name, {}) for _party_id in role_partys: request_config['local'] = {'role': role_name, 'party_id': _party_id} try: response = federated_api(job_id=_job_id, method='POST', endpoint='/{}/model/load/do'.format(API_VERSION), src_party_id=initiator_party_id, dest_party_id=_party_id, src_role = initiator_role, json_body=request_config, work_mode=request_config['job_parameters']['work_mode']) load_status_info[role_name][_party_id] = response['retcode'] load_status_info['detail'][role_name] = {} detail = {_party_id: {}} detail[_party_id]['retcode'] = response['retcode'] detail[_party_id]['retmsg'] = response['retmsg'] load_status_info['detail'][role_name].update(detail) if response['retcode']: load_status = False load_status_msg = 'failed' except Exception as e: stat_logger.exception(e) load_status = False load_status_msg = 'failed' load_status_info[role_name][_party_id] = 100 return get_json_result(job_id=_job_id, retcode=(0 if load_status else 101), retmsg=load_status_msg, data=load_status_info) @manager.route('/load/do', methods=['POST']) def do_load_model(): request_data = request.json request_data["servings"] = ServiceUtils.get("servings", []) retcode, retmsg = publish_model.load_model(config_data=request_data) return get_json_result(retcode=retcode, retmsg=retmsg) @manager.route('/bind', methods=['POST']) def bind_model_service(): request_config = request.json if not request_config.get('servings'): # get my party all servings request_config['servings'] = ServiceUtils.get("servings", []) service_id = 
request_config.get('service_id') if not service_id: return get_json_result(retcode=101, retmsg='no service id') bind_status, retmsg = publish_model.bind_model_service(config_data=request_config) return get_json_result(retcode=bind_status, retmsg='service id is {}'.format(service_id) if not retmsg else retmsg) @manager.route('/transfer', methods=['post']) def transfer_model(): model_data = publish_model.download_model(request.json) return get_json_result(retcode=0, retmsg="success", data=model_data) @manager.route('/<model_operation>', methods=['post', 'get']) def operate_model(model_operation): request_config = request.json or request.form.to_dict() job_id = generate_job_id() if model_operation not in [ModelOperation.STORE, ModelOperation.RESTORE, ModelOperation.EXPORT, ModelOperation.IMPORT]: raise Exception('Can not support this operating now: {}'.format(model_operation)) required_arguments = ["model_id", "model_version", "role", "party_id"] check_config(request_config, required_arguments=required_arguments) request_config["model_id"] = gen_party_model_id(model_id=request_config["model_id"], role=request_config["role"], party_id=request_config["party_id"]) if model_operation in [ModelOperation.EXPORT, ModelOperation.IMPORT]: if model_operation == ModelOperation.IMPORT: file = request.files.get('file') file_path = os.path.join(TEMP_DIRECTORY, file.filename) try: os.makedirs(os.path.dirname(file_path), exist_ok=True) file.save(file_path) except Exception as e: shutil.rmtree(file_path) raise e request_config['file'] = file_path model = pipelined_model.PipelinedModel(model_id=request_config["model_id"], model_version=request_config["model_version"]) model.unpack_model(file_path) return get_json_result() else: model = pipelined_model.PipelinedModel(model_id=request_config["model_id"], model_version=request_config["model_version"]) archive_file_path = model.packaging_model() return send_file(archive_file_path, attachment_filename=os.path.basename(archive_file_path), as_attachment=True) else: data = {} job_dsl, job_runtime_conf = gen_model_operation_job_config(request_config, model_operation) job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = JobController.submit_job( {'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}, job_id=job_id) data.update({'job_dsl_path': job_dsl_path, 'job_runtime_conf_path': job_runtime_conf_path, 'board_url': board_url, 'logs_directory': logs_directory}) return get_json_result(job_id=job_id, data=data) def gen_model_operation_job_config(config_data: dict, model_operation: ModelOperation): job_runtime_conf = runtime_conf_basic(if_local=True) initiator_role = "local" job_dsl = { "components": {} } if model_operation in [ModelOperation.STORE, ModelOperation.RESTORE]: component_name = "{}_0".format(model_operation) component_parameters = dict() component_parameters["model_id"] = [config_data["model_id"]] component_parameters["model_version"] = [config_data["model_version"]] component_parameters["store_address"] = [MODEL_STORE_ADDRESS] component_parameters["force_update"] = [config_data.get("force_update", False)] job_runtime_conf["role_parameters"][initiator_role] = {component_name: component_parameters} job_dsl["components"][component_name] = { "module": "Model{}".format(model_operation.capitalize()) } else: raise Exception("Can not support this model operation: {}".format(model_operation)) return job_dsl, job_runtime_conf
[ "fate_flow.utils.service_utils.ServiceUtils.get", "fate_flow.utils.api_utils.get_json_result", "fate_flow.manager.model_manager.publish_model.bind_model_service", "fate_flow.settings.stat_logger.exception", "fate_flow.driver.job_controller.JobController.submit_job", "fate_flow.manager.model_manager.publish_model.download_model", "fate_flow.manager.model_manager.pipelined_model.PipelinedModel", "fate_flow.utils.model_utils.gen_party_model_id", "fate_flow.utils.job_utils.runtime_conf_basic", "fate_flow.manager.model_manager.publish_model.load_model", "fate_flow.utils.detect_utils.check_config", "fate_flow.manager.model_manager.publish_model.generate_publish_model_info", "fate_flow.utils.job_utils.generate_job_id" ]
[((1337, 1352), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1342, 1352), False, 'from flask import Flask, request, send_file\n'), ((1416, 1440), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1437, 1440), False, 'from fate_flow.settings import stat_logger, API_VERSION, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((1606, 1623), 'fate_flow.utils.job_utils.generate_job_id', 'generate_job_id', ([], {}), '()\n', (1621, 1623), False, 'from fate_flow.utils.job_utils import generate_job_id, runtime_conf_basic\n'), ((1750, 1807), 'fate_flow.manager.model_manager.publish_model.generate_publish_model_info', 'publish_model.generate_publish_model_info', (['request_config'], {}), '(request_config)\n', (1791, 1807), False, 'from fate_flow.manager.model_manager import publish_model\n'), ((3570, 3688), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': '_job_id', 'retcode': '(0 if load_status else 101)', 'retmsg': 'load_status_msg', 'data': 'load_status_info'}), '(job_id=_job_id, retcode=0 if load_status else 101, retmsg=\n load_status_msg, data=load_status_info)\n', (3585, 3688), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((3844, 3876), 'fate_flow.utils.service_utils.ServiceUtils.get', 'ServiceUtils.get', (['"""servings"""', '[]'], {}), "('servings', [])\n", (3860, 3876), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((3899, 3949), 'fate_flow.manager.model_manager.publish_model.load_model', 'publish_model.load_model', ([], {'config_data': 'request_data'}), '(config_data=request_data)\n', (3923, 3949), False, 'from fate_flow.manager.model_manager import publish_model\n'), ((3961, 4008), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'retcode', 'retmsg': 'retmsg'}), '(retcode=retcode, retmsg=retmsg)\n', (3976, 4008), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((4429, 4489), 'fate_flow.manager.model_manager.publish_model.bind_model_service', 'publish_model.bind_model_service', ([], {'config_data': 'request_config'}), '(config_data=request_config)\n', (4461, 4489), False, 'from fate_flow.manager.model_manager import publish_model\n'), ((4697, 4739), 'fate_flow.manager.model_manager.publish_model.download_model', 'publish_model.download_model', (['request.json'], {}), '(request.json)\n', (4725, 4739), False, 'from fate_flow.manager.model_manager import publish_model\n'), ((4751, 4812), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'model_data'}), "(retcode=0, retmsg='success', data=model_data)\n", (4766, 4812), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((4986, 5003), 'fate_flow.utils.job_utils.generate_job_id', 'generate_job_id', ([], {}), '()\n', (5001, 5003), False, 'from fate_flow.utils.job_utils import generate_job_id, runtime_conf_basic\n'), ((5297, 5364), 'fate_flow.utils.detect_utils.check_config', 'check_config', (['request_config'], {'required_arguments': 'required_arguments'}), '(request_config, required_arguments=required_arguments)\n', (5309, 5364), False, 'from fate_flow.utils.detect_utils import check_config\n'), ((5398, 5524), 'fate_flow.utils.model_utils.gen_party_model_id', 'gen_party_model_id', ([], {'model_id': "request_config['model_id']", 'role': "request_config['role']", 'party_id': "request_config['party_id']"}), "(model_id=request_config['model_id'], 
role=request_config\n ['role'], party_id=request_config['party_id'])\n", (5416, 5524), False, 'from fate_flow.utils.model_utils import gen_party_model_id\n'), ((7259, 7292), 'fate_flow.utils.job_utils.runtime_conf_basic', 'runtime_conf_basic', ([], {'if_local': '(True)'}), '(if_local=True)\n', (7277, 7292), False, 'from fate_flow.utils.job_utils import generate_job_id, runtime_conf_basic\n'), ((4229, 4261), 'fate_flow.utils.service_utils.ServiceUtils.get', 'ServiceUtils.get', (['"""servings"""', '[]'], {}), "('servings', [])\n", (4245, 4261), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((4350, 4402), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no service id"""'}), "(retcode=101, retmsg='no service id')\n", (4365, 4402), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((4950, 4972), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (4970, 4972), False, 'from flask import Flask, request, send_file\n'), ((6797, 6900), 'fate_flow.driver.job_controller.JobController.submit_job', 'JobController.submit_job', (["{'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}"], {'job_id': 'job_id'}), "({'job_dsl': job_dsl, 'job_runtime_conf':\n job_runtime_conf}, job_id=job_id)\n", (6821, 6900), False, 'from fate_flow.driver.job_controller import JobController\n'), ((7104, 7145), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': 'job_id', 'data': 'data'}), '(job_id=job_id, data=data)\n', (7119, 7145), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((5666, 5691), 'flask.request.files.get', 'request.files.get', (['"""file"""'], {}), "('file')\n", (5683, 5691), False, 'from flask import Flask, request, send_file\n'), ((5716, 5759), 'os.path.join', 'os.path.join', (['TEMP_DIRECTORY', 'file.filename'], {}), '(TEMP_DIRECTORY, file.filename)\n', (5728, 5759), False, 'import os\n'), ((6052, 6170), 'fate_flow.manager.model_manager.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': "request_config['model_id']", 'model_version': "request_config['model_version']"}), "(model_id=request_config['model_id'],\n model_version=request_config['model_version'])\n", (6082, 6170), False, 'from fate_flow.manager.model_manager import pipelined_model\n'), ((6228, 6245), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (6243, 6245), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api\n'), ((6280, 6398), 'fate_flow.manager.model_manager.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': "request_config['model_id']", 'model_version': "request_config['model_version']"}), "(model_id=request_config['model_id'],\n model_version=request_config['model_version'])\n", (6310, 6398), False, 'from fate_flow.manager.model_manager import pipelined_model\n'), ((3394, 3418), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (3415, 3418), False, 'from fate_flow.settings import stat_logger, API_VERSION, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((5805, 5831), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (5820, 5831), False, 'import os\n'), ((5936, 5960), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (5949, 5960), False, 'import shutil\n'), ((6519, 6554), 'os.path.basename', 'os.path.basename', (['archive_file_path'], {}), 
'(archive_file_path)\n', (6535, 6554), False, 'import os\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys from collections import defaultdict import math import numpy as np import logging from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.metrics import explained_variance_score from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error from sklearn.metrics import mean_squared_log_error from sklearn.metrics import median_absolute_error from sklearn.metrics import r2_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import roc_auc_score from sklearn.metrics import roc_curve from arch.api.utils import log_utils from fate_flow.entity.metric import Metric, MetricMeta from federatedml.param import EvaluateParam from federatedml.util import consts from federatedml.model_base import ModelBase LOGGER = log_utils.getLogger() class PerformanceRecorder(): """ This class record performance(single value metrics during the training process) """ def __init__(self): # all of them are single value metrics self.allowed_metric = [consts.AUC, consts.EXPLAINED_VARIANCE, consts.MEAN_ABSOLUTE_ERROR, consts.MEAN_SQUARED_ERROR, consts.MEAN_SQUARED_LOG_ERROR, consts.MEDIAN_ABSOLUTE_ERROR, consts.R2_SCORE, consts.ROOT_MEAN_SQUARED_ERROR, consts.PRECISION, consts.RECALL, consts.ACCURACY, consts.KS ] self.larger_is_better = [consts.AUC, consts.R2_SCORE, consts.PRECISION, consts.RECALL, consts.EXPLAINED_VARIANCE, consts.ACCURACY, consts.KS ] self.smaller_is_better = [consts.ROOT_MEAN_SQUARED_ERROR, consts.MEAN_ABSOLUTE_ERROR, consts.MEAN_SQUARED_ERROR, consts.MEAN_SQUARED_LOG_ERROR] self.cur_best_performance = {} self.no_improvement_round = {} # record no improvement round of all metrics def has_improved(self, val: float, metric: str, cur_best: dict): if metric not in cur_best: return True if metric in self.larger_is_better and val > cur_best[metric]: return True elif metric in self.smaller_is_better and val < cur_best[metric]: return True return False def update(self, eval_dict: dict): """ Parameters ---------- eval_dict dict, {metric_name:metric_val}, e.g. 
{'auc':0.99} Returns stop flag, if should stop return True, else False ------- """ if len(eval_dict) == 0: return for metric in eval_dict: if metric not in self.allowed_metric: continue if self.has_improved(eval_dict[metric], metric, self.cur_best_performance): self.cur_best_performance[metric] = eval_dict[metric] self.no_improvement_round[metric] = 0 else: self.no_improvement_round[metric] += 1 class Evaluation(ModelBase): def __init__(self): super().__init__() self.model_param = EvaluateParam() self.eval_results = defaultdict(list) self.save_single_value_metric_list = [consts.AUC, consts.EXPLAINED_VARIANCE, consts.MEAN_ABSOLUTE_ERROR, consts.MEAN_SQUARED_ERROR, consts.MEAN_SQUARED_LOG_ERROR, consts.MEDIAN_ABSOLUTE_ERROR, consts.R2_SCORE, consts.ROOT_MEAN_SQUARED_ERROR] self.save_curve_metric_list = [consts.KS, consts.ROC, consts.LIFT, consts.GAIN, consts.PRECISION, consts.RECALL, consts.ACCURACY] self.metrics = None self.round_num = 6 self.validate_metric = {} self.train_metric = {} def _init_model(self, model): self.model_param = model self.eval_type = self.model_param.eval_type self.pos_label = self.model_param.pos_label self.metrics = model.metrics def _run_data(self, data_sets=None, stage=None): if not self.need_run: return data = {} for data_key in data_sets: if data_sets[data_key].get("data", None): data[data_key] = data_sets[data_key]["data"] if stage == "fit": self.data_output = self.fit(data) else: LOGGER.warning("Evaluation has not transform, return") def split_data_with_type(self, data: list) -> dict: split_result = defaultdict(list) for value in data: mode = value[1][4] split_result[mode].append(value) return split_result def evaluate_metircs(self, mode: str, data: list) -> dict: labels = [] pred_scores = [] pred_labels = [] for d in data: labels.append(d[1][0]) pred_labels.append(d[1][1]) pred_scores.append(d[1][2]) if self.eval_type == consts.BINARY or self.eval_type == consts.REGRESSION: if self.pos_label and self.eval_type == consts.BINARY: new_labels = [] for label in labels: if self.pos_label == label: new_labels.append(1) else: new_labels.append(0) labels = new_labels pred_results = pred_scores else: pred_results = pred_labels eval_result = defaultdict(list) metrics = self.metrics for eval_metric in metrics: res = getattr(self, eval_metric)(labels, pred_results) if res is not None: try: if math.isinf(res): res = float(-9999999) LOGGER.info("res is inf, set to {}".format(res)) except: pass eval_result[eval_metric].append(mode) eval_result[eval_metric].append(res) return eval_result def fit(self, data, return_result=False): if len(data) <= 0: return self.eval_results.clear() for (key, eval_data) in data.items(): eval_data_local = list(eval_data.collect()) split_data_with_label = self.split_data_with_type(eval_data_local) for mode, data in split_data_with_label.items(): eval_result = self.evaluate_metircs(mode, data) self.eval_results[key].append(eval_result) return self.callback_metric_data(return_single_val_metrics=return_result) def __save_single_value(self, result, metric_name, metric_namespace, eval_name): self.tracker.log_metric_data(metric_namespace, metric_name, [Metric(eval_name, np.round(result, self.round_num))]) self.tracker.set_metric_meta(metric_namespace, metric_name, MetricMeta(name=metric_name, metric_type="EVALUATION_SUMMARY")) def __save_curve_data(self, x_axis_list, y_axis_list, metric_name, metric_namespace): points = [] for i, value in enumerate(x_axis_list): if isinstance(value, float): value = np.round(value, self.round_num) points.append((value, 
np.round(y_axis_list[i], self.round_num))) points.sort(key=lambda x: x[0]) metric_points = [Metric(point[0], point[1]) for point in points] self.tracker.log_metric_data(metric_namespace, metric_name, metric_points) def __save_curve_meta(self, metric_name, metric_namespace, metric_type, unit_name=None, ordinate_name=None, curve_name=None, best=None, pair_type=None, thresholds=None): extra_metas = {} metric_type = "_".join([metric_type, "EVALUATION"]) key_list = ["unit_name", "ordinate_name", "curve_name", "best", "pair_type", "thresholds"] for key in key_list: value = locals()[key] if value: if key == "thresholds": value = np.round(value, self.round_num).tolist() extra_metas[key] = value self.tracker.set_metric_meta(metric_namespace, metric_name, MetricMeta(name=metric_name, metric_type=metric_type, extra_metas=extra_metas)) def __filt_override_unit_ordinate_coordinate(self, x_sets, y_sets): max_y_dict = {} for idx, x_value in enumerate(x_sets): if x_value not in max_y_dict: max_y_dict[x_value] = {"max_y": y_sets[idx], "idx": idx} else: max_y = max_y_dict[x_value]["max_y"] if max_y < y_sets[idx]: max_y_dict[x_value] = {"max_y": y_sets[idx], "idx": idx} x = [] y = [] idx_list = [] for key, value in max_y_dict.items(): x.append(key) y.append(value["max_y"]) idx_list.append(value["idx"]) return x, y, idx_list def __save_roc(self, data_type, metric_name, metric_namespace, metric_res): fpr, tpr, thresholds, _ = metric_res # set roc edge value fpr.append(1.0) tpr.append(1.0) fpr, tpr, idx_list = self.__filt_override_unit_ordinate_coordinate(fpr, tpr) edge_idx = idx_list[-1] if edge_idx == len(thresholds): idx_list = idx_list[:-1] thresholds = [thresholds[idx] for idx in idx_list] self.__save_curve_data(fpr, tpr, metric_name, metric_namespace) self.__save_curve_meta(metric_name=metric_name, metric_namespace=metric_namespace, metric_type="ROC", unit_name="fpr", ordinate_name="tpr", curve_name=data_type, thresholds=thresholds) def callback_metric_data(self, return_single_val_metrics=False): """ Parameters ---------- return_single_val_metrics if True return single_val_metrics Returns None or return_result dict ------- """ collect_dict = {} LOGGER.debug('callback metric called') for (data_type, eval_res_list) in self.eval_results.items(): precision_recall = {} for eval_res in eval_res_list: for (metric, metric_res) in eval_res.items(): metric_namespace = metric_res[0] if metric_namespace == 'validate': collect_dict = self.validate_metric elif metric_namespace == 'train': collect_dict = self.train_metric metric_name = '_'.join([data_type, metric]) if metric in self.save_single_value_metric_list: self.__save_single_value(metric_res[1], metric_name=data_type, metric_namespace=metric_namespace , eval_name=metric) collect_dict[metric] = metric_res[1] elif metric == consts.KS: best_ks, fpr, tpr, thresholds, cuts = metric_res[1] self.__save_single_value(best_ks, metric_name=data_type, metric_namespace=metric_namespace, eval_name=metric) collect_dict[metric] = best_ks metric_name_fpr = '_'.join([metric_name, "fpr"]) curve_name_fpr = "_".join([data_type, "fpr"]) self.__save_curve_data(cuts, fpr, metric_name_fpr, metric_namespace) self.__save_curve_meta(metric_name=metric_name_fpr, metric_namespace=metric_namespace, metric_type=metric.upper(), unit_name="", curve_name=curve_name_fpr, pair_type=data_type, thresholds=thresholds) metric_name_tpr = '_'.join([metric_name, "tpr"]) curve_name_tpr = "_".join([data_type, "tpr"]) self.__save_curve_data(cuts, tpr, metric_name_tpr, metric_namespace) 
self.__save_curve_meta(metric_name_tpr, metric_namespace, metric.upper(), unit_name="", curve_name=curve_name_tpr, pair_type=data_type, thresholds=thresholds) elif metric == consts.ROC: self.__save_roc(data_type, metric_name, metric_namespace, metric_res[1]) elif metric in [consts.ACCURACY, consts.LIFT, consts.GAIN]: if self.eval_type == consts.MULTY and metric == consts.ACCURACY: self.__save_single_value(metric_res[1], metric_name=data_type, metric_namespace=metric_namespace, eval_name=metric) collect_dict[metric] = metric_res[1] continue score, cuts, thresholds = metric_res[1] if metric in [consts.LIFT, consts.GAIN]: score = [float(s[1]) for s in score] cuts = [float(c[1]) for c in cuts] cuts, score, idx_list = self.__filt_override_unit_ordinate_coordinate(cuts, score) thresholds = [thresholds[idx] for idx in idx_list] score.append(1.0) cuts.append(1.0) thresholds.append(0.0) self.__save_curve_data(cuts, score, metric_name, metric_namespace) self.__save_curve_meta(metric_name=metric_name, metric_namespace=metric_namespace, metric_type=metric.upper(), unit_name="", curve_name=data_type, thresholds=thresholds) elif metric in [consts.PRECISION, consts.RECALL]: precision_recall[metric] = metric_res if len(precision_recall) < 2: continue precision_res = precision_recall.get(consts.PRECISION) recall_res = precision_recall.get(consts.RECALL) if precision_res[0] != recall_res[0]: LOGGER.warning( "precision mode:{} is not equal to recall mode:{}".format(precision_res[0], recall_res[0])) continue metric_namespace = precision_res[0] metric_name_precision = '_'.join([data_type, "precision"]) metric_name_recall = '_'.join([data_type, "recall"]) pos_precision_score = precision_res[1][0] precision_cuts = precision_res[1][1] if len(precision_res[1]) >= 3: precision_thresholds = precision_res[1][2] else: precision_thresholds = None pos_recall_score = recall_res[1][0] recall_cuts = recall_res[1][1] if len(recall_res[1]) >= 3: recall_thresholds = recall_res[1][2] else: recall_thresholds = None precision_curve_name = data_type recall_curve_name = data_type if self.eval_type == consts.BINARY: pos_precision_score = [score[1] for score in pos_precision_score] pos_recall_score = [score[1] for score in pos_recall_score] pos_recall_score, pos_precision_score, idx_list = self.__filt_override_unit_ordinate_coordinate( pos_recall_score, pos_precision_score) precision_cuts = [precision_cuts[idx] for idx in idx_list] recall_cuts = [recall_cuts[idx] for idx in idx_list] edge_idx = idx_list[-1] if edge_idx == len(precision_thresholds) - 1: idx_list = idx_list[:-1] precision_thresholds = [precision_thresholds[idx] for idx in idx_list] recall_thresholds = [recall_thresholds[idx] for idx in idx_list] elif self.eval_type == consts.MULTY: average_precision = float(np.array(pos_precision_score).mean()) average_recall = float(np.array(pos_recall_score).mean()) self.__save_single_value(average_precision, metric_name=data_type, metric_namespace=metric_namespace, eval_name="precision") self.__save_single_value(average_recall, metric_name=data_type, metric_namespace=metric_namespace, eval_name="recall") collect_dict[consts.PRECISION] = average_precision collect_dict[consts.RECALL] = average_recall precision_curve_name = metric_name_precision recall_curve_name = metric_name_recall self.__save_curve_data(precision_cuts, pos_precision_score, metric_name_precision, metric_namespace) self.__save_curve_meta(metric_name_precision, metric_namespace, "_".join([consts.PRECISION.upper(), self.eval_type.upper()]), unit_name="", 
ordinate_name="Precision", curve_name=precision_curve_name, pair_type=data_type, thresholds=precision_thresholds) self.__save_curve_data(recall_cuts, pos_recall_score, metric_name_recall, metric_namespace) self.__save_curve_meta(metric_name_recall, metric_namespace, "_".join([consts.RECALL.upper(), self.eval_type.upper()]), unit_name="", ordinate_name="Recall", curve_name=recall_curve_name, pair_type=data_type, thresholds=recall_thresholds) else: LOGGER.warning("Unknown metric:{}".format(metric)) if return_single_val_metrics: if len(self.validate_metric) != 0: LOGGER.debug("return validate metric") LOGGER.debug('validate metric is {}'.format(self.validate_metric)) return self.validate_metric else: LOGGER.debug("validate metric is empty, return train metric") LOGGER.debug('train metric is {}'.format(self.train_metric)) return self.train_metric def __filt_threshold(self, thresholds, step): cuts = list(map(float, np.arange(0, 1, step))) size = len(list(thresholds)) thresholds.sort(reverse=True) index_list = [int(size * cut) for cut in cuts] new_thresholds = [thresholds[idx] for idx in index_list] return new_thresholds, cuts def auc(self, labels, pred_scores): """ Compute AUC for binary classification. Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float The AUC """ if self.eval_type == consts.BINARY: return roc_auc_score(labels, pred_scores) else: logging.warning("auc is just suppose Binary Classification! return None as results") return None def explained_variance(self, labels, pred_scores): """ Compute explain variance Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float The explain variance """ return explained_variance_score(labels, pred_scores) def mean_absolute_error(self, labels, pred_scores): """ Compute mean absolute error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float A non-negative floating point. """ return mean_absolute_error(labels, pred_scores) def mean_squared_error(self, labels, pred_scores): """ Compute mean square error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float A non-negative floating point value """ return mean_squared_error(labels, pred_scores) def mean_squared_log_error(self, labels, pred_scores): """ Compute mean squared logarithmic error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float A non-negative floating point value """ return mean_squared_log_error(labels, pred_scores) def median_absolute_error(self, labels, pred_scores): """ Compute median absolute error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. 
Returns ---------- float A positive floating point value """ return median_absolute_error(labels, pred_scores) def r2_score(self, labels, pred_scores): """ Compute R^2 (coefficient of determination) score Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float The R^2 score """ return r2_score(labels, pred_scores) def root_mean_squared_error(self, labels, pred_scores): """ Compute the root of mean square error Parameters ---------- labels: value list. The labels of data set. pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Return ---------- float A positive floating point value """ return np.sqrt(mean_squared_error(labels, pred_scores)) def roc(self, labels, pred_scores): if self.eval_type == consts.BINARY: fpr, tpr, thresholds = roc_curve(np.array(labels), np.array(pred_scores), drop_intermediate=1) fpr, tpr, thresholds = list(map(float, fpr)), list(map(float, tpr)), list(map(float, thresholds)) filt_thresholds, cuts = self.__filt_threshold(thresholds=thresholds, step=0.01) new_thresholds = [] new_tpr = [] new_fpr = [] for threshold in filt_thresholds: index = thresholds.index(threshold) new_tpr.append(tpr[index]) new_fpr.append(fpr[index]) new_thresholds.append(threshold) fpr = new_fpr tpr = new_tpr thresholds = new_thresholds return fpr, tpr, thresholds, cuts else: logging.warning("roc_curve is just suppose Binary Classification! return None as results") fpr, tpr, thresholds, cuts = None, None, None, None return fpr, tpr, thresholds, cuts def ks(self, labels, pred_scores): """ Compute Kolmogorov-Smirnov Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- max_ks_interval: float max value of each tpr - fpt fpr: """ score_label_list = [] for i, label in enumerate(labels): score_label_list.append((pred_scores[i], label)) score_label_list.sort(key=lambda x: x[0], reverse=True) cuts = [c / 100 for c in range(100)] data_size = len(pred_scores) indexs = [int(data_size * cut) for cut in cuts] score_threshold = [score_label_list[idx][0] for idx in indexs] fpr = [] tpr = [] ks = [] for i, index in enumerate(indexs): positive = 0 positive_recall = 0 negative = 0 false_positive = 0 for score_label in score_label_list: pre_score = score_label[0] label = score_label[1] if label == self.pos_label: positive += 1 if pre_score > score_threshold[i]: positive_recall += 1 if label == 0: negative += 1 if pre_score > score_threshold[i]: false_positive += 1 if positive == 0 or negative == 0: raise ValueError("all labels are positive or negative, please check your data!") _tpr = positive_recall / positive _fpr = false_positive / negative _ks = _tpr - _fpr tpr.append(_tpr) fpr.append(_fpr) ks.append(_ks) fpr.append(1.0) tpr.append(1.0) cuts.append(1.0) return max(ks), fpr, tpr, score_threshold, cuts def lift(self, labels, pred_scores): """ Compute lift of binary classification. Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. 
Returns ---------- float The lift """ if self.eval_type == consts.BINARY: thresholds = list(set(pred_scores)) thresholds, cuts = self.__filt_threshold(thresholds, 0.01) lift_operator = Lift() lift_y, lift_x = lift_operator.compute(labels, pred_scores, thresholds=thresholds) return lift_y, lift_x, thresholds else: logging.warning("lift is just suppose Binary Classification! return None as results") return None def gain(self, labels, pred_scores): """ Compute gain of binary classification. Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- float The gain """ if self.eval_type == consts.BINARY: thresholds = list(set(pred_scores)) thresholds, cuts = self.__filt_threshold(thresholds, 0.01) gain_operator = Gain() gain_x, gain_y = gain_operator.compute(labels, pred_scores, thresholds=thresholds) return gain_y, gain_x, thresholds else: logging.warning("gain is just suppose Binary Classification! return None as results") return None def precision(self, labels, pred_scores): """ Compute the precision Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- dict The key is threshold and the value is another dic, which key is label in parameter labels, and value is the label's precision. """ if self.eval_type == consts.BINARY: thresholds = list(set(pred_scores)) thresholds, cuts = self.__filt_threshold(thresholds, 0.01) # set for recall edge value thresholds.append(min(thresholds) - 0.001) cuts.append(1) precision_operator = BiClassPrecision() precision_res, thresholds = precision_operator.compute(labels, pred_scores, thresholds) return precision_res, cuts, thresholds elif self.eval_type == consts.MULTY: precision_operator = MultiClassPrecision() return precision_operator.compute(labels, pred_scores) else: logging.warning("error:can not find classification type:{}".format(self.eval_type)) def recall(self, labels, pred_scores): """ Compute the recall Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. Returns ---------- dict The key is threshold and the value is another dic, which key is label in parameter labels, and value is the label's recall. """ if self.eval_type == consts.BINARY: thresholds = list(set(pred_scores)) thresholds, cuts = self.__filt_threshold(thresholds, 0.01) # set for recall edge value thresholds.append(min(thresholds) - 0.001) cuts.append(1) recall_operator = BiClassRecall() recall_res, thresholds = recall_operator.compute(labels, pred_scores, thresholds) return recall_res, cuts, thresholds elif self.eval_type == consts.MULTY: recall_operator = MultiClassRecall() return recall_operator.compute(labels, pred_scores) else: logging.warning("error:can not find classification type:{}".format(self.eval_type)) def accuracy(self, labels, pred_scores, normalize=True): """ Compute the accuracy Parameters ---------- labels: value list. The labels of data set. pred_scores: pred_scores: value list. The predict results of model. It should be corresponding to labels each data. normalize: bool. 
If true, return the fraction of correctly classified samples, else returns the number of correctly classified samples Returns ---------- dict the key is threshold and the value is the accuracy of this threshold. """ if self.eval_type == consts.BINARY: thresholds = list(set(pred_scores)) thresholds, cuts = self.__filt_threshold(thresholds, 0.01) acc_operator = BiClassAccuracy() acc_res, thresholds = acc_operator.compute(labels, pred_scores, thresholds, normalize) return acc_res, cuts, thresholds elif self.eval_type == consts.MULTY: acc_operator = MultiClassAccuracy() return acc_operator.compute(labels, pred_scores, normalize) else: logging.warning("error:can not find classification type:".format(self.eval_type)) @staticmethod def extract_data(data: dict): return data class Lift(object): """ Compute lift """ def __predict_value_to_one_hot(self, pred_value, threshold): one_hot = [] for value in pred_value: if value > threshold: one_hot.append(1) else: one_hot.append(0) return one_hot def __compute_lift(self, labels, pred_scores_one_hot, pos_label="1"): tn, fp, fn, tp = confusion_matrix(labels, pred_scores_one_hot).ravel() if pos_label == '0': tp, tn = tn, tp fp, fn = fn, fp labels_num = len(labels) if labels_num == 0: lift_x = 1 denominator = 1 else: lift_x = (tp + fp) / labels_num denominator = (tp + fn) / labels_num if tp + fp == 0: numerator = 1 else: numerator = tp / (tp + fp) if denominator == 0: lift_y = sys.float_info.max else: lift_y = numerator / denominator return lift_x, lift_y def compute(self, labels, pred_scores, thresholds=None): lifts_x = [] lifts_y = [] for threshold in thresholds: pred_scores_one_hot = self.__predict_value_to_one_hot(pred_scores, threshold) label_type = ['0', '1'] lift_x_type = [] lift_y_type = [] for lt in label_type: lift_x, lift_y = self.__compute_lift(labels, pred_scores_one_hot, pos_label=lt) lift_x_type.append(lift_x) lift_y_type.append(lift_y) lifts_x.append(lift_x_type) lifts_y.append(lift_y_type) return lifts_y, lifts_x class Gain(object): """ Compute Gain """ def __init__(self): pass def __predict_value_to_one_hot(self, pred_value, threshold): one_hot = [] for value in pred_value: if value > threshold: one_hot.append(1) else: one_hot.append(0) return one_hot def __compute_gain(self, label, pred_scores_one_hot, pos_label="1"): tn, fp, fn, tp = confusion_matrix(label, pred_scores_one_hot).ravel() if pos_label == '0': tp, tn = tn, tp fp, fn = fn, fp num_label = len(label) if num_label == 0: gain_x = 1 else: gain_x = float((tp + fp) / num_label) num_positives = tp + fn if num_positives == 0: gain_y = 1 else: gain_y = float(tp / num_positives) return gain_x, gain_y def compute(self, labels, pred_scores, thresholds=None): gains_x = [] gains_y = [] for threshold in thresholds: pred_scores_one_hot = self.__predict_value_to_one_hot(pred_scores, threshold) label_type = ['0', '1'] gain_x_type = [] gain_y_type = [] for lt in label_type: gain_x, gain_y = self.__compute_gain(labels, pred_scores_one_hot, pos_label=lt) gain_x_type.append(gain_x) gain_y_type.append(gain_y) gains_x.append(gain_x_type) gains_y.append(gain_y_type) return gains_x, gains_y class BiClassPrecision(object): """ Compute binary classification precision """ def __init__(self): self.total_positives = 0 def __predict_value_to_one_hot(self, pred_value, threshold): one_hot = [] self.total_positives = 0 for value in pred_value: if value > threshold: one_hot.append(1) self.total_positives += 1 else: one_hot.append(0) return one_hot def compute(self, labels, pred_scores, thresholds): scores = [] for 
threshold in thresholds: pred_scores_one_hot = self.__predict_value_to_one_hot(pred_scores, threshold) score = list(map(float, precision_score(labels, pred_scores_one_hot, average=None))) if self.total_positives == 0: score[1] = 1.0 scores.append(score) return scores, thresholds class MultiClassPrecision(object): """ Compute multi-classification precision """ def compute(self, labels, pred_scores): all_labels = list(set(labels).union(set(pred_scores))) all_labels.sort() return precision_score(labels, pred_scores, average=None), all_labels class BiClassRecall(object): """ Compute binary classification recall """ def __predict_value_to_one_hot(self, pred_value, threshold): one_hot = [] for value in pred_value: if value > threshold: one_hot.append(1) else: one_hot.append(0) return one_hot def compute(self, labels, pred_scores, thresholds): scores = [] for threshold in thresholds: pred_scores_one_hot = self.__predict_value_to_one_hot(pred_scores, threshold) score = list(map(float, recall_score(labels, pred_scores_one_hot, average=None))) scores.append(score) return scores, thresholds class MultiClassRecall(object): """ Compute multi-classification recall """ def compute(self, labels, pred_scores): all_labels = list(set(labels).union(set(pred_scores))) all_labels.sort() return recall_score(labels, pred_scores, average=None), all_labels class BiClassAccuracy(object): """ Compute binary classification accuracy """ def __predict_value_to_one_hot(self, pred_value, threshold): one_hot = [] for value in pred_value: if value > threshold: one_hot.append(1) else: one_hot.append(0) return one_hot def compute(self, labels, pred_scores, thresholds, normalize=True): scores = [] for threshold in thresholds: pred_scores_one_hot = self.__predict_value_to_one_hot(pred_scores, threshold) score = accuracy_score(labels, pred_scores_one_hot, normalize) scores.append(score) return scores, thresholds class MultiClassAccuracy(object): """ Compute multi-classification accuracy """ def compute(self, labels, pred_scores, normalize=True): return accuracy_score(labels, pred_scores, normalize) class IC(object): """ Compute Information Criterion with a given dTable and loss When k = 2, result is genuine AIC; when k = log(n), results is BIC, also called SBC, SIC, SBIC. """ def compute(self, k, n, dfe, loss): aic_score = k * dfe + 2 * n * loss return aic_score class IC_Approx(object): """ Compute Information Criterion value with a given dTable and loss When k = 2, result is genuine AIC; when k = log(n), results is BIC, also called SBC, SIC, SBIC. Note that this formula for linear regression dismisses the constant term n * np.log(2 * np.pi) for sake of simplicity, so the absolute value of result will be small. """ def compute(self, k, n, dfe, loss): aic_score = k * dfe + n * np.log(loss * 2) return aic_score
[ "fate_flow.entity.metric.Metric", "fate_flow.entity.metric.MetricMeta" ]
[((1478, 1499), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (1497, 1499), False, 'from arch.api.utils import log_utils\n'), ((4293, 4308), 'federatedml.param.EvaluateParam', 'EvaluateParam', ([], {}), '()\n', (4306, 4308), False, 'from federatedml.param import EvaluateParam\n'), ((4337, 4354), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4348, 4354), False, 'from collections import defaultdict\n'), ((5944, 5961), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5955, 5961), False, 'from collections import defaultdict\n'), ((6903, 6920), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6914, 6920), False, 'from collections import defaultdict\n'), ((22396, 22441), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['labels', 'pred_scores'], {}), '(labels, pred_scores)\n', (22420, 22441), False, 'from sklearn.metrics import explained_variance_score\n'), ((22867, 22907), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['labels', 'pred_scores'], {}), '(labels, pred_scores)\n', (22886, 22907), False, 'from sklearn.metrics import mean_absolute_error\n'), ((23335, 23374), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['labels', 'pred_scores'], {}), '(labels, pred_scores)\n', (23353, 23374), False, 'from sklearn.metrics import mean_squared_error\n'), ((23819, 23862), 'sklearn.metrics.mean_squared_log_error', 'mean_squared_log_error', (['labels', 'pred_scores'], {}), '(labels, pred_scores)\n', (23841, 23862), False, 'from sklearn.metrics import mean_squared_log_error\n'), ((24293, 24335), 'sklearn.metrics.median_absolute_error', 'median_absolute_error', (['labels', 'pred_scores'], {}), '(labels, pred_scores)\n', (24314, 24335), False, 'from sklearn.metrics import median_absolute_error\n'), ((24754, 24783), 'sklearn.metrics.r2_score', 'r2_score', (['labels', 'pred_scores'], {}), '(labels, pred_scores)\n', (24762, 24783), False, 'from sklearn.metrics import r2_score\n'), ((40315, 40361), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'pred_scores', 'normalize'], {}), '(labels, pred_scores, normalize)\n', (40329, 40361), False, 'from sklearn.metrics import accuracy_score\n'), ((8381, 8443), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': 'metric_name', 'metric_type': '"""EVALUATION_SUMMARY"""'}), "(name=metric_name, metric_type='EVALUATION_SUMMARY')\n", (8391, 8443), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n'), ((8844, 8870), 'fate_flow.entity.metric.Metric', 'Metric', (['point[0]', 'point[1]'], {}), '(point[0], point[1])\n', (8850, 8870), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n'), ((9702, 9780), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': 'metric_name', 'metric_type': 'metric_type', 'extra_metas': 'extra_metas'}), '(name=metric_name, metric_type=metric_type, extra_metas=extra_metas)\n', (9712, 9780), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n'), ((21814, 21848), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'pred_scores'], {}), '(labels, pred_scores)\n', (21827, 21848), False, 'from sklearn.metrics import roc_auc_score\n'), ((21875, 21964), 'logging.warning', 'logging.warning', (['"""auc is just suppose Binary Classification! return None as results"""'], {}), "(\n 'auc is just suppose Binary Classification! 
return None as results')\n", (21890, 21964), False, 'import logging\n'), ((25231, 25270), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['labels', 'pred_scores'], {}), '(labels, pred_scores)\n', (25249, 25270), False, 'from sklearn.metrics import mean_squared_error\n'), ((26147, 26242), 'logging.warning', 'logging.warning', (['"""roc_curve is just suppose Binary Classification! return None as results"""'], {}), "(\n 'roc_curve is just suppose Binary Classification! return None as results')\n", (26162, 26242), False, 'import logging\n'), ((29104, 29194), 'logging.warning', 'logging.warning', (['"""lift is just suppose Binary Classification! return None as results"""'], {}), "(\n 'lift is just suppose Binary Classification! return None as results')\n", (29119, 29194), False, 'import logging\n'), ((29977, 30067), 'logging.warning', 'logging.warning', (['"""gain is just suppose Binary Classification! return None as results"""'], {}), "(\n 'gain is just suppose Binary Classification! return None as results')\n", (29992, 30067), False, 'import logging\n'), ((38345, 38395), 'sklearn.metrics.precision_score', 'precision_score', (['labels', 'pred_scores'], {'average': 'None'}), '(labels, pred_scores, average=None)\n', (38360, 38395), False, 'from sklearn.metrics import precision_score\n'), ((39366, 39413), 'sklearn.metrics.recall_score', 'recall_score', (['labels', 'pred_scores'], {'average': 'None'}), '(labels, pred_scores, average=None)\n', (39378, 39413), False, 'from sklearn.metrics import recall_score\n'), ((40022, 40076), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'pred_scores_one_hot', 'normalize'], {}), '(labels, pred_scores_one_hot, normalize)\n', (40036, 40076), False, 'from sklearn.metrics import accuracy_score\n'), ((8669, 8700), 'numpy.round', 'np.round', (['value', 'self.round_num'], {}), '(value, self.round_num)\n', (8677, 8700), True, 'import numpy as np\n'), ((21111, 21132), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'step'], {}), '(0, 1, step)\n', (21120, 21132), True, 'import numpy as np\n'), ((25402, 25418), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (25410, 25418), True, 'import numpy as np\n'), ((25420, 25441), 'numpy.array', 'np.array', (['pred_scores'], {}), '(pred_scores)\n', (25428, 25441), True, 'import numpy as np\n'), ((34312, 34357), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels', 'pred_scores_one_hot'], {}), '(labels, pred_scores_one_hot)\n', (34328, 34357), False, 'from sklearn.metrics import confusion_matrix\n'), ((36048, 36092), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['label', 'pred_scores_one_hot'], {}), '(label, pred_scores_one_hot)\n', (36064, 36092), False, 'from sklearn.metrics import confusion_matrix\n'), ((41155, 41171), 'numpy.log', 'np.log', (['(loss * 2)'], {}), '(loss * 2)\n', (41161, 41171), True, 'import numpy as np\n'), ((7133, 7148), 'math.isinf', 'math.isinf', (['res'], {}), '(res)\n', (7143, 7148), False, 'import math\n'), ((8240, 8272), 'numpy.round', 'np.round', (['result', 'self.round_num'], {}), '(result, self.round_num)\n', (8248, 8272), True, 'import numpy as np\n'), ((8735, 8775), 'numpy.round', 'np.round', (['y_axis_list[i]', 'self.round_num'], {}), '(y_axis_list[i], self.round_num)\n', (8743, 8775), True, 'import numpy as np\n'), ((37898, 37956), 'sklearn.metrics.precision_score', 'precision_score', (['labels', 'pred_scores_one_hot'], {'average': 'None'}), '(labels, pred_scores_one_hot, average=None)\n', (37913, 37956), False, 'from sklearn.metrics 
import precision_score\n'), ((39001, 39056), 'sklearn.metrics.recall_score', 'recall_score', (['labels', 'pred_scores_one_hot'], {'average': 'None'}), '(labels, pred_scores_one_hot, average=None)\n', (39013, 39056), False, 'from sklearn.metrics import recall_score\n'), ((9514, 9545), 'numpy.round', 'np.round', (['value', 'self.round_num'], {}), '(value, self.round_num)\n', (9522, 9545), True, 'import numpy as np\n'), ((19604, 19628), 'federatedml.util.consts.PRECISION.upper', 'consts.PRECISION.upper', ([], {}), '()\n', (19626, 19628), False, 'from federatedml.util import consts\n'), ((20184, 20205), 'federatedml.util.consts.RECALL.upper', 'consts.RECALL.upper', ([], {}), '()\n', (20203, 20205), False, 'from federatedml.util import consts\n'), ((18357, 18386), 'numpy.array', 'np.array', (['pos_precision_score'], {}), '(pos_precision_score)\n', (18365, 18386), True, 'import numpy as np\n'), ((18446, 18472), 'numpy.array', 'np.array', (['pos_recall_score'], {}), '(pos_recall_score)\n', (18454, 18472), True, 'import numpy as np\n')]
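The rows above pair each detected API call with character offsets into the corresponding "code" field. As a sanity check — a minimal sketch whose assumed field order (call span, qualified API name, call name as written, parsed args/kwargs, argument text, argument span, a boolean flag, and the import line) is inferred from the rows themselves rather than from a published schema — one row can be matched back to its code string:

# Hedged sketch: verify that an extract_api row points at a real call site.
# The tuple layout is inferred from the rows above and may vary per dataset version.
def check_row(code: str, row: tuple) -> bool:
    (start, end), _qualified_name, call_name, _parsed_args, _arg_text, _arg_span, _flag, _import_line = row
    # the first span should cover "<call_name>(...)" exactly as written in the source
    return code[start:end].startswith(call_name)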
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_flow.utils.log_utils import getLogger from fate_flow.components._base import ( BaseParam, ComponentBase, ComponentMeta, ComponentInputProtocol, ) from fate_flow.entity.types import ModelStorage from fate_flow.pipelined_model import mysql_model_storage, redis_model_storage, tencent_cos_model_storage LOGGER = getLogger() ModelStorageClassMap = { ModelStorage.REDIS.value: redis_model_storage.RedisModelStorage, ModelStorage.MYSQL.value: mysql_model_storage.MysqlModelStorage, ModelStorage.TENCENT_COS.value: tencent_cos_model_storage.TencentCOSModelStorage, } def get_model_storage(parameters): model_storage = parameters.get("store_address", {}).get("storage") if not model_storage: raise TypeError(f"'store_address' is empty.") if model_storage not in ModelStorageClassMap: raise ValueError(f"Model storage '{model_storage}' is not supported.") return ModelStorageClassMap[model_storage]() model_store_cpn_meta = ComponentMeta("ModelStore") @model_store_cpn_meta.bind_param class ModelStoreParam(BaseParam): def __init__( self, model_id: str = None, model_version: str = None, store_address: dict = None, force_update: bool = False, ): self.model_id = model_id self.model_version = model_version self.store_address = store_address self.force_update = force_update def check(self): return True @model_store_cpn_meta.bind_runner.on_local class ModelStore(ComponentBase): def _run(self, input_cpn: ComponentInputProtocol): parameters = input_cpn.parameters model_storage = get_model_storage(parameters) model_storage.store(parameters["model_id"], parameters["model_version"], parameters["store_address"], parameters.get("force_update", False)) model_restore_cpn_meta = ComponentMeta("ModelRestore") @model_restore_cpn_meta.bind_param class ModelRestoreParam(BaseParam): def __init__( self, model_id: str = None, model_version: str = None, store_address: dict = None, ): self.model_id = model_id self.model_version = model_version self.store_address = store_address def check(self): return True @model_restore_cpn_meta.bind_runner.on_local class ModelRestore(ComponentBase): def _run(self, input_cpn: ComponentInputProtocol): parameters = input_cpn.parameters model_storage = get_model_storage(parameters) model_storage.restore(parameters["model_id"], parameters["model_version"], parameters["store_address"])
[ "fate_flow.utils.log_utils.getLogger", "fate_flow.components._base.ComponentMeta" ]
[((953, 964), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (962, 964), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((1609, 1636), 'fate_flow.components._base.ComponentMeta', 'ComponentMeta', (['"""ModelStore"""'], {}), "('ModelStore')\n", (1622, 1636), False, 'from fate_flow.components._base import BaseParam, ComponentBase, ComponentMeta, ComponentInputProtocol\n'), ((2517, 2546), 'fate_flow.components._base.ComponentMeta', 'ComponentMeta', (['"""ModelRestore"""'], {}), "('ModelRestore')\n", (2530, 2546), False, 'from fate_flow.components._base import BaseParam, ComponentBase, ComponentMeta, ComponentInputProtocol\n')]
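The ModelStore/ModelRestore component above resolves its storage backend purely from job parameters. A minimal usage sketch, assuming the module above is importable; the identifiers and any extra store_address fields are hypothetical placeholders, and only the "storage" key is read by get_model_storage itself:

# Hedged sketch: resolve and drive a model storage backend, mirroring ModelStore._run.
parameters = {
    "model_id": "example_model_id",              # hypothetical identifier
    "model_version": "202201011200000000000",    # hypothetical identifier
    "store_address": {
        "storage": ModelStorage.MYSQL.value,     # must be a registered ModelStorage value
        # backend-specific connection fields would go here (placeholders only)
    },
}
storage = get_model_storage(parameters)
storage.store(parameters["model_id"], parameters["model_version"],
              parameters["store_address"], parameters.get("force_update", False))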
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from abc import ABC import abc from federatedml.ensemble.boosting.boosting_core import Boosting from federatedml.param.boosting_param import HeteroBoostingParam from federatedml.secureprotol import IterativeAffineEncrypt from federatedml.secureprotol import PaillierEncrypt from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator from federatedml.util import consts from federatedml.feature.binning.quantile_binning import QuantileBinning from federatedml.util.classify_label_checker import ClassifyLabelChecker from federatedml.util.classify_label_checker import RegressionLabelChecker from federatedml.util import LOGGER from fate_flow.entity.metric import Metric from fate_flow.entity.metric import MetricMeta from federatedml.transfer_variable.transfer_class.hetero_boosting_transfer_variable import \ HeteroBoostingTransferVariable from federatedml.util.io_check import assert_io_num_rows_equal class HeteroBoosting(Boosting, ABC): def __init__(self): super(HeteroBoosting, self).__init__() self.encrypter = None self.encrypted_calculator = None self.early_stopping_rounds = None self.binning_class = QuantileBinning self.model_param = HeteroBoostingParam() self.transfer_variable = HeteroBoostingTransferVariable() self.mode = consts.HETERO def _init_model(self, param: HeteroBoostingParam): LOGGER.debug('in hetero boosting, objective param is {}'.format(param.objective_param.objective)) super(HeteroBoosting, self)._init_model(param) self.encrypt_param = param.encrypt_param self.re_encrypt_rate = param.encrypted_mode_calculator_param self.calculated_mode = param.encrypted_mode_calculator_param.mode self.re_encrypted_rate = param.encrypted_mode_calculator_param.re_encrypted_rate self.early_stopping_rounds = param.early_stopping_rounds self.use_first_metric_only = param.use_first_metric_only def generate_encrypter(self): LOGGER.info("generate encrypter") if self.encrypt_param.method.lower() == consts.PAILLIER.lower(): self.encrypter = PaillierEncrypt() self.encrypter.generate_key(self.encrypt_param.key_length) elif self.encrypt_param.method.lower() == consts.ITERATIVEAFFINE.lower(): self.encrypter = IterativeAffineEncrypt() self.encrypter.generate_key(key_size=self.encrypt_param.key_length, randomized=False) elif self.encrypt_param.method.lower() == consts.RANDOM_ITERATIVEAFFINE.lower(): self.encrypter = IterativeAffineEncrypt() self.encrypter.generate_key(key_size=self.encrypt_param.key_length, randomized=True) else: raise NotImplementedError("encrypt method not supported yes!!!") self.encrypted_calculator = EncryptModeCalculator(self.encrypter, self.calculated_mode, self.re_encrypted_rate) def check_label(self): LOGGER.info("check label") classes_ = [] num_classes, booster_dim = 1, 1 if self.task_type == consts.CLASSIFICATION: num_classes, classes_ = ClassifyLabelChecker.validate_label(self.data_bin) if num_classes > 2: booster_dim = num_classes 
range_from_zero = True for _class in classes_: try: if 0 <= _class < len(classes_) and isinstance(_class, int): continue else: range_from_zero = False break except: range_from_zero = False classes_ = sorted(classes_) if not range_from_zero: class_mapping = dict(zip(classes_, range(num_classes))) self.y = self.y.mapValues(lambda _class: class_mapping[_class]) else: RegressionLabelChecker.validate_label(self.data_bin) return classes_, num_classes, booster_dim class HeteroBoostingGuest(HeteroBoosting, ABC): def __init__(self): super(HeteroBoostingGuest, self).__init__() def _init_model(self, param): super(HeteroBoostingGuest, self)._init_model(param) def sync_booster_dim(self): LOGGER.info("sync booster_dim to host") self.transfer_variable.booster_dim.remote(self.booster_dim, role=consts.HOST, idx=-1) def sync_stop_flag(self, stop_flag, num_round): LOGGER.info("sync stop flag to host, boosting_core round is {}".format(num_round)) self.transfer_variable.stop_flag.remote(stop_flag, role=consts.HOST, idx=-1, suffix=(num_round,)) def sync_predict_round(self, predict_round,): LOGGER.info("sync predict start round {}".format(predict_round)) self.transfer_variable.predict_start_round.remote(predict_round, role=consts.HOST, idx=-1,) def fit(self, data_inst, validate_data=None): LOGGER.info('begin to fit a hetero boosting model, model is {}'.format(self.model_name)) self.data_bin, self.bin_split_points, self.bin_sparse_points = self.prepare_data(data_inst) self.y = self.get_label(self.data_bin) self.classes_, self.num_classes, self.booster_dim = self.check_label() LOGGER.info('class index is {}'.format(self.classes_)) self.loss = self.get_loss_function() self.sync_booster_dim() self.y_hat, self.init_score = self.get_init_score(self.y, self.num_classes) self.generate_encrypter() self.callback_meta("loss", "train", MetricMeta(name="train", metric_type="LOSS", extra_metas={"unit_name": "iters"})) self.validation_strategy = self.init_validation_strategy(data_inst, validate_data) for epoch_idx in range(self.boosting_round): LOGGER.info('cur epoch idx is {}'.format(epoch_idx)) for class_idx in range(self.booster_dim): # fit a booster model = self.fit_a_booster(epoch_idx, class_idx) booster_meta, booster_param = model.get_model() if booster_meta is not None and booster_param is not None: self.booster_meta = booster_meta self.boosting_model_list.append(booster_param) # update predict score cur_sample_weights = model.get_sample_weights() self.y_hat = self.get_new_predict_score(self.y_hat, cur_sample_weights, dim=class_idx) # compute loss loss = self.compute_loss(self.y_hat, self.y) self.history_loss.append(loss) LOGGER.info("round {} loss is {}".format(epoch_idx, loss)) self.callback_metric("loss", "train", [Metric(epoch_idx, loss)]) if self.validation_strategy: self.validation_strategy.validate(self, epoch_idx, use_precomputed_train=True, train_scores=self.score_to_predict_result(data_inst, self.y_hat)) should_stop_a, should_stop_b = False, False if self.validation_strategy is not None: if self.validation_strategy.need_stop(): should_stop_a = True if self.n_iter_no_change and self.check_convergence(loss): should_stop_b = True self.is_converged = True self.sync_stop_flag(self.is_converged, epoch_idx) if should_stop_a or should_stop_b: break self.callback_meta("loss", "train", MetricMeta(name="train", metric_type="LOSS", extra_metas={"Best": min(self.history_loss)})) if self.validation_strategy and self.validation_strategy.has_saved_best_model(): LOGGER.info('best model exported') 
self.load_model(self.validation_strategy.cur_best_model) # get summary self.set_summary(self.generate_summary()) @assert_io_num_rows_equal def predict(self, data_inst): LOGGER.info('using default lazy prediction') return self.lazy_predict(data_inst) def lazy_predict(self, data_inst): LOGGER.info('running guest lazy prediction') processed_data = self.data_alignment(data_inst) rounds = len(self.boosting_model_list) // self.booster_dim cache_dataset_key = self.predict_data_cache.get_data_key(data_inst) last_round = self.predict_data_cache.predict_data_last_round(cache_dataset_key) LOGGER.debug('last round is {}, dataset_key_is {}'.format(last_round, cache_dataset_key)) if last_round == -1: init_score = self.init_score self.predict_y_hat = processed_data.mapValues(lambda v: init_score) else: LOGGER.debug("hit cache, cached round is {}".format(last_round)) if last_round >= rounds - 1: LOGGER.debug("predict data cached, rounds is {}, total cached round is {}".format(rounds, last_round)) self.predict_y_hat = self.predict_data_cache.predict_data_at(cache_dataset_key, min(rounds - 1, last_round)) self.sync_predict_round(last_round + 1) for idx in range(last_round + 1, rounds): for booster_idx in range(self.booster_dim): model = self.load_booster(self.booster_meta, self.boosting_model_list[idx * self.booster_dim + booster_idx], idx, booster_idx) score = model.predict(processed_data) self.predict_y_hat = self.get_new_predict_score(self.predict_y_hat, score, booster_idx) self.predict_data_cache.add_data(cache_dataset_key, self.predict_y_hat) LOGGER.debug('lazy prediction finished') return self.score_to_predict_result(data_inst, self.predict_y_hat) @abc.abstractmethod def fit_a_booster(self, epoch_idx: int, booster_dim: int): raise NotImplementedError() @abc.abstractmethod def load_booster(self, model_meta, model_param, epoch_idx, booster_idx): raise NotImplementedError() @abc.abstractmethod def get_model_meta(self): raise NotImplementedError() @abc.abstractmethod def get_model_param(self): raise NotImplementedError() @abc.abstractmethod def set_model_meta(self, model_meta): raise NotImplementedError() @abc.abstractmethod def set_model_param(self, model_param): raise NotImplementedError() class HeteroBoostingHost(HeteroBoosting, ABC): def __init__(self): super(HeteroBoostingHost, self).__init__() def _init_model(self, param): super(HeteroBoostingHost, self)._init_model(param) def sync_booster_dim(self): LOGGER.info("sync booster dim from guest") self.booster_dim = self.transfer_variable.booster_dim.get(idx=0) LOGGER.info("booster dim is %d" % self.booster_dim) def sync_stop_flag(self, num_round): LOGGER.info("sync stop flag from guest, boosting_core round is {}".format(num_round)) stop_flag = self.transfer_variable.stop_flag.get(idx=0, suffix=(num_round,)) return stop_flag def sync_predict_start_round(self,): return self.transfer_variable.predict_start_round.get(idx=0,) def fit(self, data_inst, validate_data=None): LOGGER.info('begin to fit a hetero boosting model, model is {}'.format(self.model_name)) self.data_bin, self.bin_split_points, self.bin_sparse_points = self.prepare_data(data_inst) self.sync_booster_dim() self.generate_encrypter() self.validation_strategy = self.init_validation_strategy(data_inst, validate_data) for epoch_idx in range(self.boosting_round): LOGGER.info('cur epoch idx is {}'.format(epoch_idx)) for class_idx in range(self.booster_dim): # fit a booster model = self.fit_a_booster(epoch_idx, class_idx) # need to implement booster_meta, booster_param = model.get_model() if 
booster_meta is not None and booster_param is not None: self.booster_meta = booster_meta self.boosting_model_list.append(booster_param) if self.validation_strategy: self.validation_strategy.validate(self, epoch_idx, use_precomputed_train=True, train_scores=None) should_stop_a = False if self.validation_strategy is not None: if self.validation_strategy.need_stop(): should_stop_a = True should_stop_b = self.sync_stop_flag(epoch_idx) self.is_converged = should_stop_b if should_stop_a or should_stop_b: break if self.validation_strategy and self.validation_strategy.has_saved_best_model(): LOGGER.info('best model exported') self.load_model(self.validation_strategy.cur_best_model) self.set_summary(self.generate_summary()) def lazy_predict(self, data_inst): LOGGER.info('running guest lazy prediction') data_inst = self.data_alignment(data_inst) init_score = self.init_score self.predict_y_hat = data_inst.mapValues(lambda v: init_score) rounds = len(self.boosting_model_list) // self.booster_dim predict_start_round = self.sync_predict_start_round() for idx in range(predict_start_round, rounds): for booster_idx in range(self.booster_dim): model = self.load_booster(self.booster_meta, self.boosting_model_list[idx * self.booster_dim + booster_idx], idx, booster_idx) model.predict(data_inst) LOGGER.debug('lazy prediction finished') def predict(self, data_inst): LOGGER.info('using default lazy prediction') self.lazy_predict(data_inst) @abc.abstractmethod def load_booster(self, model_meta, model_param, epoch_idx, booster_idx): raise NotImplementedError() @abc.abstractmethod def fit_a_booster(self, epoch_idx: int, booster_dim: int): raise NotImplementedError() @abc.abstractmethod def get_model_meta(self): raise NotImplementedError() @abc.abstractmethod def get_model_param(self): raise NotImplementedError() @abc.abstractmethod def set_model_meta(self, model_meta): raise NotImplementedError() @abc.abstractmethod def set_model_param(self, model_param): raise NotImplementedError()
[ "fate_flow.entity.metric.MetricMeta", "fate_flow.entity.metric.Metric" ]
[((1880, 1901), 'federatedml.param.boosting_param.HeteroBoostingParam', 'HeteroBoostingParam', ([], {}), '()\n', (1899, 1901), False, 'from federatedml.param.boosting_param import HeteroBoostingParam\n'), ((1935, 1967), 'federatedml.transfer_variable.transfer_class.hetero_boosting_transfer_variable.HeteroBoostingTransferVariable', 'HeteroBoostingTransferVariable', ([], {}), '()\n', (1965, 1967), False, 'from federatedml.transfer_variable.transfer_class.hetero_boosting_transfer_variable import HeteroBoostingTransferVariable\n'), ((2673, 2706), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""generate encrypter"""'], {}), "('generate encrypter')\n", (2684, 2706), False, 'from federatedml.util import LOGGER\n'), ((3580, 3668), 'federatedml.secureprotol.encrypt_mode.EncryptModeCalculator', 'EncryptModeCalculator', (['self.encrypter', 'self.calculated_mode', 'self.re_encrypted_rate'], {}), '(self.encrypter, self.calculated_mode, self.\n re_encrypted_rate)\n', (3601, 3668), False, 'from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator\n'), ((3701, 3727), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""check label"""'], {}), "('check label')\n", (3712, 3727), False, 'from federatedml.util import LOGGER\n'), ((5004, 5043), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""sync booster_dim to host"""'], {}), "('sync booster_dim to host')\n", (5015, 5043), False, 'from federatedml.util import LOGGER\n'), ((9295, 9339), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""using default lazy prediction"""'], {}), "('using default lazy prediction')\n", (9306, 9339), False, 'from federatedml.util import LOGGER\n'), ((9433, 9477), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""running guest lazy prediction"""'], {}), "('running guest lazy prediction')\n", (9444, 9477), False, 'from federatedml.util import LOGGER\n'), ((11022, 11062), 'federatedml.util.LOGGER.debug', 'LOGGER.debug', (['"""lazy prediction finished"""'], {}), "('lazy prediction finished')\n", (11034, 11062), False, 'from federatedml.util import LOGGER\n'), ((12052, 12094), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""sync booster dim from guest"""'], {}), "('sync booster dim from guest')\n", (12063, 12094), False, 'from federatedml.util import LOGGER\n'), ((12176, 12227), 'federatedml.util.LOGGER.info', 'LOGGER.info', (["('booster dim is %d' % self.booster_dim)"], {}), "('booster dim is %d' % self.booster_dim)\n", (12187, 12227), False, 'from federatedml.util import LOGGER\n'), ((14426, 14470), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""running guest lazy prediction"""'], {}), "('running guest lazy prediction')\n", (14437, 14470), False, 'from federatedml.util import LOGGER\n'), ((15149, 15189), 'federatedml.util.LOGGER.debug', 'LOGGER.debug', (['"""lazy prediction finished"""'], {}), "('lazy prediction finished')\n", (15161, 15189), False, 'from federatedml.util import LOGGER\n'), ((15234, 15278), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""using default lazy prediction"""'], {}), "('using default lazy prediction')\n", (15245, 15278), False, 'from federatedml.util import LOGGER\n'), ((2755, 2778), 'federatedml.util.consts.PAILLIER.lower', 'consts.PAILLIER.lower', ([], {}), '()\n', (2776, 2778), False, 'from federatedml.util import consts\n'), ((2809, 2826), 'federatedml.secureprotol.PaillierEncrypt', 'PaillierEncrypt', ([], {}), '()\n', (2824, 2826), False, 'from federatedml.secureprotol import PaillierEncrypt\n'), ((3878, 3928), 
'federatedml.util.classify_label_checker.ClassifyLabelChecker.validate_label', 'ClassifyLabelChecker.validate_label', (['self.data_bin'], {}), '(self.data_bin)\n', (3913, 3928), False, 'from federatedml.util.classify_label_checker import ClassifyLabelChecker\n'), ((4637, 4689), 'federatedml.util.classify_label_checker.RegressionLabelChecker.validate_label', 'RegressionLabelChecker.validate_label', (['self.data_bin'], {}), '(self.data_bin)\n', (4674, 4689), False, 'from federatedml.util.classify_label_checker import RegressionLabelChecker\n'), ((6598, 6683), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""train"""', 'metric_type': '"""LOSS"""', 'extra_metas': "{'unit_name': 'iters'}"}), "(name='train', metric_type='LOSS', extra_metas={'unit_name': 'iters'}\n )\n", (6608, 6683), False, 'from fate_flow.entity.metric import MetricMeta\n'), ((9044, 9078), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""best model exported"""'], {}), "('best model exported')\n", (9055, 9078), False, 'from federatedml.util import LOGGER\n'), ((14222, 14256), 'federatedml.util.LOGGER.info', 'LOGGER.info', (['"""best model exported"""'], {}), "('best model exported')\n", (14233, 14256), False, 'from federatedml.util import LOGGER\n'), ((2948, 2978), 'federatedml.util.consts.ITERATIVEAFFINE.lower', 'consts.ITERATIVEAFFINE.lower', ([], {}), '()\n', (2976, 2978), False, 'from federatedml.util import consts\n'), ((3009, 3033), 'federatedml.secureprotol.IterativeAffineEncrypt', 'IterativeAffineEncrypt', ([], {}), '()\n', (3031, 3033), False, 'from federatedml.secureprotol import IterativeAffineEncrypt\n'), ((3222, 3259), 'federatedml.util.consts.RANDOM_ITERATIVEAFFINE.lower', 'consts.RANDOM_ITERATIVEAFFINE.lower', ([], {}), '()\n', (3257, 3259), False, 'from federatedml.util import consts\n'), ((3290, 3314), 'federatedml.secureprotol.IterativeAffineEncrypt', 'IterativeAffineEncrypt', ([], {}), '()\n', (3312, 3314), False, 'from federatedml.secureprotol import IterativeAffineEncrypt\n'), ((7905, 7928), 'fate_flow.entity.metric.Metric', 'Metric', (['epoch_idx', 'loss'], {}), '(epoch_idx, loss)\n', (7911, 7928), False, 'from fate_flow.entity.metric import Metric\n')]
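HeteroBoostingGuest and HeteroBoostingHost above stay abstract: the training and prediction loops are fixed, and a concrete algorithm only supplies the booster-level hooks. A minimal guest-side skeleton, assuming the module above is importable (the class name is hypothetical; real FATE subclasses implement these hooks with actual booster models):

# Hedged sketch: the surface a concrete guest-side algorithm must provide.
class ExampleHeteroBoostingGuest(HeteroBoostingGuest):

    def fit_a_booster(self, epoch_idx: int, booster_dim: int):
        # train one booster for this round/class; the returned object must
        # expose get_model() and get_sample_weights(), as used in fit() above
        raise NotImplementedError

    def load_booster(self, model_meta, model_param, epoch_idx, booster_idx):
        # rebuild a trained booster for lazy_predict()
        raise NotImplementedError

    def get_model_meta(self):
        raise NotImplementedError

    def get_model_param(self):
        raise NotImplementedError

    def set_model_meta(self, model_meta):
        raise NotImplementedError

    def set_model_param(self, model_param):
        raise NotImplementedError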
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import functools from arch.api import session from arch.api.utils.log_utils import schedule_logger from arch.api.utils.core_utils import fate_uuid from fate_flow.settings import stat_logger, DETECT_TABLE from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.entity.constant_config import ProcessRole class SessionStop(object): @staticmethod def run(): parser = argparse.ArgumentParser() parser.add_argument('-j', '--job_id', required=True, type=str, help="job id") parser.add_argument('-w', '--work_mode', required=True, type=str, help="work mode") parser.add_argument('-b', '--backend', required=True, type=str, help="backend") parser.add_argument('-c', '--command', required=True, type=str, help="command") args = parser.parse_args() session_job_id = args.job_id fate_job_id = session_job_id.split('_')[0] work_mode = int(args.work_mode) backend = int(args.backend) command = args.command session.init(job_id=session_job_id, mode=work_mode, backend=backend, set_log_dir=False) try: schedule_logger(fate_job_id).info('start {} session {}'.format(command, session.get_session_id())) if command == 'stop': session.stop() elif command == 'kill': session.kill() else: schedule_logger(fate_job_id).info('{} session {} failed, this command is not supported'.format(command, session.get_session_id())) schedule_logger(fate_job_id).info('{} session {} success'.format(command, session.get_session_id())) except Exception as e: pass def init_session_for_flow_server(): # Options are used with different backend on demand session.init(job_id="session_used_by_fate_flow_server_{}".format(fate_uuid()), mode=RuntimeConfig.WORK_MODE, backend=RuntimeConfig.BACKEND, options={"eggroll.session.processors.per.node": 1}) # init session detect table detect_table = session.table(namespace=DETECT_TABLE[0], name=DETECT_TABLE[1], partition=DETECT_TABLE[2]) detect_table.destroy() detect_table = session.table(namespace=DETECT_TABLE[0], name=DETECT_TABLE[1], partition=DETECT_TABLE[2]) detect_table.put_all(enumerate(range(DETECT_TABLE[2]))) stat_logger.info("init detect table {} {} for session {}".format(detect_table.get_namespace(), detect_table.get_name(), session.get_session_id())) stat_logger.info("init session {} for fate flow server successfully".format(session.get_session_id())) def clean_server_used_session(): used_session_id = None try: used_session_id = session.get_session_id() session.stop() except: pass session.exit() stat_logger.info("clean session {} for fate flow server done".format(used_session_id)) def session_detect(): def _out_wrapper(func): @functools.wraps(func) def _wrapper(*args, **kwargs): if RuntimeConfig.PROCESS_ROLE in [ProcessRole.SERVER]: for i in range(3): try: stat_logger.info("detect session {} by table {} {}".format( session.get_session_id(), DETECT_TABLE[0], DETECT_TABLE[1])) stat_logger.info("start count table {} {}".format(DETECT_TABLE[0], DETECT_TABLE[1])) count = 
session.table(namespace=DETECT_TABLE[0], name=DETECT_TABLE[1]).count() stat_logger.info("table {} {} count is {}".format(DETECT_TABLE[0], DETECT_TABLE[1], count)) if count != DETECT_TABLE[2]: raise Exception("session {} count error".format(session.get_session_id())) stat_logger.info("session {} is ok".format(session.get_session_id())) break except Exception as e: stat_logger.exception(e) stat_logger.info("start init new session") try: clean_server_used_session() init_session_for_flow_server() except Exception as e: stat_logger.exception(e) stat_logger.info("init new session failed.") else: stat_logger.error("init new session failed.") else: # If in executor pass. TODO: detect and restore the session in executor pass return func(*args, **kwargs) return _wrapper return _out_wrapper if __name__ == '__main__': SessionStop.run()
[ "fate_flow.settings.stat_logger.error", "fate_flow.settings.stat_logger.info", "fate_flow.settings.stat_logger.exception" ]
[((2711, 2805), 'arch.api.session.table', 'session.table', ([], {'namespace': 'DETECT_TABLE[0]', 'name': 'DETECT_TABLE[1]', 'partition': 'DETECT_TABLE[2]'}), '(namespace=DETECT_TABLE[0], name=DETECT_TABLE[1], partition=\n DETECT_TABLE[2])\n', (2724, 2805), False, 'from arch.api import session\n'), ((2847, 2941), 'arch.api.session.table', 'session.table', ([], {'namespace': 'DETECT_TABLE[0]', 'name': 'DETECT_TABLE[1]', 'partition': 'DETECT_TABLE[2]'}), '(namespace=DETECT_TABLE[0], name=DETECT_TABLE[1], partition=\n DETECT_TABLE[2])\n', (2860, 2941), False, 'from arch.api import session\n'), ((3567, 3581), 'arch.api.session.exit', 'session.exit', ([], {}), '()\n', (3579, 3581), False, 'from arch.api import session\n'), ((1031, 1056), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1054, 1056), False, 'import argparse\n'), ((1649, 1740), 'arch.api.session.init', 'session.init', ([], {'job_id': 'session_job_id', 'mode': 'work_mode', 'backend': 'backend', 'set_log_dir': '(False)'}), '(job_id=session_job_id, mode=work_mode, backend=backend,\n set_log_dir=False)\n', (1661, 1740), False, 'from arch.api import session\n'), ((3490, 3514), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (3512, 3514), False, 'from arch.api import session\n'), ((3523, 3537), 'arch.api.session.stop', 'session.stop', ([], {}), '()\n', (3535, 3537), False, 'from arch.api import session\n'), ((3734, 3755), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (3749, 3755), False, 'import functools\n'), ((3259, 3283), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (3281, 3283), False, 'from arch.api import session\n'), ((3366, 3390), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (3388, 3390), False, 'from arch.api import session\n'), ((1911, 1925), 'arch.api.session.stop', 'session.stop', ([], {}), '()\n', (1923, 1925), False, 'from arch.api import session\n'), ((2482, 2493), 'arch.api.utils.core_utils.fate_uuid', 'fate_uuid', ([], {}), '()\n', (2491, 2493), False, 'from arch.api.utils.core_utils import fate_uuid\n'), ((1762, 1790), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['fate_job_id'], {}), '(fate_job_id)\n', (1777, 1790), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((1834, 1858), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (1856, 1858), False, 'from arch.api import session\n'), ((1978, 1992), 'arch.api.session.kill', 'session.kill', ([], {}), '()\n', (1990, 1992), False, 'from arch.api import session\n'), ((2170, 2198), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['fate_job_id'], {}), '(fate_job_id)\n', (2185, 2198), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((2244, 2268), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (2266, 2268), False, 'from arch.api import session\n'), ((5221, 5266), 'fate_flow.settings.stat_logger.error', 'stat_logger.error', (['"""init new session failed."""'], {}), "('init new session failed.')\n", (5238, 5266), False, 'from fate_flow.settings import stat_logger, DETECT_TABLE\n'), ((2027, 2055), 'arch.api.utils.log_utils.schedule_logger', 'schedule_logger', (['fate_job_id'], {}), '(fate_job_id)\n', (2042, 2055), False, 'from arch.api.utils.log_utils import schedule_logger\n'), ((2131, 2155), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (2153, 2155), False, 'from arch.api import 
session\n'), ((4770, 4794), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (4791, 4794), False, 'from fate_flow.settings import stat_logger, DETECT_TABLE\n'), ((4819, 4861), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""start init new session"""'], {}), "('start init new session')\n", (4835, 4861), False, 'from fate_flow.settings import stat_logger, DETECT_TABLE\n'), ((4034, 4058), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (4056, 4058), False, 'from arch.api import session\n'), ((4236, 4298), 'arch.api.session.table', 'session.table', ([], {'namespace': 'DETECT_TABLE[0]', 'name': 'DETECT_TABLE[1]'}), '(namespace=DETECT_TABLE[0], name=DETECT_TABLE[1])\n', (4249, 4298), False, 'from arch.api import session\n'), ((4646, 4670), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (4668, 4670), False, 'from arch.api import session\n'), ((4552, 4576), 'arch.api.session.get_session_id', 'session.get_session_id', ([], {}), '()\n', (4574, 4576), False, 'from arch.api import session\n'), ((5081, 5105), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (5102, 5105), False, 'from fate_flow.settings import stat_logger, DETECT_TABLE\n'), ((5134, 5178), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""init new session failed."""'], {}), "('init new session failed.')\n", (5150, 5178), False, 'from fate_flow.settings import stat_logger, DETECT_TABLE\n')]
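session_detect above is a decorator factory: in the server process it probes the detect table and re-initialises the session before delegating to the wrapped function, and it is a pass-through in executor processes. A minimal usage sketch (do_scheduling is a hypothetical function name, not a real fate_flow routine):

# Hedged sketch: guarding a server-side routine with the session detector.
@session_detect()
def do_scheduling(job_id):
    # body runs only after the session has been verified (or re-created)
    # when RuntimeConfig.PROCESS_ROLE is ProcessRole.SERVER
    return job_id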
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import math import typing from fate_arch.common import EngineType, Backend from fate_arch.common import base_utils from fate_arch.common.conf_utils import get_base_config from fate_arch.common.log import schedule_logger from fate_arch.computing import ComputingEngine from fate_flow.db.db_models import DB, EngineRegistry, Job from fate_flow.entity.types import ResourceOperation, RunParameters from fate_flow.settings import stat_logger, STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE, SUPPORT_BACKENDS_ENTRANCE, \ MAX_CORES_PERCENT_PER_JOB, DEFAULT_TASK_CORES, IGNORE_RESOURCE_ROLES, SUPPORT_IGNORE_RESOURCE_ENGINES, TOTAL_CORES_OVERWEIGHT_PERCENT, TOTAL_MEMORY_OVERWEIGHT_PERCENT from fate_flow.utils import job_utils class ResourceManager(object): @classmethod def initialize(cls): for backend_name, backend_engines in SUPPORT_BACKENDS_ENTRANCE.items(): for engine_type, engine_keys_list in backend_engines.items(): for engine_keys in engine_keys_list: engine_config = get_base_config(backend_name, {}).get(engine_keys[1], {}) if engine_config: cls.register_engine(engine_type=engine_type, engine_name=engine_keys[0], engine_entrance=engine_keys[1], engine_config=engine_config) # initialize standalone engine for backend_engines in SUPPORT_BACKENDS_ENTRANCE.values(): for engine_type in backend_engines.keys(): engine_name = "STANDALONE" engine_entrance = "fateflow" engine_config = { "nodes": 1, "cores_per_node": STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE, } cls.register_engine(engine_type=engine_type, engine_name=engine_name, engine_entrance=engine_entrance, engine_config=engine_config) @classmethod @DB.connection_context() def register_engine(cls, engine_type, engine_name, engine_entrance, engine_config): nodes = engine_config.get("nodes", 1) cores = engine_config.get("cores_per_node", 0) * nodes * TOTAL_CORES_OVERWEIGHT_PERCENT memory = engine_config.get("memory_per_node", 0) * nodes * TOTAL_MEMORY_OVERWEIGHT_PERCENT filters = [EngineRegistry.f_engine_type == engine_type, EngineRegistry.f_engine_name == engine_name] resources = EngineRegistry.select().where(*filters) if resources: resource = resources[0] update_fields = {} update_fields[EngineRegistry.f_engine_config] = engine_config update_fields[EngineRegistry.f_cores] = cores update_fields[EngineRegistry.f_memory] = memory update_fields[EngineRegistry.f_remaining_cores] = EngineRegistry.f_remaining_cores + ( cores - resource.f_cores) update_fields[EngineRegistry.f_remaining_memory] = EngineRegistry.f_remaining_memory + ( memory - resource.f_memory) update_fields[EngineRegistry.f_nodes] = nodes operate = EngineRegistry.update(update_fields).where(*filters) update_status = operate.execute() > 0 if update_status: stat_logger.info(f"update {engine_type} engine {engine_name} {engine_entrance} registration information") else: stat_logger.info(f"update {engine_type} engine {engine_name} {engine_entrance} registration information takes no effect") else: resource = 
EngineRegistry() resource.f_create_time = base_utils.current_timestamp() resource.f_engine_type = engine_type resource.f_engine_name = engine_name resource.f_engine_entrance = engine_entrance resource.f_engine_config = engine_config resource.f_cores = cores resource.f_memory = memory resource.f_remaining_cores = cores resource.f_remaining_memory = memory resource.f_nodes = nodes try: resource.save(force_insert=True) except Exception as e: stat_logger.warning(e) stat_logger.info(f"create {engine_type} engine {engine_name} {engine_entrance} registration information") @classmethod def check_resource_apply(cls, job_parameters: RunParameters, role, party_id, engines_info): computing_engine, cores, memory = cls.calculate_job_resource(job_parameters=job_parameters, role=role, party_id=party_id) max_cores_per_job = math.floor(engines_info[EngineType.COMPUTING].f_cores * MAX_CORES_PERCENT_PER_JOB) if cores > max_cores_per_job: return False, cores, max_cores_per_job else: return True, cores, max_cores_per_job @classmethod def apply_for_job_resource(cls, job_id, role, party_id): return cls.resource_for_job(job_id=job_id, role=role, party_id=party_id, operation_type=ResourceOperation.APPLY) @classmethod def return_job_resource(cls, job_id, role, party_id): return cls.resource_for_job(job_id=job_id, role=role, party_id=party_id, operation_type=ResourceOperation.RETURN) @classmethod @DB.connection_context() def resource_for_job(cls, job_id, role, party_id, operation_type): operate_status = False engine_name, cores, memory = cls.calculate_job_resource(job_id=job_id, role=role, party_id=party_id) try: with DB.atomic(): updates = { Job.f_engine_type: EngineType.COMPUTING, Job.f_engine_name: engine_name, Job.f_cores: cores, Job.f_memory: memory, } filters = [ Job.f_job_id == job_id, Job.f_role == role, Job.f_party_id == party_id, ] if operation_type == ResourceOperation.APPLY: updates[Job.f_remaining_cores] = cores updates[Job.f_remaining_memory] = memory updates[Job.f_resource_in_use] = True updates[Job.f_apply_resource_time] = base_utils.current_timestamp() filters.append(Job.f_resource_in_use == False) elif operation_type == ResourceOperation.RETURN: updates[Job.f_resource_in_use] = False updates[Job.f_return_resource_time] = base_utils.current_timestamp() filters.append(Job.f_resource_in_use == True) operate = Job.update(updates).where(*filters) record_status = operate.execute() > 0 if not record_status: raise RuntimeError(f"record job {job_id} resource {operation_type} failed on {role} {party_id}") if cores or memory: filters, updates = cls.update_resource_sql(resource_model=EngineRegistry, cores=cores, memory=memory, operation_type=operation_type, ) filters.append(EngineRegistry.f_engine_type == EngineType.COMPUTING) filters.append(EngineRegistry.f_engine_name == engine_name) operate = EngineRegistry.update(updates).where(*filters) apply_status = operate.execute() > 0 else: apply_status = True if not apply_status: raise RuntimeError( f"update engine {engine_name} record for job {job_id} resource {operation_type} on {role} {party_id} failed") operate_status = True except Exception as e: schedule_logger(job_id=job_id).warning(e) schedule_logger(job_id=job_id).warning( f"{operation_type} job {job_id} resource(cores {cores} memory {memory}) on {role} {party_id} failed") operate_status = False finally: remaining_cores, remaining_memory = cls.get_remaining_resource(EngineRegistry, [ EngineRegistry.f_engine_type == EngineType.COMPUTING, EngineRegistry.f_engine_name == engine_name]) operate_msg = 
"successfully" if operate_status else "failed" schedule_logger(job_id=job_id).info( f"{operation_type} job {job_id} resource(cores {cores} memory {memory}) on {role} {party_id} {operate_msg}, remaining cores: {remaining_cores} remaining memory: {remaining_memory}") return operate_status @classmethod def adapt_engine_parameters(cls, role, job_parameters: RunParameters, create_initiator_baseline=False): computing_engine_info = ResourceManager.get_engine_registration_info(engine_type=EngineType.COMPUTING, engine_name=job_parameters.computing_engine) if create_initiator_baseline: job_parameters.adaptation_parameters = { "task_nodes": 0, "task_cores_per_node": 0, "task_memory_per_node": 0, # request_task_cores base on initiator and distribute to all parties, using job conf parameters or initiator fateflow server default settings "request_task_cores": int(job_parameters.task_cores) if job_parameters.task_cores else DEFAULT_TASK_CORES, "if_initiator_baseline": True } else: # use initiator baseline if role == "arbiter": job_parameters.adaptation_parameters["request_task_cores"] = 1 elif "request_task_cores" not in job_parameters.adaptation_parameters: # compatibility 1.5.0 job_parameters.adaptation_parameters["request_task_cores"] = job_parameters.adaptation_parameters["task_nodes"] * job_parameters.adaptation_parameters["task_cores_per_node"] job_parameters.adaptation_parameters["if_initiator_baseline"] = False adaptation_parameters = job_parameters.adaptation_parameters if job_parameters.computing_engine in {ComputingEngine.STANDALONE, ComputingEngine.EGGROLL}: adaptation_parameters["task_nodes"] = computing_engine_info.f_nodes if int(job_parameters.eggroll_run.get("eggroll.session.processors.per.node", 0)) > 0: adaptation_parameters["task_cores_per_node"] = int(job_parameters.eggroll_run["eggroll.session.processors.per.node"]) else: adaptation_parameters["task_cores_per_node"] = max(1, int(adaptation_parameters["request_task_cores"] / adaptation_parameters["task_nodes"])) if not create_initiator_baseline: # set the adaptation parameters to the actual engine operation parameters job_parameters.eggroll_run["eggroll.session.processors.per.node"] = adaptation_parameters["task_cores_per_node"] elif job_parameters.computing_engine == ComputingEngine.SPARK or job_parameters.computing_engine == ComputingEngine.LINKIS_SPARK: adaptation_parameters["task_nodes"] = int(job_parameters.spark_run.get("num-executors", computing_engine_info.f_nodes)) if int(job_parameters.spark_run.get("executor-cores", 0)) > 0: adaptation_parameters["task_cores_per_node"] = int(job_parameters.spark_run["executor-cores"]) else: adaptation_parameters["task_cores_per_node"] = max(1, int(adaptation_parameters["request_task_cores"] / adaptation_parameters["task_nodes"])) if not create_initiator_baseline: # set the adaptation parameters to the actual engine operation parameters job_parameters.spark_run["num-executors"] = adaptation_parameters["task_nodes"] job_parameters.spark_run["executor-cores"] = adaptation_parameters["task_cores_per_node"] @classmethod def calculate_job_resource(cls, job_parameters: RunParameters = None, job_id=None, role=None, party_id=None): if not job_parameters: job_parameters = job_utils.get_job_parameters(job_id=job_id, role=role, party_id=party_id) job_parameters = RunParameters(**job_parameters) if job_parameters.backend == Backend.LINKIS_SPARK_RABBITMQ: cores = 0 memory = 0 elif role in IGNORE_RESOURCE_ROLES and job_parameters.computing_engine in SUPPORT_IGNORE_RESOURCE_ENGINES: cores = 0 memory = 0 
else: cores = job_parameters.adaptation_parameters["task_cores_per_node"] * job_parameters.adaptation_parameters[ "task_nodes"] * job_parameters.task_parallelism memory = job_parameters.adaptation_parameters["task_memory_per_node"] * job_parameters.adaptation_parameters[ "task_nodes"] * job_parameters.task_parallelism return job_parameters.computing_engine, cores, memory @classmethod def calculate_task_resource(cls, task_parameters: RunParameters = None, task_info: dict = None): if not task_parameters: job_parameters = job_utils.get_job_parameters(job_id=task_info["job_id"], role=task_info["role"], party_id=task_info["party_id"]) task_parameters = RunParameters(**job_parameters) if task_parameters.backend == Backend.LINKIS_SPARK_RABBITMQ: cores_per_task = 0 memory_per_task = 0 elif task_info["role"] in IGNORE_RESOURCE_ROLES and task_parameters.computing_engine in SUPPORT_IGNORE_RESOURCE_ENGINES: cores_per_task = 0 memory_per_task = 0 else: cores_per_task = task_parameters.adaptation_parameters["task_cores_per_node"] * \ task_parameters.adaptation_parameters["task_nodes"] memory_per_task = task_parameters.adaptation_parameters["task_memory_per_node"] * \ task_parameters.adaptation_parameters["task_nodes"] return cores_per_task, memory_per_task @classmethod def apply_for_task_resource(cls, task_info): return ResourceManager.resource_for_task(task_info=task_info, operation_type=ResourceOperation.APPLY) @classmethod def return_task_resource(cls, task_info): return ResourceManager.resource_for_task(task_info=task_info, operation_type=ResourceOperation.RETURN) @classmethod def resource_for_task(cls, task_info, operation_type): cores_per_task, memory_per_task = cls.calculate_task_resource(task_info=task_info) schedule_logger(job_id=task_info["job_id"]).info(f"cores_per_task:{cores_per_task}, memory_per_task:{memory_per_task}") if cores_per_task or memory_per_task: filters, updates = cls.update_resource_sql(resource_model=Job, cores=cores_per_task, memory=memory_per_task, operation_type=operation_type, ) filters.append(Job.f_job_id == task_info["job_id"]) filters.append(Job.f_role == task_info["role"]) filters.append(Job.f_party_id == task_info["party_id"]) filters.append(Job.f_resource_in_use == True) operate = Job.update(updates).where(*filters) operate_status = operate.execute() > 0 else: operate_status = True if operate_status: schedule_logger(job_id=task_info["job_id"]).info( "task {} {} {} resource successfully".format(task_info["task_id"], task_info["task_version"], operation_type)) else: schedule_logger(job_id=task_info["job_id"]).warning( "task {} {} {} resource failed".format(task_info["task_id"], task_info["task_version"], operation_type)) return operate_status @classmethod def update_resource_sql(cls, resource_model: typing.Union[EngineRegistry, Job], cores, memory, operation_type): if operation_type == ResourceOperation.APPLY: filters = [ resource_model.f_remaining_cores >= cores, resource_model.f_remaining_memory >= memory ] updates = {resource_model.f_remaining_cores: resource_model.f_remaining_cores - cores, resource_model.f_remaining_memory: resource_model.f_remaining_memory - memory} elif operation_type == ResourceOperation.RETURN: filters = [] updates = {resource_model.f_remaining_cores: resource_model.f_remaining_cores + cores, resource_model.f_remaining_memory: resource_model.f_remaining_memory + memory} else: raise RuntimeError(f"can not support {operation_type} resource operation type") return filters, updates @classmethod @DB.connection_context() def 
get_remaining_resource(cls, resource_model: typing.Union[EngineRegistry, Job], filters): remaining_cores, remaining_memory = None, None try: objs = resource_model.select(resource_model.f_remaining_cores, resource_model.f_remaining_memory).where( *filters) if objs: remaining_cores, remaining_memory = objs[0].f_remaining_cores, objs[0].f_remaining_memory except Exception as e: schedule_logger().exception(e) finally: return remaining_cores, remaining_memory @classmethod @DB.connection_context() def get_engine_registration_info(cls, engine_type, engine_name) -> EngineRegistry: engines = EngineRegistry.select().where(EngineRegistry.f_engine_type == engine_type, EngineRegistry.f_engine_name == engine_name) if engines: return engines[0] else: return None
[ "fate_flow.db.db_models.DB.connection_context", "fate_flow.entity.types.RunParameters", "fate_flow.settings.SUPPORT_BACKENDS_ENTRANCE.items", "fate_flow.utils.job_utils.get_job_parameters", "fate_flow.db.db_models.EngineRegistry.select", "fate_flow.db.db_models.EngineRegistry.update", "fate_flow.settings.stat_logger.warning", "fate_flow.db.db_models.EngineRegistry", "fate_flow.settings.SUPPORT_BACKENDS_ENTRANCE.values", "fate_flow.settings.stat_logger.info", "fate_flow.db.db_models.Job.update", "fate_flow.db.db_models.DB.atomic" ]
[((2496, 2519), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (2517, 2519), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((5847, 5870), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (5868, 5870), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((18389, 18412), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (18410, 18412), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((19015, 19038), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (19036, 19038), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((1458, 1491), 'fate_flow.settings.SUPPORT_BACKENDS_ENTRANCE.items', 'SUPPORT_BACKENDS_ENTRANCE.items', ([], {}), '()\n', (1489, 1491), False, 'from fate_flow.settings import stat_logger, STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE, SUPPORT_BACKENDS_ENTRANCE, MAX_CORES_PERCENT_PER_JOB, DEFAULT_TASK_CORES, IGNORE_RESOURCE_ROLES, SUPPORT_IGNORE_RESOURCE_ENGINES, TOTAL_CORES_OVERWEIGHT_PERCENT, TOTAL_MEMORY_OVERWEIGHT_PERCENT\n'), ((1981, 2015), 'fate_flow.settings.SUPPORT_BACKENDS_ENTRANCE.values', 'SUPPORT_BACKENDS_ENTRANCE.values', ([], {}), '()\n', (2013, 2015), False, 'from fate_flow.settings import stat_logger, STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE, SUPPORT_BACKENDS_ENTRANCE, MAX_CORES_PERCENT_PER_JOB, DEFAULT_TASK_CORES, IGNORE_RESOURCE_ROLES, SUPPORT_IGNORE_RESOURCE_ENGINES, TOTAL_CORES_OVERWEIGHT_PERCENT, TOTAL_MEMORY_OVERWEIGHT_PERCENT\n'), ((5154, 5240), 'math.floor', 'math.floor', (['(engines_info[EngineType.COMPUTING].f_cores * MAX_CORES_PERCENT_PER_JOB)'], {}), '(engines_info[EngineType.COMPUTING].f_cores *\n MAX_CORES_PERCENT_PER_JOB)\n', (5164, 5240), False, 'import math\n'), ((4121, 4137), 'fate_flow.db.db_models.EngineRegistry', 'EngineRegistry', ([], {}), '()\n', (4135, 4137), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((4175, 4205), 'fate_arch.common.base_utils.current_timestamp', 'base_utils.current_timestamp', ([], {}), '()\n', (4203, 4205), False, 'from fate_arch.common import base_utils\n'), ((4776, 4891), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""create {engine_type} engine {engine_name} {engine_entrance} registration information"""'], {}), "(\n f'create {engine_type} engine {engine_name} {engine_entrance} registration information'\n )\n", (4792, 4891), False, 'from fate_flow.settings import stat_logger, STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE, SUPPORT_BACKENDS_ENTRANCE, MAX_CORES_PERCENT_PER_JOB, DEFAULT_TASK_CORES, IGNORE_RESOURCE_ROLES, SUPPORT_IGNORE_RESOURCE_ENGINES, TOTAL_CORES_OVERWEIGHT_PERCENT, TOTAL_MEMORY_OVERWEIGHT_PERCENT\n'), ((13183, 13256), 'fate_flow.utils.job_utils.get_job_parameters', 'job_utils.get_job_parameters', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (13211, 13256), False, 'from fate_flow.utils import job_utils\n'), ((13402, 13433), 'fate_flow.entity.types.RunParameters', 'RunParameters', ([], {}), '(**job_parameters)\n', (13415, 13433), False, 'from fate_flow.entity.types import ResourceOperation, RunParameters\n'), ((14333, 14450), 'fate_flow.utils.job_utils.get_job_parameters', 'job_utils.get_job_parameters', ([], {'job_id': "task_info['job_id']", 'role': "task_info['role']", 'party_id': "task_info['party_id']"}), "(job_id=task_info['job_id'], 
role=task_info[\n 'role'], party_id=task_info['party_id'])\n", (14361, 14450), False, 'from fate_flow.utils import job_utils\n'), ((14592, 14623), 'fate_flow.entity.types.RunParameters', 'RunParameters', ([], {}), '(**job_parameters)\n', (14605, 14623), False, 'from fate_flow.entity.types import ResourceOperation, RunParameters\n'), ((2978, 3001), 'fate_flow.db.db_models.EngineRegistry.select', 'EngineRegistry.select', ([], {}), '()\n', (2999, 3001), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((3822, 3937), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""update {engine_type} engine {engine_name} {engine_entrance} registration information"""'], {}), "(\n f'update {engine_type} engine {engine_name} {engine_entrance} registration information'\n )\n", (3838, 3937), False, 'from fate_flow.settings import stat_logger, STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE, SUPPORT_BACKENDS_ENTRANCE, MAX_CORES_PERCENT_PER_JOB, DEFAULT_TASK_CORES, IGNORE_RESOURCE_ROLES, SUPPORT_IGNORE_RESOURCE_ENGINES, TOTAL_CORES_OVERWEIGHT_PERCENT, TOTAL_MEMORY_OVERWEIGHT_PERCENT\n'), ((3962, 4093), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""update {engine_type} engine {engine_name} {engine_entrance} registration information takes no effect"""'], {}), "(\n f'update {engine_type} engine {engine_name} {engine_entrance} registration information takes no effect'\n )\n", (3978, 4093), False, 'from fate_flow.settings import stat_logger, STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE, SUPPORT_BACKENDS_ENTRANCE, MAX_CORES_PERCENT_PER_JOB, DEFAULT_TASK_CORES, IGNORE_RESOURCE_ROLES, SUPPORT_IGNORE_RESOURCE_ENGINES, TOTAL_CORES_OVERWEIGHT_PERCENT, TOTAL_MEMORY_OVERWEIGHT_PERCENT\n'), ((6112, 6123), 'fate_flow.db.db_models.DB.atomic', 'DB.atomic', ([], {}), '()\n', (6121, 6123), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((15890, 15933), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': "task_info['job_id']"}), "(job_id=task_info['job_id'])\n", (15905, 15933), False, 'from fate_arch.common.log import schedule_logger\n'), ((19144, 19167), 'fate_flow.db.db_models.EngineRegistry.select', 'EngineRegistry.select', ([], {}), '()\n', (19165, 19167), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((3673, 3709), 'fate_flow.db.db_models.EngineRegistry.update', 'EngineRegistry.update', (['update_fields'], {}), '(update_fields)\n', (3694, 3709), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((4741, 4763), 'fate_flow.settings.stat_logger.warning', 'stat_logger.warning', (['e'], {}), '(e)\n', (4760, 4763), False, 'from fate_flow.settings import stat_logger, STANDALONE_BACKEND_VIRTUAL_CORES_PER_NODE, SUPPORT_BACKENDS_ENTRANCE, MAX_CORES_PERCENT_PER_JOB, DEFAULT_TASK_CORES, IGNORE_RESOURCE_ROLES, SUPPORT_IGNORE_RESOURCE_ENGINES, TOTAL_CORES_OVERWEIGHT_PERCENT, TOTAL_MEMORY_OVERWEIGHT_PERCENT\n'), ((6841, 6871), 'fate_arch.common.base_utils.current_timestamp', 'base_utils.current_timestamp', ([], {}), '()\n', (6869, 6871), False, 'from fate_arch.common import base_utils\n'), ((9361, 9391), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (9376, 9391), False, 'from fate_arch.common.log import schedule_logger\n'), ((16702, 16721), 'fate_flow.db.db_models.Job.update', 'Job.update', (['updates'], {}), '(updates)\n', (16712, 16721), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((16876, 16919), 
'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': "task_info['job_id']"}), "(job_id=task_info['job_id'])\n", (16891, 16919), False, 'from fate_arch.common.log import schedule_logger\n'), ((17140, 17183), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': "task_info['job_id']"}), "(job_id=task_info['job_id'])\n", (17155, 17183), False, 'from fate_arch.common.log import schedule_logger\n'), ((7121, 7151), 'fate_arch.common.base_utils.current_timestamp', 'base_utils.current_timestamp', ([], {}), '()\n', (7149, 7151), False, 'from fate_arch.common import base_utils\n'), ((7244, 7263), 'fate_flow.db.db_models.Job.update', 'Job.update', (['updates'], {}), '(updates)\n', (7254, 7263), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n'), ((8586, 8616), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (8601, 8616), False, 'from fate_arch.common.log import schedule_logger\n'), ((8640, 8670), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (8655, 8670), False, 'from fate_arch.common.log import schedule_logger\n'), ((18891, 18908), 'fate_arch.common.log.schedule_logger', 'schedule_logger', ([], {}), '()\n', (18906, 18908), False, 'from fate_arch.common.log import schedule_logger\n'), ((1656, 1689), 'fate_arch.common.conf_utils.get_base_config', 'get_base_config', (['backend_name', '{}'], {}), '(backend_name, {})\n', (1671, 1689), False, 'from fate_arch.common.conf_utils import get_base_config\n'), ((8132, 8162), 'fate_flow.db.db_models.EngineRegistry.update', 'EngineRegistry.update', (['updates'], {}), '(updates)\n', (8153, 8162), False, 'from fate_flow.db.db_models import DB, EngineRegistry, Job\n')]
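ResourceManager above is symmetric: the cores and memory applied for a job must later be returned against the same EngineRegistry row, and both operations report success as a boolean. A minimal sketch of the scheduler-side call pattern (job id, role and party id are hypothetical placeholders):

# Hedged sketch of the apply/return life cycle around one job.
job_id, role, party_id = "202201011200000000000", "guest", 9999   # placeholders

if ResourceManager.apply_for_job_resource(job_id=job_id, role=role, party_id=party_id):
    try:
        pass  # run the job's tasks while the requested cores/memory are held
    finally:
        # release the cores/memory recorded on the Job row back to the registry
        ResourceManager.return_job_resource(job_id=job_id, role=role, party_id=party_id)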
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_arch import storage from fate_arch.session import Session from fate_flow.entity import RunParameters from fate_flow.manager.data_manager import DataTableTracker, TableStorage from fate_flow.operation.job_saver import JobSaver from fate_flow.operation.job_tracker import Tracker from fate_flow.worker.task_executor import TaskExecutor from fate_flow.utils.api_utils import get_json_result, error_response from fate_flow.utils import detect_utils, job_utils, schedule_utils from fate_flow.utils.detect_utils import validate_request from flask import request from fate_flow.utils.detect_utils import validate_request @manager.route('/add', methods=['post']) @manager.route('/bind', methods=['post']) @validate_request("engine", "address", "namespace", "name") def table_bind(): request_data = request.json address_dict = request_data.get('address') engine = request_data.get('engine') name = request_data.get('name') namespace = request_data.get('namespace') address = storage.StorageTableMeta.create_address(storage_engine=engine, address_dict=address_dict) in_serialized = request_data.get("in_serialized", 1 if engine in {storage.StorageEngine.STANDALONE, storage.StorageEngine.EGGROLL, storage.StorageEngine.MYSQL, storage.StorageEngine.PATH} else 0) destroy = (int(request_data.get("drop", 0)) == 1) data_table_meta = storage.StorageTableMeta(name=name, namespace=namespace) if data_table_meta: if destroy: data_table_meta.destroy_metas() else: return get_json_result(retcode=100, retmsg='The data table already exists.' 'If you still want to continue uploading, please add the parameter -drop.' 
'1 means to add again after deleting the table') id_column = request_data.get("id_column") or request_data.get("id_name") feature_column = request_data.get("feature_column") or request_data.get("feature_name") schema = None if id_column and feature_column: schema = {'header': feature_column, 'sid': id_column} sess = Session() storage_session = sess.storage(storage_engine=engine, options=request_data.get("options")) table = storage_session.create_table(address=address, name=name, namespace=namespace, partitions=request_data.get('partitions', None), hava_head=request_data.get("head"), schema=schema, id_delimiter=request_data.get("id_delimiter"), in_serialized=in_serialized) response = get_json_result(data={"table_name": name, "namespace": namespace}) if not table.check_address(): response = get_json_result(retcode=100, retmsg=f'engine {engine} address {address_dict} check failed') else: DataTableTracker.create_table_tracker( table_name=name, table_namespace=namespace, entity_info={"have_parent": False}, ) sess.destroy_all_sessions() return response @manager.route('/download', methods=['get']) def table_download(): request_data = request.json from fate_flow.component_env_utils.env_utils import import_component_output_depend import_component_output_depend() data_table_meta = storage.StorageTableMeta(name=request_data.get("name"), namespace=request_data.get("namespace")) if not data_table_meta: return error_response(response_code=210, retmsg=f'no found table:{request_data.get("namespace")}, {request_data.get("name")}') tar_file_name = 'table_{}_{}.tar.gz'.format(request_data.get("namespace"), request_data.get("name")) return TableStorage.send_table( output_tables_meta={"table": data_table_meta}, tar_file_name=tar_file_name, need_head=request_data.get("head", True) ) @manager.route('/delete', methods=['post']) def table_delete(): request_data = request.json table_name = request_data.get('table_name') namespace = request_data.get('namespace') data = None sess = Session() table = sess.get_table(name=table_name, namespace=namespace) if table: table.destroy() data = {'table_name': table_name, 'namespace': namespace} sess.destroy_all_sessions() if data: return get_json_result(data=data) return get_json_result(retcode=101, retmsg='no find table') @manager.route('/list', methods=['post']) @validate_request('job_id', 'role', 'party_id') def get_job_table_list(): jobs = JobSaver.query_job(**request.json) if jobs: job = jobs[0] tables = get_job_all_table(job) return get_json_result(data=tables) else: return get_json_result(retcode=101, retmsg='no find job') @manager.route('/<table_func>', methods=['post']) def table_api(table_func): config = request.json if table_func == 'table_info': table_key_count = 0 table_partition = None table_schema = None table_name, namespace = config.get("name") or config.get("table_name"), config.get("namespace") table_meta = storage.StorageTableMeta(name=table_name, namespace=namespace) address = None if table_meta: table_key_count = table_meta.get_count() table_partition = table_meta.get_partitions() table_schema = table_meta.get_schema() address = table_meta.get_address().__dict__ exist = 1 else: exist = 0 return get_json_result(data={"table_name": table_name, "namespace": namespace, "exist": exist, "count": table_key_count, "partition": table_partition, "schema": table_schema, "address": address}) else: return get_json_result() @manager.route('/tracking/source', methods=['post']) @validate_request("table_name", "namespace") def table_tracking(): request_info = request.json 
data = DataTableTracker.get_parent_table(request_info.get("table_name"), request_info.get("namespace")) return get_json_result(data=data) @manager.route('/tracking/job', methods=['post']) @validate_request("table_name", "namespace") def table_tracking_job(): request_info = request.json data = DataTableTracker.track_job(request_info.get("table_name"), request_info.get("namespace"), display=True) return get_json_result(data=data) def get_job_all_table(job): dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl, runtime_conf=job.f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf ) _, hierarchical_structure = dsl_parser.get_dsl_hierarchical_structure() component_table = {} try: component_output_tables = Tracker.query_output_data_infos(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id) except: component_output_tables = [] for component_name_list in hierarchical_structure: for component_name in component_name_list: component_table[component_name] = {} component_input_table = get_component_input_table(dsl_parser, job, component_name) component_table[component_name]['input'] = component_input_table component_table[component_name]['output'] = {} for output_table in component_output_tables: if output_table.f_component_name == component_name: component_table[component_name]['output'][output_table.f_data_name] = \ {'name': output_table.f_table_name, 'namespace': output_table.f_table_namespace} return component_table def get_component_input_table(dsl_parser, job, component_name): component = dsl_parser.get_component_info(component_name=component_name) module_name = get_component_module(component_name, job.f_dsl) if 'reader' in module_name.lower(): return job.f_runtime_conf.get("component_parameters", {}).get("role", {}).get(job.f_role, {}).get(str(job.f_roles.get(job.f_role).index(int(job.f_party_id)))).get(component_name) task_input_dsl = component.get_input() job_args_on_party = TaskExecutor.get_job_args_on_party(dsl_parser=dsl_parser, job_runtime_conf=job.f_runtime_conf, role=job.f_role, party_id=job.f_party_id) config = job_utils.get_job_parameters(job.f_job_id, job.f_role, job.f_party_id) task_parameters = RunParameters(**config) job_parameters = task_parameters component_input_table = TaskExecutor.get_task_run_args(job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id, task_id=None, task_version=None, job_args=job_args_on_party, job_parameters=job_parameters, task_parameters=task_parameters, input_dsl=task_input_dsl, get_input_table=True ) return component_input_table def get_component_module(component_name, job_dsl): return job_dsl["components"][component_name]["module"].lower()
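The `/bind` handler in the code above enforces four required fields via validate_request("engine", "address", "namespace", "name") and forwards the remaining keys to storage.StorageTableMeta.create_address and the storage session. Below is a minimal client sketch, not taken from the dataset: the host, port, URL prefix and the keys inside "address" are placeholders, since the exact address fields depend on the chosen storage engine.

import requests

# Hypothetical FATE Flow address and route prefix; adjust to the real deployment.
FATE_FLOW_URL = "http://127.0.0.1:9380/v1/table/bind"

payload = {
    # required by validate_request("engine", "address", "namespace", "name")
    "engine": "MYSQL",
    "namespace": "experiment",
    "name": "breast_hetero_guest",
    # illustrative address keys; passed through to StorageTableMeta.create_address()
    "address": {"host": "127.0.0.1", "port": 3306, "user": "fate", "passwd": "fate", "db": "fate"},
    # optional keys read by the handler
    "id_delimiter": ",",
    "head": 1,
    "partitions": 4,
    "drop": 0,
}

resp = requests.post(FATE_FLOW_URL, json=payload)
print(resp.json())  # on success: data={"table_name": ..., "namespace": ...}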
[ "fate_flow.manager.data_manager.DataTableTracker.create_table_tracker", "fate_flow.operation.job_saver.JobSaver.query_job", "fate_flow.worker.task_executor.TaskExecutor.get_job_args_on_party", "fate_flow.entity.RunParameters", "fate_flow.worker.task_executor.TaskExecutor.get_task_run_args", "fate_flow.utils.job_utils.get_job_parameters", "fate_flow.utils.api_utils.get_json_result", "fate_flow.operation.job_tracker.Tracker.query_output_data_infos", "fate_flow.component_env_utils.env_utils.import_component_output_depend", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.utils.detect_utils.validate_request" ]
[((1326, 1384), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""engine"""', '"""address"""', '"""namespace"""', '"""name"""'], {}), "('engine', 'address', 'namespace', 'name')\n", (1342, 1384), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((5186, 5232), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""job_id"""', '"""role"""', '"""party_id"""'], {}), "('job_id', 'role', 'party_id')\n", (5202, 5232), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((6762, 6805), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""table_name"""', '"""namespace"""'], {}), "('table_name', 'namespace')\n", (6778, 6805), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((7059, 7102), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""table_name"""', '"""namespace"""'], {}), "('table_name', 'namespace')\n", (7075, 7102), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((1618, 1712), 'fate_arch.storage.StorageTableMeta.create_address', 'storage.StorageTableMeta.create_address', ([], {'storage_engine': 'engine', 'address_dict': 'address_dict'}), '(storage_engine=engine, address_dict\n =address_dict)\n', (1657, 1712), False, 'from fate_arch import storage\n'), ((2054, 2110), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'name', 'namespace': 'namespace'}), '(name=name, namespace=namespace)\n', (2078, 2110), False, 'from fate_arch import storage\n'), ((2841, 2850), 'fate_arch.session.Session', 'Session', ([], {}), '()\n', (2848, 2850), False, 'from fate_arch.session import Session\n'), ((3350, 3416), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'table_name': name, 'namespace': namespace}"}), "(data={'table_name': name, 'namespace': namespace})\n", (3365, 3416), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((3989, 4021), 'fate_flow.component_env_utils.env_utils.import_component_output_depend', 'import_component_output_depend', ([], {}), '()\n', (4019, 4021), False, 'from fate_flow.component_env_utils.env_utils import import_component_output_depend\n'), ((4811, 4820), 'fate_arch.session.Session', 'Session', ([], {}), '()\n', (4818, 4820), False, 'from fate_arch.session import Session\n'), ((5088, 5140), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no find table"""'}), "(retcode=101, retmsg='no find table')\n", (5103, 5140), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5270, 5304), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {}), '(**request.json)\n', (5288, 5304), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((6979, 7005), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (6994, 7005), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7287, 7313), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (7302, 7313), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7361, 7492), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, 
runtime_conf=job.\n f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf)\n', (7394, 7492), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils\n'), ((9234, 9375), 'fate_flow.worker.task_executor.TaskExecutor.get_job_args_on_party', 'TaskExecutor.get_job_args_on_party', ([], {'dsl_parser': 'dsl_parser', 'job_runtime_conf': 'job.f_runtime_conf', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(dsl_parser=dsl_parser, job_runtime_conf=\n job.f_runtime_conf, role=job.f_role, party_id=job.f_party_id)\n', (9268, 9375), False, 'from fate_flow.worker.task_executor import TaskExecutor\n'), ((9502, 9572), 'fate_flow.utils.job_utils.get_job_parameters', 'job_utils.get_job_parameters', (['job.f_job_id', 'job.f_role', 'job.f_party_id'], {}), '(job.f_job_id, job.f_role, job.f_party_id)\n', (9530, 9572), False, 'from fate_flow.utils import detect_utils, job_utils, schedule_utils\n'), ((9595, 9618), 'fate_flow.entity.RunParameters', 'RunParameters', ([], {}), '(**config)\n', (9608, 9618), False, 'from fate_flow.entity import RunParameters\n'), ((9684, 9964), 'fate_flow.worker.task_executor.TaskExecutor.get_task_run_args', 'TaskExecutor.get_task_run_args', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id', 'task_id': 'None', 'task_version': 'None', 'job_args': 'job_args_on_party', 'job_parameters': 'job_parameters', 'task_parameters': 'task_parameters', 'input_dsl': 'task_input_dsl', 'get_input_table': '(True)'}), '(job_id=job.f_job_id, role=job.f_role,\n party_id=job.f_party_id, task_id=None, task_version=None, job_args=\n job_args_on_party, job_parameters=job_parameters, task_parameters=\n task_parameters, input_dsl=task_input_dsl, get_input_table=True)\n', (9714, 9964), False, 'from fate_flow.worker.task_executor import TaskExecutor\n'), ((3470, 3566), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': 'f"""engine {engine} address {address_dict} check failed"""'}), "(retcode=100, retmsg=\n f'engine {engine} address {address_dict} check failed')\n", (3485, 3566), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((3580, 3702), 'fate_flow.manager.data_manager.DataTableTracker.create_table_tracker', 'DataTableTracker.create_table_tracker', ([], {'table_name': 'name', 'table_namespace': 'namespace', 'entity_info': "{'have_parent': False}"}), "(table_name=name, table_namespace=\n namespace, entity_info={'have_parent': False})\n", (3617, 3702), False, 'from fate_flow.manager.data_manager import DataTableTracker, TableStorage\n'), ((5050, 5076), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'data'}), '(data=data)\n', (5065, 5076), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5395, 5423), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'tables'}), '(data=tables)\n', (5410, 5423), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5449, 5499), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no find job"""'}), "(retcode=101, retmsg='no find job')\n", (5464, 5499), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5852, 5914), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'table_name', 'namespace': 'namespace'}), '(name=table_name, namespace=namespace)\n', (5876, 5914), False, 'from fate_arch import 
storage\n'), ((6252, 6448), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'table_name': table_name, 'namespace': namespace, 'exist': exist, 'count':\n table_key_count, 'partition': table_partition, 'schema': table_schema,\n 'address': address}"}), "(data={'table_name': table_name, 'namespace': namespace,\n 'exist': exist, 'count': table_key_count, 'partition': table_partition,\n 'schema': table_schema, 'address': address})\n", (6267, 6448), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6688, 6705), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (6703, 6705), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7786, 7884), 'fate_flow.operation.job_tracker.Tracker.query_output_data_infos', 'Tracker.query_output_data_infos', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id'}), '(job_id=job.f_job_id, role=job.f_role,\n party_id=job.f_party_id)\n', (7817, 7884), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((2232, 2428), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': '"""The data table already exists.If you still want to continue uploading, please add the parameter -drop.1 means to add again after deleting the table"""'}), "(retcode=100, retmsg=\n 'The data table already exists.If you still want to continue uploading, please add the parameter -drop.1 means to add again after deleting the table'\n )\n", (2247, 2428), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n')]
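Read together with the code cell above, the spans in these records appear to index straight into that code: for the first record, offsets (1326, 1384) cover the validate_request("engine", "address", "namespace", "name") decorator call and (1342, 1384) its argument list. A short sketch that checks this reading, assuming `row` is one dataset record with `code` and `extract_api` fields and assuming the span semantics described earlier.

def call_snippet(code: str, record: tuple) -> str:
    """Return the code slice a record points at (assumes spans index the code column)."""
    (start, end), dotted_name, callee, _parsed, _arg_text, _arg_span, _flag, _import_stmt = record
    snippet = code[start:end]
    assert snippet.startswith(callee), f"span does not line up with {dotted_name}"
    return snippet

# Hypothetical usage against one dataset row:
# print(call_snippet(row["code"], row["extract_api"][0]))
# expected to show: validate_request("engine", "address", "namespace", "name")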
# # Copyright 2021 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from fate_flow.utils.log_utils import getLogger from fate_flow.components._base import BaseParam, ComponentBase, ComponentInputProtocol, ComponentMeta from fate_flow.entity import JobConfiguration from fate_flow.entity import MetricMeta from fate_flow.model.checkpoint import CheckpointManager from fate_flow.pipelined_model.pipelined_model import PipelinedModel from fate_flow.scheduling_apps.client.operation_client import OperationClient from fate_flow.utils.model_utils import gen_party_model_id from fate_flow.utils.schedule_utils import get_job_dsl_parser LOGGER = getLogger() model_loader_cpn_meta = ComponentMeta('ModelLoader') @model_loader_cpn_meta.bind_runner.on_guest.on_host.on_arbiter class ModelLoader(ComponentBase): """ ModelLoader is a component for loading models trained by previous jobs. `self.model_id`, `self.model_version`, `self.component_name` and `self.model_alias` come from the previous job. However, most of the data in `self.tracker` belongs to the current job. Such as `self.tracker.job_id`, `self.tracker.task_id`, `self.tracker.task_version`, etc. Be careful when using them. """ def __init__(self): super().__init__() self.serialize = False self.model_id = None self.model_version = None self.component_name = None self.model_alias = None self.step_index = None self.step_name = None def get_model_alias(self): job_configuration = OperationClient().get_job_conf(self.model_version, self.tracker.role, self.tracker.party_id) if not job_configuration: raise ValueError('The job was not found.') job_configuration = JobConfiguration(**job_configuration) dsl_parser = get_job_dsl_parser(job_configuration.dsl, job_configuration.runtime_conf, train_runtime_conf=job_configuration.train_runtime_conf) component = dsl_parser.get_component_info(self.component_name) task_output_dsl = component.get_output() self.model_alias = task_output_dsl['model'][0] if task_output_dsl.get('model') else 'default' def read_component_model(self): pipelined_model = PipelinedModel(gen_party_model_id( self.model_id, self.tracker.role, self.tracker.party_id ), self.model_version) component_model = pipelined_model._read_component_model(self.component_name, self.model_alias) if not component_model: raise ValueError('The component model is empty.') self.model_output = component_model self.tracker.set_metric_meta('model_loader', f'{self.component_name}-{self.model_alias}', MetricMeta('component_model', 'component_model_info', { 'model_id': self.model_id, 'model_version': self.model_version, 'component_name': self.component_name, 'model_alias': self.model_alias, })) def read_checkpoint(self): checkpoint_manager = CheckpointManager( role=self.tracker.role, party_id=self.tracker.party_id, model_id=self.model_id, model_version=self.model_version, component_name=self.component_name, mkdir=False, ) checkpoint_manager.load_checkpoints_from_disk() if self.step_index is not None: checkpoint = 
checkpoint_manager.get_checkpoint_by_index(self.step_index) elif self.step_name is not None: checkpoint = checkpoint_manager.get_checkpoint_by_name(self.step_name) else: checkpoint = checkpoint_manager.latest_checkpoint if checkpoint is None: raise ValueError('The checkpoint was not found.') data = checkpoint.read(include_database=True) data['model_id'] = checkpoint_manager.model_id data['model_version'] = checkpoint_manager.model_version data['component_name'] = checkpoint_manager.component_name self.model_output = data.pop('models') self.tracker.set_metric_meta('model_loader', f'{checkpoint.step_index}-{checkpoint.step_name}', MetricMeta('checkpoint', 'checkpoint_info', data)) def _run(self, cpn_input: ComponentInputProtocol): need_run = cpn_input.parameters.get('need_run', True) if not need_run: return for k in ('model_id', 'model_version', 'component_name'): v = cpn_input.parameters.get(k) if v is None: raise KeyError(f"The component ModelLoader needs '{k}'") setattr(self, k, v) for k in ('model_alias', 'step_index', 'step_name'): v = cpn_input.parameters.get(k) if v is not None: setattr(self, k, v) break else: try: self.get_model_alias() except Exception: # This should not have happened. But give me a chance to find a checkpoint. LOGGER.exception("Get 'model_alias' failed. Trying to find a checkpoint...") if self.model_alias is not None: try: return self.read_component_model() except Exception: LOGGER.exception('Read component model error. Trying to find a checkpoint...') try: return self.read_checkpoint() except Exception: LOGGER.exception('Read checkpoint error.') raise EnvironmentError('No component model or checkpoint was found.' if self.model_alias is not None else 'No checkpoint was found.') @model_loader_cpn_meta.bind_param class ModelLoaderParam(BaseParam): def __init__(self, model_id: str = None, model_version: str = None, component_name: str = None, model_alias: str = None, step_index: int = None, step_name: str = None, need_run: bool = True): self.model_id = model_id self.model_version = model_version self.component_name = component_name self.model_alias = model_alias self.step_index = step_index self.step_name = step_name self.need_run = need_run if self.step_index is not None: self.step_index = int(self.step_index) def check(self): for i in ('model_id', 'model_version', 'component_name'): if getattr(self, i) is None: raise KeyError(f"The parameter '{i}' is required.")
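The `_run` method above requires 'model_id', 'model_version' and 'component_name' in the component parameters, optionally takes 'model_alias', 'step_index' or 'step_name', and falls back from a component model to a checkpoint. A minimal sketch of filling in and validating the matching parameter class from the same file (ModelLoaderParam, defined at the end of the code above); the concrete identifiers are placeholders.

# ModelLoaderParam is defined in the component file shown above.
param = ModelLoaderParam(
    model_id="arbiter-10000#guest-9999#host-10000#model",   # placeholder, produced by a previous job
    model_version="202201010000000000000",                   # placeholder job id of that previous job
    component_name="hetero_lr_0",
    step_index=5,          # optional: select a checkpoint by index instead of model_alias
    need_run=True,
)
param.check()  # raises KeyError if any of the three required fields is missing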
[ "fate_flow.utils.log_utils.getLogger", "fate_flow.model.checkpoint.CheckpointManager", "fate_flow.entity.MetricMeta", "fate_flow.utils.model_utils.gen_party_model_id", "fate_flow.scheduling_apps.client.operation_client.OperationClient", "fate_flow.entity.JobConfiguration", "fate_flow.components._base.ComponentMeta", "fate_flow.utils.schedule_utils.get_job_dsl_parser" ]
[((1190, 1201), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (1199, 1201), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((1226, 1254), 'fate_flow.components._base.ComponentMeta', 'ComponentMeta', (['"""ModelLoader"""'], {}), "('ModelLoader')\n", (1239, 1254), False, 'from fate_flow.components._base import BaseParam, ComponentBase, ComponentInputProtocol, ComponentMeta\n'), ((2321, 2358), 'fate_flow.entity.JobConfiguration', 'JobConfiguration', ([], {}), '(**job_configuration)\n', (2337, 2358), False, 'from fate_flow.entity import JobConfiguration\n'), ((2381, 2515), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'get_job_dsl_parser', (['job_configuration.dsl', 'job_configuration.runtime_conf'], {'train_runtime_conf': 'job_configuration.train_runtime_conf'}), '(job_configuration.dsl, job_configuration.runtime_conf,\n train_runtime_conf=job_configuration.train_runtime_conf)\n', (2399, 2515), False, 'from fate_flow.utils.schedule_utils import get_job_dsl_parser\n'), ((3808, 3996), 'fate_flow.model.checkpoint.CheckpointManager', 'CheckpointManager', ([], {'role': 'self.tracker.role', 'party_id': 'self.tracker.party_id', 'model_id': 'self.model_id', 'model_version': 'self.model_version', 'component_name': 'self.component_name', 'mkdir': '(False)'}), '(role=self.tracker.role, party_id=self.tracker.party_id,\n model_id=self.model_id, model_version=self.model_version,\n component_name=self.component_name, mkdir=False)\n', (3825, 3996), False, 'from fate_flow.model.checkpoint import CheckpointManager\n'), ((2853, 2928), 'fate_flow.utils.model_utils.gen_party_model_id', 'gen_party_model_id', (['self.model_id', 'self.tracker.role', 'self.tracker.party_id'], {}), '(self.model_id, self.tracker.role, self.tracker.party_id)\n', (2871, 2928), False, 'from fate_flow.utils.model_utils import gen_party_model_id\n'), ((3350, 3551), 'fate_flow.entity.MetricMeta', 'MetricMeta', (['"""component_model"""', '"""component_model_info"""', "{'model_id': self.model_id, 'model_version': self.model_version,\n 'component_name': self.component_name, 'model_alias': self.model_alias}"], {}), "('component_model', 'component_model_info', {'model_id': self.\n model_id, 'model_version': self.model_version, 'component_name': self.\n component_name, 'model_alias': self.model_alias})\n", (3360, 3551), False, 'from fate_flow.entity import MetricMeta\n'), ((4955, 5004), 'fate_flow.entity.MetricMeta', 'MetricMeta', (['"""checkpoint"""', '"""checkpoint_info"""', 'data'], {}), "('checkpoint', 'checkpoint_info', data)\n", (4965, 5004), False, 'from fate_flow.entity import MetricMeta\n'), ((2111, 2128), 'fate_flow.scheduling_apps.client.operation_client.OperationClient', 'OperationClient', ([], {}), '()\n', (2126, 2128), False, 'from fate_flow.scheduling_apps.client.operation_client import OperationClient\n')]
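For this row the `apis` column is simply the set of dotted names that occur in its extract_api records, so one column can be rebuilt from the other. A short sketch, assuming the record layout sketched earlier (second element = dotted API path); note the ordering of the stored apis column may differ from the record order.

def apis_from_records(extract_api: list) -> list:
    """Collect the unique dotted API names from the extract_api records."""
    return sorted({record[1] for record in extract_api})

# For the ModelLoader row above this yields names such as
# 'fate_flow.components._base.ComponentMeta' and 'fate_flow.entity.MetricMeta'.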
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil import traceback import peewee from copy import deepcopy from fate_arch.common.base_utils import json_loads from fate_flow.db.db_models import MachineLearningModelInfo as MLModel from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog from flask import Flask, request, send_file from fate_flow.pipelined_model.migrate_model import compare_roles from fate_flow.scheduler import DAGScheduler from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY from fate_flow.pipelined_model import migrate_model, pipelined_model, publish_model from fate_flow.utils.api_utils import get_json_result, federated_api, error_response from fate_flow.utils import job_utils from fate_flow.utils.service_utils import ServiceUtils from fate_flow.utils.detect_utils import check_config from fate_flow.utils.model_utils import gen_party_model_id from fate_flow.entity.types import ModelOperation, TagOperation from fate_arch.common import file_utils, WorkMode, FederatedMode manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/load', methods=['POST']) def load_model(): request_config = request.json if request_config.get('job_id', None): with DB.connection_context(): model = MLModel.get_or_none( MLModel.f_job_id == request_config.get("job_id"), MLModel.f_role == 'guest' ) if model: model_info = model.to_json() request_config['initiator'] = {} request_config['initiator']['party_id'] = str(model_info.get('f_initiator_party_id')) request_config['initiator']['role'] = model_info.get('f_initiator_role') request_config['job_parameters'] = model_info.get('f_runtime_conf').get('job_parameters') request_config['role'] = model_info.get('f_runtime_conf').get('role') for key, value in request_config['role'].items(): for i, v in enumerate(value): value[i] = str(v) request_config.pop('job_id') else: return get_json_result(retcode=101, retmsg="model with version {} can not be found in database. 
" "Please check if the model version is valid.".format(request_config.get('job_id'))) _job_id = job_utils.generate_job_id() initiator_party_id = request_config['initiator']['party_id'] initiator_role = request_config['initiator']['role'] publish_model.generate_publish_model_info(request_config) load_status = True load_status_info = {} load_status_msg = 'success' load_status_info['detail'] = {} if "federated_mode" not in request_config['job_parameters']: if request_config["job_parameters"]["work_mode"] == WorkMode.STANDALONE: request_config['job_parameters']["federated_mode"] = FederatedMode.SINGLE elif request_config["job_parameters"]["work_mode"] == WorkMode.CLUSTER: request_config['job_parameters']["federated_mode"] = FederatedMode.MULTIPLE for role_name, role_partys in request_config.get("role").items(): if role_name == 'arbiter': continue load_status_info[role_name] = load_status_info.get(role_name, {}) load_status_info['detail'][role_name] = {} for _party_id in role_partys: request_config['local'] = {'role': role_name, 'party_id': _party_id} try: response = federated_api(job_id=_job_id, method='POST', endpoint='/model/load/do', src_party_id=initiator_party_id, dest_party_id=_party_id, src_role = initiator_role, json_body=request_config, federated_mode=request_config['job_parameters']['federated_mode']) load_status_info[role_name][_party_id] = response['retcode'] detail = {_party_id: {}} detail[_party_id]['retcode'] = response['retcode'] detail[_party_id]['retmsg'] = response['retmsg'] load_status_info['detail'][role_name].update(detail) if response['retcode']: load_status = False load_status_msg = 'failed' except Exception as e: stat_logger.exception(e) load_status = False load_status_msg = 'failed' load_status_info[role_name][_party_id] = 100 return get_json_result(job_id=_job_id, retcode=(0 if load_status else 101), retmsg=load_status_msg, data=load_status_info) @manager.route('/migrate', methods=['POST']) def migrate_model_process(): request_config = request.json _job_id = job_utils.generate_job_id() initiator_party_id = request_config['migrate_initiator']['party_id'] initiator_role = request_config['migrate_initiator']['role'] if not request_config.get("unify_model_version"): request_config["unify_model_version"] = _job_id migrate_status = True migrate_status_info = {} migrate_status_msg = 'success' migrate_status_info['detail'] = {} require_arguments = ["migrate_initiator", "role", "migrate_role", "model_id", "model_version", "execute_party", "job_parameters"] check_config(request_config, require_arguments) try: if compare_roles(request_config.get("migrate_role"), request_config.get("role")): return get_json_result(retcode=100, retmsg="The config of previous roles is the same with that of migrate roles. " "There is no need to migrate model. 
Migration process aborting.") except Exception as e: return get_json_result(retcode=100, retmsg=str(e)) local_template = { "role": "", "party_id": "", "migrate_party_id": "" } res_dict = {} for role_name, role_partys in request_config.get("migrate_role").items(): for offset, party_id in enumerate(role_partys): local_res = deepcopy(local_template) local_res["role"] = role_name local_res["party_id"] = request_config.get("role").get(role_name)[offset] local_res["migrate_party_id"] = party_id if not res_dict.get(role_name): res_dict[role_name] = {} res_dict[role_name][local_res["party_id"]] = local_res for role_name, role_partys in request_config.get("execute_party").items(): migrate_status_info[role_name] = migrate_status_info.get(role_name, {}) migrate_status_info['detail'][role_name] = {} for party_id in role_partys: request_config["local"] = res_dict.get(role_name).get(party_id) try: response = federated_api(job_id=_job_id, method='POST', endpoint='/model/migrate/do', src_party_id=initiator_party_id, dest_party_id=party_id, src_role=initiator_role, json_body=request_config, federated_mode=request_config['job_parameters']['federated_mode']) migrate_status_info[role_name][party_id] = response['retcode'] detail = {party_id: {}} detail[party_id]['retcode'] = response['retcode'] detail[party_id]['retmsg'] = response['retmsg'] migrate_status_info['detail'][role_name].update(detail) except Exception as e: stat_logger.exception(e) migrate_status = False migrate_status_msg = 'failed' migrate_status_info[role_name][party_id] = 100 return get_json_result(job_id=_job_id, retcode=(0 if migrate_status else 101), retmsg=migrate_status_msg, data=migrate_status_info) @manager.route('/migrate/do', methods=['POST']) def do_migrate_model(): request_data = request.json retcode, retmsg, data = migrate_model.migration(config_data=request_data) operation_record(request_data, "migrate", "success" if not retcode else "failed") return get_json_result(retcode=retcode, retmsg=retmsg, data=data) @manager.route('/load/do', methods=['POST']) def do_load_model(): request_data = request.json request_data["servings"] = ServiceUtils.get("servings", {}).get('hosts', []) retcode, retmsg = publish_model.load_model(config_data=request_data) try: if not retcode: with DB.connection_context(): model = MLModel.get_or_none(MLModel.f_role == request_data.get("local").get("role"), MLModel.f_party_id == request_data.get("local").get("party_id"), MLModel.f_model_id == request_data.get("job_parameters").get("model_id"), MLModel.f_model_version == request_data.get("job_parameters").get("model_version")) if model: count = model.f_loaded_times model.f_loaded_times = count + 1 model.save() except Exception as modify_err: stat_logger.exception(modify_err) try: party_model_id = gen_party_model_id(role=request_data.get("local").get("role"), party_id=request_data.get("local").get("party_id"), model_id=request_data.get("job_parameters").get("model_id")) src_model_path = os.path.join(file_utils.get_project_base_directory(), 'model_local_cache', party_model_id, request_data.get("job_parameters").get("model_version")) dst_model_path = os.path.join(file_utils.get_project_base_directory(), 'loaded_model_backup', party_model_id, request_data.get("job_parameters").get("model_version")) if not os.path.exists(dst_model_path): shutil.copytree(src=src_model_path, dst=dst_model_path) except Exception as copy_err: stat_logger.exception(copy_err) operation_record(request_data, "load", "success" if not retcode else "failed") return 
get_json_result(retcode=retcode, retmsg=retmsg) @manager.route('/bind', methods=['POST']) def bind_model_service(): request_config = request.json if request_config.get('job_id', None): with DB.connection_context(): model = MLModel.get_or_none( MLModel.f_job_id == request_config.get("job_id"), MLModel.f_role == 'guest' ) if model: model_info = model.to_json() request_config['initiator'] = {} request_config['initiator']['party_id'] = str(model_info.get('f_initiator_party_id')) request_config['initiator']['role'] = model_info.get('f_initiator_role') request_config['job_parameters'] = model_info.get('f_runtime_conf').get('job_parameters') request_config['role'] = model_info.get('f_runtime_conf').get('role') for key, value in request_config['role'].items(): for i, v in enumerate(value): value[i] = str(v) request_config.pop('job_id') else: return get_json_result(retcode=101, retmsg="model {} can not be found in database. " "Please check if the model version is valid.".format(request_config.get('job_id'))) if not request_config.get('servings'): # get my party all servings request_config['servings'] = ServiceUtils.get("servings", {}).get('hosts', []) service_id = request_config.get('service_id') if not service_id: return get_json_result(retcode=101, retmsg='no service id') check_config(request_config, ['initiator', 'role', 'job_parameters']) bind_status, retmsg = publish_model.bind_model_service(config_data=request_config) operation_record(request_config, "bind", "success" if not bind_status else "failed") return get_json_result(retcode=bind_status, retmsg='service id is {}'.format(service_id) if not retmsg else retmsg) @manager.route('/transfer', methods=['post']) def transfer_model(): model_data = publish_model.download_model(request.json) return get_json_result(retcode=0, retmsg="success", data=model_data) @manager.route('/<model_operation>', methods=['post', 'get']) def operate_model(model_operation): request_config = request.json or request.form.to_dict() job_id = job_utils.generate_job_id() if model_operation not in [ModelOperation.STORE, ModelOperation.RESTORE, ModelOperation.EXPORT, ModelOperation.IMPORT]: raise Exception('Can not support this operating now: {}'.format(model_operation)) required_arguments = ["model_id", "model_version", "role", "party_id"] check_config(request_config, required_arguments=required_arguments) request_config["model_id"] = gen_party_model_id(model_id=request_config["model_id"], role=request_config["role"], party_id=request_config["party_id"]) if model_operation in [ModelOperation.EXPORT, ModelOperation.IMPORT]: if model_operation == ModelOperation.IMPORT: try: file = request.files.get('file') file_path = os.path.join(TEMP_DIRECTORY, file.filename) # if not os.path.exists(file_path): # raise Exception('The file is obtained from the fate flow client machine, but it does not exist, ' # 'please check the path: {}'.format(file_path)) try: os.makedirs(os.path.dirname(file_path), exist_ok=True) file.save(file_path) except Exception as e: shutil.rmtree(file_path) raise e request_config['file'] = file_path model = pipelined_model.PipelinedModel(model_id=request_config["model_id"], model_version=request_config["model_version"]) model.unpack_model(file_path) pipeline = model.read_component_model('pipeline', 'pipeline')['Pipeline'] train_runtime_conf = json_loads(pipeline.train_runtime_conf) permitted_party_id = [] for key, value in train_runtime_conf.get('role', {}).items(): for v in value: permitted_party_id.extend([v, str(v)]) if request_config["party_id"] not in 
permitted_party_id: shutil.rmtree(model.model_path) raise Exception("party id {} is not in model roles, please check if the party id is valid.") try: with DB.connection_context(): model = MLModel.get_or_none( MLModel.f_job_id == train_runtime_conf["job_parameters"]["model_version"], MLModel.f_role == request_config["role"] ) if not model: MLModel.create( f_role=request_config["role"], f_party_id=request_config["party_id"], f_roles=train_runtime_conf["role"], f_job_id=train_runtime_conf["job_parameters"]["model_version"], f_model_id=train_runtime_conf["job_parameters"]["model_id"], f_model_version=train_runtime_conf["job_parameters"]["model_version"], f_initiator_role=train_runtime_conf["initiator"]["role"], f_initiator_party_id=train_runtime_conf["initiator"]["party_id"], f_runtime_conf=train_runtime_conf, f_work_mode=train_runtime_conf["job_parameters"]["work_mode"], f_dsl=json_loads(pipeline.train_dsl), f_imported=1, f_job_status='complete' ) else: stat_logger.info(f'job id: {train_runtime_conf["job_parameters"]["model_version"]}, ' f'role: {request_config["role"]} model info already existed in database.') except peewee.IntegrityError as e: stat_logger.exception(e) operation_record(request_config, "import", "success") return get_json_result() except Exception: operation_record(request_config, "import", "failed") raise else: try: model = pipelined_model.PipelinedModel(model_id=request_config["model_id"], model_version=request_config["model_version"]) if model.exists(): archive_file_path = model.packaging_model() operation_record(request_config, "export", "success") return send_file(archive_file_path, attachment_filename=os.path.basename(archive_file_path), as_attachment=True) else: operation_record(request_config, "export", "failed") res = error_response(response_code=210, retmsg="Model {} {} is not exist.".format(request_config.get("model_id"), request_config.get("model_version"))) return res except Exception as e: operation_record(request_config, "export", "failed") stat_logger.exception(e) return error_response(response_code=210, retmsg=str(e)) else: data = {} job_dsl, job_runtime_conf = gen_model_operation_job_config(request_config, model_operation) job_id, job_dsl_path, job_runtime_conf_path, logs_directory, model_info, board_url = DAGScheduler.submit( {'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}, job_id=job_id) data.update({'job_dsl_path': job_dsl_path, 'job_runtime_conf_path': job_runtime_conf_path, 'board_url': board_url, 'logs_directory': logs_directory}) operation_record(data=job_runtime_conf, oper_type=model_operation, oper_status='') return get_json_result(job_id=job_id, data=data) @manager.route('/model_tag/<operation>', methods=['POST']) @DB.connection_context() def tag_model(operation): if operation not in ['retrieve', 'create', 'remove']: return get_json_result(100, "'{}' is not currently supported.".format(operation)) request_data = request.json model = MLModel.get_or_none(MLModel.f_job_id == request_data.get("job_id")) if not model: raise Exception("Can not found model by job id: '{}'.".format(request_data.get("job_id"))) if operation == 'retrieve': res = {'tags': []} tags = (Tag.select().join(ModelTag, on=ModelTag.f_t_id == Tag.f_id).where(ModelTag.f_m_id == model.f_id)) for tag in tags: res['tags'].append({'name': tag.f_name, 'description': tag.f_desc}) res['count'] = tags.count() return get_json_result(data=res) elif operation == 'remove': tag = Tag.get_or_none(Tag.f_name == request_data.get('tag_name')) if not tag: raise Exception("Can not found '{}' 
tag.".format(request_data.get('tag_name'))) tags = (Tag.select().join(ModelTag, on=ModelTag.f_t_id == Tag.f_id).where(ModelTag.f_m_id == model.f_id)) if tag.f_name not in [t.f_name for t in tags]: raise Exception("Model {} {} does not have tag '{}'.".format(model.f_model_id, model.f_model_version, tag.f_name)) delete_query = ModelTag.delete().where(ModelTag.f_m_id == model.f_id, ModelTag.f_t_id == tag.f_id) delete_query.execute() return get_json_result(retmsg="'{}' tag has been removed from tag list of model {} {}.".format(request_data.get('tag_name'), model.f_model_id, model.f_model_version)) else: if not str(request_data.get('tag_name')): raise Exception("Tag name should not be an empty string.") tag = Tag.get_or_none(Tag.f_name == request_data.get('tag_name')) if not tag: tag = Tag() tag.f_name = request_data.get('tag_name') tag.save(force_insert=True) else: tags = (Tag.select().join(ModelTag, on=ModelTag.f_t_id == Tag.f_id).where(ModelTag.f_m_id == model.f_id)) if tag.f_name in [t.f_name for t in tags]: raise Exception("Model {} {} already been tagged as tag '{}'.".format(model.f_model_id, model.f_model_version, tag.f_name)) ModelTag.create(f_t_id=tag.f_id, f_m_id=model.f_id) return get_json_result(retmsg="Adding {} tag for model with job id: {} successfully.".format(request_data.get('tag_name'), request_data.get('job_id'))) @manager.route('/tag/<tag_operation>', methods=['POST']) @DB.connection_context() def operate_tag(tag_operation): request_data = request.json if tag_operation not in [TagOperation.CREATE, TagOperation.RETRIEVE, TagOperation.UPDATE, TagOperation.DESTROY, TagOperation.LIST]: raise Exception('The {} operation is not currently supported.'.format(tag_operation)) tag_name = request_data.get('tag_name') tag_desc = request_data.get('tag_desc') if tag_operation == TagOperation.CREATE: try: if not tag_name: return get_json_result(100, "'{}' tag created failed. 
Please input a valid tag name.".format(tag_name)) else: Tag.create(f_name=tag_name, f_desc=tag_desc) except peewee.IntegrityError: raise Exception("'{}' has already exists in database.".format(tag_name)) else: return get_json_result("'{}' tag has been created successfully.".format(tag_name)) elif tag_operation == TagOperation.LIST: tags = Tag.select() limit = request_data.get('limit') res = {"tags": []} if limit > len(tags): count = len(tags) else: count = limit for tag in tags[:count]: res['tags'].append({'name': tag.f_name, 'description': tag.f_desc, 'model_count': ModelTag.filter(ModelTag.f_t_id == tag.f_id).count()}) return get_json_result(data=res) else: if not (tag_operation == TagOperation.RETRIEVE and not request_data.get('with_model')): try: tag = Tag.get(Tag.f_name == tag_name) except peewee.DoesNotExist: raise Exception("Can not found '{}' tag.".format(tag_name)) if tag_operation == TagOperation.RETRIEVE: if request_data.get('with_model', False): res = {'models': []} models = (MLModel.select().join(ModelTag, on=ModelTag.f_m_id == MLModel.f_id).where(ModelTag.f_t_id == tag.f_id)) for model in models: res["models"].append({ "model_id": model.f_model_id, "model_version": model.f_model_version, "model_size": model.f_size }) res["count"] = models.count() return get_json_result(data=res) else: tags = Tag.filter(Tag.f_name.contains(tag_name)) if not tags: return get_json_result(100, retmsg="No tags found.") res = {'tags': []} for tag in tags: res['tags'].append({'name': tag.f_name, 'description': tag.f_desc}) return get_json_result(data=res) elif tag_operation == TagOperation.UPDATE: new_tag_name = request_data.get('new_tag_name', None) new_tag_desc = request_data.get('new_tag_desc', None) if (tag.f_name == new_tag_name) and (tag.f_desc == new_tag_desc): return get_json_result(100, "Nothing to be updated.") else: if request_data.get('new_tag_name'): if not Tag.get_or_none(Tag.f_name == new_tag_name): tag.f_name = new_tag_name else: return get_json_result(100, retmsg="'{}' tag already exists.".format(new_tag_name)) tag.f_desc = new_tag_desc tag.save() return get_json_result(retmsg="Infomation of '{}' tag has been updated successfully.".format(tag_name)) else: delete_query = ModelTag.delete().where(ModelTag.f_t_id == tag.f_id) delete_query.execute() Tag.delete_instance(tag) return get_json_result(retmsg="'{}' tag has been deleted successfully.".format(tag_name)) def gen_model_operation_job_config(config_data: dict, model_operation: ModelOperation): job_runtime_conf = job_utils.runtime_conf_basic(if_local=True) initiator_role = "local" job_dsl = { "components": {} } if model_operation in [ModelOperation.STORE, ModelOperation.RESTORE]: component_name = "{}_0".format(model_operation) component_parameters = dict() component_parameters["model_id"] = [config_data["model_id"]] component_parameters["model_version"] = [config_data["model_version"]] component_parameters["store_address"] = [MODEL_STORE_ADDRESS] if model_operation == ModelOperation.STORE: component_parameters["force_update"] = [config_data.get("force_update", False)] job_runtime_conf["role_parameters"][initiator_role] = {component_name: component_parameters} job_dsl["components"][component_name] = { "module": "Model{}".format(model_operation.capitalize()) } else: raise Exception("Can not support this model operation: {}".format(model_operation)) return job_dsl, job_runtime_conf @DB.connection_context() def operation_record(data: dict, oper_type, oper_status): try: if oper_type == 'migrate': OperLog.create(f_operation_type=oper_type, 
f_operation_status=oper_status, f_initiator_role=data.get("migrate_initiator", {}).get("role"), f_initiator_party_id=data.get("migrate_initiator", {}).get("party_id"), f_request_ip=request.remote_addr, f_model_id=data.get("model_id"), f_model_version=data.get("model_version")) elif oper_type == 'load': OperLog.create(f_operation_type=oper_type, f_operation_status=oper_status, f_initiator_role=data.get("initiator").get("role"), f_initiator_party_id=data.get("initiator").get("party_id"), f_request_ip=request.remote_addr, f_model_id=data.get("job_parameters").get("model_id"), f_model_version=data.get("job_parameters").get("model_version")) else: OperLog.create(f_operation_type=oper_type, f_operation_status=oper_status, f_initiator_role=data.get("role") if data.get("role") else data.get("initiator").get("role"), f_initiator_party_id=data.get("party_id") if data.get("party_id") else data.get("initiator").get("party_id"), f_request_ip=request.remote_addr, f_model_id=data.get("model_id") if data.get("model_id") else data.get("job_parameters").get("model_id"), f_model_version=data.get("model_version") if data.get("model_version") else data.get("job_parameters").get("model_version")) except Exception: stat_logger.error(traceback.format_exc())
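Several handlers in the code above (load_model, bind_model_service, operate_model) read their inputs from request.json and enforce required keys via check_config. Below is a minimal sketch of a bind request that satisfies check_config(request_config, ['initiator', 'role', 'job_parameters']) plus the service_id check; the host, port, URL prefix, party ids and model identifiers are placeholders, not values from the dataset.

import requests

bind_conf = {
    "service_id": "breast_lr_service",
    "initiator": {"party_id": "9999", "role": "guest"},
    "role": {"guest": ["9999"], "host": ["10000"], "arbiter": ["10000"]},
    "job_parameters": {
        "model_id": "arbiter-10000#guest-9999#host-10000#model",   # placeholder
        "model_version": "202201010000000000000",                   # placeholder
    },
    # "servings" may be omitted: the handler falls back to ServiceUtils.get("servings", {})
}

# Hypothetical FATE Flow address and route prefix.
resp = requests.post("http://127.0.0.1:9380/v1/model/bind", json=bind_conf)
print(resp.json())  # retmsg 'service id is breast_lr_service' on success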
[ "fate_flow.db.db_models.ModelTag.delete", "fate_flow.db.db_models.Tag", "fate_flow.scheduler.DAGScheduler.submit", "fate_flow.pipelined_model.publish_model.generate_publish_model_info", "fate_flow.db.db_models.DB.connection_context", "fate_flow.db.db_models.ModelTag.filter", "fate_flow.db.db_models.Tag.f_name.contains", "fate_flow.utils.service_utils.ServiceUtils.get", "fate_flow.pipelined_model.publish_model.load_model", "fate_flow.utils.api_utils.federated_api", "fate_flow.utils.api_utils.get_json_result", "fate_flow.pipelined_model.migrate_model.migration", "fate_flow.settings.stat_logger.exception", "fate_flow.db.db_models.Tag.get_or_none", "fate_flow.db.db_models.MachineLearningModelInfo.get_or_none", "fate_flow.utils.model_utils.gen_party_model_id", "fate_flow.db.db_models.Tag.create", "fate_flow.db.db_models.Tag.select", "fate_flow.settings.stat_logger.info", "fate_flow.db.db_models.MachineLearningModelInfo.select", "fate_flow.utils.detect_utils.check_config", "fate_flow.db.db_models.Tag.delete_instance", "fate_flow.db.db_models.Tag.get", "fate_flow.db.db_models.ModelTag.create", "fate_flow.utils.job_utils.runtime_conf_basic", "fate_flow.pipelined_model.pipelined_model.PipelinedModel", "fate_flow.pipelined_model.publish_model.bind_model_service", "fate_flow.pipelined_model.publish_model.download_model", "fate_flow.utils.job_utils.generate_job_id" ]
[((1655, 1670), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1660, 1670), False, 'from flask import Flask, request, send_file\n'), ((19860, 19883), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (19881, 19883), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((23046, 23069), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (23067, 23069), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((28081, 28104), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (28102, 28104), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((1734, 1758), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1755, 1758), False, 'from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((3111, 3138), 'fate_flow.utils.job_utils.generate_job_id', 'job_utils.generate_job_id', ([], {}), '()\n', (3136, 3138), False, 'from fate_flow.utils import job_utils\n'), ((3265, 3322), 'fate_flow.pipelined_model.publish_model.generate_publish_model_info', 'publish_model.generate_publish_model_info', (['request_config'], {}), '(request_config)\n', (3306, 3322), False, 'from fate_flow.pipelined_model import migrate_model, pipelined_model, publish_model\n'), ((5464, 5582), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': '_job_id', 'retcode': '(0 if load_status else 101)', 'retmsg': 'load_status_msg', 'data': 'load_status_info'}), '(job_id=_job_id, retcode=0 if load_status else 101, retmsg=\n load_status_msg, data=load_status_info)\n', (5479, 5582), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((5731, 5758), 'fate_flow.utils.job_utils.generate_job_id', 'job_utils.generate_job_id', ([], {}), '()\n', (5756, 5758), False, 'from fate_flow.utils import job_utils\n'), ((6300, 6347), 'fate_flow.utils.detect_utils.check_config', 'check_config', (['request_config', 'require_arguments'], {}), '(request_config, require_arguments)\n', (6312, 6347), False, 'from fate_flow.utils.detect_utils import check_config\n'), ((8909, 9035), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': '_job_id', 'retcode': '(0 if migrate_status else 101)', 'retmsg': 'migrate_status_msg', 'data': 'migrate_status_info'}), '(job_id=_job_id, retcode=0 if migrate_status else 101,\n retmsg=migrate_status_msg, data=migrate_status_info)\n', (8924, 9035), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((9195, 9244), 'fate_flow.pipelined_model.migrate_model.migration', 'migrate_model.migration', ([], {'config_data': 'request_data'}), '(config_data=request_data)\n', (9218, 9244), False, 'from fate_flow.pipelined_model import migrate_model, pipelined_model, publish_model\n'), ((9342, 9400), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'retcode', 'retmsg': 'retmsg', 'data': 'data'}), '(retcode=retcode, retmsg=retmsg, data=data)\n', (9357, 9400), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((9604, 9654), 'fate_flow.pipelined_model.publish_model.load_model', 'publish_model.load_model', ([], {'config_data': 'request_data'}), '(config_data=request_data)\n', (9628, 9654), False, 'from 
fate_flow.pipelined_model import migrate_model, pipelined_model, publish_model\n'), ((11431, 11478), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'retcode', 'retmsg': 'retmsg'}), '(retcode=retcode, retmsg=retmsg)\n', (11446, 11478), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((13068, 13137), 'fate_flow.utils.detect_utils.check_config', 'check_config', (['request_config', "['initiator', 'role', 'job_parameters']"], {}), "(request_config, ['initiator', 'role', 'job_parameters'])\n", (13080, 13137), False, 'from fate_flow.utils.detect_utils import check_config\n'), ((13164, 13224), 'fate_flow.pipelined_model.publish_model.bind_model_service', 'publish_model.bind_model_service', ([], {'config_data': 'request_config'}), '(config_data=request_config)\n', (13196, 13224), False, 'from fate_flow.pipelined_model import migrate_model, pipelined_model, publish_model\n'), ((13521, 13563), 'fate_flow.pipelined_model.publish_model.download_model', 'publish_model.download_model', (['request.json'], {}), '(request.json)\n', (13549, 13563), False, 'from fate_flow.pipelined_model import migrate_model, pipelined_model, publish_model\n'), ((13575, 13636), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'model_data'}), "(retcode=0, retmsg='success', data=model_data)\n", (13590, 13636), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((13810, 13837), 'fate_flow.utils.job_utils.generate_job_id', 'job_utils.generate_job_id', ([], {}), '()\n', (13835, 13837), False, 'from fate_flow.utils import job_utils\n'), ((14131, 14198), 'fate_flow.utils.detect_utils.check_config', 'check_config', (['request_config'], {'required_arguments': 'required_arguments'}), '(request_config, required_arguments=required_arguments)\n', (14143, 14198), False, 'from fate_flow.utils.detect_utils import check_config\n'), ((14232, 14358), 'fate_flow.utils.model_utils.gen_party_model_id', 'gen_party_model_id', ([], {'model_id': "request_config['model_id']", 'role': "request_config['role']", 'party_id': "request_config['party_id']"}), "(model_id=request_config['model_id'], role=request_config\n ['role'], party_id=request_config['party_id'])\n", (14250, 14358), False, 'from fate_flow.utils.model_utils import gen_party_model_id\n'), ((27058, 27101), 'fate_flow.utils.job_utils.runtime_conf_basic', 'job_utils.runtime_conf_basic', ([], {'if_local': '(True)'}), '(if_local=True)\n', (27086, 27101), False, 'from fate_flow.utils import job_utils\n'), ((13011, 13063), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""no service id"""'}), "(retcode=101, retmsg='no service id')\n", (13026, 13063), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((13774, 13796), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (13794, 13796), False, 'from flask import Flask, request, send_file\n'), ((19363, 19461), 'fate_flow.scheduler.DAGScheduler.submit', 'DAGScheduler.submit', (["{'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}"], {'job_id': 'job_id'}), "({'job_dsl': job_dsl, 'job_runtime_conf':\n job_runtime_conf}, job_id=job_id)\n", (19382, 19461), False, 'from fate_flow.scheduler import DAGScheduler\n'), ((19756, 19797), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': 'job_id', 'data': 
'data'}), '(job_id=job_id, data=data)\n', (19771, 19797), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((20618, 20643), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'res'}), '(data=res)\n', (20633, 20643), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((1966, 1989), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (1987, 1989), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((6467, 6647), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': '"""The config of previous roles is the same with that of migrate roles. There is no need to migrate model. Migration process aborting."""'}), "(retcode=100, retmsg=\n 'The config of previous roles is the same with that of migrate roles. There is no need to migrate model. Migration process aborting.'\n )\n", (6482, 6647), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((7087, 7111), 'copy.deepcopy', 'deepcopy', (['local_template'], {}), '(local_template)\n', (7095, 7111), False, 'from copy import deepcopy\n'), ((9532, 9564), 'fate_flow.utils.service_utils.ServiceUtils.get', 'ServiceUtils.get', (['"""servings"""', '{}'], {}), "('servings', {})\n", (9548, 9564), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((10391, 10424), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['modify_err'], {}), '(modify_err)\n', (10412, 10424), False, 'from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((10762, 10801), 'fate_arch.common.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (10799, 10801), False, 'from fate_arch.common import file_utils, WorkMode, FederatedMode\n'), ((10973, 11012), 'fate_arch.common.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (11010, 11012), False, 'from fate_arch.common import file_utils, WorkMode, FederatedMode\n'), ((11163, 11193), 'os.path.exists', 'os.path.exists', (['dst_model_path'], {}), '(dst_model_path)\n', (11177, 11193), False, 'import os\n'), ((11207, 11262), 'shutil.copytree', 'shutil.copytree', ([], {'src': 'src_model_path', 'dst': 'dst_model_path'}), '(src=src_model_path, dst=dst_model_path)\n', (11222, 11262), False, 'import shutil\n'), ((11305, 11336), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['copy_err'], {}), '(copy_err)\n', (11326, 11336), False, 'from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((11639, 11662), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (11660, 11662), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((22673, 22724), 'fate_flow.db.db_models.ModelTag.create', 'ModelTag.create', ([], {'f_t_id': 'tag.f_id', 'f_m_id': 'model.f_id'}), '(f_t_id=tag.f_id, f_m_id=model.f_id)\n', (22688, 22724), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((24061, 24073), 'fate_flow.db.db_models.Tag.select', 'Tag.select', ([], {}), '()\n', (24071, 24073), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((24473, 24498), 'fate_flow.utils.api_utils.get_json_result', 
'get_json_result', ([], {'data': 'res'}), '(data=res)\n', (24488, 24498), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((4254, 4515), 'fate_flow.utils.api_utils.federated_api', 'federated_api', ([], {'job_id': '_job_id', 'method': '"""POST"""', 'endpoint': '"""/model/load/do"""', 'src_party_id': 'initiator_party_id', 'dest_party_id': '_party_id', 'src_role': 'initiator_role', 'json_body': 'request_config', 'federated_mode': "request_config['job_parameters']['federated_mode']"}), "(job_id=_job_id, method='POST', endpoint='/model/load/do',\n src_party_id=initiator_party_id, dest_party_id=_party_id, src_role=\n initiator_role, json_body=request_config, federated_mode=request_config\n ['job_parameters']['federated_mode'])\n", (4267, 4515), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((7816, 8079), 'fate_flow.utils.api_utils.federated_api', 'federated_api', ([], {'job_id': '_job_id', 'method': '"""POST"""', 'endpoint': '"""/model/migrate/do"""', 'src_party_id': 'initiator_party_id', 'dest_party_id': 'party_id', 'src_role': 'initiator_role', 'json_body': 'request_config', 'federated_mode': "request_config['job_parameters']['federated_mode']"}), "(job_id=_job_id, method='POST', endpoint='/model/migrate/do',\n src_party_id=initiator_party_id, dest_party_id=party_id, src_role=\n initiator_role, json_body=request_config, federated_mode=request_config\n ['job_parameters']['federated_mode'])\n", (7829, 8079), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((9705, 9728), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (9726, 9728), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((12873, 12905), 'fate_flow.utils.service_utils.ServiceUtils.get', 'ServiceUtils.get', (['"""servings"""', '{}'], {}), "('servings', {})\n", (12889, 12905), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((14521, 14546), 'flask.request.files.get', 'request.files.get', (['"""file"""'], {}), "('file')\n", (14538, 14546), False, 'from flask import Flask, request, send_file\n'), ((14575, 14618), 'os.path.join', 'os.path.join', (['TEMP_DIRECTORY', 'file.filename'], {}), '(TEMP_DIRECTORY, file.filename)\n', (14587, 14618), False, 'import os\n'), ((15200, 15318), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': "request_config['model_id']", 'model_version': "request_config['model_version']"}), "(model_id=request_config['model_id'],\n model_version=request_config['model_version'])\n", (15230, 15318), False, 'from fate_flow.pipelined_model import migrate_model, pipelined_model, publish_model\n'), ((15489, 15528), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.train_runtime_conf'], {}), '(pipeline.train_runtime_conf)\n', (15499, 15528), False, 'from fate_arch.common.base_utils import json_loads\n'), ((17887, 17904), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (17902, 17904), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((18081, 18199), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': "request_config['model_id']", 'model_version': "request_config['model_version']"}), "(model_id=request_config['model_id'],\n 
model_version=request_config['model_version'])\n", (18111, 18199), False, 'from fate_flow.pipelined_model import migrate_model, pipelined_model, publish_model\n'), ((22066, 22071), 'fate_flow.db.db_models.Tag', 'Tag', ([], {}), '()\n', (22069, 22071), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((23723, 23767), 'fate_flow.db.db_models.Tag.create', 'Tag.create', ([], {'f_name': 'tag_name', 'f_desc': 'tag_desc'}), '(f_name=tag_name, f_desc=tag_desc)\n', (23733, 23767), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((30030, 30052), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (30050, 30052), False, 'import traceback\n'), ((5288, 5312), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (5309, 5312), False, 'from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((8725, 8749), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (8746, 8749), False, 'from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((15839, 15870), 'shutil.rmtree', 'shutil.rmtree', (['model.model_path'], {}), '(model.model_path)\n', (15852, 15870), False, 'import shutil\n'), ((19045, 19069), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (19066, 19069), False, 'from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((21327, 21344), 'fate_flow.db.db_models.ModelTag.delete', 'ModelTag.delete', ([], {}), '()\n', (21342, 21344), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((24645, 24676), 'fate_flow.db.db_models.Tag.get', 'Tag.get', (['(Tag.f_name == tag_name)'], {}), '(Tag.f_name == tag_name)\n', (24652, 24676), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((25411, 25436), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'res'}), '(data=res)\n', (25426, 25436), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((25801, 25826), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'res'}), '(data=res)\n', (25816, 25826), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((26818, 26842), 'fate_flow.db.db_models.Tag.delete_instance', 'Tag.delete_instance', (['tag'], {}), '(tag)\n', (26837, 26842), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((14929, 14955), 'os.path.dirname', 'os.path.dirname', (['file_path'], {}), '(file_path)\n', (14944, 14955), False, 'import os\n'), ((15072, 15096), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (15085, 15096), False, 'import shutil\n'), ((16030, 16053), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (16051, 16053), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((16087, 16228), 'fate_flow.db.db_models.MachineLearningModelInfo.get_or_none', 'MLModel.get_or_none', (["(MLModel.f_job_id == train_runtime_conf['job_parameters']['model_version'])", "(MLModel.f_role == request_config['role'])"], {}), "(MLModel.f_job_id == train_runtime_conf['job_parameters'\n ]['model_version'], MLModel.f_role == request_config['role'])\n", (16106, 16228), True, 'from 
fate_flow.db.db_models import MachineLearningModelInfo as MLModel\n'), ((17769, 17793), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (17790, 17793), False, 'from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((20364, 20376), 'fate_flow.db.db_models.Tag.select', 'Tag.select', ([], {}), '()\n', (20374, 20376), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((25489, 25518), 'fate_flow.db.db_models.Tag.f_name.contains', 'Tag.f_name.contains', (['tag_name'], {}), '(tag_name)\n', (25508, 25518), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((25576, 25621), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', (['(100)'], {'retmsg': '"""No tags found."""'}), "(100, retmsg='No tags found.')\n", (25591, 25621), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((26112, 26158), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', (['(100)', '"""Nothing to be updated."""'], {}), "(100, 'Nothing to be updated.')\n", (26127, 26158), False, 'from fate_flow.utils.api_utils import get_json_result, federated_api, error_response\n'), ((17492, 17658), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""job id: {train_runtime_conf[\'job_parameters\'][\'model_version\']}, role: {request_config[\'role\']} model info already existed in database."""'], {}), '(\n f"job id: {train_runtime_conf[\'job_parameters\'][\'model_version\']}, role: {request_config[\'role\']} model info already existed in database."\n )\n', (17508, 17658), False, 'from fate_flow.settings import stat_logger, MODEL_STORE_ADDRESS, TEMP_DIRECTORY\n'), ((18445, 18480), 'os.path.basename', 'os.path.basename', (['archive_file_path'], {}), '(archive_file_path)\n', (18461, 18480), False, 'import os\n'), ((20878, 20890), 'fate_flow.db.db_models.Tag.select', 'Tag.select', ([], {}), '()\n', (20888, 20890), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((26718, 26735), 'fate_flow.db.db_models.ModelTag.delete', 'ModelTag.delete', ([], {}), '()\n', (26733, 26735), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((22200, 22212), 'fate_flow.db.db_models.Tag.select', 'Tag.select', ([], {}), '()\n', (22210, 22212), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((24403, 24447), 'fate_flow.db.db_models.ModelTag.filter', 'ModelTag.filter', (['(ModelTag.f_t_id == tag.f_id)'], {}), '(ModelTag.f_t_id == tag.f_id)\n', (24418, 24447), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((26257, 26300), 'fate_flow.db.db_models.Tag.get_or_none', 'Tag.get_or_none', (['(Tag.f_name == new_tag_name)'], {}), '(Tag.f_name == new_tag_name)\n', (26272, 26300), False, 'from fate_flow.db.db_models import Tag, DB, ModelTag, ModelOperationLog as OperLog\n'), ((17270, 17300), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.train_dsl'], {}), '(pipeline.train_dsl)\n', (17280, 17300), False, 'from fate_arch.common.base_utils import json_loads\n'), ((24962, 24978), 'fate_flow.db.db_models.MachineLearningModelInfo.select', 'MLModel.select', ([], {}), '()\n', (24976, 24978), True, 'from fate_flow.db.db_models import MachineLearningModelInfo as MLModel\n')]
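The extraction tuples above record how the model load endpoint fans a request out to every participating party through federated_api. The snippet below is a minimal illustrative sketch of that call pattern, not code taken from the repository; the wrapper function name and the concrete arguments passed in are hypothetical placeholders.

# Illustrative sketch only: mirrors the federated_api keyword arguments captured in the
# extraction tuples above. The function name and all values supplied are placeholders.
from fate_flow.utils.api_utils import federated_api

def broadcast_model_load(job_id, request_config, initiator_role, initiator_party_id, party_id):
    # Ask one participant to execute /model/load/do with the shared request configuration.
    return federated_api(job_id=job_id,
                         method='POST',
                         endpoint='/model/load/do',
                         src_party_id=initiator_party_id,
                         dest_party_id=party_id,
                         src_role=initiator_role,
                         json_body=request_config,
                         federated_mode=request_config['job_parameters']['federated_mode'])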
from fate_arch.common import EngineType, engine_utils from fate_flow.utils.log_utils import schedule_logger from fate_arch.computing import ComputingEngine from fate_flow.db.dependence_registry import DependenceRegistry from fate_flow.entity import ComponentProvider from fate_flow.entity.types import FateDependenceName, ComponentProviderName, FateDependenceStorageEngine, WorkerName from fate_flow.manager.provider_manager import ProviderManager from fate_flow.manager.resource_manager import ResourceManager from fate_flow.manager.worker_manager import WorkerManager from fate_flow.settings import DEPENDENT_DISTRIBUTION, FATE_FLOW_UPDATE_CHECK, ENGINES from fate_flow.utils import schedule_utils class DependenceManager: dependence_config = None @classmethod def init(cls, provider): cls.set_version_dependence(provider) @classmethod def set_version_dependence(cls, provider, storage_engine=FateDependenceStorageEngine.HDFS.value): dependence_config = {} for dependence_type in [FateDependenceName.Fate_Source_Code.value, FateDependenceName.Python_Env.value]: dependencies_storage_info = DependenceRegistry.get_dependencies_storage_meta(storage_engine=storage_engine, version=provider.version, type=dependence_type, get_or_one=True ) dependence_config[dependence_type] = dependencies_storage_info.to_dict() cls.dependence_config = dependence_config @classmethod def check_upload(cls, job_id, provider_group, fate_flow_version_provider_info, storage_engine=FateDependenceStorageEngine.HDFS.value): schedule_logger(job_id).info("start Check if need to upload dependencies") schedule_logger(job_id).info(f"{provider_group}") upload_details = {} check_tag = True upload_total = 0 for version, provider_info in provider_group.items(): upload_details[version] = {} provider = ComponentProvider(**provider_info) for dependence_type in [FateDependenceName.Fate_Source_Code.value, FateDependenceName.Python_Env.value]: schedule_logger(job_id).info(f"{dependence_type}") dependencies_storage_info = DependenceRegistry.get_dependencies_storage_meta( storage_engine=storage_engine, version=provider.version, type=dependence_type, get_or_one=True ) need_upload = False if dependencies_storage_info: if dependencies_storage_info.f_upload_status: # version dependence uploading check_tag = False continue elif not dependencies_storage_info.f_storage_path: need_upload = True upload_total += 1 elif dependence_type == FateDependenceName.Fate_Source_Code.value: if provider.name == ComponentProviderName.FATE.value: flow_provider = ComponentProvider(**list(fate_flow_version_provider_info.values())[0]) if FATE_FLOW_UPDATE_CHECK and DependenceRegistry.get_modify_time(flow_provider.path) != \ dependencies_storage_info.f_fate_flow_snapshot_time: need_upload = True upload_total += 1 elif DependenceRegistry.get_modify_time(provider.path) !=\ dependencies_storage_info.f_snapshot_time: need_upload = True upload_total += 1 elif provider.name == ComponentProviderName.FATE_FLOW.value and FATE_FLOW_UPDATE_CHECK: if DependenceRegistry.get_modify_time(provider.path) !=\ dependencies_storage_info.f_fate_flow_snapshot_time: need_upload = True upload_total += 1 else: need_upload = True upload_total += 1 if need_upload: upload_details[version][dependence_type] = provider if upload_total > 0: check_tag = False schedule_logger(job_id).info(f"check dependencies result: {check_tag}, {upload_details}") return check_tag, upload_total > 0, upload_details @classmethod def check_job_dependence(cls, job): if not DEPENDENT_DISTRIBUTION: return True engine_name = 
ENGINES.get(EngineType.COMPUTING) schedule_logger(job.f_job_id).info(f"job engine name: {engine_name}") if engine_name not in [ComputingEngine.SPARK]: return True dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl, runtime_conf=job.f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf) provider_group = ProviderManager.get_job_provider_group(dsl_parser=dsl_parser) version_provider_info = {} fate_flow_version_provider_info = {} schedule_logger(job.f_job_id).info(f'group_info:{provider_group}') for group_key, group_info in provider_group.items(): if group_info["provider"]["name"] == ComponentProviderName.FATE_FLOW.value and \ group_info["provider"]["version"] not in fate_flow_version_provider_info: fate_flow_version_provider_info[group_info["provider"]["version"]] = group_info["provider"] if group_info["provider"]["name"] == ComponentProviderName.FATE.value and \ group_info["provider"]["version"] not in version_provider_info: version_provider_info[group_info["provider"]["version"]] = group_info["provider"] schedule_logger(job.f_job_id).info(f'version_provider_info:{version_provider_info}') schedule_logger(job.f_job_id).info(f'fate_flow_version_provider_info:{fate_flow_version_provider_info}') if not version_provider_info: version_provider_info = fate_flow_version_provider_info check_tag, upload_tag, upload_details = cls.check_upload(job.f_job_id, version_provider_info, fate_flow_version_provider_info) if upload_tag: cls.upload_job_dependence(job, upload_details) return check_tag @classmethod def upload_job_dependence(cls, job, upload_details, storage_engine=FateDependenceStorageEngine.HDFS.value): schedule_logger(job.f_job_id).info(f"start upload dependence: {upload_details}") for version, type_provider in upload_details.items(): for dependence_type, provider in type_provider.items(): storage_meta = { "f_storage_engine": storage_engine, "f_type": dependence_type, "f_version": version, "f_upload_status": True } schedule_logger(job.f_job_id).info(f"update dependence storage meta:{storage_meta}") DependenceRegistry.save_dependencies_storage_meta(storage_meta, status_check=True) WorkerManager.start_general_worker(worker_name=WorkerName.DEPENDENCE_UPLOAD, job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id, provider=provider, dependence_type=dependence_type, callback=cls.record_upload_process, callback_param=["dependence_type", "pid", "provider"]) @classmethod def record_upload_process(cls, provider, dependence_type, pid, storage_engine=FateDependenceStorageEngine.HDFS.value): storage_meta = { "f_storage_engine": storage_engine, "f_type": dependence_type, "f_version": provider.version, "f_pid": pid, "f_upload_status": True } DependenceRegistry.save_dependencies_storage_meta(storage_meta) @classmethod def kill_upload_process(cls, version, storage_engine, dependence_type): storage_meta = { "f_storage_engine": storage_engine, "f_type": dependence_type, "f_version": version, "f_upload_status": False, "f_pid": 0 } DependenceRegistry.save_dependencies_storage_meta(storage_meta) @classmethod def get_task_dependence_info(cls): return cls.get_executor_env_pythonpath(), cls.get_executor_python_env(), cls.get_driver_python_env(), \ cls.get_archives() @classmethod def get_executor_env_pythonpath(cls): return cls.dependence_config.get(FateDependenceName.Fate_Source_Code.value).get("f_dependencies_conf").get( "executor_env_pythonpath") @classmethod def get_executor_python_env(cls): return cls.dependence_config.get(FateDependenceName.Python_Env.value).get("f_dependencies_conf").get( 
"executor_python") @classmethod def get_driver_python_env(cls): return cls.dependence_config.get(FateDependenceName.Python_Env.value).get("f_dependencies_conf").get( "driver_python") @classmethod def get_archives(cls, storage_engine=FateDependenceStorageEngine.HDFS.value): archives = [] name_node = ResourceManager.get_engine_registration_info(engine_type=EngineType.STORAGE, engine_name=storage_engine ).f_engine_config.get("name_node") for dependence_type in [FateDependenceName.Fate_Source_Code.value, FateDependenceName.Python_Env.value]: archives.append( name_node + cls.dependence_config.get(dependence_type).get("f_dependencies_conf").get("archives") ) return ','.join(archives)
[ "fate_flow.utils.log_utils.schedule_logger", "fate_flow.settings.ENGINES.get", "fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta", "fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time", "fate_flow.db.dependence_registry.DependenceRegistry.get_dependencies_storage_meta", "fate_flow.manager.resource_manager.ResourceManager.get_engine_registration_info", "fate_flow.manager.provider_manager.ProviderManager.get_job_provider_group", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.manager.worker_manager.WorkerManager.start_general_worker", "fate_flow.entity.ComponentProvider" ]
[((5040, 5073), 'fate_flow.settings.ENGINES.get', 'ENGINES.get', (['EngineType.COMPUTING'], {}), '(EngineType.COMPUTING)\n', (5051, 5073), False, 'from fate_flow.settings import DEPENDENT_DISTRIBUTION, FATE_FLOW_UPDATE_CHECK, ENGINES\n'), ((5252, 5383), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf)\n', (5285, 5383), False, 'from fate_flow.utils import schedule_utils\n'), ((5459, 5520), 'fate_flow.manager.provider_manager.ProviderManager.get_job_provider_group', 'ProviderManager.get_job_provider_group', ([], {'dsl_parser': 'dsl_parser'}), '(dsl_parser=dsl_parser)\n', (5497, 5520), False, 'from fate_flow.manager.provider_manager import ProviderManager\n'), ((8546, 8609), 'fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta', 'DependenceRegistry.save_dependencies_storage_meta', (['storage_meta'], {}), '(storage_meta)\n', (8595, 8609), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((8929, 8992), 'fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta', 'DependenceRegistry.save_dependencies_storage_meta', (['storage_meta'], {}), '(storage_meta)\n', (8978, 8992), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((1152, 1305), 'fate_flow.db.dependence_registry.DependenceRegistry.get_dependencies_storage_meta', 'DependenceRegistry.get_dependencies_storage_meta', ([], {'storage_engine': 'storage_engine', 'version': 'provider.version', 'type': 'dependence_type', 'get_or_one': '(True)'}), '(storage_engine=\n storage_engine, version=provider.version, type=dependence_type,\n get_or_one=True)\n', (1200, 1305), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((2291, 2325), 'fate_flow.entity.ComponentProvider', 'ComponentProvider', ([], {}), '(**provider_info)\n', (2308, 2325), False, 'from fate_flow.entity import ComponentProvider\n'), ((1954, 1977), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (1969, 1977), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((2037, 2060), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2052, 2060), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((2554, 2707), 'fate_flow.db.dependence_registry.DependenceRegistry.get_dependencies_storage_meta', 'DependenceRegistry.get_dependencies_storage_meta', ([], {'storage_engine': 'storage_engine', 'version': 'provider.version', 'type': 'dependence_type', 'get_or_one': '(True)'}), '(storage_engine=\n storage_engine, version=provider.version, type=dependence_type,\n get_or_one=True)\n', (2602, 2707), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((4748, 4771), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (4763, 4771), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5082, 5111), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (5097, 5111), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5609, 5638), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (5624, 5638), 
False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((7067, 7096), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (7082, 7096), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((7635, 7721), 'fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta', 'DependenceRegistry.save_dependencies_storage_meta', (['storage_meta'], {'status_check': '(True)'}), '(storage_meta,\n status_check=True)\n', (7684, 7721), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((7734, 8034), 'fate_flow.manager.worker_manager.WorkerManager.start_general_worker', 'WorkerManager.start_general_worker', ([], {'worker_name': 'WorkerName.DEPENDENCE_UPLOAD', 'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id', 'provider': 'provider', 'dependence_type': 'dependence_type', 'callback': 'cls.record_upload_process', 'callback_param': "['dependence_type', 'pid', 'provider']"}), "(worker_name=WorkerName.DEPENDENCE_UPLOAD,\n job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id, provider\n =provider, dependence_type=dependence_type, callback=cls.\n record_upload_process, callback_param=['dependence_type', 'pid',\n 'provider'])\n", (7768, 8034), False, 'from fate_flow.manager.worker_manager import WorkerManager\n'), ((6314, 6343), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (6329, 6343), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((6411, 6440), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (6426, 6440), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((9944, 10052), 'fate_flow.manager.resource_manager.ResourceManager.get_engine_registration_info', 'ResourceManager.get_engine_registration_info', ([], {'engine_type': 'EngineType.STORAGE', 'engine_name': 'storage_engine'}), '(engine_type=EngineType.STORAGE,\n engine_name=storage_engine)\n', (9988, 10052), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((2459, 2482), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2474, 2482), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((7534, 7563), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (7549, 7563), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3574, 3628), 'fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time', 'DependenceRegistry.get_modify_time', (['flow_provider.path'], {}), '(flow_provider.path)\n', (3608, 3628), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((3857, 3906), 'fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time', 'DependenceRegistry.get_modify_time', (['provider.path'], {}), '(provider.path)\n', (3891, 3906), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((4234, 4283), 'fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time', 'DependenceRegistry.get_modify_time', (['provider.path'], {}), '(provider.path)\n', (4268, 4283), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n')]
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing

from fate_arch import session, storage
from fate_arch.abc import CTableABC
from fate_arch.common import DTable
from fate_arch.common.base_utils import current_timestamp
from fate_flow.db.db_models import DB, CacheRecord
from fate_flow.entity import DataCache
from fate_flow.utils import base_utils


class CacheManager:
    @classmethod
    def persistent(cls, cache_name: str, cache_data: typing.Dict[str, CTableABC], cache_meta: dict,
                   output_namespace: str, output_name: str, output_storage_engine: str,
                   output_storage_address: dict, token=None) -> DataCache:
        cache = DataCache(name=cache_name, meta=cache_meta)
        for name, table in cache_data.items():
            table_meta = session.Session.persistent(computing_table=table,
                                                     namespace=output_namespace,
                                                     name=f"{output_name}_{name}",
                                                     schema=None,
                                                     engine=output_storage_engine,
                                                     engine_address=output_storage_address,
                                                     token=token)
            cache.data[name] = DTable(namespace=table_meta.namespace, name=table_meta.name,
                                      partitions=table_meta.partitions)
        return cache

    @classmethod
    def load(cls, cache: DataCache) -> typing.Tuple[typing.Dict[str, CTableABC], dict]:
        cache_data = {}
        for name, table in cache.data.items():
            storage_table_meta = storage.StorageTableMeta(name=table.name, namespace=table.namespace)
            computing_table = session.get_computing_session().load(
                storage_table_meta.get_address(),
                schema=storage_table_meta.get_schema(),
                partitions=table.partitions)
            cache_data[name] = computing_table
        return cache_data, cache.meta

    @classmethod
    @DB.connection_context()
    def record(cls, cache: DataCache, job_id: str = None, role: str = None, party_id: int = None,
               component_name: str = None, task_id: str = None, task_version: int = None,
               cache_name: str = None):
        for attr in {"job_id", "component_name", "task_id", "task_version"}:
            if getattr(cache, attr) is None and locals().get(attr) is not None:
                setattr(cache, attr, locals().get(attr))
        record = CacheRecord()
        record.f_create_time = current_timestamp()
        record.f_cache_key = base_utils.new_unique_id()
        cache.key = record.f_cache_key
        record.f_cache = cache
        record.f_job_id = job_id
        record.f_role = role
        record.f_party_id = party_id
        record.f_component_name = component_name
        record.f_task_id = task_id
        record.f_task_version = task_version
        record.f_cache_name = cache_name
        rows = record.save(force_insert=True)
        if rows != 1:
            raise Exception("save cache tracking failed")
        return record.f_cache_key

    @classmethod
    @DB.connection_context()
    def query(cls, cache_key: str = None, role: str = None, party_id: int = None,
              component_name: str = None, cache_name: str = None, **kwargs) -> typing.List[DataCache]:
        if cache_key is not None:
            records = CacheRecord.query(cache_key=cache_key)
        else:
            records = CacheRecord.query(role=role, party_id=party_id, component_name=component_name,
                                          cache_name=cache_name, **kwargs)
        return [record.f_cache for record in records]
[ "fate_flow.db.db_models.DB.connection_context", "fate_flow.entity.DataCache", "fate_flow.utils.base_utils.new_unique_id", "fate_flow.db.db_models.CacheRecord", "fate_flow.db.db_models.CacheRecord.query" ]
[((2669, 2692), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (2690, 2692), False, 'from fate_flow.db.db_models import DB, CacheRecord\n'), ((3780, 3803), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (3801, 3803), False, 'from fate_flow.db.db_models import DB, CacheRecord\n'), ((1247, 1290), 'fate_flow.entity.DataCache', 'DataCache', ([], {'name': 'cache_name', 'meta': 'cache_meta'}), '(name=cache_name, meta=cache_meta)\n', (1256, 1290), False, 'from fate_flow.entity import DataCache\n'), ((3137, 3150), 'fate_flow.db.db_models.CacheRecord', 'CacheRecord', ([], {}), '()\n', (3148, 3150), False, 'from fate_flow.db.db_models import DB, CacheRecord\n'), ((3182, 3201), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (3199, 3201), False, 'from fate_arch.common.base_utils import current_timestamp\n'), ((3231, 3257), 'fate_flow.utils.base_utils.new_unique_id', 'base_utils.new_unique_id', ([], {}), '()\n', (3255, 3257), False, 'from fate_flow.utils import base_utils\n'), ((1363, 1575), 'fate_arch.session.Session.persistent', 'session.Session.persistent', ([], {'computing_table': 'table', 'namespace': 'output_namespace', 'name': 'f"""{output_name}_{name}"""', 'schema': 'None', 'engine': 'output_storage_engine', 'engine_address': 'output_storage_address', 'token': 'token'}), "(computing_table=table, namespace=\n output_namespace, name=f'{output_name}_{name}', schema=None, engine=\n output_storage_engine, engine_address=output_storage_address, token=token)\n", (1389, 1575), False, 'from fate_arch import session, storage\n'), ((1909, 2008), 'fate_arch.common.DTable', 'DTable', ([], {'namespace': 'table_meta.namespace', 'name': 'table_meta.name', 'partitions': 'table_meta.partitions'}), '(namespace=table_meta.namespace, name=table_meta.name, partitions=\n table_meta.partitions)\n', (1915, 2008), False, 'from fate_arch.common import DTable\n'), ((2273, 2341), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'table.name', 'namespace': 'table.namespace'}), '(name=table.name, namespace=table.namespace)\n', (2297, 2341), False, 'from fate_arch import session, storage\n'), ((4045, 4083), 'fate_flow.db.db_models.CacheRecord.query', 'CacheRecord.query', ([], {'cache_key': 'cache_key'}), '(cache_key=cache_key)\n', (4062, 4083), False, 'from fate_flow.db.db_models import DB, CacheRecord\n'), ((4120, 4236), 'fate_flow.db.db_models.CacheRecord.query', 'CacheRecord.query', ([], {'role': 'role', 'party_id': 'party_id', 'component_name': 'component_name', 'cache_name': 'cache_name'}), '(role=role, party_id=party_id, component_name=\n component_name, cache_name=cache_name, **kwargs)\n', (4137, 4236), False, 'from fate_flow.db.db_models import DB, CacheRecord\n'), ((2372, 2403), 'fate_arch.session.get_computing_session', 'session.get_computing_session', ([], {}), '()\n', (2401, 2403), False, 'from fate_arch import session, storage\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import io import json import os import shutil import tarfile from flask import Flask, request, send_file, make_response from google.protobuf import json_format from arch.api.utils.core_utils import deserialize_b64 from arch.api.utils.core_utils import fate_uuid from arch.api.utils.core_utils import json_loads from fate_flow.db.db_models import Job, DB from fate_flow.manager.data_manager import query_data_view, delete_metric_data from fate_flow.manager.tracking_manager import Tracking from fate_flow.settings import stat_logger from fate_flow.utils import job_utils, data_utils from fate_flow.utils.api_utils import get_json_result, error_response from federatedml.feature.instance import Instance manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/job/data_view', methods=['post']) def job_view(): request_data = request.json check_request_parameters(request_data) job_tracker = Tracking(job_id=request_data['job_id'], role=request_data['role'], party_id=request_data['party_id']) job_view_data = job_tracker.get_job_view() if job_view_data: job_metric_list = job_tracker.get_metric_list(job_level=True) job_view_data['model_summary'] = {} for metric_namespace, namespace_metrics in job_metric_list.items(): job_view_data['model_summary'][metric_namespace] = job_view_data['model_summary'].get(metric_namespace, {}) for metric_name in namespace_metrics: job_view_data['model_summary'][metric_namespace][metric_name] = job_view_data['model_summary'][ metric_namespace].get(metric_name, {}) for metric_data in job_tracker.get_job_metric_data(metric_namespace=metric_namespace, metric_name=metric_name): job_view_data['model_summary'][metric_namespace][metric_name][metric_data.key] = metric_data.value return get_json_result(retcode=0, retmsg='success', data=job_view_data) else: return get_json_result(retcode=101, retmsg='error') @manager.route('/component/log', methods=['post']) @job_utils.job_server_routing(307) def component_info_log(): job_id = request.json.get('job_id', '') role = request.json.get('role') party_id = request.json.get('party_id') job_log_dir = job_utils.get_job_log_directory(job_id=job_id) file_name = os.path.join(job_log_dir, role, str(party_id), 'INFO.log') if os.path.exists(file_name): return send_file(open(file_name, 'rb'), attachment_filename='{}_{}_{}_INFO.log'.format(job_id, role, party_id), as_attachment=True) else: response = make_response("no find log file") response.status = '500' return response @manager.route('/component/metric/all', methods=['post']) def component_metric_all(): request_data = request.json check_request_parameters(request_data) tracker = Tracking(job_id=request_data['job_id'], component_name=request_data['component_name'], role=request_data['role'], party_id=request_data['party_id']) metrics = tracker.get_metric_list() all_metric_data = {} if metrics: 
for metric_namespace, metric_names in metrics.items(): all_metric_data[metric_namespace] = all_metric_data.get(metric_namespace, {}) for metric_name in metric_names: all_metric_data[metric_namespace][metric_name] = all_metric_data[metric_namespace].get(metric_name, {}) metric_data, metric_meta = get_metric_all_data(tracker=tracker, metric_namespace=metric_namespace, metric_name=metric_name) all_metric_data[metric_namespace][metric_name]['data'] = metric_data all_metric_data[metric_namespace][metric_name]['meta'] = metric_meta return get_json_result(retcode=0, retmsg='success', data=all_metric_data) else: return get_json_result(retcode=0, retmsg='no data', data={}) @manager.route('/component/metrics', methods=['post']) def component_metrics(): request_data = request.json check_request_parameters(request_data) tracker = Tracking(job_id=request_data['job_id'], component_name=request_data['component_name'], role=request_data['role'], party_id=request_data['party_id']) metrics = tracker.get_metric_list() if metrics: return get_json_result(retcode=0, retmsg='success', data=metrics) else: return get_json_result(retcode=0, retmsg='no data', data={}) @manager.route('/component/metric_data', methods=['post']) def component_metric_data(): request_data = request.json check_request_parameters(request_data) tracker = Tracking(job_id=request_data['job_id'], component_name=request_data['component_name'], role=request_data['role'], party_id=request_data['party_id']) metric_data, metric_meta = get_metric_all_data(tracker=tracker, metric_namespace=request_data['metric_namespace'], metric_name=request_data['metric_name']) if metric_data or metric_meta: return get_json_result(retcode=0, retmsg='success', data=metric_data, meta=metric_meta) else: return get_json_result(retcode=0, retmsg='no data', data=[], meta={}) def get_metric_all_data(tracker, metric_namespace, metric_name): metric_data = tracker.get_metric_data(metric_namespace=metric_namespace, metric_name=metric_name) metric_meta = tracker.get_metric_meta(metric_namespace=metric_namespace, metric_name=metric_name) if metric_data or metric_meta: metric_data_list = [(metric.key, metric.value) for metric in metric_data] metric_data_list.sort(key=lambda x: x[0]) return metric_data_list, metric_meta.to_dict() if metric_meta else {} else: return [], {} @manager.route('/component/metric/delete', methods=['post']) def component_metric_delete(): sql = delete_metric_data(request.json) return get_json_result(retcode=0, retmsg='success', data=sql) @manager.route('/component/parameters', methods=['post']) def component_parameters(): request_data = request.json check_request_parameters(request_data) job_id = request_data.get('job_id', '') job_dsl_parser = job_utils.get_job_dsl_parser_by_job_id(job_id=job_id) if job_dsl_parser: component = job_dsl_parser.get_component_info(request_data['component_name']) parameters = component.get_role_parameters() for role, partys_parameters in parameters.items(): for party_parameters in partys_parameters: if party_parameters.get('local', {}).get('role', '') == request_data['role'] and party_parameters.get( 'local', {}).get('party_id', '') == int(request_data['party_id']): output_parameters = {} output_parameters['module'] = party_parameters.get('module', '') for p_k, p_v in party_parameters.items(): if p_k.endswith('Param'): output_parameters[p_k] = p_v return get_json_result(retcode=0, retmsg='success', data=output_parameters) else: return get_json_result(retcode=0, retmsg='can not found this component parameters') else: return 
get_json_result(retcode=101, retmsg='can not found this job') @manager.route('/component/output/model', methods=['post']) @job_utils.job_server_routing() def component_output_model(): request_data = request.json check_request_parameters(request_data) job_dsl, job_runtime_conf, train_runtime_conf = job_utils.get_job_configuration(job_id=request_data['job_id'], role=request_data['role'], party_id=request_data['party_id']) model_id = job_runtime_conf['job_parameters']['model_id'] model_version = job_runtime_conf['job_parameters']['model_version'] tracker = Tracking(job_id=request_data['job_id'], component_name=request_data['component_name'], role=request_data['role'], party_id=request_data['party_id'], model_id=model_id, model_version=model_version) dag = job_utils.get_job_dsl_parser(dsl=job_dsl, runtime_conf=job_runtime_conf, train_runtime_conf=train_runtime_conf) component = dag.get_component_info(request_data['component_name']) output_model_json = {} # There is only one model output at the current dsl version. output_model = tracker.get_output_model(component.get_output()['model'][0] if component.get_output().get('model') else 'default') for buffer_name, buffer_object in output_model.items(): if buffer_name.endswith('Param'): output_model_json = json_format.MessageToDict(buffer_object, including_default_value_fields=True) if output_model_json: component_define = tracker.get_component_define() this_component_model_meta = {} for buffer_name, buffer_object in output_model.items(): if buffer_name.endswith('Meta'): this_component_model_meta['meta_data'] = json_format.MessageToDict(buffer_object, including_default_value_fields=True) this_component_model_meta.update(component_define) return get_json_result(retcode=0, retmsg='success', data=output_model_json, meta=this_component_model_meta) else: return get_json_result(retcode=0, retmsg='no data', data={}) @manager.route('/component/output/data', methods=['post']) @job_utils.job_server_routing() def component_output_data(): request_data = request.json output_data_table = get_component_output_data_table(task_data=request_data) if not output_data_table: return get_json_result(retcode=0, retmsg='no data', data=[]) output_data = [] num = 100 have_data_label = False if output_data_table: for k, v in output_data_table.collect(): if num == 0: break data_line, have_data_label = get_component_output_data_line(src_key=k, src_value=v) output_data.append(data_line) num -= 1 total = output_data_table.count() if output_data: header = get_component_output_data_meta(output_data_table=output_data_table, have_data_label=have_data_label) return get_json_result(retcode=0, retmsg='success', data=output_data, meta={'header': header, 'total': total}) else: return get_json_result(retcode=0, retmsg='no data', data=[]) @manager.route('/component/output/data/download', methods=['get']) @job_utils.job_server_routing(307) def component_output_data_download(): request_data = request.json output_data_table = get_component_output_data_table(task_data=request_data) limit = request_data.get('limit', -1) if not output_data_table: return error_response(response_code=500, retmsg='no data') if limit == 0: return error_response(response_code=500, retmsg='limit is 0') output_data_count = 0 have_data_label = False output_tmp_dir = os.path.join(os.getcwd(), 'tmp/{}'.format(fate_uuid())) output_file_path = '{}/output_%s'.format(output_tmp_dir) output_data_file_path = output_file_path % 'data.csv' os.makedirs(os.path.dirname(output_data_file_path), exist_ok=True) with 
open(output_data_file_path, 'w') as fw: for k, v in output_data_table.collect(): data_line, have_data_label = get_component_output_data_line(src_key=k, src_value=v) fw.write('{}\n'.format(','.join(map(lambda x: str(x), data_line)))) output_data_count += 1 if output_data_count == limit: break if output_data_count: # get meta header = get_component_output_data_meta(output_data_table=output_data_table, have_data_label=have_data_label) output_data_meta_file_path = output_file_path % 'data_meta.json' with open(output_data_meta_file_path, 'w') as fw: json.dump({'header': header}, fw, indent=4) if request_data.get('head', True): with open(output_data_file_path, 'r+') as f: content = f.read() f.seek(0, 0) f.write('{}\n'.format(','.join(header)) + content) # tar memory_file = io.BytesIO() tar = tarfile.open(fileobj=memory_file, mode='w:gz') tar.add(output_data_file_path, os.path.relpath(output_data_file_path, output_tmp_dir)) tar.add(output_data_meta_file_path, os.path.relpath(output_data_meta_file_path, output_tmp_dir)) tar.close() memory_file.seek(0) try: shutil.rmtree(os.path.dirname(output_data_file_path)) except Exception as e: # warning stat_logger.warning(e) tar_file_name = 'job_{}_{}_{}_{}_output_data.tar.gz'.format(request_data['job_id'], request_data['component_name'], request_data['role'], request_data['party_id']) return send_file(memory_file, attachment_filename=tar_file_name, as_attachment=True) @manager.route('/component/output/data/table', methods=['post']) @job_utils.job_server_routing() def component_output_data_table(): request_data = request.json data_views = query_data_view(**request_data) if data_views: return get_json_result(retcode=0, retmsg='success', data={'table_name': data_views[0].f_table_name, 'table_namespace': data_views[0].f_table_namespace}) else: return get_json_result(retcode=100, retmsg='No found table, please check if the parameters are correct') # api using by task executor @manager.route('/<job_id>/<component_name>/<task_id>/<role>/<party_id>/metric_data/save', methods=['POST']) def save_metric_data(job_id, component_name, task_id, role, party_id): request_data = request.json tracker = Tracking(job_id=job_id, component_name=component_name, task_id=task_id, role=role, party_id=party_id) metrics = [deserialize_b64(metric) for metric in request_data['metrics']] tracker.save_metric_data(metric_namespace=request_data['metric_namespace'], metric_name=request_data['metric_name'], metrics=metrics, job_level=request_data['job_level']) return get_json_result() @manager.route('/<job_id>/<component_name>/<task_id>/<role>/<party_id>/metric_meta/save', methods=['POST']) def save_metric_meta(job_id, component_name, task_id, role, party_id): request_data = request.json tracker = Tracking(job_id=job_id, component_name=component_name, task_id=task_id, role=role, party_id=party_id) metric_meta = deserialize_b64(request_data['metric_meta']) tracker.save_metric_meta(metric_namespace=request_data['metric_namespace'], metric_name=request_data['metric_name'], metric_meta=metric_meta, job_level=request_data['job_level']) return get_json_result() def get_component_output_data_table(task_data): check_request_parameters(task_data) tracker = Tracking(job_id=task_data['job_id'], component_name=task_data['component_name'], role=task_data['role'], party_id=task_data['party_id']) job_dsl_parser = job_utils.get_job_dsl_parser_by_job_id(job_id=task_data['job_id']) if not job_dsl_parser: raise Exception('can not get dag parser, please check if the parameters are correct') component = 
job_dsl_parser.get_component_info(task_data['component_name']) if not component: raise Exception('can not found component, please check if the parameters are correct') output_dsl = component.get_output() output_data_dsl = output_dsl.get('data', []) # The current version will only have one data output. output_data_table = tracker.get_output_data_table(output_data_dsl[0] if output_data_dsl else 'component') return output_data_table def get_component_output_data_line(src_key, src_value): have_data_label = False data_line = [src_key] if isinstance(src_value, Instance): if src_value.label is not None: data_line.append(src_value.label) have_data_label = True data_line.extend(data_utils.dataset_to_list(src_value.features)) else: data_line.extend(data_utils.dataset_to_list(src_value)) return data_line, have_data_label def get_component_output_data_meta(output_data_table, have_data_label): # get meta output_data_meta = output_data_table.get_metas() schema = output_data_meta.get('schema', {}) header = [schema.get('sid_name', 'sid')] if have_data_label: header.append(schema.get('label_name')) header.extend(schema.get('header', [])) return header def check_request_parameters(request_data): with DB.connection_context(): if 'role' not in request_data and 'party_id' not in request_data: jobs = Job.select(Job.f_runtime_conf).where(Job.f_job_id == request_data.get('job_id', ''), Job.f_is_initiator == 1) if jobs: job = jobs[0] job_runtime_conf = json_loads(job.f_runtime_conf) job_initiator = job_runtime_conf.get('initiator', {}) role = job_initiator.get('role', '') party_id = job_initiator.get('party_id', 0) request_data['role'] = role request_data['party_id'] = party_id
[ "fate_flow.utils.job_utils.job_server_routing", "fate_flow.utils.job_utils.get_job_dsl_parser_by_job_id", "fate_flow.db.db_models.DB.connection_context", "fate_flow.settings.stat_logger.warning", "fate_flow.utils.api_utils.get_json_result", "fate_flow.db.db_models.Job.select", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.job_utils.get_job_dsl_parser", "fate_flow.manager.data_manager.delete_metric_data", "fate_flow.utils.api_utils.error_response", "fate_flow.utils.job_utils.get_job_configuration", "fate_flow.utils.data_utils.dataset_to_list", "fate_flow.manager.tracking_manager.Tracking", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.manager.data_manager.query_data_view" ]
[((1331, 1346), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1336, 1346), False, 'from flask import Flask, request, send_file, make_response\n'), ((2872, 2905), 'fate_flow.utils.job_utils.job_server_routing', 'job_utils.job_server_routing', (['(307)'], {}), '(307)\n', (2900, 2905), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((8390, 8420), 'fate_flow.utils.job_utils.job_server_routing', 'job_utils.job_server_routing', ([], {}), '()\n', (8418, 8420), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((10698, 10728), 'fate_flow.utils.job_utils.job_server_routing', 'job_utils.job_server_routing', ([], {}), '()\n', (10726, 10728), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((11761, 11794), 'fate_flow.utils.job_utils.job_server_routing', 'job_utils.job_server_routing', (['(307)'], {}), '(307)\n', (11789, 11794), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((14444, 14474), 'fate_flow.utils.job_utils.job_server_routing', 'job_utils.job_server_routing', ([], {}), '()\n', (14472, 14474), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((1410, 1434), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1431, 1434), False, 'from fate_flow.settings import stat_logger\n'), ((1652, 1758), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': "request_data['job_id']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], role=request_data['role'], party_id\n =request_data['party_id'])\n", (1660, 1758), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((2945, 2975), 'flask.request.json.get', 'request.json.get', (['"""job_id"""', '""""""'], {}), "('job_id', '')\n", (2961, 2975), False, 'from flask import Flask, request, send_file, make_response\n'), ((2987, 3011), 'flask.request.json.get', 'request.json.get', (['"""role"""'], {}), "('role')\n", (3003, 3011), False, 'from flask import Flask, request, send_file, make_response\n'), ((3027, 3055), 'flask.request.json.get', 'request.json.get', (['"""party_id"""'], {}), "('party_id')\n", (3043, 3055), False, 'from flask import Flask, request, send_file, make_response\n'), ((3074, 3120), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (3105, 3120), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((3203, 3228), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (3217, 3228), False, 'import os\n'), ((3665, 3823), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': "request_data['job_id']", 'component_name': "request_data['component_name']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], component_name=request_data[\n 'component_name'], role=request_data['role'], party_id=request_data[\n 'party_id'])\n", (3673, 3823), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((4941, 5099), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': "request_data['job_id']", 'component_name': "request_data['component_name']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], component_name=request_data[\n 'component_name'], role=request_data['role'], party_id=request_data[\n 'party_id'])\n", (4949, 5099), False, 'from 
fate_flow.manager.tracking_manager import Tracking\n'), ((5501, 5659), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': "request_data['job_id']", 'component_name': "request_data['component_name']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], component_name=request_data[\n 'component_name'], role=request_data['role'], party_id=request_data[\n 'party_id'])\n", (5509, 5659), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((6870, 6902), 'fate_flow.manager.data_manager.delete_metric_data', 'delete_metric_data', (['request.json'], {}), '(request.json)\n', (6888, 6902), False, 'from fate_flow.manager.data_manager import query_data_view, delete_metric_data\n'), ((6914, 6968), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'sql'}), "(retcode=0, retmsg='success', data=sql)\n", (6929, 6968), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((7197, 7250), 'fate_flow.utils.job_utils.get_job_dsl_parser_by_job_id', 'job_utils.get_job_dsl_parser_by_job_id', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (7235, 7250), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((8578, 8707), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': "request_data['job_id']", 'role': "request_data['role']", 'party_id': "request_data['party_id']"}), "(job_id=request_data['job_id'], role=\n request_data['role'], party_id=request_data['party_id'])\n", (8609, 8707), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((9019, 9225), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': "request_data['job_id']", 'component_name': "request_data['component_name']", 'role': "request_data['role']", 'party_id': "request_data['party_id']", 'model_id': 'model_id', 'model_version': 'model_version'}), "(job_id=request_data['job_id'], component_name=request_data[\n 'component_name'], role=request_data['role'], party_id=request_data[\n 'party_id'], model_id=model_id, model_version=model_version)\n", (9027, 9225), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((9272, 9387), 'fate_flow.utils.job_utils.get_job_dsl_parser', 'job_utils.get_job_dsl_parser', ([], {'dsl': 'job_dsl', 'runtime_conf': 'job_runtime_conf', 'train_runtime_conf': 'train_runtime_conf'}), '(dsl=job_dsl, runtime_conf=job_runtime_conf,\n train_runtime_conf=train_runtime_conf)\n', (9300, 9387), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((14559, 14590), 'fate_flow.manager.data_manager.query_data_view', 'query_data_view', ([], {}), '(**request_data)\n', (14574, 14590), False, 'from fate_flow.manager.data_manager import query_data_view, delete_metric_data\n'), ((15216, 15321), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': 'job_id', 'component_name': 'component_name', 'task_id': 'task_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, component_name=component_name, task_id=task_id,\n role=role, party_id=party_id)\n', (15224, 15321), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((15611, 15628), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (15626, 15628), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((15856, 15961), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': 
'job_id', 'component_name': 'component_name', 'task_id': 'task_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, component_name=component_name, task_id=task_id,\n role=role, party_id=party_id)\n', (15864, 15961), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((15976, 16020), 'arch.api.utils.core_utils.deserialize_b64', 'deserialize_b64', (["request_data['metric_meta']"], {}), "(request_data['metric_meta'])\n", (15991, 16020), False, 'from arch.api.utils.core_utils import deserialize_b64\n'), ((16244, 16261), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (16259, 16261), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((16366, 16507), 'fate_flow.manager.tracking_manager.Tracking', 'Tracking', ([], {'job_id': "task_data['job_id']", 'component_name': "task_data['component_name']", 'role': "task_data['role']", 'party_id': "task_data['party_id']"}), "(job_id=task_data['job_id'], component_name=task_data[\n 'component_name'], role=task_data['role'], party_id=task_data['party_id'])\n", (16374, 16507), False, 'from fate_flow.manager.tracking_manager import Tracking\n'), ((16547, 16613), 'fate_flow.utils.job_utils.get_job_dsl_parser_by_job_id', 'job_utils.get_job_dsl_parser_by_job_id', ([], {'job_id': "task_data['job_id']"}), "(job_id=task_data['job_id'])\n", (16585, 16613), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((2683, 2747), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'job_view_data'}), "(retcode=0, retmsg='success', data=job_view_data)\n", (2698, 2747), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((2773, 2817), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""error"""'}), "(retcode=101, retmsg='error')\n", (2788, 2817), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((3399, 3432), 'flask.make_response', 'make_response', (['"""no find log file"""'], {}), "('no find log file')\n", (3412, 3432), False, 'from flask import Flask, request, send_file, make_response\n'), ((4624, 4690), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'all_metric_data'}), "(retcode=0, retmsg='success', data=all_metric_data)\n", (4639, 4690), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((4716, 4769), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '{}'}), "(retcode=0, retmsg='no data', data={})\n", (4731, 4769), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5184, 5242), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'metrics'}), "(retcode=0, retmsg='success', data=metrics)\n", (5199, 5242), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5268, 5321), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '{}'}), "(retcode=0, retmsg='no data', data={})\n", (5283, 5321), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((5934, 6019), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 
'metric_data', 'meta': 'metric_meta'}), "(retcode=0, retmsg='success', data=metric_data, meta=metric_meta\n )\n", (5949, 6019), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((6071, 6133), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '[]', 'meta': '{}'}), "(retcode=0, retmsg='no data', data=[], meta={})\n", (6086, 6133), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((8265, 8326), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""can not found this job"""'}), "(retcode=101, retmsg='can not found this job')\n", (8280, 8326), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((10456, 10561), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'output_model_json', 'meta': 'this_component_model_meta'}), "(retcode=0, retmsg='success', data=output_model_json, meta=\n this_component_model_meta)\n", (10471, 10561), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((10582, 10635), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '{}'}), "(retcode=0, retmsg='no data', data={})\n", (10597, 10635), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((10915, 10968), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '[]'}), "(retcode=0, retmsg='no data', data=[])\n", (10930, 10968), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((11508, 11616), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'output_data', 'meta': "{'header': header, 'total': total}"}), "(retcode=0, retmsg='success', data=output_data, meta={\n 'header': header, 'total': total})\n", (11523, 11616), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((11637, 11690), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""no data"""', 'data': '[]'}), "(retcode=0, retmsg='no data', data=[])\n", (11652, 11690), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((12032, 12083), 'fate_flow.utils.api_utils.error_response', 'error_response', ([], {'response_code': '(500)', 'retmsg': '"""no data"""'}), "(response_code=500, retmsg='no data')\n", (12046, 12083), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((12118, 12172), 'fate_flow.utils.api_utils.error_response', 'error_response', ([], {'response_code': '(500)', 'retmsg': '"""limit is 0"""'}), "(response_code=500, retmsg='limit is 0')\n", (12132, 12172), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((12261, 12272), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12270, 12272), False, 'import os\n'), ((12439, 12477), 'os.path.dirname', 'os.path.dirname', (['output_data_file_path'], {}), '(output_data_file_path)\n', (12454, 12477), False, 'import os\n'), ((13486, 13498), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (13496, 13498), False, 'import io\n'), ((13513, 13559), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'memory_file', 'mode': '"""w:gz"""'}), "(fileobj=memory_file, 
mode='w:gz')\n", (13525, 13559), False, 'import tarfile\n'), ((14298, 14375), 'flask.send_file', 'send_file', (['memory_file'], {'attachment_filename': 'tar_file_name', 'as_attachment': '(True)'}), '(memory_file, attachment_filename=tar_file_name, as_attachment=True)\n', (14307, 14375), False, 'from flask import Flask, request, send_file, make_response\n'), ((14625, 14775), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': "{'table_name': data_views[0].f_table_name, 'table_namespace': data_views[0]\n .f_table_namespace}"}), "(retcode=0, retmsg='success', data={'table_name': data_views\n [0].f_table_name, 'table_namespace': data_views[0].f_table_namespace})\n", (14640, 14775), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((14862, 14964), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': '"""No found table, please check if the parameters are correct"""'}), "(retcode=100, retmsg=\n 'No found table, please check if the parameters are correct')\n", (14877, 14964), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((15333, 15356), 'arch.api.utils.core_utils.deserialize_b64', 'deserialize_b64', (['metric'], {}), '(metric)\n', (15348, 15356), False, 'from arch.api.utils.core_utils import deserialize_b64\n'), ((18099, 18122), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (18120, 18122), False, 'from fate_flow.db.db_models import Job, DB\n'), ((8163, 8239), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""can not found this component parameters"""'}), "(retcode=0, retmsg='can not found this component parameters')\n", (8178, 8239), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((9854, 9931), 'google.protobuf.json_format.MessageToDict', 'json_format.MessageToDict', (['buffer_object'], {'including_default_value_fields': '(True)'}), '(buffer_object, including_default_value_fields=True)\n', (9879, 9931), False, 'from google.protobuf import json_format\n'), ((12290, 12301), 'arch.api.utils.core_utils.fate_uuid', 'fate_uuid', ([], {}), '()\n', (12299, 12301), False, 'from arch.api.utils.core_utils import fate_uuid\n'), ((13175, 13218), 'json.dump', 'json.dump', (["{'header': header}", 'fw'], {'indent': '(4)'}), "({'header': header}, fw, indent=4)\n", (13184, 13218), False, 'import json\n'), ((13599, 13653), 'os.path.relpath', 'os.path.relpath', (['output_data_file_path', 'output_tmp_dir'], {}), '(output_data_file_path, output_tmp_dir)\n', (13614, 13653), False, 'import os\n'), ((13699, 13758), 'os.path.relpath', 'os.path.relpath', (['output_data_meta_file_path', 'output_tmp_dir'], {}), '(output_data_meta_file_path, output_tmp_dir)\n', (13714, 13758), False, 'import os\n'), ((17515, 17561), 'fate_flow.utils.data_utils.dataset_to_list', 'data_utils.dataset_to_list', (['src_value.features'], {}), '(src_value.features)\n', (17541, 17561), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((17598, 17635), 'fate_flow.utils.data_utils.dataset_to_list', 'data_utils.dataset_to_list', (['src_value'], {}), '(src_value)\n', (17624, 17635), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((10221, 10298), 'google.protobuf.json_format.MessageToDict', 'json_format.MessageToDict', (['buffer_object'], {'including_default_value_fields': '(True)'}), '(buffer_object, 
including_default_value_fields=True)\n', (10246, 10298), False, 'from google.protobuf import json_format\n'), ((13847, 13885), 'os.path.dirname', 'os.path.dirname', (['output_data_file_path'], {}), '(output_data_file_path)\n', (13862, 13885), False, 'import os\n'), ((13952, 13974), 'fate_flow.settings.stat_logger.warning', 'stat_logger.warning', (['e'], {}), '(e)\n', (13971, 13974), False, 'from fate_flow.settings import stat_logger\n'), ((18469, 18499), 'arch.api.utils.core_utils.json_loads', 'json_loads', (['job.f_runtime_conf'], {}), '(job.f_runtime_conf)\n', (18479, 18499), False, 'from arch.api.utils.core_utils import json_loads\n'), ((8061, 8129), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'output_parameters'}), "(retcode=0, retmsg='success', data=output_parameters)\n", (8076, 8129), False, 'from fate_flow.utils.api_utils import get_json_result, error_response\n'), ((18217, 18247), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_runtime_conf'], {}), '(Job.f_runtime_conf)\n', (18227, 18247), False, 'from fate_flow.db.db_models import Job, DB\n')]
import os
import sys

from fate_arch import storage
from fate_arch.common import EngineType
from fate_flow.controller.job_controller import JobController
from fate_flow.entity.run_status import JobInheritanceStatus, TaskStatus
from fate_flow.operation.job_saver import JobSaver
from fate_flow.utils.log_utils import schedule_logger
from fate_arch.computing import ComputingEngine
from fate_flow.db.dependence_registry import DependenceRegistry
from fate_flow.entity import ComponentProvider
from fate_flow.entity.types import FateDependenceName, ComponentProviderName, FateDependenceStorageEngine, WorkerName
from fate_flow.manager.provider_manager import ProviderManager
from fate_flow.manager.worker_manager import WorkerManager
from fate_flow.settings import DEPENDENT_DISTRIBUTION, FATE_FLOW_UPDATE_CHECK, ENGINES
from fate_flow.utils import schedule_utils, job_utils, process_utils
from fate_flow.worker.job_inheritor import JobInherit


class DependenceManager:
    @classmethod
    def check_job_dependence(cls, job):
        if cls.check_job_inherit_dependence(job) and cls.check_spark_dependence(job):
            return True
        else:
            return False

    @classmethod
    def check_job_inherit_dependence(cls, job):
        schedule_logger(job.f_job_id).info(
            f"check job inherit dependence: {job.f_inheritance_info}, {job.f_inheritance_status}")
        if job.f_inheritance_info:
            if job.f_inheritance_status == JobInheritanceStatus.WAITING:
                cls.start_inheriting_job(job)
                return False
            elif job.f_inheritance_status == JobInheritanceStatus.RUNNING:
                return False
            elif job.f_inheritance_status == JobInheritanceStatus.FAILED:
                raise Exception("job inheritance failed")
            else:
                return True
        else:
            return True

    @classmethod
    def component_check(cls, job, check_type="inheritance"):
        if check_type == "rerun":
            task_list = JobSaver.query_task(job_id=job.f_job_id, party_id=job.f_party_id, role=job.f_role,
                                            status=TaskStatus.SUCCESS, only_latest=True)
            tasks = {}
            for task in task_list:
                tasks[task.f_component_name] = task
        else:
            tasks = JobController.load_tasks(component_list=job.f_inheritance_info.get("component_list", []),
                                             job_id=job.f_inheritance_info.get("job_id"),
                                             role=job.f_role, party_id=job.f_party_id)
        tracker_dict = JobController.load_task_tracker(tasks)
        missing_dependence_component_list = []
        # data dependence
        for tracker in tracker_dict.values():
            table_infos = tracker.get_output_data_info()
            for table in table_infos:
                table_meta = storage.StorageTableMeta(name=table.f_table_name, namespace=table.f_table_namespace)
                if not table_meta:
                    missing_dependence_component_list.append(tracker.component_name)
                    continue
        if check_type == "rerun":
            return missing_dependence_component_list
        elif check_type == "inheritance":
            # reload component list
            return list(set(job.f_inheritance_info.get("component_list", [])) - set(missing_dependence_component_list))

    @classmethod
    def start_inheriting_job(cls, job):
        JobSaver.update_job(job_info={"job_id": job.f_job_id, "role": job.f_role, "party_id": job.f_party_id,
                                      "inheritance_status": JobInheritanceStatus.RUNNING})
        conf_dir = job_utils.get_job_directory(job_id=job.f_job_id)
        os.makedirs(conf_dir, exist_ok=True)
        process_cmd = [
            sys.executable or 'python3',
            sys.modules[JobInherit.__module__].__file__,
            '--job_id', job.f_job_id,
            '--role', job.f_role,
            '--party_id', job.f_party_id,
        ]
        log_dir = os.path.join(job_utils.get_job_log_directory(job_id=job.f_job_id), "job_inheritance")
        p = process_utils.run_subprocess(job_id=job.f_job_id, config_dir=conf_dir, process_cmd=process_cmd,
                                         log_dir=log_dir, process_name="job_inheritance")

    @classmethod
    def check_spark_dependence(cls, job):
        if not DEPENDENT_DISTRIBUTION:
            return True
        engine_name = ENGINES.get(EngineType.COMPUTING)
        schedule_logger(job.f_job_id).info(f"job engine name: {engine_name}")
        if engine_name not in [ComputingEngine.SPARK]:
            return True
        dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job.f_dsl, runtime_conf=job.f_runtime_conf,
                                                       train_runtime_conf=job.f_train_runtime_conf)
        provider_group = ProviderManager.get_job_provider_group(dsl_parser=dsl_parser)
        version_provider_info = {}
        fate_flow_version_provider_info = {}
        schedule_logger(job.f_job_id).info(f'group_info:{provider_group}')
        for group_key, group_info in provider_group.items():
            if group_info["provider"]["name"] == ComponentProviderName.FATE_FLOW.value and \
                    group_info["provider"]["version"] not in fate_flow_version_provider_info:
                fate_flow_version_provider_info[group_info["provider"]["version"]] = group_info["provider"]
            if group_info["provider"]["name"] == ComponentProviderName.FATE.value and \
                    group_info["provider"]["version"] not in version_provider_info:
                version_provider_info[group_info["provider"]["version"]] = group_info["provider"]
        schedule_logger(job.f_job_id).info(f'version_provider_info:{version_provider_info}')
        schedule_logger(job.f_job_id).info(f'fate_flow_version_provider_info:{fate_flow_version_provider_info}')
        if not version_provider_info:
            version_provider_info = fate_flow_version_provider_info
        check_tag, upload_tag, upload_details = cls.check_upload(job.f_job_id, version_provider_info,
                                                                 fate_flow_version_provider_info)
        if upload_tag:
            cls.upload_spark_dependence(job, upload_details)
        return check_tag

    @classmethod
    def check_upload(cls, job_id, provider_group, fate_flow_version_provider_info,
                     storage_engine=FateDependenceStorageEngine.HDFS.value):
        schedule_logger(job_id).info("start Check if need to upload dependencies")
        schedule_logger(job_id).info(f"{provider_group}")
        upload_details = {}
        check_tag = True
        upload_total = 0
        for version, provider_info in provider_group.items():
            upload_details[version] = {}
            provider = ComponentProvider(**provider_info)
            for dependence_type in [FateDependenceName.Fate_Source_Code.value, FateDependenceName.Python_Env.value]:
                schedule_logger(job_id).info(f"{dependence_type}")
                dependencies_storage_info = DependenceRegistry.get_dependencies_storage_meta(
                    storage_engine=storage_engine,
                    version=provider.version,
                    type=dependence_type,
                    get_or_one=True
                )
                need_upload = False
                if dependencies_storage_info:
                    if dependencies_storage_info.f_upload_status:
                        # version dependence uploading
                        check_tag = False
                        continue
                    elif not dependencies_storage_info.f_storage_path:
                        need_upload = True
                        upload_total += 1
                    elif dependence_type == FateDependenceName.Fate_Source_Code.value:
                        if provider.name == ComponentProviderName.FATE.value:
                            check_fate_flow_provider_status = False
                            if fate_flow_version_provider_info.values():
                                flow_provider = ComponentProvider(**list(fate_flow_version_provider_info.values())[0])
                                check_fate_flow_provider_status = DependenceRegistry.get_modify_time(flow_provider.path) \
                                    != dependencies_storage_info.f_fate_flow_snapshot_time
                            if FATE_FLOW_UPDATE_CHECK and check_fate_flow_provider_status:
                                need_upload = True
                                upload_total += 1
                            elif DependenceRegistry.get_modify_time(provider.path) != \
                                    dependencies_storage_info.f_snapshot_time:
                                need_upload = True
                                upload_total += 1
                        elif provider.name == ComponentProviderName.FATE_FLOW.value and FATE_FLOW_UPDATE_CHECK:
                            if DependenceRegistry.get_modify_time(provider.path) != \
                                    dependencies_storage_info.f_fate_flow_snapshot_time:
                                need_upload = True
                                upload_total += 1
                else:
                    need_upload = True
                    upload_total += 1
                if need_upload:
                    upload_details[version][dependence_type] = provider
        if upload_total > 0:
            check_tag = False
        schedule_logger(job_id).info(f"check dependencies result: {check_tag}, {upload_details}")
        return check_tag, upload_total > 0, upload_details

    @classmethod
    def upload_spark_dependence(cls, job, upload_details, storage_engine=FateDependenceStorageEngine.HDFS.value):
        schedule_logger(job.f_job_id).info(f"start upload dependence: {upload_details}")
        for version, type_provider in upload_details.items():
            for dependence_type, provider in type_provider.items():
                storage_meta = {
                    "f_storage_engine": storage_engine,
                    "f_type": dependence_type,
                    "f_version": version,
                    "f_upload_status": True
                }
                schedule_logger(job.f_job_id).info(f"update dependence storage meta:{storage_meta}")
                DependenceRegistry.save_dependencies_storage_meta(storage_meta, status_check=True)
                WorkerManager.start_general_worker(worker_name=WorkerName.DEPENDENCE_UPLOAD, job_id=job.f_job_id,
                                                   role=job.f_role, party_id=job.f_party_id, provider=provider,
                                                   dependence_type=dependence_type, callback=cls.record_upload_process,
                                                   callback_param=["dependence_type", "pid", "provider"])

    @classmethod
    def record_upload_process(cls, provider, dependence_type, pid,
                              storage_engine=FateDependenceStorageEngine.HDFS.value):
        storage_meta = {
            "f_storage_engine": storage_engine,
            "f_type": dependence_type,
            "f_version": provider.version,
            "f_pid": pid,
            "f_upload_status": True
        }
        DependenceRegistry.save_dependencies_storage_meta(storage_meta)

    @classmethod
    def kill_upload_process(cls, version, storage_engine, dependence_type):
        storage_meta = {
            "f_storage_engine": storage_engine,
            "f_type": dependence_type,
            "f_version": version,
            "f_upload_status": False,
            "f_pid": 0
        }
        DependenceRegistry.save_dependencies_storage_meta(storage_meta)
[ "fate_flow.utils.log_utils.schedule_logger", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.utils.job_utils.get_job_directory", "fate_flow.settings.ENGINES.get", "fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta", "fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time", "fate_flow.operation.job_saver.JobSaver.update_job", "fate_flow.db.dependence_registry.DependenceRegistry.get_dependencies_storage_meta", "fate_flow.utils.process_utils.run_subprocess", "fate_flow.manager.provider_manager.ProviderManager.get_job_provider_group", "fate_flow.operation.job_saver.JobSaver.query_task", "fate_flow.utils.schedule_utils.get_job_dsl_parser", "fate_flow.controller.job_controller.JobController.load_task_tracker", "fate_flow.manager.worker_manager.WorkerManager.start_general_worker", "fate_flow.entity.ComponentProvider" ]
[((2674, 2712), 'fate_flow.controller.job_controller.JobController.load_task_tracker', 'JobController.load_task_tracker', (['tasks'], {}), '(tasks)\n', (2705, 2712), False, 'from fate_flow.controller.job_controller import JobController\n'), ((3541, 3704), 'fate_flow.operation.job_saver.JobSaver.update_job', 'JobSaver.update_job', ([], {'job_info': "{'job_id': job.f_job_id, 'role': job.f_role, 'party_id': job.f_party_id,\n 'inheritance_status': JobInheritanceStatus.RUNNING}"}), "(job_info={'job_id': job.f_job_id, 'role': job.f_role,\n 'party_id': job.f_party_id, 'inheritance_status': JobInheritanceStatus.\n RUNNING})\n", (3560, 3704), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((3753, 3801), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (3780, 3801), False, 'from fate_flow.utils import schedule_utils, job_utils, process_utils\n'), ((3810, 3846), 'os.makedirs', 'os.makedirs', (['conf_dir'], {'exist_ok': '(True)'}), '(conf_dir, exist_ok=True)\n', (3821, 3846), False, 'import os\n'), ((4209, 4357), 'fate_flow.utils.process_utils.run_subprocess', 'process_utils.run_subprocess', ([], {'job_id': 'job.f_job_id', 'config_dir': 'conf_dir', 'process_cmd': 'process_cmd', 'log_dir': 'log_dir', 'process_name': '"""job_inheritance"""'}), "(job_id=job.f_job_id, config_dir=conf_dir,\n process_cmd=process_cmd, log_dir=log_dir, process_name='job_inheritance')\n", (4237, 4357), False, 'from fate_flow.utils import schedule_utils, job_utils, process_utils\n'), ((4540, 4573), 'fate_flow.settings.ENGINES.get', 'ENGINES.get', (['EngineType.COMPUTING'], {}), '(EngineType.COMPUTING)\n', (4551, 4573), False, 'from fate_flow.settings import DEPENDENT_DISTRIBUTION, FATE_FLOW_UPDATE_CHECK, ENGINES\n'), ((4752, 4883), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job.f_dsl', 'runtime_conf': 'job.f_runtime_conf', 'train_runtime_conf': 'job.f_train_runtime_conf'}), '(dsl=job.f_dsl, runtime_conf=job.\n f_runtime_conf, train_runtime_conf=job.f_train_runtime_conf)\n', (4785, 4883), False, 'from fate_flow.utils import schedule_utils, job_utils, process_utils\n'), ((4959, 5020), 'fate_flow.manager.provider_manager.ProviderManager.get_job_provider_group', 'ProviderManager.get_job_provider_group', ([], {'dsl_parser': 'dsl_parser'}), '(dsl_parser=dsl_parser)\n', (4997, 5020), False, 'from fate_flow.manager.provider_manager import ProviderManager\n'), ((11480, 11543), 'fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta', 'DependenceRegistry.save_dependencies_storage_meta', (['storage_meta'], {}), '(storage_meta)\n', (11529, 11543), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((11863, 11926), 'fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta', 'DependenceRegistry.save_dependencies_storage_meta', (['storage_meta'], {}), '(storage_meta)\n', (11912, 11926), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((2023, 2155), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'job_id': 'job.f_job_id', 'party_id': 'job.f_party_id', 'role': 'job.f_role', 'status': 'TaskStatus.SUCCESS', 'only_latest': '(True)'}), '(job_id=job.f_job_id, party_id=job.f_party_id, role=job.\n f_role, status=TaskStatus.SUCCESS, only_latest=True)\n', (2042, 2155), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((4124, 4176), 
'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job.f_job_id'}), '(job_id=job.f_job_id)\n', (4155, 4176), False, 'from fate_flow.utils import schedule_utils, job_utils, process_utils\n'), ((6954, 6988), 'fate_flow.entity.ComponentProvider', 'ComponentProvider', ([], {}), '(**provider_info)\n', (6971, 6988), False, 'from fate_flow.entity import ComponentProvider\n'), ((1248, 1277), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (1263, 1277), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((2956, 3045), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'table.f_table_name', 'namespace': 'table.f_table_namespace'}), '(name=table.f_table_name, namespace=table.\n f_table_namespace)\n', (2980, 3045), False, 'from fate_arch import storage\n'), ((4582, 4611), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (4597, 4611), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5109, 5138), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (5124, 5138), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((6617, 6640), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (6632, 6640), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((6700, 6723), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (6715, 6723), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((7217, 7370), 'fate_flow.db.dependence_registry.DependenceRegistry.get_dependencies_storage_meta', 'DependenceRegistry.get_dependencies_storage_meta', ([], {'storage_engine': 'storage_engine', 'version': 'provider.version', 'type': 'dependence_type', 'get_or_one': '(True)'}), '(storage_engine=\n storage_engine, version=provider.version, type=dependence_type,\n get_or_one=True)\n', (7265, 7370), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((9682, 9705), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (9697, 9705), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((9971, 10000), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (9986, 10000), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((10539, 10625), 'fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta', 'DependenceRegistry.save_dependencies_storage_meta', (['storage_meta'], {'status_check': '(True)'}), '(storage_meta,\n status_check=True)\n', (10588, 10625), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((10638, 10938), 'fate_flow.manager.worker_manager.WorkerManager.start_general_worker', 'WorkerManager.start_general_worker', ([], {'worker_name': 'WorkerName.DEPENDENCE_UPLOAD', 'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id', 'provider': 'provider', 'dependence_type': 'dependence_type', 'callback': 'cls.record_upload_process', 'callback_param': "['dependence_type', 'pid', 'provider']"}), "(worker_name=WorkerName.DEPENDENCE_UPLOAD,\n job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id, provider\n =provider, dependence_type=dependence_type, callback=cls.\n record_upload_process, 
callback_param=['dependence_type', 'pid',\n 'provider'])\n", (10672, 10938), False, 'from fate_flow.manager.worker_manager import WorkerManager\n'), ((5814, 5843), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (5829, 5843), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((5911, 5940), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (5926, 5940), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((7122, 7145), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (7137, 7145), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((10438, 10467), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (10453, 10467), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((8386, 8440), 'fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time', 'DependenceRegistry.get_modify_time', (['flow_provider.path'], {}), '(flow_provider.path)\n', (8420, 8440), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((8789, 8838), 'fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time', 'DependenceRegistry.get_modify_time', (['provider.path'], {}), '(provider.path)\n', (8823, 8838), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((9167, 9216), 'fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time', 'DependenceRegistry.get_modify_time', (['provider.path'], {}), '(provider.path)\n', (9201, 9216), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n')]
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import os
import signal
import sys
import time
import traceback

import grpc
from flask import Flask
from grpc._cython import cygrpc
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from werkzeug.serving import run_simple

from fate_arch.storage.metastore.db_models import init_database_tables as init_arch_db
from fate_flow.apps.data_access_app import manager as data_access_app_manager
from fate_flow.apps.job_app import manager as job_app_manager
from fate_flow.apps.model_app import manager as model_app_manager
from fate_flow.apps.permission_app import manager as permission_app_manager
from fate_flow.apps.pipeline_app import manager as pipeline_app_manager
from fate_flow.apps.proxy_app import manager as proxy_app_manager
from fate_flow.apps.table_app import manager as table_app_manager
from fate_flow.apps.tracking_app import manager as tracking_app_manager
from fate_flow.apps.version_app import manager as version_app_manager
from fate_flow.db.db_models import init_database_tables as init_flow_db
from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.entity.types import ProcessRole
from fate_flow.manager.resource_manager import ResourceManager
from fate_flow.scheduler.dag_scheduler import DAGScheduler
from fate_flow.scheduler.detector import Detector
from fate_flow.scheduling_apps.initiator_app import manager as initiator_app_manager
from fate_flow.scheduling_apps.party_app import manager as party_app_manager
from fate_flow.scheduling_apps.tracker_app import manager as tracker_app_manager
from fate_flow.settings import IP, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, stat_logger, API_VERSION, GRPC_SERVER_MAX_WORKERS
from fate_flow.utils import job_utils
from fate_flow.utils.api_utils import get_json_result
from fate_flow.utils.authentication_utils import PrivilegeAuth
from fate_flow.utils.grpc_utils import UnaryService
from fate_flow.utils.proto_compatibility import proxy_pb2_grpc
from fate_flow.utils.service_utils import ServiceUtils
from fate_flow.utils.xthread import ThreadPoolExecutor

'''
Initialize the manager
'''

manager = Flask(__name__)


@manager.errorhandler(500)
def internal_server_error(e):
    stat_logger.exception(e)
    return get_json_result(retcode=100, retmsg=str(e))


if __name__ == '__main__':
    manager.url_map.strict_slashes = False
    app = DispatcherMiddleware(
        manager,
        {
            '/{}/data'.format(API_VERSION): data_access_app_manager,
            '/{}/model'.format(API_VERSION): model_app_manager,
            '/{}/job'.format(API_VERSION): job_app_manager,
            '/{}/table'.format(API_VERSION): table_app_manager,
            '/{}/tracking'.format(API_VERSION): tracking_app_manager,
            '/{}/pipeline'.format(API_VERSION): pipeline_app_manager,
            '/{}/permission'.format(API_VERSION): permission_app_manager,
            '/{}/version'.format(API_VERSION): version_app_manager,
            '/{}/party'.format(API_VERSION): party_app_manager,
            '/{}/initiator'.format(API_VERSION): initiator_app_manager,
            '/{}/tracker'.format(API_VERSION): tracker_app_manager,
            '/{}/forward'.format(API_VERSION): proxy_app_manager
        }
    )

    # init
    # signal.signal(signal.SIGTERM, job_utils.cleaning)
    signal.signal(signal.SIGCHLD, job_utils.wait_child_process)
    # init db
    init_flow_db()
    init_arch_db()
    # init runtime config
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--standalone_node', default=False, help="if standalone node mode or not ", action='store_true')
    args = parser.parse_args()
    RuntimeConfig.init_env()
    RuntimeConfig.set_process_role(ProcessRole.DRIVER)
    PrivilegeAuth.init()
    ServiceUtils.register()
    ResourceManager.initialize()
    Detector(interval=5 * 1000).start()
    DAGScheduler(interval=2 * 1000).start()
    thread_pool_executor = ThreadPoolExecutor(max_workers=GRPC_SERVER_MAX_WORKERS)
    stat_logger.info(f"start grpc server thread pool by {thread_pool_executor._max_workers} max workers")
    server = grpc.server(thread_pool=thread_pool_executor,
                         options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
                                  (cygrpc.ChannelArgKey.max_receive_message_length, -1)])

    proxy_pb2_grpc.add_DataTransferServiceServicer_to_server(UnaryService(), server)
    server.add_insecure_port("{}:{}".format(IP, GRPC_PORT))
    server.start()
    stat_logger.info("FATE Flow grpc server start successfully")

    # start http server
    try:
        stat_logger.info("FATE Flow http server start...")
        run_simple(hostname=IP, port=HTTP_PORT, application=app, threaded=True)
    except OSError as e:
        traceback.print_exc()
        os.kill(os.getpid(), signal.SIGKILL)
    except Exception as e:
        traceback.print_exc()
        os.kill(os.getpid(), signal.SIGKILL)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
        sys.exit(0)
[ "fate_flow.entity.runtime_config.RuntimeConfig.set_process_role", "fate_flow.db.db_models.init_database_tables", "fate_flow.utils.xthread.ThreadPoolExecutor", "fate_flow.scheduler.detector.Detector", "fate_flow.scheduler.dag_scheduler.DAGScheduler", "fate_flow.utils.authentication_utils.PrivilegeAuth.init", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.grpc_utils.UnaryService", "fate_flow.entity.runtime_config.RuntimeConfig.init_env", "fate_flow.settings.stat_logger.info", "fate_flow.manager.resource_manager.ResourceManager.initialize", "fate_flow.utils.service_utils.ServiceUtils.register" ]
[((2712, 2727), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (2717, 2727), False, 'from flask import Flask\n'), ((2791, 2815), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (2812, 2815), False, 'from fate_flow.settings import IP, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, stat_logger, API_VERSION, GRPC_SERVER_MAX_WORKERS\n'), ((3897, 3956), 'signal.signal', 'signal.signal', (['signal.SIGCHLD', 'job_utils.wait_child_process'], {}), '(signal.SIGCHLD, job_utils.wait_child_process)\n', (3910, 3956), False, 'import signal\n'), ((3975, 3989), 'fate_flow.db.db_models.init_database_tables', 'init_flow_db', ([], {}), '()\n', (3987, 3989), True, 'from fate_flow.db.db_models import init_database_tables as init_flow_db\n'), ((3994, 4008), 'fate_arch.storage.metastore.db_models.init_database_tables', 'init_arch_db', ([], {}), '()\n', (4006, 4008), True, 'from fate_arch.storage.metastore.db_models import init_database_tables as init_arch_db\n'), ((4069, 4094), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4092, 4094), False, 'import argparse\n'), ((4251, 4275), 'fate_flow.entity.runtime_config.RuntimeConfig.init_env', 'RuntimeConfig.init_env', ([], {}), '()\n', (4273, 4275), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((4280, 4330), 'fate_flow.entity.runtime_config.RuntimeConfig.set_process_role', 'RuntimeConfig.set_process_role', (['ProcessRole.DRIVER'], {}), '(ProcessRole.DRIVER)\n', (4310, 4330), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((4335, 4355), 'fate_flow.utils.authentication_utils.PrivilegeAuth.init', 'PrivilegeAuth.init', ([], {}), '()\n', (4353, 4355), False, 'from fate_flow.utils.authentication_utils import PrivilegeAuth\n'), ((4360, 4383), 'fate_flow.utils.service_utils.ServiceUtils.register', 'ServiceUtils.register', ([], {}), '()\n', (4381, 4383), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((4388, 4416), 'fate_flow.manager.resource_manager.ResourceManager.initialize', 'ResourceManager.initialize', ([], {}), '()\n', (4414, 4416), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((4528, 4583), 'fate_flow.utils.xthread.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'GRPC_SERVER_MAX_WORKERS'}), '(max_workers=GRPC_SERVER_MAX_WORKERS)\n', (4546, 4583), False, 'from fate_flow.utils.xthread import ThreadPoolExecutor\n'), ((4588, 4699), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""start grpc server thread pool by {thread_pool_executor._max_workers} max workers"""'], {}), "(\n f'start grpc server thread pool by {thread_pool_executor._max_workers} max workers'\n )\n", (4604, 4699), False, 'from fate_flow.settings import IP, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, stat_logger, API_VERSION, GRPC_SERVER_MAX_WORKERS\n'), ((4703, 4875), 'grpc.server', 'grpc.server', ([], {'thread_pool': 'thread_pool_executor', 'options': '[(cygrpc.ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.\n max_receive_message_length, -1)]'}), '(thread_pool=thread_pool_executor, options=[(cygrpc.\n ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.\n max_receive_message_length, -1)])\n', (4714, 4875), False, 'import grpc\n'), ((5094, 5154), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""FATE Flow grpc server start successfully"""'], {}), "('FATE Flow grpc server start successfully')\n", (5110, 5154), False, 'from fate_flow.settings import IP, 
HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, stat_logger, API_VERSION, GRPC_SERVER_MAX_WORKERS\n'), ((4987, 5001), 'fate_flow.utils.grpc_utils.UnaryService', 'UnaryService', ([], {}), '()\n', (4999, 5001), False, 'from fate_flow.utils.grpc_utils import UnaryService\n'), ((5196, 5246), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""FATE Flow http server start..."""'], {}), "('FATE Flow http server start...')\n", (5212, 5246), False, 'from fate_flow.settings import IP, HTTP_PORT, GRPC_PORT, _ONE_DAY_IN_SECONDS, stat_logger, API_VERSION, GRPC_SERVER_MAX_WORKERS\n'), ((5255, 5326), 'werkzeug.serving.run_simple', 'run_simple', ([], {'hostname': 'IP', 'port': 'HTTP_PORT', 'application': 'app', 'threaded': '(True)'}), '(hostname=IP, port=HTTP_PORT, application=app, threaded=True)\n', (5265, 5326), False, 'from werkzeug.serving import run_simple\n'), ((4421, 4448), 'fate_flow.scheduler.detector.Detector', 'Detector', ([], {'interval': '(5 * 1000)'}), '(interval=5 * 1000)\n', (4429, 4448), False, 'from fate_flow.scheduler.detector import Detector\n'), ((4461, 4492), 'fate_flow.scheduler.dag_scheduler.DAGScheduler', 'DAGScheduler', ([], {'interval': '(2 * 1000)'}), '(interval=2 * 1000)\n', (4473, 4492), False, 'from fate_flow.scheduler.dag_scheduler import DAGScheduler\n'), ((5360, 5381), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5379, 5381), False, 'import traceback\n'), ((5462, 5483), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (5481, 5483), False, 'import traceback\n'), ((5571, 5602), 'time.sleep', 'time.sleep', (['_ONE_DAY_IN_SECONDS'], {}), '(_ONE_DAY_IN_SECONDS)\n', (5581, 5602), False, 'import time\n'), ((5664, 5675), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5672, 5675), False, 'import sys\n'), ((5398, 5409), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5407, 5409), False, 'import os\n'), ((5500, 5511), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5509, 5511), False, 'import os\n')]
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
from fate_flow.utils.log_utils import getLogger
from fate_flow.components._base import (
    BaseParam,
    ComponentBase,
    ComponentMeta,
    ComponentInputProtocol,
)
from fate_flow.operation.job_tracker import Tracker
from fate_flow.entity import MetricMeta

LOGGER = getLogger()

cache_loader_cpn_meta = ComponentMeta("CacheLoader")


@cache_loader_cpn_meta.bind_param
class CacheLoaderParam(BaseParam):
    def __init__(self, cache_key=None, job_id=None, component_name=None, cache_name=None):
        super().__init__()
        self.cache_key = cache_key
        self.job_id = job_id
        self.component_name = component_name
        self.cache_name = cache_name

    def check(self):
        return True


@cache_loader_cpn_meta.bind_runner.on_guest.on_host
class CacheLoader(ComponentBase):
    def __init__(self):
        super(CacheLoader, self).__init__()
        self.parameters = {}
        self.cache_key = None
        self.job_id = None
        self.component_name = None
        self.cache_name = None

    def _run(self, cpn_input: ComponentInputProtocol):
        self.parameters = cpn_input.parameters
        LOGGER.info(self.parameters)
        for k, v in self.parameters.items():
            if hasattr(self, k):
                setattr(self, k, v)
        tracker = Tracker(job_id=self.job_id,
                          role=self.tracker.role,
                          party_id=self.tracker.party_id,
                          component_name=self.component_name)
        LOGGER.info(f"query cache by cache key: {self.cache_key} cache name: {self.cache_name}")
        # todo: use tracker client but not tracker
        caches = tracker.query_output_cache(cache_key=self.cache_key, cache_name=self.cache_name)
        if not caches:
            raise Exception("can not found this cache")
        elif len(caches) > 1:
            raise Exception(f"found {len(caches)} caches, only support one, please check parameters")
        else:
            cache = caches[0]
            self.cache_output = cache
            tracker.job_id = self.tracker.job_id
            tracker.component_name = self.tracker.component_name
            metric_meta = cache.to_dict()
            metric_meta.pop("data")
            metric_meta["component_name"] = self.component_name
            self.tracker.set_metric_meta(metric_namespace="cache_loader",
                                         metric_name=cache.name,
                                         metric_meta=MetricMeta(name="cache", metric_type="cache_info", extra_metas=metric_meta))
[ "fate_flow.entity.MetricMeta", "fate_flow.operation.job_tracker.Tracker", "fate_flow.utils.log_utils.getLogger", "fate_flow.components._base.ComponentMeta" ]
[((890, 901), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (899, 901), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((927, 955), 'fate_flow.components._base.ComponentMeta', 'ComponentMeta', (['"""CacheLoader"""'], {}), "('CacheLoader')\n", (940, 955), False, 'from fate_flow.components._base import BaseParam, ComponentBase, ComponentMeta, ComponentInputProtocol\n'), ((1913, 2037), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'self.job_id', 'role': 'self.tracker.role', 'party_id': 'self.tracker.party_id', 'component_name': 'self.component_name'}), '(job_id=self.job_id, role=self.tracker.role, party_id=self.tracker.\n party_id, component_name=self.component_name)\n', (1920, 2037), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((3016, 3091), 'fate_flow.entity.MetricMeta', 'MetricMeta', ([], {'name': '"""cache"""', 'metric_type': '"""cache_info"""', 'extra_metas': 'metric_meta'}), "(name='cache', metric_type='cache_info', extra_metas=metric_meta)\n", (3026, 3091), False, 'from fate_flow.entity import MetricMeta\n')]
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import base64
import typing
from typing import List

from fate_arch import storage
from fate_arch.abc import AddressABC
from fate_flow.utils.log_utils import getLogger
from fate_flow.entity import RunParameters
from fate_arch.common.base_utils import serialize_b64, deserialize_b64
from fate_flow.entity import RetCode
from fate_flow.entity import Metric, MetricMeta
from fate_flow.operation.job_tracker import Tracker
from fate_flow.utils import api_utils

LOGGER = getLogger()


class TrackerClient(object):
    def __init__(self, job_id: str, role: str, party_id: int,
                 model_id: str = None,
                 model_version: str = None,
                 component_name: str = None,
                 component_module_name: str = None,
                 task_id: str = None,
                 task_version: int = None,
                 job_parameters: RunParameters = None
                 ):
        self.job_id = job_id
        self.role = role
        self.party_id = party_id
        self.model_id = model_id
        self.model_version = model_version
        self.component_name = component_name if component_name else 'pipeline'
        self.module_name = component_module_name if component_module_name else 'Pipeline'
        self.task_id = task_id
        self.task_version = task_version
        self.job_parameters = job_parameters
        self.job_tracker = Tracker(job_id=job_id, role=role, party_id=party_id, component_name=component_name,
                                   task_id=task_id,
                                   task_version=task_version,
                                   model_id=model_id,
                                   model_version=model_version,
                                   job_parameters=job_parameters)

    def log_job_metric_data(self, metric_namespace: str, metric_name: str, metrics: List[typing.Union[Metric, dict]]):
        self.log_metric_data_common(metric_namespace=metric_namespace, metric_name=metric_name, metrics=metrics,
                                    job_level=True)

    def log_metric_data(self, metric_namespace: str, metric_name: str, metrics: List[typing.Union[Metric, dict]]):
        self.log_metric_data_common(metric_namespace=metric_namespace, metric_name=metric_name, metrics=metrics,
                                    job_level=False)

    def log_metric_data_common(self, metric_namespace: str, metric_name: str,
                               metrics: List[typing.Union[Metric, dict]], job_level=False):
        LOGGER.info("Request save job {} task {} {} on {} {} metric {} {} data".format(
            self.job_id, self.task_id, self.task_version, self.role, self.party_id, metric_namespace, metric_name))
        request_body = {}
        request_body['metric_namespace'] = metric_namespace
        request_body['metric_name'] = metric_name
        request_body['metrics'] = [serialize_b64(metric if isinstance(metric, Metric) else Metric.from_dict(metric),
                                                 to_str=True) for metric in metrics]
        request_body['job_level'] = job_level
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/metric_data/save'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=request_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"log metric(namespace: {metric_namespace}, name: {metric_name}) data error, response code: {response['retcode']}, msg: {response['retmsg']}")

    def set_job_metric_meta(self, metric_namespace: str, metric_name: str, metric_meta: typing.Union[MetricMeta, dict]):
        self.set_metric_meta_common(metric_namespace=metric_namespace, metric_name=metric_name, metric_meta=metric_meta,
                                    job_level=True)

    def set_metric_meta(self, metric_namespace: str, metric_name: str, metric_meta: typing.Union[MetricMeta, dict]):
        self.set_metric_meta_common(metric_namespace=metric_namespace, metric_name=metric_name, metric_meta=metric_meta,
                                    job_level=False)

    def set_metric_meta_common(self, metric_namespace: str, metric_name: str,
                               metric_meta: typing.Union[MetricMeta, dict], job_level=False):
        LOGGER.info("Request save job {} task {} {} on {} {} metric {} {} meta".format(
            self.job_id, self.task_id, self.task_version, self.role, self.party_id, metric_namespace, metric_name))
        request_body = dict()
        request_body['metric_namespace'] = metric_namespace
        request_body['metric_name'] = metric_name
        request_body['metric_meta'] = serialize_b64(metric_meta if isinstance(metric_meta, MetricMeta) else MetricMeta.from_dict(metric_meta), to_str=True)
        request_body['job_level'] = job_level
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/metric_meta/save'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=request_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"log metric(namespace: {metric_namespace}, name: {metric_name}) meta error, response code: {response['retcode']}, msg: {response['retmsg']}")

    def create_table_meta(self, table_meta):
        request_body = dict()
        for k, v in table_meta.to_dict().items():
            if k == "part_of_data":
                request_body[k] = serialize_b64(v, to_str=True)
            elif k == "schema":
                request_body[k] = serialize_b64(v, to_str=True)
            elif issubclass(type(v), AddressABC):
                request_body[k] = v.__dict__
            else:
                request_body[k] = v
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/table_meta/create'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=request_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"create table meta failed:{response['retmsg']}")

    def get_table_meta(self, table_name, table_namespace):
        request_body = {"table_name": table_name, "namespace": table_namespace}
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/table_meta/get'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=request_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"create table meta failed:{response['retmsg']}")
        else:
            data_table_meta = storage.StorageTableMeta(name=table_name, namespace=table_namespace, new=True)
            data_table_meta.set_metas(**response["data"])
            data_table_meta.address = storage.StorageTableMeta.create_address(
                storage_engine=response["data"].get("engine"),
                address_dict=response["data"].get("address"))
            data_table_meta.part_of_data = deserialize_b64(data_table_meta.part_of_data)
            data_table_meta.schema = deserialize_b64(data_table_meta.schema)
            return data_table_meta

    def save_component_output_model(self, model_buffers: dict, model_alias: str, user_specified_run_parameters: dict = None):
        if not model_buffers:
            return
        component_model = self.job_tracker.pipelined_model.create_component_model(
            component_name=self.component_name,
            component_module_name=self.module_name,
            model_alias=model_alias,
            model_buffers=model_buffers,
            user_specified_run_parameters=user_specified_run_parameters)
        json_body = {"model_id": self.model_id, "model_version": self.model_version, "component_model": component_model}
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/model/save'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=json_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"save component output model failed:{response['retmsg']}")

    def read_component_output_model(self, search_model_alias):
        json_body = {"search_model_alias": search_model_alias, "model_id": self.model_id, "model_version": self.model_version}
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/model/get'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=json_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"get output model failed:{response['retmsg']}")
        else:
            model_buffers = {}
            for model_name, v in response['data'].items():
                model_buffers[model_name] = (v[0], base64.b64decode(v[1].encode()))
            return model_buffers

    def get_model_run_parameters(self):
        json_body = {"model_id": self.model_id, "model_version": self.model_version}
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/model/run_parameters/get'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=json_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"create table meta failed:{response['retmsg']}")
        else:
            return response["data"]

    def log_output_data_info(self, data_name: str, table_namespace: str, table_name: str):
        LOGGER.info("Request save job {} task {} {} on {} {} data {} info".format(
            self.job_id, self.task_id, self.task_version, self.role, self.party_id, data_name))
        request_body = dict()
        request_body["data_name"] = data_name
        request_body["table_namespace"] = table_namespace
        request_body["table_name"] = table_name
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/output_data_info/save'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=request_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"log output data info error, response code: {response['retcode']}, msg: {response['retmsg']}")

    def get_output_data_info(self, data_name=None):
        LOGGER.info("Request read job {} task {} {} on {} {} data {} info".format(
            self.job_id, self.task_id, self.task_version, self.role, self.party_id, data_name))
        request_body = dict()
        request_body["data_name"] = data_name
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/output_data_info/read'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=request_body)
        if response["retcode"] == RetCode.SUCCESS and "data" in response:
            return response["data"]
        else:
            return None

    def log_component_summary(self, summary_data: dict):
        LOGGER.info("Request save job {} task {} {} on {} {} component summary".format(
            self.job_id, self.task_id, self.task_version, self.role, self.party_id))
        request_body = dict()
        request_body["summary"] = summary_data
        response = api_utils.local_api(
            job_id=self.job_id,
            method='POST',
            endpoint='/tracker/{}/{}/{}/{}/{}/{}/summary/save'.format(
                self.job_id, self.component_name, self.task_id, self.task_version, self.role, self.party_id),
            json_body=request_body)
        if response['retcode'] != RetCode.SUCCESS:
            raise Exception(f"log component summary error, response code: {response['retcode']}, msg: {response['retmsg']}")
[ "fate_flow.entity.MetricMeta.from_dict", "fate_flow.utils.log_utils.getLogger", "fate_flow.operation.job_tracker.Tracker", "fate_flow.entity.Metric.from_dict" ]
[((1083, 1094), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (1092, 1094), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((1999, 2215), 'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'component_name': 'component_name', 'task_id': 'task_id', 'task_version': 'task_version', 'model_id': 'model_id', 'model_version': 'model_version', 'job_parameters': 'job_parameters'}), '(job_id=job_id, role=role, party_id=party_id, component_name=\n component_name, task_id=task_id, task_version=task_version, model_id=\n model_id, model_version=model_version, job_parameters=job_parameters)\n', (2006, 2215), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((9796, 9874), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'table_name', 'namespace': 'table_namespace', 'new': '(True)'}), '(name=table_name, namespace=table_namespace, new=True)\n', (9820, 9874), False, 'from fate_arch import storage\n'), ((10280, 10325), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (['data_table_meta.part_of_data'], {}), '(data_table_meta.part_of_data)\n', (10295, 10325), False, 'from fate_arch.common.base_utils import serialize_b64, deserialize_b64\n'), ((10363, 10402), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (['data_table_meta.schema'], {}), '(data_table_meta.schema)\n', (10378, 10402), False, 'from fate_arch.common.base_utils import serialize_b64, deserialize_b64\n'), ((6678, 6711), 'fate_flow.entity.MetricMeta.from_dict', 'MetricMeta.from_dict', (['metric_meta'], {}), '(metric_meta)\n', (6698, 6711), False, 'from fate_flow.entity import Metric, MetricMeta\n'), ((7820, 7849), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (['v'], {'to_str': '(True)'}), '(v, to_str=True)\n', (7833, 7849), False, 'from fate_arch.common.base_utils import serialize_b64, deserialize_b64\n'), ((4028, 4052), 'fate_flow.entity.Metric.from_dict', 'Metric.from_dict', (['metric'], {}), '(metric)\n', (4044, 4052), False, 'from fate_flow.entity import Metric, MetricMeta\n'), ((7916, 7945), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (['v'], {'to_str': '(True)'}), '(v, to_str=True)\n', (7929, 7945), False, 'from fate_arch.common.base_utils import serialize_b64, deserialize_b64\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import glob import operator from collections import OrderedDict import peewee from fate_arch.common.log import sql_logger from fate_flow.settings import stat_logger from fate_arch.common.base_utils import json_loads, current_timestamp from fate_arch.common.file_utils import get_project_base_directory from fate_flow.pipelined_model.pipelined_model import PipelinedModel from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel from fate_flow.utils.service_utils import ServiceUtils gen_key_string_separator = '#' def gen_party_model_id(model_id, role, party_id): return gen_key_string_separator.join([role, str(party_id), model_id]) if model_id else None def gen_model_id(all_party): return gen_key_string_separator.join([all_party_key(all_party), "model"]) def all_party_key(all_party): """ Join all party as party key :param all_party: "role": { "guest": [9999], "host": [10000], "arbiter": [10000] } :return: """ if not all_party: all_party_key = 'all' elif isinstance(all_party, dict): sorted_role_name = sorted(all_party.keys()) all_party_key = gen_key_string_separator.join([ ('%s-%s' % ( role_name, '_'.join([str(p) for p in sorted(set(all_party[role_name]))])) ) for role_name in sorted_role_name]) else: all_party_key = None return all_party_key @DB.connection_context() def query_model_info_from_db(model_version, role=None, party_id=None, model_id=None, query_filters=None, **kwargs): conditions = [] filters = [] aruments = locals() cond_attrs = [attr for attr in ['model_version', 'model_id', 'role', 'party_id'] if aruments[attr]] for f_n in cond_attrs: conditions.append(operator.attrgetter('f_%s' % f_n)(MLModel) == aruments[f_n]) for f_n in kwargs: if hasattr(MLModel, 'f_%s' % f_n): conditions.append(operator.attrgetter('f_%s' % f_n)(MLModel)) if query_filters and isinstance(query_filters, list): for attr in query_filters: attr_name = 'f_%s' % attr if hasattr(MLModel, attr_name): filters.append(operator.attrgetter(attr_name)(MLModel)) if filters: models = MLModel.select(*filters).where(*conditions) else: models = MLModel.select().where(*conditions) if models: return 0, 'Query model info from db success.', [model.to_json() for model in models] else: return 100, 'Query model info failed, cannot find model from db. 
', [] def query_model_info_from_file(model_id=None, model_version=None, role=None, party_id=None, query_filters=None, to_dict=False, **kwargs): res = {} if to_dict else [] model_dir = os.path.join(get_project_base_directory(), 'model_local_cache') glob_dir = f"{model_dir}{os.sep}{role if role else '*'}#{party_id if party_id else '*'}#{model_id if model_id else '*'}{os.sep}{model_version if model_version else '*'}" stat_logger.info(f'glob model dir: {glob_dir}') model_fp_list = glob.glob(glob_dir) if model_fp_list: for fp in model_fp_list: pipeline_model = PipelinedModel(model_id=fp.split('/')[-2], model_version=fp.split('/')[-1]) model_info = gather_model_info_data(pipeline_model, query_filters=query_filters) if model_info: if isinstance(res, dict): res[fp] = model_info else: res.append(model_info) if kwargs.get('save'): try: insert_info = gather_model_info_data(pipeline_model).copy() insert_info['role'] = fp.split('/')[-2].split('#')[0] insert_info['party_id'] = fp.split('/')[-2].split('#')[1] insert_info['job_id'] = insert_info.get('f_model_version') insert_info['size'] = pipeline_model.calculate_model_file_size() if compare_version(insert_info['f_fate_version'], '1.5.1') == 'lt': insert_info['roles'] = insert_info.get('f_train_runtime_conf', {}).get('role', {}) insert_info['initiator_role'] = insert_info.get('f_train_runtime_conf', {}).get('initiator', {}).get('role') insert_info['initiator_party_id'] = insert_info.get('f_train_runtime_conf', {}).get('initiator', {}).get('party_id') save_model_info(insert_info) except Exception as e: stat_logger.exception(e) if res: return 0, 'Query model info from local model success.', res return 100, 'Query model info failed, cannot find model from local model files.', res def gather_model_info_data(model: PipelinedModel, query_filters=None): if model.exists(): pipeline = model.read_component_model('pipeline', 'pipeline')['Pipeline'] model_info = OrderedDict() if query_filters and isinstance(query_filters, list): for attr, field in pipeline.ListFields(): if attr.name in query_filters: if isinstance(field, bytes): model_info["f_" + attr.name] = json_loads(field, OrderedDict) else: model_info["f_" + attr.name] = field else: for attr, field in pipeline.ListFields(): if isinstance(field, bytes): model_info["f_" + attr.name] = json_loads(field, OrderedDict) else: model_info["f_" + attr.name] = field return model_info return [] def query_model_info(model_version, role=None, party_id=None, model_id=None, query_filters=None, **kwargs): arguments = locals() retcode, retmsg, data = query_model_info_from_db(**arguments) if not retcode: return retcode, retmsg, data else: arguments['save'] = True retcode, retmsg, data = query_model_info_from_file(**arguments) if not retcode: return retcode, retmsg, data return 100, 'Query model info failed, cannot find model from db. 
' \ 'Try use both model id and model version to query model info from local models', [] @DB.connection_context() def save_model_info(model_info): model = MLModel() model.f_create_time = current_timestamp() for k, v in model_info.items(): attr_name = 'f_%s' % k if hasattr(MLModel, attr_name): setattr(model, attr_name, v) elif hasattr(MLModel, k): setattr(model, k, v) try: rows = model.save(force_insert=True) if rows != 1: raise Exception("Create {} failed".format(MLModel)) if RuntimeConfig.zk_client is not None: ServiceUtils.register(RuntimeConfig.zk_client, gen_party_model_id(role=model.f_role, party_id=model.f_party_id, model_id=model.f_model_id), model.f_model_version) return model except peewee.IntegrityError as e: if e.args[0] == 1062: sql_logger(job_id=model_info.get("job_id", "fate_flow")).warning(e) else: raise Exception("Create {} failed:\n{}".format(MLModel, e)) except Exception as e: raise Exception("Create {} failed:\n{}".format(MLModel, e)) def compare_version(version: str, target_version: str): ver_list = version.split('.') tar_ver_list = target_version.split('.') if int(ver_list[0]) >= int(tar_ver_list[0]): if int(ver_list[1]) > int(tar_ver_list[1]): return 'gt' elif int(ver_list[1]) < int(tar_ver_list[1]): return 'lt' else: if int(ver_list[2]) > int(tar_ver_list[2]): return 'gt' elif int(ver_list[2]) == int(tar_ver_list[2]): return 'eq' else: return 'lt' return 'lt' def check_if_parent_model(pipeline): if compare_version(pipeline.fate_version, '1.5.0') == 'gt': if pipeline.parent: return True return False def check_before_deploy(pipeline_model: PipelinedModel): pipeline = pipeline_model.read_component_model('pipeline', 'pipeline')['Pipeline'] if compare_version(pipeline.fate_version, '1.5.0') == 'gt': if pipeline.parent: return True elif compare_version(pipeline.fate_version, '1.5.0') == 'eq': return True return False def check_if_deployed(role, party_id, model_id, model_version): party_model_id = gen_party_model_id(model_id=model_id, role=role, party_id=party_id) pipeline_model = PipelinedModel(model_id=party_model_id, model_version=model_version) if not pipeline_model.exists(): raise Exception(f"Model {party_model_id} {model_version} not exists in model local cache.") else: pipeline = pipeline_model.read_component_model('pipeline', 'pipeline')['Pipeline'] if compare_version(pipeline.fate_version, '1.5.0') == 'gt': train_runtime_conf = json_loads(pipeline.train_runtime_conf) if str(train_runtime_conf.get('dsl_version', '1')) != '1': if pipeline.parent: return False return True @DB.connection_context() def models_group_by_party_model_id_and_model_version(): args = [ MLModel.f_role, MLModel.f_party_id, MLModel.f_model_id, MLModel.f_model_version, ] models = MLModel.select(*args).group_by(*args) for model in models: model.f_party_model_id = gen_party_model_id(role=model.f_role, party_id=model.f_party_id, model_id=model.f_model_id) return models
[ "fate_flow.db.db_models.DB.connection_context", "fate_flow.pipelined_model.pipelined_model.PipelinedModel", "fate_flow.settings.stat_logger.exception", "fate_flow.db.db_models.MachineLearningModelInfo", "fate_flow.db.db_models.MachineLearningModelInfo.select", "fate_flow.settings.stat_logger.info" ]
[((2168, 2191), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (2189, 2191), False, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((7075, 7098), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (7096, 7098), False, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((10214, 10237), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (10235, 10237), False, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((3744, 3791), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""glob model dir: {glob_dir}"""'], {}), "(f'glob model dir: {glob_dir}')\n", (3760, 3791), False, 'from fate_flow.settings import stat_logger\n'), ((3812, 3831), 'glob.glob', 'glob.glob', (['glob_dir'], {}), '(glob_dir)\n', (3821, 3831), False, 'import glob\n'), ((7144, 7153), 'fate_flow.db.db_models.MachineLearningModelInfo', 'MLModel', ([], {}), '()\n', (7151, 7153), True, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((7180, 7199), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (7197, 7199), False, 'from fate_arch.common.base_utils import json_loads, current_timestamp\n'), ((9604, 9672), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'PipelinedModel', ([], {'model_id': 'party_model_id', 'model_version': 'model_version'}), '(model_id=party_model_id, model_version=model_version)\n', (9618, 9672), False, 'from fate_flow.pipelined_model.pipelined_model import PipelinedModel\n'), ((3515, 3543), 'fate_arch.common.file_utils.get_project_base_directory', 'get_project_base_directory', ([], {}), '()\n', (3541, 3543), False, 'from fate_arch.common.file_utils import get_project_base_directory\n'), ((5740, 5753), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5751, 5753), False, 'from collections import OrderedDict\n'), ((10011, 10050), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.train_runtime_conf'], {}), '(pipeline.train_runtime_conf)\n', (10021, 10050), False, 'from fate_arch.common.base_utils import json_loads, current_timestamp\n'), ((10439, 10460), 'fate_flow.db.db_models.MachineLearningModelInfo.select', 'MLModel.select', (['*args'], {}), '(*args)\n', (10453, 10460), True, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((3009, 3033), 'fate_flow.db.db_models.MachineLearningModelInfo.select', 'MLModel.select', (['*filters'], {}), '(*filters)\n', (3023, 3033), True, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((3080, 3096), 'fate_flow.db.db_models.MachineLearningModelInfo.select', 'MLModel.select', ([], {}), '()\n', (3094, 3096), True, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((2526, 2559), 'operator.attrgetter', 'operator.attrgetter', (["('f_%s' % f_n)"], {}), "('f_%s' % f_n)\n", (2545, 2559), False, 'import operator\n'), ((2683, 2716), 'operator.attrgetter', 'operator.attrgetter', (["('f_%s' % f_n)"], {}), "('f_%s' % f_n)\n", (2702, 2716), False, 'import operator\n'), ((6303, 6333), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['field', 'OrderedDict'], {}), '(field, OrderedDict)\n', (6313, 6333), False, 'from fate_arch.common.base_utils import json_loads, current_timestamp\n'), ((2934, 2964), 'operator.attrgetter', 'operator.attrgetter', (['attr_name'], 
{}), '(attr_name)\n', (2953, 2964), False, 'import operator\n'), ((6021, 6051), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['field', 'OrderedDict'], {}), '(field, OrderedDict)\n', (6031, 6051), False, 'from fate_arch.common.base_utils import json_loads, current_timestamp\n'), ((5346, 5370), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (5367, 5370), False, 'from fate_flow.settings import stat_logger\n')]
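The helpers at the top of the model_utils row (gen_party_model_id, all_party_key, gen_model_id, compare_version) define the key formats used by the query functions below them. A standalone worked example, recomputed from the logic shown above (no imports from FATE needed):

# recomputed from all_party_key / gen_model_id / gen_party_model_id above
all_party = {"guest": [9999], "host": [10000], "arbiter": [10000]}
all_party_key = '#'.join('%s-%s' % (r, '_'.join(str(p) for p in sorted(set(all_party[r]))))
                         for r in sorted(all_party))
model_id = '#'.join([all_party_key, 'model'])
party_model_id = '#'.join(['guest', str(9999), model_id])
print(all_party_key)    # arbiter-10000#guest-9999#host-10000
print(model_id)         # arbiter-10000#guest-9999#host-10000#model
print(party_model_id)   # guest#9999#arbiter-10000#guest-9999#host-10000#model
# compare_version(fate_version, '1.5.1') == 'lt' is what query_model_info_from_file
# uses above to decide whether to backfill roles/initiator metadata before save_model_info.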
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil from flask import Flask, request from fate_flow.entity.types import StatusSet from fate_arch import storage from fate_flow.settings import stat_logger, UPLOAD_DATA_FROM_CLIENT from fate_flow.utils.api_utils import get_json_result from fate_flow.utils import detect_utils, job_utils from fate_flow.scheduler import DAGScheduler from fate_flow.operation import JobSaver manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/<access_module>', methods=['post']) def download_upload(access_module): job_id = job_utils.generate_job_id() if access_module == "upload" and UPLOAD_DATA_FROM_CLIENT and not (request.json and request.json.get("use_local_data") == 0): file = request.files['file'] filename = os.path.join(job_utils.get_job_directory(job_id), 'fate_upload_tmp', file.filename) os.makedirs(os.path.dirname(filename), exist_ok=True) try: file.save(filename) except Exception as e: shutil.rmtree(os.path.join(job_utils.get_job_directory(job_id), 'fate_upload_tmp')) raise e job_config = request.args.to_dict() job_config['file'] = filename else: job_config = request.json required_arguments = ['work_mode', 'namespace', 'table_name'] if access_module == 'upload': required_arguments.extend(['file', 'head', 'partition']) elif access_module == 'download': required_arguments.extend(['output_path']) else: raise Exception('can not support this operating: {}'.format(access_module)) detect_utils.check_config(job_config, required_arguments=required_arguments) data = {} # compatibility if "table_name" in job_config: job_config["name"] = job_config["table_name"] if "backend" not in job_config: job_config["backend"] = 0 for _ in ["work_mode", "backend", "head", "partition", "drop"]: if _ in job_config: job_config[_] = int(job_config[_]) if access_module == "upload": if job_config.get('drop', 0) == 1: job_config["destroy"] = True else: job_config["destroy"] = False data['table_name'] = job_config["table_name"] data['namespace'] = job_config["namespace"] data_table_meta = storage.StorageTableMeta(name=job_config["table_name"], namespace=job_config["namespace"]) if data_table_meta and not job_config["destroy"]: return get_json_result(retcode=100, retmsg='The data table already exists.' 'If you still want to continue uploading, please add the parameter -drop.' 
' 0 means not to delete and continue uploading, ' '1 means to upload again after deleting the table') job_dsl, job_runtime_conf = gen_data_access_job_config(job_config, access_module) submit_result = DAGScheduler.submit({'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}, job_id=job_id) data.update(submit_result) return get_json_result(job_id=job_id, data=data) @manager.route('/upload/history', methods=['POST']) def upload_history(): request_data = request.json if request_data.get('job_id'): tasks = JobSaver.query_task(component_name='upload_0', status=StatusSet.SUCCESS, job_id=request_data.get('job_id'), run_on_this_party=True) else: tasks = JobSaver.query_task(component_name='upload_0', status=StatusSet.SUCCESS, run_on_this_party=True) limit = request_data.get('limit') if not limit: tasks = tasks[-1::-1] else: tasks = tasks[-1:-limit - 1:-1] jobs_run_conf = job_utils.get_job_configuration(None, None, None, tasks) data = get_upload_info(jobs_run_conf=jobs_run_conf) return get_json_result(retcode=0, retmsg='success', data=data) def get_upload_info(jobs_run_conf): data = [] for job_id, job_run_conf in jobs_run_conf.items(): info = {} table_name = job_run_conf["name"] namespace = job_run_conf["namespace"] table_meta = storage.StorageTableMeta(name=table_name, namespace=namespace) if table_meta: partition = job_run_conf["partition"] info["upload_info"] = { "table_name": table_name, "namespace": namespace, "partition": partition, 'upload_count': table_meta.get_count() } info["notes"] = job_run_conf["notes"] info["schema"] = table_meta.get_schema() data.append({job_id: info}) return data def gen_data_access_job_config(config_data, access_module): job_runtime_conf = { "initiator": {}, "job_parameters": {"common": {}}, "role": {}, "component_parameters": {"role": {"local": {"0": {}}}} } initiator_role = "local" initiator_party_id = config_data.get('party_id', 0) job_runtime_conf["initiator"]["role"] = initiator_role job_runtime_conf["initiator"]["party_id"] = initiator_party_id for _ in ["work_mode", "backend"]: if _ in config_data: # job_runtime_conf["job_parameters"] = config_data[_] job_runtime_conf["job_parameters"]["common"][_] = config_data[_] job_runtime_conf["role"][initiator_role] = [initiator_party_id] job_dsl = { "components": {} } if access_module == 'upload': parameters = { "head", "partition", "file", "namespace", "name", "delimiter", "storage_engine", "storage_address", "destroy", } job_runtime_conf["component_parameters"]["role"][initiator_role]["0"]["upload_0"] = {} for p in parameters: if p in config_data: job_runtime_conf["component_parameters"]["role"][initiator_role]["0"]["upload_0"][p] = config_data[p] job_runtime_conf['dsl_version'] = 2 job_dsl["components"]["upload_0"] = { "module": "Upload" } if access_module == 'download': parameters = { "delimiter", "output_path", "namespace", "name" } job_runtime_conf["component_parameters"]['role'][initiator_role]["0"]["download_0"] = {} for p in parameters: if p in config_data: job_runtime_conf["component_parameters"]['role'][initiator_role]["0"]["download_0"][p] = config_data[p] job_runtime_conf['dsl_version'] = 2 job_dsl["components"]["download_0"] = { "module": "Download" } return job_dsl, job_runtime_conf
[ "fate_flow.utils.job_utils.get_job_directory", "fate_flow.utils.job_utils.get_job_configuration", "fate_flow.utils.api_utils.get_json_result", "fate_flow.scheduler.DAGScheduler.submit", "fate_flow.utils.detect_utils.check_config", "fate_flow.settings.stat_logger.exception", "fate_flow.operation.JobSaver.query_task", "fate_flow.utils.job_utils.generate_job_id" ]
[((1021, 1036), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1026, 1036), False, 'from flask import Flask, request\n'), ((1100, 1124), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1121, 1124), False, 'from fate_flow.settings import stat_logger, UPLOAD_DATA_FROM_CLIENT\n'), ((1284, 1311), 'fate_flow.utils.job_utils.generate_job_id', 'job_utils.generate_job_id', ([], {}), '()\n', (1309, 1311), False, 'from fate_flow.utils import detect_utils, job_utils\n'), ((2313, 2389), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['job_config'], {'required_arguments': 'required_arguments'}), '(job_config, required_arguments=required_arguments)\n', (2338, 2389), False, 'from fate_flow.utils import detect_utils, job_utils\n'), ((3713, 3811), 'fate_flow.scheduler.DAGScheduler.submit', 'DAGScheduler.submit', (["{'job_dsl': job_dsl, 'job_runtime_conf': job_runtime_conf}"], {'job_id': 'job_id'}), "({'job_dsl': job_dsl, 'job_runtime_conf':\n job_runtime_conf}, job_id=job_id)\n", (3732, 3811), False, 'from fate_flow.scheduler import DAGScheduler\n'), ((3850, 3891), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': 'job_id', 'data': 'data'}), '(job_id=job_id, data=data)\n', (3865, 3891), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((4462, 4518), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', (['None', 'None', 'None', 'tasks'], {}), '(None, None, None, tasks)\n', (4493, 4518), False, 'from fate_flow.utils import detect_utils, job_utils\n'), ((4586, 4641), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'data'}), "(retcode=0, retmsg='success', data=data)\n", (4601, 4641), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1856, 1878), 'flask.request.args.to_dict', 'request.args.to_dict', ([], {}), '()\n', (1876, 1878), False, 'from flask import Flask, request\n'), ((3032, 3127), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': "job_config['table_name']", 'namespace': "job_config['namespace']"}), "(name=job_config['table_name'], namespace=\n job_config['namespace'])\n", (3056, 3127), False, 'from fate_arch import storage\n'), ((4209, 4309), 'fate_flow.operation.JobSaver.query_task', 'JobSaver.query_task', ([], {'component_name': '"""upload_0"""', 'status': 'StatusSet.SUCCESS', 'run_on_this_party': '(True)'}), "(component_name='upload_0', status=StatusSet.SUCCESS,\n run_on_this_party=True)\n", (4228, 4309), False, 'from fate_flow.operation import JobSaver\n'), ((4877, 4939), 'fate_arch.storage.StorageTableMeta', 'storage.StorageTableMeta', ([], {'name': 'table_name', 'namespace': 'namespace'}), '(name=table_name, namespace=namespace)\n', (4901, 4939), False, 'from fate_arch import storage\n'), ((1510, 1545), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', (['job_id'], {}), '(job_id)\n', (1537, 1545), False, 'from fate_flow.utils import detect_utils, job_utils\n'), ((1601, 1626), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1616, 1626), False, 'import os\n'), ((3200, 3446), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(100)', 'retmsg': '"""The data table already exists.If you still want to continue uploading, please add the parameter -drop. 
0 means not to delete and continue uploading, 1 means to upload again after deleting the table"""'}), "(retcode=100, retmsg=\n 'The data table already exists.If you still want to continue uploading, please add the parameter -drop. 0 means not to delete and continue uploading, 1 means to upload again after deleting the table'\n )\n", (3215, 3446), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1399, 1433), 'flask.request.json.get', 'request.json.get', (['"""use_local_data"""'], {}), "('use_local_data')\n", (1415, 1433), False, 'from flask import Flask, request\n'), ((1758, 1793), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', (['job_id'], {}), '(job_id)\n', (1785, 1793), False, 'from fate_flow.utils import detect_utils, job_utils\n')]
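The /upload route above validates a flat config and turns it into a DSL-v2 job through gen_data_access_job_config before calling DAGScheduler.submit. A hedged sketch of such a config, using only the field names the code checks; the file path, namespace and table name are hypothetical placeholders:

upload_config = {
    "file": "/data/projects/fate/examples/data/breast_hetero_guest.csv",  # hypothetical path
    "head": 1,                       # assumed header flag, passed through as-is
    "partition": 4,
    "work_mode": 0,
    "backend": 0,
    "namespace": "experiment",
    "table_name": "breast_hetero_guest",
    "drop": 1,                       # drop == 1 sets destroy=True for an existing table
}
# gen_data_access_job_config(upload_config, 'upload') then yields a one-component DSL
# ({"components": {"upload_0": {"module": "Upload"}}}) and a runtime conf whose
# component_parameters carry the fields above under role/local/"0"/upload_0.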
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse from fate_flow.utils.log_utils import schedule_logger from fate_arch import session class SessionStop(object): @classmethod def run(cls): parser = argparse.ArgumentParser() parser.add_argument('--session', required=True, type=str, help="session manager id") parser.add_argument('--computing', help="computing engine", type=str) parser.add_argument('--federation', help="federation engine", type=str) parser.add_argument('--storage', help="storage engine", type=str) parser.add_argument('-c', '--command', required=True, type=str, help="command") args = parser.parse_args() session_id = args.session fate_job_id = session_id.split('_')[0] command = args.command with session.Session(session_id=session_id, options={"logger": schedule_logger(fate_job_id)}) as sess: sess.destroy_all_sessions() if __name__ == '__main__': SessionStop.run()
[ "fate_flow.utils.log_utils.schedule_logger" ]
[((798, 823), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (821, 823), False, 'import argparse\n'), ((1484, 1512), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['fate_job_id'], {}), '(fate_job_id)\n', (1499, 1512), False, 'from fate_flow.utils.log_utils import schedule_logger\n')]
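SessionStop.run above is a small CLI that destroys a computing/federation/storage session; the job id is recovered as the prefix of the session id before the first underscore. A runnable sketch of the same argument parsing with a hypothetical session id:

import argparse

# mirrors the parser built in SessionStop.run; the session id value is hypothetical
parser = argparse.ArgumentParser()
parser.add_argument('--session', required=True, type=str)
parser.add_argument('--computing', type=str)
parser.add_argument('--federation', type=str)
parser.add_argument('--storage', type=str)
parser.add_argument('-c', '--command', required=True, type=str)

args = parser.parse_args(['--session', '202201011200000000000_hetero_lr_0_0_guest_9999', '-c', 'stop'])
fate_job_id = args.session.split('_')[0]
print(fate_job_id)  # 202201011200000000000, passed to schedule_logger before destroy_all_sessions()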
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from flask import request from fate_flow.manager.resource_manager import ResourceManager from fate_flow.utils.api_utils import get_json_result from fate_flow.utils.detect_utils import validate_request @manager.route('/query', methods=['post']) def query_resource(): use_resource_job, computing_engine_resource = ResourceManager.query_resource(**request.json) return get_json_result(retcode=0, retmsg='success', data={"use_resource_job": use_resource_job, "computing_engine_resource": computing_engine_resource}) @manager.route('/return', methods=['post']) @validate_request('job_id') def return_resource(): status = ResourceManager.return_resource(job_id=request.json.get("job_id")) return get_json_result(data=status)
[ "fate_flow.utils.detect_utils.validate_request", "fate_flow.utils.api_utils.get_json_result", "fate_flow.manager.resource_manager.ResourceManager.query_resource" ]
[((1248, 1274), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""job_id"""'], {}), "('job_id')\n", (1264, 1274), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((935, 981), 'fate_flow.manager.resource_manager.ResourceManager.query_resource', 'ResourceManager.query_resource', ([], {}), '(**request.json)\n', (965, 981), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((993, 1142), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': "{'use_resource_job': use_resource_job, 'computing_engine_resource':\n computing_engine_resource}"}), "(retcode=0, retmsg='success', data={'use_resource_job':\n use_resource_job, 'computing_engine_resource': computing_engine_resource})\n", (1008, 1142), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1389, 1417), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'status'}), '(data=status)\n', (1404, 1417), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1350, 1376), 'flask.request.json.get', 'request.json.get', (['"""job_id"""'], {}), "('job_id')\n", (1366, 1376), False, 'from flask import request\n')]
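The two routes above expose resource bookkeeping over HTTP; /return only requires a job_id field (enforced by validate_request). A hedged client sketch using the requests library; the host, port and the '/v1/resource' mount point are assumptions not shown in this file, only the request bodies follow the code above:

import requests

BASE = "http://127.0.0.1:9380/v1/resource"   # hypothetical FATE Flow address and URL prefix

# query current resource usage; body fields are forwarded to ResourceManager.query_resource
print(requests.post(f"{BASE}/query", json={}).json())

# force-return the resources held by one job
print(requests.post(f"{BASE}/return", json={"job_id": "202201011200000000000"}).json())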
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # -*- coding: utf-8 -*- import os from arch.api import Backend from arch.api.utils import file_utils, log_utils, core_utils from fate_flow.entity.runtime_config import RuntimeConfig from arch.api.utils.core_utils import get_lan_ip from arch.api.utils.conf_utils import get_base_config import __main__ from fate_flow.utils.setting_utils import CenterConfig WORK_MODE = get_base_config('work_mode', 0) BACKEND = Backend.EGGROLL USE_LOCAL_DATABASE = True # upload data USE_LOCAL_DATA = True # Local authentication switch USE_AUTHENTICATION = False PRIVILEGE_COMMAND_WHITELIST = [] # Node check switch CHECK_NODES_IDENTITY = False # zookeeper USE_CONFIGURATION_CENTER = False ZOOKEEPER_HOSTS = ['127.0.0.1:2181'] MAX_CONCURRENT_JOB_RUN = 5 MAX_CONCURRENT_JOB_RUN_HOST = 5 _ONE_DAY_IN_SECONDS = 60 * 60 * 24 DEFAULT_GRPC_OVERALL_TIMEOUT = 60 * 1000 * 60 # ms JOB_DEFAULT_TIMEOUT = 7 * 24 * 60 * 60 DATABASE = get_base_config("database", {}) DEFAULT_MODEL_STORE_ADDRESS = get_base_config("default_model_store_address", {}) ''' Constants ''' API_VERSION = "v1" ROLE = 'fateflow' SERVERS = 'servers' MAIN_MODULE = os.path.relpath(__main__.__file__) SERVER_MODULE = 'fate_flow_server.py' TASK_EXECUTOR_MODULE = 'driver/task_executor.py' TEMP_DIRECTORY = os.path.join(file_utils.get_project_base_directory(), "fate_flow", "temp") HEADERS = { 'Content-Type': 'application/json', 'Connection': 'close' } DETECT_TABLE = ("fate_flow_detect_table_namespace", "fate_flow_detect_table_name", 16) # fate-serving SERVINGS_ZK_PATH = '/FATE-SERVICES/serving/online/publishLoad/providers' FATE_FLOW_ZK_PATH = '/FATE-SERVICES/flow/online/transfer/providers' FATE_FLOW_MODEL_TRANSFER_PATH = '/v1/model/transfer' # fate-manager FATE_MANAGER_GET_NODE_INFO = '/node/info' FATE_MANAGER_NODE_CHECK = '/node/management/check' # logger log_utils.LoggerFactory.LEVEL = 10 # {CRITICAL: 50, FATAL:50, ERROR:40, WARNING:30, WARN:30, INFO:20, DEBUG:10, NOTSET:0} log_utils.LoggerFactory.set_directory(os.path.join(file_utils.get_project_base_directory(), 'logs', 'fate_flow')) stat_logger = log_utils.getLogger("fate_flow_stat") detect_logger = log_utils.getLogger("fate_flow_detect") access_logger = log_utils.getLogger("fate_flow_access") audit_logger = log_utils.audit_logger() """ Services """ IP = get_base_config("fate_flow", {}).get("host", "0.0.0.0") HTTP_PORT = get_base_config("fate_flow", {}).get("http_port") GRPC_PORT = get_base_config("fate_flow", {}).get("grpc_port") # standalone job will be send to the standalone job server when FATE-Flow work on cluster deploy mode, # but not the port for FATE-Flow on standalone deploy mode. 
CLUSTER_STANDALONE_JOB_SERVER_PORT = 9381 # services ip and port SERVER_CONF_PATH = 'arch/conf/server_conf.json' SERVING_PATH = '/servers/servings' server_conf = file_utils.load_json_conf(SERVER_CONF_PATH) PROXY_HOST = server_conf.get(SERVERS).get('proxy').get('host') PROXY_PORT = server_conf.get(SERVERS).get('proxy').get('port') BOARD_HOST = server_conf.get(SERVERS).get('fateboard').get('host') if BOARD_HOST == 'localhost': BOARD_HOST = get_lan_ip() BOARD_PORT = server_conf.get(SERVERS).get('fateboard').get('port') MANAGER_HOST = server_conf.get(SERVERS).get('fatemanager', {}).get('host') MANAGER_PORT = server_conf.get(SERVERS).get('fatemanager', {}).get('port') SERVINGS = CenterConfig.get_settings(path=SERVING_PATH, servings_zk_path=SERVINGS_ZK_PATH, use_zk=USE_CONFIGURATION_CENTER, hosts=ZOOKEEPER_HOSTS, server_conf_path=SERVER_CONF_PATH) BOARD_DASHBOARD_URL = 'http://%s:%d/index.html#/dashboard?job_id={}&role={}&party_id={}' % (BOARD_HOST, BOARD_PORT) # switch SAVE_AS_TASK_INPUT_DATA_SWITCH = True SAVE_AS_TASK_INPUT_DATA_IN_MEMORY = True # init RuntimeConfig.init_config(WORK_MODE=WORK_MODE) RuntimeConfig.init_config(HTTP_PORT=HTTP_PORT) RuntimeConfig.init_config(BACKEND=BACKEND)
[ "fate_flow.entity.runtime_config.RuntimeConfig.init_config", "fate_flow.utils.setting_utils.CenterConfig.get_settings" ]
[((988, 1019), 'arch.api.utils.conf_utils.get_base_config', 'get_base_config', (['"""work_mode"""', '(0)'], {}), "('work_mode', 0)\n", (1003, 1019), False, 'from arch.api.utils.conf_utils import get_base_config\n'), ((1530, 1561), 'arch.api.utils.conf_utils.get_base_config', 'get_base_config', (['"""database"""', '{}'], {}), "('database', {})\n", (1545, 1561), False, 'from arch.api.utils.conf_utils import get_base_config\n'), ((1592, 1642), 'arch.api.utils.conf_utils.get_base_config', 'get_base_config', (['"""default_model_store_address"""', '{}'], {}), "('default_model_store_address', {})\n", (1607, 1642), False, 'from arch.api.utils.conf_utils import get_base_config\n'), ((1733, 1767), 'os.path.relpath', 'os.path.relpath', (['__main__.__file__'], {}), '(__main__.__file__)\n', (1748, 1767), False, 'import os\n'), ((2691, 2728), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', (['"""fate_flow_stat"""'], {}), "('fate_flow_stat')\n", (2710, 2728), False, 'from arch.api.utils import file_utils, log_utils, core_utils\n'), ((2745, 2784), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', (['"""fate_flow_detect"""'], {}), "('fate_flow_detect')\n", (2764, 2784), False, 'from arch.api.utils import file_utils, log_utils, core_utils\n'), ((2801, 2840), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', (['"""fate_flow_access"""'], {}), "('fate_flow_access')\n", (2820, 2840), False, 'from arch.api.utils import file_utils, log_utils, core_utils\n'), ((2856, 2880), 'arch.api.utils.log_utils.audit_logger', 'log_utils.audit_logger', ([], {}), '()\n', (2878, 2880), False, 'from arch.api.utils import file_utils, log_utils, core_utils\n'), ((3413, 3456), 'arch.api.utils.file_utils.load_json_conf', 'file_utils.load_json_conf', (['SERVER_CONF_PATH'], {}), '(SERVER_CONF_PATH)\n', (3438, 3456), False, 'from arch.api.utils import file_utils, log_utils, core_utils\n'), ((3938, 4118), 'fate_flow.utils.setting_utils.CenterConfig.get_settings', 'CenterConfig.get_settings', ([], {'path': 'SERVING_PATH', 'servings_zk_path': 'SERVINGS_ZK_PATH', 'use_zk': 'USE_CONFIGURATION_CENTER', 'hosts': 'ZOOKEEPER_HOSTS', 'server_conf_path': 'SERVER_CONF_PATH'}), '(path=SERVING_PATH, servings_zk_path=\n SERVINGS_ZK_PATH, use_zk=USE_CONFIGURATION_CENTER, hosts=\n ZOOKEEPER_HOSTS, server_conf_path=SERVER_CONF_PATH)\n', (3963, 4118), False, 'from fate_flow.utils.setting_utils import CenterConfig\n'), ((4396, 4442), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'WORK_MODE': 'WORK_MODE'}), '(WORK_MODE=WORK_MODE)\n', (4421, 4442), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((4443, 4489), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'HTTP_PORT': 'HTTP_PORT'}), '(HTTP_PORT=HTTP_PORT)\n', (4468, 4489), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((4490, 4532), 'fate_flow.entity.runtime_config.RuntimeConfig.init_config', 'RuntimeConfig.init_config', ([], {'BACKEND': 'BACKEND'}), '(BACKEND=BACKEND)\n', (4515, 4532), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((1885, 1924), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (1922, 1924), False, 'from arch.api.utils import file_utils, log_utils, core_utils\n'), ((3697, 3709), 'arch.api.utils.core_utils.get_lan_ip', 'get_lan_ip', ([], {}), '()\n', (3707, 3709), False, 'from arch.api.utils.core_utils import 
get_lan_ip\n'), ((2614, 2653), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (2651, 2653), False, 'from arch.api.utils import file_utils, log_utils, core_utils\n'), ((2905, 2937), 'arch.api.utils.conf_utils.get_base_config', 'get_base_config', (['"""fate_flow"""', '{}'], {}), "('fate_flow', {})\n", (2920, 2937), False, 'from arch.api.utils.conf_utils import get_base_config\n'), ((2973, 3005), 'arch.api.utils.conf_utils.get_base_config', 'get_base_config', (['"""fate_flow"""', '{}'], {}), "('fate_flow', {})\n", (2988, 3005), False, 'from arch.api.utils.conf_utils import get_base_config\n'), ((3035, 3067), 'arch.api.utils.conf_utils.get_base_config', 'get_base_config', (['"""fate_flow"""', '{}'], {}), "('fate_flow', {})\n", (3050, 3067), False, 'from arch.api.utils.conf_utils import get_base_config\n')]
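settings.py above resolves service addresses from arch/conf/server_conf.json through nested .get() calls. A hedged sketch, written as a Python dict, of the minimal layout those lookups imply; all host/port values are placeholders and the "servings" entry is an assumption (only the path '/servers/servings' is visible above):

# minimal server_conf.json layout implied by the lookups in settings.py above
server_conf = {
    "servers": {
        "proxy":       {"host": "127.0.0.1", "port": 9370},
        "fateboard":   {"host": "localhost", "port": 8080},
        "fatemanager": {"host": "127.0.0.1", "port": 8100},   # optional, read with .get(..., {})
        "servings":    {"hosts": ["127.0.0.1:8000"]}           # assumed structure under '/servers/servings'
    }
}
PROXY_HOST = server_conf["servers"]["proxy"]["host"]   # mirrors server_conf.get(SERVERS).get('proxy').get('host')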
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import datetime import functools import errno import json import operator import os import subprocess import threading import typing import uuid import psutil from arch.api.utils import file_utils from arch.api.utils.core import current_timestamp from arch.api.utils.core import json_loads, json_dumps from fate_flow.db.db_models import DB, Job, Task from fate_flow.driver.dsl_parser import DSLParser from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.settings import stat_logger from fate_flow.utils import detect_utils from fate_flow.utils import api_utils from flask import request, redirect, url_for class IdCounter: _lock = threading.RLock() def __init__(self, initial_value=0): self._value = initial_value def incr(self, delta=1): ''' Increment the counter with locking ''' with IdCounter._lock: self._value += delta return self._value id_counter = IdCounter() def generate_job_id(): return '{}{}'.format(datetime.datetime.now().strftime("%Y%m%d%H%M%S%f"), str(id_counter.incr())) def generate_task_id(job_id, component_name): return '{}_{}'.format(job_id, component_name) def get_job_directory(job_id): return os.path.join(file_utils.get_project_base_directory(), 'jobs', job_id) def get_job_log_directory(job_id): return os.path.join(file_utils.get_project_base_directory(), 'logs', job_id) def check_config(config: typing.Dict, required_parameters: typing.List): for parameter in required_parameters: if parameter not in config: return False, 'configuration no {} parameter'.format(parameter) else: return True, 'ok' def check_pipeline_job_runtime_conf(runtime_conf: typing.Dict): detect_utils.check_config(runtime_conf, ['initiator', 'job_parameters', 'role']) detect_utils.check_config(runtime_conf['initiator'], ['role', 'party_id']) detect_utils.check_config(runtime_conf['job_parameters'], [('work_mode', RuntimeConfig.WORK_MODE)]) # deal party id runtime_conf['initiator']['party_id'] = int(runtime_conf['initiator']['party_id']) for r in runtime_conf['role'].keys(): for i in range(len(runtime_conf['role'][r])): runtime_conf['role'][r][i] = int(runtime_conf['role'][r][i]) def new_runtime_conf(job_dir, method, module, role, party_id): if role: conf_path_dir = os.path.join(job_dir, method, module, role, str(party_id)) else: conf_path_dir = os.path.join(job_dir, method, module, str(party_id)) os.makedirs(conf_path_dir, exist_ok=True) return os.path.join(conf_path_dir, 'runtime_conf.json') def save_job_conf(job_id, job_dsl, job_runtime_conf): job_dsl_path, job_runtime_conf_path = get_job_conf_path(job_id=job_id) os.makedirs(os.path.dirname(job_dsl_path), exist_ok=True) for data, conf_path in [(job_dsl, job_dsl_path), (job_runtime_conf, job_runtime_conf_path)]: with open(conf_path, 'w+') as f: f.truncate() f.write(json.dumps(data, indent=4)) f.flush() return job_dsl_path, job_runtime_conf_path def get_job_conf_path(job_id): job_dir = get_job_directory(job_id) job_dsl_path = os.path.join(job_dir, 
'job_dsl.json') job_runtime_conf_path = os.path.join(job_dir, 'job_runtime_conf.json') return job_dsl_path, job_runtime_conf_path def get_job_dsl_parser_by_job_id(job_id): with DB.connection_context(): jobs = Job.select(Job.f_dsl, Job.f_runtime_conf, Job.f_train_runtime_conf).where(Job.f_job_id == job_id) if jobs: job = jobs[0] job_dsl_parser = get_job_dsl_parser(dsl=json_loads(job.f_dsl), runtime_conf=json_loads(job.f_runtime_conf), train_runtime_conf=json_loads(job.f_train_runtime_conf)) return job_dsl_parser else: return None def get_job_dsl_parser(dsl=None, runtime_conf=None, pipeline_dsl=None, train_runtime_conf=None): dsl_parser = DSLParser() default_runtime_conf_path = os.path.join(file_utils.get_project_base_directory(), *['federatedml', 'conf', 'default_runtime_conf']) setting_conf_path = os.path.join(file_utils.get_project_base_directory(), *['federatedml', 'conf', 'setting_conf']) job_type = runtime_conf.get('job_parameters', {}).get('job_type', 'train') dsl_parser.run(dsl=dsl, runtime_conf=runtime_conf, pipeline_dsl=pipeline_dsl, pipeline_runtime_conf=train_runtime_conf, default_runtime_conf_prefix=default_runtime_conf_path, setting_conf_prefix=setting_conf_path, mode=job_type) return dsl_parser def get_job_configuration(job_id, role, party_id): with DB.connection_context(): jobs = Job.select(Job.f_dsl, Job.f_runtime_conf, Job.f_train_runtime_conf).where(Job.f_job_id == job_id, Job.f_role == role, Job.f_party_id == party_id) if jobs: job = jobs[0] return json_loads(job.f_dsl), json_loads(job.f_runtime_conf), json_loads(job.f_train_runtime_conf) else: return {}, {}, {} def query_job(**kwargs): with DB.connection_context(): filters = [] for f_n, f_v in kwargs.items(): attr_name = 'f_%s' % f_n if hasattr(Job, attr_name): filters.append(operator.attrgetter('f_%s' % f_n)(Job) == f_v) if filters: jobs = Job.select().where(*filters) return [job for job in jobs] else: # not allow query all job return [] def job_queue_size(): return RuntimeConfig.JOB_QUEUE.qsize() def show_job_queue(): # TODO pass def query_task(**kwargs): with DB.connection_context(): filters = [] for f_n, f_v in kwargs.items(): attr_name = 'f_%s' % f_n if hasattr(Task, attr_name): filters.append(operator.attrgetter('f_%s' % f_n)(Task) == f_v) if filters: tasks = Task.select().where(*filters) else: tasks = Task.select() return [task for task in tasks] def success_task_count(job_id): count = 0 tasks = query_task(job_id=job_id) job_component_status = {} for task in tasks: job_component_status[task.f_component_name] = job_component_status.get(task.f_component_name, set()) job_component_status[task.f_component_name].add(task.f_status) for component_name, role_status in job_component_status.items(): if len(role_status) == 1 and 'success' in role_status: count += 1 return count def update_job_progress(job_id, dag, current_task_id): component_count = len(dag.get_dependency()['component_list']) success_count = success_task_count(job_id=job_id) job = Job() job.f_progress = float(success_count) / component_count * 100 job.f_update_time = current_timestamp() job.f_current_tasks = json_dumps([current_task_id]) return job def gen_status_id(): return uuid.uuid1().hex def check_job_process(pid): if pid < 0: return False if pid == 0: raise ValueError('invalid PID 0') try: os.kill(pid, 0) except OSError as err: if err.errno == errno.ESRCH: # ESRCH == No such process return False elif err.errno == errno.EPERM: # EPERM clearly means there's a process to deny access to return True else: # According to "man 2 kill" possible error values 
are # (EINVAL, EPERM, ESRCH) raise else: return True def check_process_by_keyword(keywords): if not keywords: return True keyword_filter_cmd = ' |'.join(['grep %s' % keyword for keyword in keywords]) ret = os.system('ps aux | {} | grep -v grep | grep -v "ps aux "'.format(keyword_filter_cmd)) return ret == 0 def run_subprocess(config_dir, process_cmd, log_dir=None): stat_logger.info('Starting process command: {}'.format(process_cmd)) stat_logger.info(' '.join(process_cmd)) os.makedirs(config_dir, exist_ok=True) if log_dir: os.makedirs(log_dir, exist_ok=True) std_log = open(os.path.join(log_dir if log_dir else config_dir, 'std.log'), 'w') pid_path = os.path.join(config_dir, 'pid') if os.name == 'nt': startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = subprocess.SW_HIDE else: startupinfo = None p = subprocess.Popen(process_cmd, stdout=std_log, stderr=std_log, startupinfo=startupinfo ) with open(pid_path, 'w') as f: f.truncate() f.write(str(p.pid) + "\n") f.flush() return p def wait_child_process(signum, frame): child_pid = None try: while True: child_pid, status = os.waitpid(-1, os.WNOHANG) if child_pid == 0: stat_logger.info('no child process was immediately available') break exitcode = status >> 8 stat_logger.info('child process %s exit with exitcode %s', child_pid, exitcode) except OSError as e: if e.errno == errno.ECHILD: stat_logger.warning('current process has no existing unwaited-for child processes.') else: raise def kill_process(pid): try: if not pid: return False stat_logger.info("terminating process pid:{}".format(pid)) if not check_job_process(pid): return True p = psutil.Process(int(pid)) for child in p.children(recursive=True): if check_job_process(child.pid): child.kill() if check_job_process(p.pid): p.kill() return True except Exception as e: raise e def gen_all_party_key(all_party): """ Join all party as party key :param all_party: "role": { "guest": [9999], "host": [10000], "arbiter": [10000] } :return: """ if not all_party: all_party_key = 'all' elif isinstance(all_party, dict): sorted_role_name = sorted(all_party.keys()) all_party_key = '#'.join([ ('%s-%s' % ( role_name, '_'.join([str(p) for p in sorted(set(all_party[role_name]))])) ) for role_name in sorted_role_name]) else: all_party_key = None return all_party_key def job_server_routing(routing_type=0): def _out_wrapper(func): @functools.wraps(func) def _wrapper(*args, **kwargs): job_server = set() jobs = query_job(job_id=request.json.get('job_id', None)) for job in jobs: if job.f_run_ip: job_server.add(job.f_run_ip) if len(job_server) == 1: execute_host = job_server.pop() if execute_host != RuntimeConfig.JOB_SERVER_HOST: if routing_type == 0: return api_utils.request_execute_server(request=request, execute_host=execute_host) else: return redirect('http://{}{}'.format(execute_host, url_for(request.endpoint)), code=307) return func(*args, **kwargs) return _wrapper return _out_wrapper
[ "fate_flow.db.db_models.Job", "fate_flow.driver.dsl_parser.DSLParser", "fate_flow.db.db_models.DB.connection_context", "fate_flow.settings.stat_logger.warning", "fate_flow.db.db_models.Job.select", "fate_flow.utils.api_utils.request_execute_server", "fate_flow.settings.stat_logger.info", "fate_flow.entity.runtime_config.RuntimeConfig.JOB_QUEUE.qsize", "fate_flow.db.db_models.Task.select", "fate_flow.utils.detect_utils.check_config" ]
[((1275, 1292), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1290, 1292), False, 'import threading\n'), ((2380, 2465), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['runtime_conf', "['initiator', 'job_parameters', 'role']"], {}), "(runtime_conf, ['initiator', 'job_parameters', 'role']\n )\n", (2405, 2465), False, 'from fate_flow.utils import detect_utils\n'), ((2465, 2539), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (["runtime_conf['initiator']", "['role', 'party_id']"], {}), "(runtime_conf['initiator'], ['role', 'party_id'])\n", (2490, 2539), False, 'from fate_flow.utils import detect_utils\n'), ((2544, 2647), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (["runtime_conf['job_parameters']", "[('work_mode', RuntimeConfig.WORK_MODE)]"], {}), "(runtime_conf['job_parameters'], [('work_mode',\n RuntimeConfig.WORK_MODE)])\n", (2569, 2647), False, 'from fate_flow.utils import detect_utils\n'), ((3172, 3213), 'os.makedirs', 'os.makedirs', (['conf_path_dir'], {'exist_ok': '(True)'}), '(conf_path_dir, exist_ok=True)\n', (3183, 3213), False, 'import os\n'), ((3225, 3273), 'os.path.join', 'os.path.join', (['conf_path_dir', '"""runtime_conf.json"""'], {}), "(conf_path_dir, 'runtime_conf.json')\n", (3237, 3273), False, 'import os\n'), ((3839, 3876), 'os.path.join', 'os.path.join', (['job_dir', '"""job_dsl.json"""'], {}), "(job_dir, 'job_dsl.json')\n", (3851, 3876), False, 'import os\n'), ((3905, 3951), 'os.path.join', 'os.path.join', (['job_dir', '"""job_runtime_conf.json"""'], {}), "(job_dir, 'job_runtime_conf.json')\n", (3917, 3951), False, 'import os\n'), ((4646, 4657), 'fate_flow.driver.dsl_parser.DSLParser', 'DSLParser', ([], {}), '()\n', (4655, 4657), False, 'from fate_flow.driver.dsl_parser import DSLParser\n'), ((6526, 6557), 'fate_flow.entity.runtime_config.RuntimeConfig.JOB_QUEUE.qsize', 'RuntimeConfig.JOB_QUEUE.qsize', ([], {}), '()\n', (6555, 6557), False, 'from fate_flow.entity.runtime_config import RuntimeConfig\n'), ((7718, 7723), 'fate_flow.db.db_models.Job', 'Job', ([], {}), '()\n', (7721, 7723), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((7814, 7833), 'arch.api.utils.core.current_timestamp', 'current_timestamp', ([], {}), '()\n', (7831, 7833), False, 'from arch.api.utils.core import current_timestamp\n'), ((7860, 7889), 'arch.api.utils.core.json_dumps', 'json_dumps', (['[current_task_id]'], {}), '([current_task_id])\n', (7870, 7889), False, 'from arch.api.utils.core import json_loads, json_dumps\n'), ((9006, 9044), 'os.makedirs', 'os.makedirs', (['config_dir'], {'exist_ok': '(True)'}), '(config_dir, exist_ok=True)\n', (9017, 9044), False, 'import os\n'), ((9205, 9236), 'os.path.join', 'os.path.join', (['config_dir', '"""pid"""'], {}), "(config_dir, 'pid')\n", (9217, 9236), False, 'import os\n'), ((9470, 9561), 'subprocess.Popen', 'subprocess.Popen', (['process_cmd'], {'stdout': 'std_log', 'stderr': 'std_log', 'startupinfo': 'startupinfo'}), '(process_cmd, stdout=std_log, stderr=std_log, startupinfo=\n startupinfo)\n', (9486, 9561), False, 'import subprocess\n'), ((1870, 1909), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (1907, 1909), False, 'from arch.api.utils import file_utils\n'), ((1988, 2027), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (2025, 2027), False, 'from arch.api.utils import file_utils\n'), ((3421, 3450), 
'os.path.dirname', 'os.path.dirname', (['job_dsl_path'], {}), '(job_dsl_path)\n', (3436, 3450), False, 'import os\n'), ((4052, 4075), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (4073, 4075), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((4703, 4742), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (4740, 4742), False, 'from arch.api.utils import file_utils\n'), ((4876, 4915), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (4913, 4915), False, 'from arch.api.utils import file_utils\n'), ((5469, 5492), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (5490, 5492), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((6067, 6090), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (6088, 6090), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((6639, 6662), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (6660, 6662), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((7939, 7951), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (7949, 7951), False, 'import uuid\n'), ((8099, 8114), 'os.kill', 'os.kill', (['pid', '(0)'], {}), '(pid, 0)\n', (8106, 8114), False, 'import os\n'), ((9069, 9104), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (9080, 9104), False, 'import os\n'), ((9124, 9183), 'os.path.join', 'os.path.join', (['(log_dir if log_dir else config_dir)', '"""std.log"""'], {}), "(log_dir if log_dir else config_dir, 'std.log')\n", (9136, 9183), False, 'import os\n'), ((9284, 9308), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (9306, 9308), False, 'import subprocess\n'), ((11620, 11641), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (11635, 11641), False, 'import functools\n'), ((6986, 6999), 'fate_flow.db.db_models.Task.select', 'Task.select', ([], {}), '()\n', (6997, 6999), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((9903, 9929), 'os.waitpid', 'os.waitpid', (['(-1)', 'os.WNOHANG'], {}), '(-1, os.WNOHANG)\n', (9913, 9929), False, 'import os\n'), ((10109, 10188), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""child process %s exit with exitcode %s"""', 'child_pid', 'exitcode'], {}), "('child process %s exit with exitcode %s', child_pid, exitcode)\n", (10125, 10188), False, 'from fate_flow.settings import stat_logger\n'), ((1639, 1662), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1660, 1662), False, 'import datetime\n'), ((3650, 3676), 'json.dumps', 'json.dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (3660, 3676), False, 'import json\n'), ((4092, 4159), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_dsl', 'Job.f_runtime_conf', 'Job.f_train_runtime_conf'], {}), '(Job.f_dsl, Job.f_runtime_conf, Job.f_train_runtime_conf)\n', (4102, 4159), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((5509, 5576), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_dsl', 'Job.f_runtime_conf', 'Job.f_train_runtime_conf'], {}), '(Job.f_dsl, Job.f_runtime_conf, Job.f_train_runtime_conf)\n', (5519, 5576), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((5895, 5916), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_dsl'], {}), 
'(job.f_dsl)\n', (5905, 5916), False, 'from arch.api.utils.core import json_loads, json_dumps\n'), ((5918, 5948), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_runtime_conf'], {}), '(job.f_runtime_conf)\n', (5928, 5948), False, 'from arch.api.utils.core import json_loads, json_dumps\n'), ((5950, 5986), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_train_runtime_conf'], {}), '(job.f_train_runtime_conf)\n', (5960, 5986), False, 'from arch.api.utils.core import json_loads, json_dumps\n'), ((9977, 10039), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['"""no child process was immediately available"""'], {}), "('no child process was immediately available')\n", (9993, 10039), False, 'from fate_flow.settings import stat_logger\n'), ((10262, 10351), 'fate_flow.settings.stat_logger.warning', 'stat_logger.warning', (['"""current process has no existing unwaited-for child processes."""'], {}), "(\n 'current process has no existing unwaited-for child processes.')\n", (10281, 10351), False, 'from fate_flow.settings import stat_logger\n'), ((4285, 4306), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_dsl'], {}), '(job.f_dsl)\n', (4295, 4306), False, 'from arch.api.utils.core import json_loads, json_dumps\n'), ((4321, 4351), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_runtime_conf'], {}), '(job.f_runtime_conf)\n', (4331, 4351), False, 'from arch.api.utils.core import json_loads, json_dumps\n'), ((4420, 4456), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_train_runtime_conf'], {}), '(job.f_train_runtime_conf)\n', (4430, 4456), False, 'from arch.api.utils.core import json_loads, json_dumps\n'), ((6347, 6359), 'fate_flow.db.db_models.Job.select', 'Job.select', ([], {}), '()\n', (6357, 6359), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((6922, 6935), 'fate_flow.db.db_models.Task.select', 'Task.select', ([], {}), '()\n', (6933, 6935), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11748, 11780), 'flask.request.json.get', 'request.json.get', (['"""job_id"""', 'None'], {}), "('job_id', None)\n", (11764, 11780), False, 'from flask import request, redirect, url_for\n'), ((12117, 12193), 'fate_flow.utils.api_utils.request_execute_server', 'api_utils.request_execute_server', ([], {'request': 'request', 'execute_host': 'execute_host'}), '(request=request, execute_host=execute_host)\n', (12149, 12193), False, 'from fate_flow.utils import api_utils\n'), ((6261, 6294), 'operator.attrgetter', 'operator.attrgetter', (["('f_%s' % f_n)"], {}), "('f_%s' % f_n)\n", (6280, 6294), False, 'import operator\n'), ((6834, 6867), 'operator.attrgetter', 'operator.attrgetter', (["('f_%s' % f_n)"], {}), "('f_%s' % f_n)\n", (6853, 6867), False, 'import operator\n'), ((12295, 12320), 'flask.url_for', 'url_for', (['request.endpoint'], {}), '(request.endpoint)\n', (12302, 12320), False, 'from flask import request, redirect, url_for\n')]
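check_job_process and kill_process in the job_utils row above probe process liveness with os.kill(pid, 0). A standalone re-statement of that probe using the exception classes Python maps the relevant errno values to (POSIX semantics, as in the original):

import os

def process_alive(pid: int) -> bool:
    # signal 0 performs existence/permission checks without delivering a signal
    if pid <= 0:
        return False
    try:
        os.kill(pid, 0)
    except ProcessLookupError:   # errno.ESRCH in the original: no such process
        return False
    except PermissionError:      # errno.EPERM: process exists but belongs to another user
        return True
    return True

print(process_alive(os.getpid()))   # True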
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import operator from arch.api import session from fate_flow.settings import stat_logger from fate_flow.utils import session_utils from fate_flow.db.db_models import DB, DataView, TrackingMetric def query_data_view(**kwargs): with DB.connection_context(): filters = [] for f_n, f_v in kwargs.items(): attr_name = 'f_%s' % f_n if hasattr(DataView, attr_name): filters.append(operator.attrgetter('f_%s' % f_n)(DataView) == f_v) if filters: data_views = DataView.select().where(*filters) else: data_views = [] return [data_view for data_view in data_views] @session_utils.session_detect() def delete_table(data_views): data = [] status = False for data_view in data_views: table_name = data_view.f_table_name namespace = data_view.f_table_namespace table_info = {'table_name': table_name, 'namespace': namespace} if table_name and namespace and table_info not in data: table = session.get_data_table(name=table_name, namespace=namespace) try: table.destroy() data.append(table_info) status = True except: pass return status, data def delete_metric_data(metric_info): if metric_info.get('model'): sql = drop_metric_data_mode(metric_info.get('model')) else: sql = delete_metric_data_from_db(metric_info) return sql def drop_metric_data_mode(model): with DB.connection_context(): try: drop_sql = 'drop table t_tracking_metric_{}'.format(model) DB.execute_sql(drop_sql) stat_logger.info(drop_sql) return drop_sql except Exception as e: stat_logger.exception(e) raise e def delete_metric_data_from_db(metric_info): with DB.connection_context(): try: job_id = metric_info['job_id'] metric_info.pop('job_id') delete_sql = 'delete from t_tracking_metric_{} where f_job_id="{}"'.format(job_id[:8], job_id) for k, v in metric_info.items(): if hasattr(TrackingMetric, "f_" + k): connect_str = " and f_" delete_sql = delete_sql + connect_str + k + '="{}"'.format(v) DB.execute_sql(delete_sql) stat_logger.info(delete_sql) return delete_sql except Exception as e: stat_logger.exception(e) raise e
[ "fate_flow.db.db_models.DB.connection_context", "fate_flow.db.db_models.DataView.select", "fate_flow.db.db_models.DB.execute_sql", "fate_flow.settings.stat_logger.info", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.session_utils.session_detect" ]
[((1283, 1313), 'fate_flow.utils.session_utils.session_detect', 'session_utils.session_detect', ([], {}), '()\n', (1311, 1313), False, 'from fate_flow.utils import session_utils\n'), ((853, 876), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (874, 876), False, 'from fate_flow.db.db_models import DB, DataView, TrackingMetric\n'), ((2161, 2184), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (2182, 2184), False, 'from fate_flow.db.db_models import DB, DataView, TrackingMetric\n'), ((2518, 2541), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (2539, 2541), False, 'from fate_flow.db.db_models import DB, DataView, TrackingMetric\n'), ((1658, 1718), 'arch.api.session.get_data_table', 'session.get_data_table', ([], {'name': 'table_name', 'namespace': 'namespace'}), '(name=table_name, namespace=namespace)\n', (1680, 1718), False, 'from arch.api import session\n'), ((2282, 2306), 'fate_flow.db.db_models.DB.execute_sql', 'DB.execute_sql', (['drop_sql'], {}), '(drop_sql)\n', (2296, 2306), False, 'from fate_flow.db.db_models import DB, DataView, TrackingMetric\n'), ((2319, 2345), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['drop_sql'], {}), '(drop_sql)\n', (2335, 2345), False, 'from fate_flow.settings import stat_logger\n'), ((2982, 3008), 'fate_flow.db.db_models.DB.execute_sql', 'DB.execute_sql', (['delete_sql'], {}), '(delete_sql)\n', (2996, 3008), False, 'from fate_flow.db.db_models import DB, DataView, TrackingMetric\n'), ((3021, 3049), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['delete_sql'], {}), '(delete_sql)\n', (3037, 3049), False, 'from fate_flow.settings import stat_logger\n'), ((2417, 2441), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (2438, 2441), False, 'from fate_flow.settings import stat_logger\n'), ((3123, 3147), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (3144, 3147), False, 'from fate_flow.settings import stat_logger\n'), ((1149, 1166), 'fate_flow.db.db_models.DataView.select', 'DataView.select', ([], {}), '()\n', (1164, 1166), False, 'from fate_flow.db.db_models import DB, DataView, TrackingMetric\n'), ((1052, 1085), 'operator.attrgetter', 'operator.attrgetter', (["('f_%s' % f_n)"], {}), "('f_%s' % f_n)\n", (1071, 1085), False, 'import operator\n')]
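The delete_metric_data_from_db function in the row above assembles its DELETE statement purely by string formatting: the table suffix is the first eight characters of the job id, and every remaining entry of metric_info becomes an `and f_<key>="<value>"` clause. A standalone sketch of just that assembly step (the hasattr check against TrackingMetric and the DB call are omitted here):

def build_metric_delete_sql(metric_info):
    info = dict(metric_info)
    job_id = info.pop("job_id")
    sql = 'delete from t_tracking_metric_{} where f_job_id="{}"'.format(job_id[:8], job_id)
    for k, v in info.items():
        sql += ' and f_{}="{}"'.format(k, v)
    return sql

# build_metric_delete_sql({"job_id": "202101010001", "component_name": "evaluation_0"})
# -> 'delete from t_tracking_metric_20210101 where f_job_id="202101010001"
#     and f_component_name="evaluation_0"'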
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import numpy as np from fate_flow.entity.metric import Metric, MetricMeta from federatedml.model_base import ModelBase from federatedml.param.sample_weight_param import SampleWeightParam from federatedml.statistic.data_overview import get_label_count, check_negative_sample_weight from federatedml.util import consts, LOGGER class SampleWeight(ModelBase): def __init__(self): super().__init__() self.model_param = SampleWeightParam() self.metric_name = "sample_weight" self.metric_namespace = "train" self.metric_type = "SAMPLE_WEIGHT" self.weight_mode = None def _init_model(self, params): self.model_param = params self.class_weight = params.class_weight self.sample_weight_name = params.sample_weight_name self.normalize = params.normalize self.need_run = params.need_run @staticmethod def get_class_weight(data_instances): class_weight = get_label_count(data_instances) n_samples = data_instances.count() n_classes = len(class_weight.keys()) res_class_weight = {str(k): n_samples / (n_classes * v) for k, v in class_weight.items()} return res_class_weight @staticmethod def replace_weight(data_instance, class_weight, weight_loc=None, weight_base=None): weighted_data_instance = copy.copy(data_instance) original_features = weighted_data_instance.features if weight_loc is not None: if weight_base is not None: inst_weight = original_features[weight_loc] / weight_base else: inst_weight = original_features[weight_loc] weighted_data_instance.set_weight(inst_weight) weighted_data_instance.features = original_features[np.arange(original_features.shape[0]) != weight_loc] else: weighted_data_instance.set_weight(class_weight.get(str(data_instance.label), 1)) return weighted_data_instance @staticmethod def assign_sample_weight(data_instances, class_weight, weight_loc, normalize): weight_base = None if weight_loc is not None and normalize: def sum_sample_weight(kv_iterator): sample_weight = 0 for _, inst in kv_iterator: sample_weight += inst.features[weight_loc] return sample_weight weight_sum = data_instances.mapPartitions(sum_sample_weight).reduce(lambda x, y: x + y) # LOGGER.debug(f"weight_sum is {weight_sum}") weight_base = weight_sum / data_instances.count() # LOGGER.debug(f"weight_base is {weight_base}") return data_instances.mapValues(lambda v: SampleWeight.replace_weight(v, class_weight, weight_loc, weight_base)) @staticmethod def get_weight_loc(data_instances, sample_weight_name): weight_loc = None if sample_weight_name: try: weight_loc = data_instances.schema["header"].index(sample_weight_name) except ValueError: return return weight_loc def transform_weighted_instance(self, data_instances, weight_loc): if self.class_weight and self.class_weight == 'balanced': self.class_weight = SampleWeight.get_class_weight(data_instances) return SampleWeight.assign_sample_weight(data_instances, self.class_weight, weight_loc, self.normalize) def callback_info(self): 
class_weight = None classes = None if self.class_weight: class_weight = {str(k): v for k, v in self.class_weight.items()} classes = sorted([str(k) for k in self.class_weight.keys()]) # LOGGER.debug(f"callback class weight is: {class_weight}") metric_meta = MetricMeta(name='train', metric_type=self.metric_type, extra_metas={ "weight_mode": self.weight_mode, "class_weight": class_weight, "classes": classes, "sample_weight_name": self.sample_weight_name }) self.callback_metric(metric_name=self.metric_name, metric_namespace=self.metric_namespace, metric_data=[Metric(self.metric_name, 0)]) self.tracker.set_metric_meta(metric_namespace=self.metric_namespace, metric_name=self.metric_name, metric_meta=metric_meta) def fit(self, data_instances): if self.sample_weight_name is None and self.class_weight is None: return data_instances # if self.class_weight and isinstance(self.class_weight, dict): # self.class_weight = {int(k): v for k, v in self.class_weight.items()} if self.class_weight: self.weight_mode = "class weight" if self.sample_weight_name and self.class_weight: LOGGER.warning(f"Both 'sample_weight_name' and 'class_weight' provided. " f"Only weight from 'sample_weight_name' is used.") new_schema = copy.deepcopy(data_instances.schema) new_schema["sample_weight"] = "weight" weight_loc = None if self.sample_weight_name: self.weight_mode = "sample weight name" weight_loc = SampleWeight.get_weight_loc(data_instances, self.sample_weight_name) if weight_loc is not None: new_schema["header"].pop(weight_loc) else: raise ValueError(f"Cannot find weight column of given sample_weight_name '{self.sample_weight_name}'.") result_instances = self.transform_weighted_instance(data_instances, weight_loc) result_instances.schema = new_schema self.callback_info() if result_instances.mapPartitions(check_negative_sample_weight).reduce(lambda x, y: x or y): LOGGER.warning(f"Negative weight found in weighted instances.") return result_instances
[ "fate_flow.entity.metric.MetricMeta", "fate_flow.entity.metric.Metric" ]
[((1111, 1130), 'federatedml.param.sample_weight_param.SampleWeightParam', 'SampleWeightParam', ([], {}), '()\n', (1128, 1130), False, 'from federatedml.param.sample_weight_param import SampleWeightParam\n'), ((1633, 1664), 'federatedml.statistic.data_overview.get_label_count', 'get_label_count', (['data_instances'], {}), '(data_instances)\n', (1648, 1664), False, 'from federatedml.statistic.data_overview import get_label_count, check_negative_sample_weight\n'), ((2024, 2048), 'copy.copy', 'copy.copy', (['data_instance'], {}), '(data_instance)\n', (2033, 2048), False, 'import copy\n'), ((4463, 4670), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""train"""', 'metric_type': 'self.metric_type', 'extra_metas': "{'weight_mode': self.weight_mode, 'class_weight': class_weight, 'classes':\n classes, 'sample_weight_name': self.sample_weight_name}"}), "(name='train', metric_type=self.metric_type, extra_metas={\n 'weight_mode': self.weight_mode, 'class_weight': class_weight,\n 'classes': classes, 'sample_weight_name': self.sample_weight_name})\n", (4473, 4670), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n'), ((5939, 5975), 'copy.deepcopy', 'copy.deepcopy', (['data_instances.schema'], {}), '(data_instances.schema)\n', (5952, 5975), False, 'import copy\n'), ((5765, 5895), 'federatedml.util.LOGGER.warning', 'LOGGER.warning', (['f"""Both \'sample_weight_name\' and \'class_weight\' provided. Only weight from \'sample_weight_name\' is used."""'], {}), '(\n f"Both \'sample_weight_name\' and \'class_weight\' provided. Only weight from \'sample_weight_name\' is used."\n )\n', (5779, 5895), False, 'from federatedml.util import consts, LOGGER\n'), ((6737, 6800), 'federatedml.util.LOGGER.warning', 'LOGGER.warning', (['f"""Negative weight found in weighted instances."""'], {}), "(f'Negative weight found in weighted instances.')\n", (6751, 6800), False, 'from federatedml.util import consts, LOGGER\n'), ((2459, 2496), 'numpy.arange', 'np.arange', (['original_features.shape[0]'], {}), '(original_features.shape[0])\n', (2468, 2496), True, 'import numpy as np\n'), ((5082, 5109), 'fate_flow.entity.metric.Metric', 'Metric', (['self.metric_name', '(0)'], {}), '(self.metric_name, 0)\n', (5088, 5109), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n')]
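get_class_weight in the SampleWeight row above implements the usual "balanced" weighting: weight(class) = n_samples / (n_classes * n_samples_in_class). A tiny self-contained check with made-up label counts:

label_count = {0: 90, 1: 10}            # e.g. 90 negatives, 10 positives
n_samples = sum(label_count.values())  # 100
n_classes = len(label_count)            # 2
class_weight = {str(k): n_samples / (n_classes * v) for k, v in label_count.items()}
print(class_weight)   # {'0': 0.56 (approx.), '1': 5.0} -- the minority class is up-weighted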
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil from fate_arch.common import file_utils from fate_flow.utils import model_utils, schedule_utils from fate_flow.settings import stat_logger from fate_arch.common.base_utils import json_loads, json_dumps from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter from fate_flow.pipelined_model.pipelined_model import PipelinedModel from fate_flow.utils.model_utils import check_before_deploy from fate_flow.utils.schedule_utils import get_dsl_parser_by_version def deploy(config_data): model_id = config_data.get('model_id') model_version = config_data.get('model_version') local_role = config_data.get('local').get('role') local_party_id = config_data.get('local').get('party_id') child_model_version = config_data.get('child_model_version') try: party_model_id = model_utils.gen_party_model_id(model_id=model_id, role=local_role, party_id=local_party_id) model = PipelinedModel(model_id=party_model_id, model_version=model_version) model_data = model.collect_models(in_bytes=True) if "pipeline.pipeline:Pipeline" not in model_data: raise Exception("Can not found pipeline file in model.") # check if the model could be executed the deploy process (parent/child) if not check_before_deploy(model): raise Exception('Child model could not be deployed.') # copy proto content from parent model and generate a child model deploy_model = PipelinedModel(model_id=party_model_id, model_version=child_model_version) shutil.copytree(src=model.model_path, dst=deploy_model.model_path) pipeline = deploy_model.read_component_model('pipeline', 'pipeline')['Pipeline'] # modify two pipeline files (model version/ train_runtime_conf) train_runtime_conf = json_loads(pipeline.train_runtime_conf) adapter = JobRuntimeConfigAdapter(train_runtime_conf) train_runtime_conf = adapter.update_model_id_version(model_version=deploy_model.model_version) pipeline.model_version = child_model_version pipeline.train_runtime_conf = json_dumps(train_runtime_conf, byte=True) parser = get_dsl_parser_by_version(train_runtime_conf.get('dsl_version', '1')) train_dsl = json_loads(pipeline.train_dsl) parent_predict_dsl = json_loads(pipeline.inference_dsl) if str(train_runtime_conf.get('dsl_version', '1')) == '1': predict_dsl = json_loads(pipeline.inference_dsl) else: if config_data.get('dsl') or config_data.get('predict_dsl'): predict_dsl = config_data.get('dsl') if config_data.get('dsl') else config_data.get('predict_dsl') if not isinstance(predict_dsl, dict): predict_dsl = json_loads(predict_dsl) else: if config_data.get('cpn_list', None): cpn_list = config_data.pop('cpn_list') else: cpn_list = list(train_dsl.get('components', {}).keys()) parser_version = train_runtime_conf.get('dsl_version', '1') if str(parser_version) == '1': predict_dsl = parent_predict_dsl else: parser = schedule_utils.get_dsl_parser_by_version(parser_version) predict_dsl = parser.deploy_component(cpn_list, train_dsl) # save predict dsl into child model file 
parser.verify_dsl(predict_dsl, "predict") inference_dsl = parser.get_predict_dsl(role=local_role, predict_dsl=predict_dsl, setting_conf_prefix=os.path.join(file_utils.get_python_base_directory(), *['federatedml', 'conf', 'setting_conf'])) pipeline.inference_dsl = json_dumps(inference_dsl, byte=True) if model_utils.compare_version(pipeline.fate_version, '1.5.0') == 'gt': pipeline.parent_info = json_dumps({'parent_model_id': model_id, 'parent_model_version': model_version}, byte=True) pipeline.parent = False runtime_conf_on_party = json_loads(pipeline.runtime_conf_on_party) runtime_conf_on_party['job_parameters']['model_version'] = child_model_version pipeline.runtime_conf_on_party = json_dumps(runtime_conf_on_party, byte=True) # save model file deploy_model.save_pipeline(pipeline) shutil.copyfile(os.path.join(deploy_model.model_path, "pipeline.pb"), os.path.join(deploy_model.model_path, "variables", "data", "pipeline", "pipeline", "Pipeline")) model_info = model_utils.gather_model_info_data(deploy_model) model_info['job_id'] = model_info['f_model_version'] model_info['size'] = deploy_model.calculate_model_file_size() model_info['role'] = local_role model_info['party_id'] = local_party_id model_info['work_mode'] = adapter.get_job_work_mode() model_info['parent'] = False if model_info.get('f_inference_dsl') else True if model_utils.compare_version(model_info['f_fate_version'], '1.5.0') == 'eq': model_info['roles'] = model_info.get('f_train_runtime_conf', {}).get('role', {}) model_info['initiator_role'] = model_info.get('f_train_runtime_conf', {}).get('initiator', {}).get('role') model_info['initiator_party_id'] = model_info.get('f_train_runtime_conf', {}).get('initiator', {}).get('party_id') model_utils.save_model_info(model_info) except Exception as e: stat_logger.exception(e) return 100, f"deploy model of role {local_role} {local_party_id} failed, details: {str(e)}" else: return 0, f"deploy model of role {local_role} {local_party_id} success"
[ "fate_flow.utils.config_adapter.JobRuntimeConfigAdapter", "fate_flow.utils.model_utils.check_before_deploy", "fate_flow.pipelined_model.pipelined_model.PipelinedModel", "fate_flow.utils.model_utils.save_model_info", "fate_flow.utils.model_utils.gen_party_model_id", "fate_flow.utils.model_utils.compare_version", "fate_flow.utils.schedule_utils.get_dsl_parser_by_version", "fate_flow.utils.model_utils.gather_model_info_data", "fate_flow.settings.stat_logger.exception" ]
[((1446, 1542), 'fate_flow.utils.model_utils.gen_party_model_id', 'model_utils.gen_party_model_id', ([], {'model_id': 'model_id', 'role': 'local_role', 'party_id': 'local_party_id'}), '(model_id=model_id, role=local_role, party_id\n =local_party_id)\n', (1476, 1542), False, 'from fate_flow.utils import model_utils, schedule_utils\n'), ((1554, 1622), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'PipelinedModel', ([], {'model_id': 'party_model_id', 'model_version': 'model_version'}), '(model_id=party_model_id, model_version=model_version)\n', (1568, 1622), False, 'from fate_flow.pipelined_model.pipelined_model import PipelinedModel\n'), ((2097, 2171), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'PipelinedModel', ([], {'model_id': 'party_model_id', 'model_version': 'child_model_version'}), '(model_id=party_model_id, model_version=child_model_version)\n', (2111, 2171), False, 'from fate_flow.pipelined_model.pipelined_model import PipelinedModel\n'), ((2180, 2246), 'shutil.copytree', 'shutil.copytree', ([], {'src': 'model.model_path', 'dst': 'deploy_model.model_path'}), '(src=model.model_path, dst=deploy_model.model_path)\n', (2195, 2246), False, 'import shutil\n'), ((2438, 2477), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.train_runtime_conf'], {}), '(pipeline.train_runtime_conf)\n', (2448, 2477), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((2496, 2539), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['train_runtime_conf'], {}), '(train_runtime_conf)\n', (2519, 2539), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((2734, 2775), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['train_runtime_conf'], {'byte': '(True)'}), '(train_runtime_conf, byte=True)\n', (2744, 2775), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((2884, 2914), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.train_dsl'], {}), '(pipeline.train_dsl)\n', (2894, 2914), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((2944, 2978), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.inference_dsl'], {}), '(pipeline.inference_dsl)\n', (2954, 2978), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((4527, 4563), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['inference_dsl'], {'byte': '(True)'}), '(inference_dsl, byte=True)\n', (4537, 4563), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((5359, 5407), 'fate_flow.utils.model_utils.gather_model_info_data', 'model_utils.gather_model_info_data', (['deploy_model'], {}), '(deploy_model)\n', (5393, 5407), False, 'from fate_flow.utils import model_utils, schedule_utils\n'), ((6207, 6246), 'fate_flow.utils.model_utils.save_model_info', 'model_utils.save_model_info', (['model_info'], {}), '(model_info)\n', (6234, 6246), False, 'from fate_flow.utils import model_utils, schedule_utils\n'), ((1905, 1931), 'fate_flow.utils.model_utils.check_before_deploy', 'check_before_deploy', (['model'], {}), '(model)\n', (1924, 1931), False, 'from fate_flow.utils.model_utils import check_before_deploy\n'), ((3073, 3107), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.inference_dsl'], {}), '(pipeline.inference_dsl)\n', (3083, 3107), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((4575, 4634), 
'fate_flow.utils.model_utils.compare_version', 'model_utils.compare_version', (['pipeline.fate_version', '"""1.5.0"""'], {}), "(pipeline.fate_version, '1.5.0')\n", (4602, 4634), False, 'from fate_flow.utils import model_utils, schedule_utils\n'), ((4679, 4774), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (["{'parent_model_id': model_id, 'parent_model_version': model_version}"], {'byte': '(True)'}), "({'parent_model_id': model_id, 'parent_model_version':\n model_version}, byte=True)\n", (4689, 4774), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((4843, 4885), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.runtime_conf_on_party'], {}), '(pipeline.runtime_conf_on_party)\n', (4853, 4885), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((5022, 5066), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['runtime_conf_on_party'], {'byte': '(True)'}), '(runtime_conf_on_party, byte=True)\n', (5032, 5066), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((5163, 5215), 'os.path.join', 'os.path.join', (['deploy_model.model_path', '"""pipeline.pb"""'], {}), "(deploy_model.model_path, 'pipeline.pb')\n", (5175, 5215), False, 'import os\n'), ((5241, 5339), 'os.path.join', 'os.path.join', (['deploy_model.model_path', '"""variables"""', '"""data"""', '"""pipeline"""', '"""pipeline"""', '"""Pipeline"""'], {}), "(deploy_model.model_path, 'variables', 'data', 'pipeline',\n 'pipeline', 'Pipeline')\n", (5253, 5339), False, 'import os\n'), ((5784, 5850), 'fate_flow.utils.model_utils.compare_version', 'model_utils.compare_version', (["model_info['f_fate_version']", '"""1.5.0"""'], {}), "(model_info['f_fate_version'], '1.5.0')\n", (5811, 5850), False, 'from fate_flow.utils import model_utils, schedule_utils\n'), ((6283, 6307), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (6304, 6307), False, 'from fate_flow.settings import stat_logger\n'), ((3398, 3421), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['predict_dsl'], {}), '(predict_dsl)\n', (3408, 3421), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((3878, 3934), 'fate_flow.utils.schedule_utils.get_dsl_parser_by_version', 'schedule_utils.get_dsl_parser_by_version', (['parser_version'], {}), '(parser_version)\n', (3918, 3934), False, 'from fate_flow.utils import model_utils, schedule_utils\n'), ((4331, 4369), 'fate_arch.common.file_utils.get_python_base_directory', 'file_utils.get_python_base_directory', ([], {}), '()\n', (4367, 4369), False, 'from fate_arch.common import file_utils\n')]
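The deploy() function above essentially copies the parent model directory and rewrites the model version inside its runtime configuration before saving the child model. A rough sketch of just those two operations, using placeholder file and key names rather than FATE's real on-disk layout:

import json
import shutil

def make_child_model(parent_dir, child_dir, child_model_version):
    # Copy everything from the parent model, then patch the version in place.
    shutil.copytree(src=parent_dir, dst=child_dir)
    conf_path = f"{child_dir}/runtime_conf.json"   # placeholder file name
    with open(conf_path) as f:
        runtime_conf = json.load(f)
    runtime_conf["job_parameters"]["model_version"] = child_model_version
    with open(conf_path, "w") as f:
        json.dump(runtime_conf, f, indent=4)
    return runtime_conf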
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os.path import sys from copy import deepcopy from fate_arch.common import file_utils from fate_arch.common.versions import get_versions from fate_flow.entity import ComponentProvider from fate_flow.db.component_registry import ComponentRegistry from fate_flow.db.job_default_config import JobDefaultConfig from fate_flow.manager.worker_manager import WorkerManager from fate_flow.entity.types import WorkerName from fate_flow.settings import stat_logger from fate_flow.utils.base_utils import get_fate_flow_python_directory class ProviderManager: @classmethod def register_default_providers(cls): code, result = cls.register_fate_flow_provider() if code != 0: raise Exception(f"register fate flow tools component failed") code, result, provider = cls.register_default_fate_provider() if code != 0: raise Exception(f"register default fate algorithm component failed") return provider @classmethod def register_fate_flow_provider(cls): provider = cls.get_fate_flow_provider() return WorkerManager.start_general_worker(worker_name=WorkerName.PROVIDER_REGISTRAR, provider=provider, run_in_subprocess=False) @classmethod def register_default_fate_provider(cls): provider = cls.get_default_fate_provider() sys.path.append(provider.env["PYTHONPATH"]) code, result = WorkerManager.start_general_worker(worker_name=WorkerName.PROVIDER_REGISTRAR, provider=provider, run_in_subprocess=False) return code, result, provider @classmethod def get_fate_flow_provider(cls): path = get_fate_flow_python_directory("fate_flow") provider = ComponentProvider(name="fate_flow", version=get_versions()["FATEFlow"], path=path, class_path=ComponentRegistry.get_default_class_path()) return provider @classmethod def get_default_fate_provider_env(cls): provider = cls.get_default_fate_provider() return provider.env @classmethod def get_default_fate_provider(cls): path = JobDefaultConfig.default_component_provider_path.split("/") path = file_utils.get_fate_python_directory(*path) if not os.path.exists(path): raise Exception(f"default fate provider not exists: {path}") provider = ComponentProvider(name="fate", version=get_versions()["FATE"], path=path, class_path=ComponentRegistry.get_default_class_path()) return provider @classmethod def if_default_provider(cls, provider: ComponentProvider): if provider == cls.get_fate_flow_provider() or provider == cls.get_default_fate_provider(): return True else: return False @classmethod def fill_fate_flow_provider(cls, dsl): dest_dsl = deepcopy(dsl) fate_flow_provider = cls.get_fate_flow_provider() support_components = ComponentRegistry.get_provider_components(fate_flow_provider.name, fate_flow_provider.version) provider_key = f"{fate_flow_provider.name}@{fate_flow_provider.version}" for cpn, config in dsl["components"].items(): if config["module"] in support_components: dest_dsl["components"][cpn]["provider"] = provider_key return dest_dsl @classmethod def get_fate_flow_component_module(cls): fate_flow_provider 
= cls.get_fate_flow_provider() return ComponentRegistry.get_provider_components(fate_flow_provider.name, fate_flow_provider.version) @classmethod def get_provider_object(cls, provider_info, check_registration=True): name, version = provider_info["name"], provider_info["version"] if check_registration and ComponentRegistry.get_providers().get(name, {}).get(version, None) is None: raise Exception(f"{name} {version} provider is not registered") path = ComponentRegistry.get_providers().get(name, {}).get(version, {}).get("path", []) class_path = ComponentRegistry.get_providers().get(name, {}).get(version, {}).get("class_path", None) if class_path is None: class_path = ComponentRegistry.REGISTRY["default_settings"]["class_path"] return ComponentProvider(name=name, version=version, path=path, class_path=class_path) @classmethod def get_job_provider_group(cls, dsl_parser, components: list = None, check_registration=True): providers_info = dsl_parser.get_job_providers(provider_detail=ComponentRegistry.REGISTRY) group = {} if components is not None: _providers_info = {} for component_name in components: _providers_info[component_name] = providers_info.get(component_name) providers_info = _providers_info for component_name, provider_info in providers_info.items(): provider = cls.get_provider_object(provider_info["provider"], check_registration=check_registration) group_key = "@".join([provider.name, provider.version]) if group_key not in group: group[group_key] = { "provider": provider.to_dict(), "if_default_provider": cls.if_default_provider(provider), "components": [component_name] } else: group[group_key]["components"].append(component_name) return group @classmethod def get_component_provider(cls, dsl_parser, component_name): providers = dsl_parser.get_job_providers(provider_detail=ComponentRegistry.REGISTRY) return cls.get_provider_object(providers[component_name]["provider"]) @classmethod def get_component_parameters(cls, dsl_parser, component_name, role, party_id, provider: ComponentProvider = None, previous_components_parameters: dict = None): if not provider: provider = cls.get_component_provider(dsl_parser=dsl_parser, component_name=component_name) parameters = dsl_parser.parse_component_parameters(component_name, ComponentRegistry.REGISTRY, provider.name, provider.version, local_role=role, local_party_id=int(party_id)) user_specified_parameters = dsl_parser.parse_user_specified_component_parameters(component_name, ComponentRegistry.REGISTRY, provider.name, provider.version, local_role=role, local_party_id=int(party_id), previous_parameters=previous_components_parameters) return parameters, user_specified_parameters @classmethod def get_component_run_info(cls, dsl_parser, component_name, role, party_id, previous_components_parameters: dict = None): provider = cls.get_component_provider(dsl_parser, component_name) parameters, user_specified_parameters = cls.get_component_parameters(dsl_parser, component_name, role, party_id, provider, previous_components_parameters) return provider, parameters, user_specified_parameters
[ "fate_flow.db.job_default_config.JobDefaultConfig.default_component_provider_path.split", "fate_flow.db.component_registry.ComponentRegistry.get_provider_components", "fate_flow.db.component_registry.ComponentRegistry.get_default_class_path", "fate_flow.db.component_registry.ComponentRegistry.get_providers", "fate_flow.manager.worker_manager.WorkerManager.start_general_worker", "fate_flow.entity.ComponentProvider", "fate_flow.utils.base_utils.get_fate_flow_python_directory" ]
[((1704, 1830), 'fate_flow.manager.worker_manager.WorkerManager.start_general_worker', 'WorkerManager.start_general_worker', ([], {'worker_name': 'WorkerName.PROVIDER_REGISTRAR', 'provider': 'provider', 'run_in_subprocess': '(False)'}), '(worker_name=WorkerName.\n PROVIDER_REGISTRAR, provider=provider, run_in_subprocess=False)\n', (1738, 1830), False, 'from fate_flow.manager.worker_manager import WorkerManager\n'), ((1948, 1991), 'sys.path.append', 'sys.path.append', (["provider.env['PYTHONPATH']"], {}), "(provider.env['PYTHONPATH'])\n", (1963, 1991), False, 'import sys\n'), ((2015, 2141), 'fate_flow.manager.worker_manager.WorkerManager.start_general_worker', 'WorkerManager.start_general_worker', ([], {'worker_name': 'WorkerName.PROVIDER_REGISTRAR', 'provider': 'provider', 'run_in_subprocess': '(False)'}), '(worker_name=WorkerName.\n PROVIDER_REGISTRAR, provider=provider, run_in_subprocess=False)\n', (2049, 2141), False, 'from fate_flow.manager.worker_manager import WorkerManager\n'), ((2245, 2288), 'fate_flow.utils.base_utils.get_fate_flow_python_directory', 'get_fate_flow_python_directory', (['"""fate_flow"""'], {}), "('fate_flow')\n", (2275, 2288), False, 'from fate_flow.utils.base_utils import get_fate_flow_python_directory\n'), ((2684, 2743), 'fate_flow.db.job_default_config.JobDefaultConfig.default_component_provider_path.split', 'JobDefaultConfig.default_component_provider_path.split', (['"""/"""'], {}), "('/')\n", (2738, 2743), False, 'from fate_flow.db.job_default_config import JobDefaultConfig\n'), ((2759, 2802), 'fate_arch.common.file_utils.get_fate_python_directory', 'file_utils.get_fate_python_directory', (['*path'], {}), '(*path)\n', (2795, 2802), False, 'from fate_arch.common import file_utils\n'), ((3409, 3422), 'copy.deepcopy', 'deepcopy', (['dsl'], {}), '(dsl)\n', (3417, 3422), False, 'from copy import deepcopy\n'), ((3510, 3608), 'fate_flow.db.component_registry.ComponentRegistry.get_provider_components', 'ComponentRegistry.get_provider_components', (['fate_flow_provider.name', 'fate_flow_provider.version'], {}), '(fate_flow_provider.name,\n fate_flow_provider.version)\n', (3551, 3608), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((4026, 4124), 'fate_flow.db.component_registry.ComponentRegistry.get_provider_components', 'ComponentRegistry.get_provider_components', (['fate_flow_provider.name', 'fate_flow_provider.version'], {}), '(fate_flow_provider.name,\n fate_flow_provider.version)\n', (4067, 4124), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((4809, 4888), 'fate_flow.entity.ComponentProvider', 'ComponentProvider', ([], {'name': 'name', 'version': 'version', 'path': 'path', 'class_path': 'class_path'}), '(name=name, version=version, path=path, class_path=class_path)\n', (4826, 4888), False, 'from fate_flow.entity import ComponentProvider\n'), ((2402, 2444), 'fate_flow.db.component_registry.ComponentRegistry.get_default_class_path', 'ComponentRegistry.get_default_class_path', ([], {}), '()\n', (2442, 2444), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((3017, 3059), 'fate_flow.db.component_registry.ComponentRegistry.get_default_class_path', 'ComponentRegistry.get_default_class_path', ([], {}), '()\n', (3057, 3059), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((2352, 2366), 'fate_arch.common.versions.get_versions', 'get_versions', ([], {}), '()\n', (2364, 2366), False, 'from fate_arch.common.versions import get_versions\n'), ((2971, 2985), 
'fate_arch.common.versions.get_versions', 'get_versions', ([], {}), '()\n', (2983, 2985), False, 'from fate_arch.common.versions import get_versions\n'), ((4319, 4352), 'fate_flow.db.component_registry.ComponentRegistry.get_providers', 'ComponentRegistry.get_providers', ([], {}), '()\n', (4350, 4352), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((4486, 4519), 'fate_flow.db.component_registry.ComponentRegistry.get_providers', 'ComponentRegistry.get_providers', ([], {}), '()\n', (4517, 4519), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((4588, 4621), 'fate_flow.db.component_registry.ComponentRegistry.get_providers', 'ComponentRegistry.get_providers', ([], {}), '()\n', (4619, 4621), False, 'from fate_flow.db.component_registry import ComponentRegistry\n')]
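get_job_provider_group in the ProviderManager row above buckets component names under a "name@version" key, one bucket per distinct provider. The same grouping, reduced to plain dictionaries so it can be exercised on its own:

def group_by_provider(providers_info):
    group = {}
    for component_name, info in providers_info.items():
        provider = info["provider"]   # {"name": ..., "version": ...}
        group_key = "@".join([provider["name"], provider["version"]])
        group.setdefault(group_key, {"provider": provider, "components": []})
        group[group_key]["components"].append(component_name)
    return group

# group_by_provider({"reader_0":    {"provider": {"name": "fate", "version": "1.7.0"}},
#                    "hetero_lr_0": {"provider": {"name": "fate", "version": "1.7.0"}}})
# -> {"fate@1.7.0": {"provider": {...}, "components": ["reader_0", "hetero_lr_0"]}}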
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import functools import os import shutil import zipfile from fate_arch.common import file_utils from fate_flow.utils.log_utils import getLogger from fate_flow.db.db_models import ComponentProviderInfo from fate_flow.db.dependence_registry import DependenceRegistry from fate_flow.entity import ComponentProvider from fate_flow.entity.types import FateDependenceName, ComponentProviderName, FateDependenceStorageEngine from fate_flow.settings import FATE_VERSION_DEPENDENCIES_PATH from fate_flow.worker.base_worker import BaseWorker from fate_flow.utils.base_utils import get_fate_flow_python_directory LOGGER = getLogger() def upload_except_exit(func): @functools.wraps(func) def _wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: provider = kwargs.get("provider") dependence_type = kwargs.get("dependence_type") storage_engine = FateDependenceStorageEngine.HDFS.value storage_meta = { "f_storage_engine": storage_engine, "f_type": dependence_type, "f_version": provider.version, "f_upload_status": False } DependenceRegistry.save_dependencies_storage_meta(storage_meta) raise e return _wrapper class DependenceUpload(BaseWorker): def _run(self): provider = ComponentProvider(**self.args.config.get("provider")) dependence_type = self.args.dependence_type self.upload_dependencies_to_hadoop(provider=provider, dependence_type=dependence_type) @classmethod @upload_except_exit def upload_dependencies_to_hadoop(cls, provider, dependence_type, storage_engine=FateDependenceStorageEngine.HDFS.value): LOGGER.info(f'upload {dependence_type} dependencies to hadoop') LOGGER.info(f'dependencies loading ...') if dependence_type == FateDependenceName.Python_Env.value: # todo: version python env target_file = os.path.join(FATE_VERSION_DEPENDENCIES_PATH, provider.version, "python_env.zip") source_path = os.path.dirname(os.path.dirname(os.getenv("VIRTUAL_ENV"))) cls.rewrite_pyvenv_cfg(os.path.join(os.getenv("VIRTUAL_ENV"), "pyvenv.cfg"), "python_env") env_dir_list = ["python", "miniconda3"] cls.zip_dir(source_path, target_file, env_dir_list) dependencies_conf = {"executor_python": f"./{dependence_type}/python/venv/bin/python", "driver_python": f"{os.path.join(os.getenv('VIRTUAL_ENV'), 'bin', 'python')}"} else: fate_code_dependencies = { "fate_flow": get_fate_flow_python_directory("fate_flow"), "fate_arch": file_utils.get_fate_python_directory("fate_arch"), "conf": file_utils.get_project_base_directory("conf") } fate_flow_snapshot_time = DependenceRegistry.get_modify_time(fate_code_dependencies["fate_flow"]) fate_code_base_dir = os.path.join(FATE_VERSION_DEPENDENCIES_PATH, provider.version, "fate_code", "fate") python_base_dir = os.path.join(fate_code_base_dir, "python") if os.path.exists(os.path.dirname(python_base_dir)): shutil.rmtree(os.path.dirname(python_base_dir)) for key, path in fate_code_dependencies.items(): cls.copy_dir(path, os.path.join(python_base_dir, key)) if key == "conf": 
cls.move_dir(os.path.join(python_base_dir, key), os.path.dirname(fate_code_base_dir)) if provider.name == ComponentProviderName.FATE.value: source_path = provider.path else: source_path = ComponentProviderInfo.get_or_none( ComponentProviderInfo.f_version == provider.version, ComponentProviderInfo.f_provider_name == ComponentProviderName.FATE.value ).f_path cls.copy_dir(source_path, os.path.join(python_base_dir, "federatedml")) target_file = os.path.join(FATE_VERSION_DEPENDENCIES_PATH, provider.version, "fate.zip") cls.zip_dir(os.path.dirname(fate_code_base_dir), target_file) dependencies_conf = {"executor_env_pythonpath": f"./{dependence_type}/fate/python:$PYTHONPATH"} LOGGER.info(f'dependencies loading success') LOGGER.info(f'start upload') snapshot_time = DependenceRegistry.get_modify_time(source_path) storage_dir = f"/fate_dependence/{provider.version}" os.system(f" {os.getenv('HADOOP_HOME')}/bin/hdfs dfs -mkdir -p {storage_dir}") status = os.system(f"{os.getenv('HADOOP_HOME')}/bin/hdfs dfs -put -f {target_file} {storage_dir}") LOGGER.info(f'upload end, status is {status}') if status == 0: storage_path = os.path.join(storage_dir, os.path.basename(target_file)) storage_meta = { "f_storage_engine": storage_engine, "f_type": dependence_type, "f_version": provider.version, "f_storage_path": storage_path, "f_snapshot_time": snapshot_time, "f_fate_flow_snapshot_time": fate_flow_snapshot_time if dependence_type == FateDependenceName.Fate_Source_Code.value else None, "f_dependencies_conf": {"archives": "#".join([storage_path, dependence_type])}, "f_upload_status": False, "f_pid": 0 } storage_meta["f_dependencies_conf"].update(dependencies_conf) DependenceRegistry.save_dependencies_storage_meta(storage_meta) else: raise Exception(f"{os.getenv('HADOOP_HOME')}/bin/hdfs dfs -put {target_file} {storage_dir} failed status: {status}") return storage_meta @classmethod def zip_dir(cls, input_dir_path, output_zip_full_name, dir_list=None): with zipfile.ZipFile(output_zip_full_name, "w", zipfile.ZIP_DEFLATED) as zip_object: if not dir_list: cls.zip_write(zip_object, input_dir_path, input_dir_path) else: for dir_name in dir_list: dir_path = os.path.join(input_dir_path, dir_name) cls.zip_write(zip_object, dir_path, input_dir_path) @classmethod def zip_write(cls, zip_object, dir_path, input_dir_path): for path, dirnames, filenames in os.walk(dir_path): fpath = path.replace(input_dir_path, '') for filename in filenames: zip_object.write(os.path.join(path, filename), os.path.join(fpath, filename)) @staticmethod def copy_dir(source_path, target_path): if os.path.exists(target_path): shutil.rmtree(target_path) shutil.copytree(source_path, target_path) @staticmethod def move_dir(source_path, target_path): shutil.move(source_path, target_path) @classmethod def rewrite_pyvenv_cfg(cls, file, dir_name): import re bak_file = file + '.bak' shutil.copyfile(file, bak_file) with open(file, "w") as fw: with open(bak_file, 'r') as fr: lines = fr.readlines() match_str = None for line in lines: change_line = re.findall(".*=(.*)miniconda.*", line) if change_line: if not match_str: match_str = change_line[0] line = re.sub(match_str, f" ./{dir_name}/", line) fw.write(line) if __name__ == '__main__': DependenceUpload().run()
[ "fate_flow.utils.log_utils.getLogger", "fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time", "fate_flow.db.db_models.ComponentProviderInfo.get_or_none", "fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta", "fate_flow.utils.base_utils.get_fate_flow_python_directory" ]
[((1229, 1240), 'fate_flow.utils.log_utils.getLogger', 'getLogger', ([], {}), '()\n', (1238, 1240), False, 'from fate_flow.utils.log_utils import getLogger\n'), ((1278, 1299), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1293, 1299), False, 'import functools\n'), ((5091, 5138), 'fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time', 'DependenceRegistry.get_modify_time', (['source_path'], {}), '(source_path)\n', (5125, 5138), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((7083, 7100), 'os.walk', 'os.walk', (['dir_path'], {}), '(dir_path)\n', (7090, 7100), False, 'import os\n'), ((7362, 7389), 'os.path.exists', 'os.path.exists', (['target_path'], {}), '(target_path)\n', (7376, 7389), False, 'import os\n'), ((7438, 7479), 'shutil.copytree', 'shutil.copytree', (['source_path', 'target_path'], {}), '(source_path, target_path)\n', (7453, 7479), False, 'import shutil\n'), ((7551, 7588), 'shutil.move', 'shutil.move', (['source_path', 'target_path'], {}), '(source_path, target_path)\n', (7562, 7588), False, 'import shutil\n'), ((7716, 7747), 'shutil.copyfile', 'shutil.copyfile', (['file', 'bak_file'], {}), '(file, bak_file)\n', (7731, 7747), False, 'import shutil\n'), ((2635, 2720), 'os.path.join', 'os.path.join', (['FATE_VERSION_DEPENDENCIES_PATH', 'provider.version', '"""python_env.zip"""'], {}), "(FATE_VERSION_DEPENDENCIES_PATH, provider.version, 'python_env.zip'\n )\n", (2647, 2720), False, 'import os\n'), ((3561, 3632), 'fate_flow.db.dependence_registry.DependenceRegistry.get_modify_time', 'DependenceRegistry.get_modify_time', (["fate_code_dependencies['fate_flow']"], {}), "(fate_code_dependencies['fate_flow'])\n", (3595, 3632), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((3666, 3753), 'os.path.join', 'os.path.join', (['FATE_VERSION_DEPENDENCIES_PATH', 'provider.version', '"""fate_code"""', '"""fate"""'], {}), "(FATE_VERSION_DEPENDENCIES_PATH, provider.version, 'fate_code',\n 'fate')\n", (3678, 3753), False, 'import os\n'), ((3780, 3822), 'os.path.join', 'os.path.join', (['fate_code_base_dir', '"""python"""'], {}), "(fate_code_base_dir, 'python')\n", (3792, 3822), False, 'import os\n'), ((4719, 4793), 'os.path.join', 'os.path.join', (['FATE_VERSION_DEPENDENCIES_PATH', 'provider.version', '"""fate.zip"""'], {}), "(FATE_VERSION_DEPENDENCIES_PATH, provider.version, 'fate.zip')\n", (4731, 4793), False, 'import os\n'), ((6236, 6299), 'fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta', 'DependenceRegistry.save_dependencies_storage_meta', (['storage_meta'], {}), '(storage_meta)\n', (6285, 6299), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((6577, 6641), 'zipfile.ZipFile', 'zipfile.ZipFile', (['output_zip_full_name', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(output_zip_full_name, 'w', zipfile.ZIP_DEFLATED)\n", (6592, 6641), False, 'import zipfile\n'), ((7403, 7429), 'shutil.rmtree', 'shutil.rmtree', (['target_path'], {}), '(target_path)\n', (7416, 7429), False, 'import shutil\n'), ((1832, 1895), 'fate_flow.db.dependence_registry.DependenceRegistry.save_dependencies_storage_meta', 'DependenceRegistry.save_dependencies_storage_meta', (['storage_meta'], {}), '(storage_meta)\n', (1881, 1895), False, 'from fate_flow.db.dependence_registry import DependenceRegistry\n'), ((3314, 3357), 'fate_flow.utils.base_utils.get_fate_flow_python_directory', 'get_fate_flow_python_directory', (['"""fate_flow"""'], {}), "('fate_flow')\n", (3344, 3357), 
False, 'from fate_flow.utils.base_utils import get_fate_flow_python_directory\n'), ((3388, 3437), 'fate_arch.common.file_utils.get_fate_python_directory', 'file_utils.get_fate_python_directory', (['"""fate_arch"""'], {}), "('fate_arch')\n", (3424, 3437), False, 'from fate_arch.common import file_utils\n'), ((3463, 3508), 'fate_arch.common.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', (['"""conf"""'], {}), "('conf')\n", (3500, 3508), False, 'from fate_arch.common import file_utils\n'), ((3853, 3885), 'os.path.dirname', 'os.path.dirname', (['python_base_dir'], {}), '(python_base_dir)\n', (3868, 3885), False, 'import os\n'), ((4647, 4691), 'os.path.join', 'os.path.join', (['python_base_dir', '"""federatedml"""'], {}), "(python_base_dir, 'federatedml')\n", (4659, 4691), False, 'import os\n'), ((4818, 4853), 'os.path.dirname', 'os.path.dirname', (['fate_code_base_dir'], {}), '(fate_code_base_dir)\n', (4833, 4853), False, 'import os\n'), ((5527, 5556), 'os.path.basename', 'os.path.basename', (['target_file'], {}), '(target_file)\n', (5543, 5556), False, 'import os\n'), ((2774, 2798), 'os.getenv', 'os.getenv', (['"""VIRTUAL_ENV"""'], {}), "('VIRTUAL_ENV')\n", (2783, 2798), False, 'import os\n'), ((2849, 2873), 'os.getenv', 'os.getenv', (['"""VIRTUAL_ENV"""'], {}), "('VIRTUAL_ENV')\n", (2858, 2873), False, 'import os\n'), ((3918, 3950), 'os.path.dirname', 'os.path.dirname', (['python_base_dir'], {}), '(python_base_dir)\n', (3933, 3950), False, 'import os\n'), ((4048, 4082), 'os.path.join', 'os.path.join', (['python_base_dir', 'key'], {}), '(python_base_dir, key)\n', (4060, 4082), False, 'import os\n'), ((4382, 4551), 'fate_flow.db.db_models.ComponentProviderInfo.get_or_none', 'ComponentProviderInfo.get_or_none', (['(ComponentProviderInfo.f_version == provider.version)', '(ComponentProviderInfo.f_provider_name == ComponentProviderName.FATE.value)'], {}), '(ComponentProviderInfo.f_version ==\n provider.version, ComponentProviderInfo.f_provider_name ==\n ComponentProviderName.FATE.value)\n', (4415, 4551), False, 'from fate_flow.db.db_models import ComponentProviderInfo\n'), ((5222, 5246), 'os.getenv', 'os.getenv', (['"""HADOOP_HOME"""'], {}), "('HADOOP_HOME')\n", (5231, 5246), False, 'import os\n'), ((5318, 5342), 'os.getenv', 'os.getenv', (['"""HADOOP_HOME"""'], {}), "('HADOOP_HOME')\n", (5327, 5342), False, 'import os\n'), ((6851, 6889), 'os.path.join', 'os.path.join', (['input_dir_path', 'dir_name'], {}), '(input_dir_path, dir_name)\n', (6863, 6889), False, 'import os\n'), ((7227, 7255), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (7239, 7255), False, 'import os\n'), ((7257, 7286), 'os.path.join', 'os.path.join', (['fpath', 'filename'], {}), '(fpath, filename)\n', (7269, 7286), False, 'import os\n'), ((7969, 8007), 're.findall', 're.findall', (['""".*=(.*)miniconda.*"""', 'line'], {}), "('.*=(.*)miniconda.*', line)\n", (7979, 8007), False, 'import re\n'), ((4151, 4185), 'os.path.join', 'os.path.join', (['python_base_dir', 'key'], {}), '(python_base_dir, key)\n', (4163, 4185), False, 'import os\n'), ((4187, 4222), 'os.path.dirname', 'os.path.dirname', (['fate_code_base_dir'], {}), '(fate_code_base_dir)\n', (4202, 4222), False, 'import os\n'), ((6345, 6369), 'os.getenv', 'os.getenv', (['"""HADOOP_HOME"""'], {}), "('HADOOP_HOME')\n", (6354, 6369), False, 'import os\n'), ((8172, 8214), 're.sub', 're.sub', (['match_str', 'f""" ./{dir_name}/"""', 'line'], {}), "(match_str, f' ./{dir_name}/', line)\n", (8178, 8214), False, 'import 
re\n'), ((3186, 3210), 'os.getenv', 'os.getenv', (['"""VIRTUAL_ENV"""'], {}), "('VIRTUAL_ENV')\n", (3195, 3210), False, 'import os\n')]
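The zip_dir / zip_write helpers in the DependenceUpload row above walk a directory tree and store each file under a path relative to the input directory. A trimmed stdlib-only equivalent (using os.path.relpath instead of the string replace in the original):

import os
import zipfile

def zip_dir(input_dir_path, output_zip_full_name):
    with zipfile.ZipFile(output_zip_full_name, "w", zipfile.ZIP_DEFLATED) as zip_object:
        for path, _, filenames in os.walk(input_dir_path):
            rel_dir = os.path.relpath(path, input_dir_path)
            for filename in filenames:
                zip_object.write(os.path.join(path, filename),
                                 os.path.join(rel_dir, filename))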
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from fate_arch.common import log from fate_flow.entity.metric import Metric, MetricMeta from fate_arch import storage from fate_flow.utils import job_utils from fate_flow.scheduling_apps.client import ControllerClient from fate_flow.components.component_base import ComponentBase LOGGER = log.getLogger() class Download(ComponentBase): def __init__(self): super(Download, self).__init__() self.parameters = {} def run(self, component_parameters=None, args=None): self.parameters = component_parameters["DownloadParam"] self.parameters["role"] = component_parameters["role"] self.parameters["local"] = component_parameters["local"] name, namespace = self.parameters.get("name"), self.parameters.get("namespace") with open(os.path.abspath(self.parameters["output_path"]), "w") as fout: with storage.Session.build(session_id=job_utils.generate_session_id(self.tracker.task_id, self.tracker.task_version, self.tracker.role, self.tracker.party_id, suffix="storage", random_end=True), name=name, namespace=namespace) as storage_session: data_table = storage_session.get_table() count = data_table.count() LOGGER.info('===== begin to export data =====') lines = 0 job_info = {} job_info["job_id"] = self.tracker.job_id job_info["role"] = self.tracker.role job_info["party_id"] = self.tracker.party_id for key, value in data_table.collect(): if not value: fout.write(key + "\n") else: fout.write(key + self.parameters.get("delimiter", ",") + str(value) + "\n") lines += 1 if lines % 2000 == 0: LOGGER.info("===== export {} lines =====".format(lines)) if lines % 10000 == 0: job_info["progress"] = lines/count*100//1 ControllerClient.update_job(job_info=job_info) job_info["progress"] = 100 ControllerClient.update_job(job_info=job_info) self.callback_metric(metric_name='data_access', metric_namespace='download', metric_data=[Metric("count", data_table.count())]) LOGGER.info("===== export {} lines totally =====".format(lines)) LOGGER.info('===== export data finish =====') LOGGER.info('===== export data file path:{} ====='.format(os.path.abspath(self.parameters["output_path"]))) def callback_metric(self, metric_name, metric_namespace, metric_data): self.tracker.log_metric_data(metric_name=metric_name, metric_namespace=metric_namespace, metrics=metric_data) self.tracker.set_metric_meta(metric_namespace, metric_name, MetricMeta(name='download', metric_type='DOWNLOAD'))
[ "fate_flow.entity.metric.MetricMeta", "fate_flow.utils.job_utils.generate_session_id", "fate_flow.scheduling_apps.client.ControllerClient.update_job" ]
[((918, 933), 'fate_arch.common.log.getLogger', 'log.getLogger', ([], {}), '()\n', (931, 933), False, 'from fate_arch.common import log\n'), ((3794, 3845), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""download"""', 'metric_type': '"""DOWNLOAD"""'}), "(name='download', metric_type='DOWNLOAD')\n", (3804, 3845), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n'), ((1417, 1464), 'os.path.abspath', 'os.path.abspath', (["self.parameters['output_path']"], {}), "(self.parameters['output_path'])\n", (1432, 1464), False, 'import os\n'), ((2864, 2910), 'fate_flow.scheduling_apps.client.ControllerClient.update_job', 'ControllerClient.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (2891, 2910), False, 'from fate_flow.scheduling_apps.client import ControllerClient\n'), ((3334, 3381), 'os.path.abspath', 'os.path.abspath', (["self.parameters['output_path']"], {}), "(self.parameters['output_path'])\n", (3349, 3381), False, 'import os\n'), ((1530, 1695), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', (['self.tracker.task_id', 'self.tracker.task_version', 'self.tracker.role', 'self.tracker.party_id'], {'suffix': '"""storage"""', 'random_end': '(True)'}), "(self.tracker.task_id, self.tracker.\n task_version, self.tracker.role, self.tracker.party_id, suffix=\n 'storage', random_end=True)\n", (1559, 1695), False, 'from fate_flow.utils import job_utils\n'), ((2758, 2804), 'fate_flow.scheduling_apps.client.ControllerClient.update_job', 'ControllerClient.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (2785, 2804), False, 'from fate_flow.scheduling_apps.client import ControllerClient\n')]
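Download.run above streams (key, value) pairs into a text file and pushes an integer progress percentage back to the job controller every 10000 lines. The same loop with the FATE-specific pieces swapped for a plain callback (report_progress stands in for ControllerClient.update_job):

def export_table(records, count, output_path, report_progress, delimiter=","):
    lines = 0
    with open(output_path, "w") as fout:
        for key, value in records:
            if not value:
                fout.write(key + "\n")
            else:
                fout.write(key + delimiter + str(value) + "\n")
            lines += 1
            if lines % 10000 == 0:
                report_progress(lines / count * 100 // 1)   # integer percent, as in the original
    report_progress(100)
    return lines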
# # Copyright 2021 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy from pathlib import Path from flask import request from fate_flow.db.component_registry import ComponentRegistry from fate_flow.entity import ComponentProvider, RetCode from fate_flow.entity.types import WorkerName from fate_flow.manager.worker_manager import WorkerManager from fate_flow.utils.api_utils import error_response, get_json_result from fate_flow.utils.detect_utils import validate_request @manager.route('/register', methods=['POST']) @validate_request("name", "version", "path") def register(): info = request.json or request.form.to_dict() if not Path(info["path"]).is_dir(): return error_response(400, "invalid path") provider = ComponentProvider(name=info["name"], version=info["version"], path=info["path"], class_path=info.get("class_path", ComponentRegistry.get_default_class_path())) code, std = WorkerManager.start_general_worker(worker_name=WorkerName.PROVIDER_REGISTRAR, provider=provider) if code == 0: ComponentRegistry.load() if ComponentRegistry.get_providers().get(provider.name, {}).get(provider.version, None) is None: return get_json_result(retcode=RetCode.OPERATING_ERROR, retmsg=f"not load into memory") else: return get_json_result() else: return get_json_result(retcode=RetCode.OPERATING_ERROR, retmsg=f"register failed:\n{std}") @manager.route('/registry/get', methods=['POST']) def get_registry(): return get_json_result(data=ComponentRegistry.REGISTRY) @manager.route('/get', methods=['POST']) def get_providers(): providers = ComponentRegistry.get_providers() result = {} for name, group_detail in providers.items(): result[name] = {} for version, detail in group_detail.items(): result[name][version] = copy.deepcopy(detail) if "components" in detail: result[name][version]["components"] = set([c.lower() for c in detail["components"].keys()]) return get_json_result(data=result) @manager.route('/<provider_name>/get', methods=['POST']) def get_provider(provider_name): return get_json_result(data=ComponentRegistry.get_providers().get(provider_name))
[ "fate_flow.db.component_registry.ComponentRegistry.load", "fate_flow.utils.api_utils.get_json_result", "fate_flow.db.component_registry.ComponentRegistry.get_default_class_path", "fate_flow.utils.api_utils.error_response", "fate_flow.db.component_registry.ComponentRegistry.get_providers", "fate_flow.utils.detect_utils.validate_request", "fate_flow.manager.worker_manager.WorkerManager.start_general_worker" ]
[((1081, 1124), 'fate_flow.utils.detect_utils.validate_request', 'validate_request', (['"""name"""', '"""version"""', '"""path"""'], {}), "('name', 'version', 'path')\n", (1097, 1124), False, 'from fate_flow.utils.detect_utils import validate_request\n'), ((1573, 1674), 'fate_flow.manager.worker_manager.WorkerManager.start_general_worker', 'WorkerManager.start_general_worker', ([], {'worker_name': 'WorkerName.PROVIDER_REGISTRAR', 'provider': 'provider'}), '(worker_name=WorkerName.\n PROVIDER_REGISTRAR, provider=provider)\n', (1607, 1674), False, 'from fate_flow.manager.worker_manager import WorkerManager\n'), ((2169, 2217), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'ComponentRegistry.REGISTRY'}), '(data=ComponentRegistry.REGISTRY)\n', (2184, 2217), False, 'from fate_flow.utils.api_utils import error_response, get_json_result\n'), ((2298, 2331), 'fate_flow.db.component_registry.ComponentRegistry.get_providers', 'ComponentRegistry.get_providers', ([], {}), '()\n', (2329, 2331), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((2692, 2720), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': 'result'}), '(data=result)\n', (2707, 2720), False, 'from fate_flow.utils.api_utils import error_response, get_json_result\n'), ((1168, 1190), 'flask.request.form.to_dict', 'request.form.to_dict', ([], {}), '()\n', (1188, 1190), False, 'from flask import request\n'), ((1246, 1281), 'fate_flow.utils.api_utils.error_response', 'error_response', (['(400)', '"""invalid path"""'], {}), "(400, 'invalid path')\n", (1260, 1281), False, 'from fate_flow.utils.api_utils import error_response, get_json_result\n'), ((1696, 1720), 'fate_flow.db.component_registry.ComponentRegistry.load', 'ComponentRegistry.load', ([], {}), '()\n', (1718, 1720), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((2002, 2093), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'RetCode.OPERATING_ERROR', 'retmsg': 'f"""register failed:\n{std}"""'}), '(retcode=RetCode.OPERATING_ERROR, retmsg=\n f"""register failed:\n{std}""")\n', (2017, 2093), False, 'from fate_flow.utils.api_utils import error_response, get_json_result\n'), ((1845, 1930), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'RetCode.OPERATING_ERROR', 'retmsg': 'f"""not load into memory"""'}), "(retcode=RetCode.OPERATING_ERROR, retmsg=f'not load into memory'\n )\n", (1860, 1930), False, 'from fate_flow.utils.api_utils import error_response, get_json_result\n'), ((1959, 1976), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {}), '()\n', (1974, 1976), False, 'from fate_flow.utils.api_utils import error_response, get_json_result\n'), ((2512, 2533), 'copy.deepcopy', 'copy.deepcopy', (['detail'], {}), '(detail)\n', (2525, 2533), False, 'import copy\n'), ((1202, 1220), 'pathlib.Path', 'Path', (["info['path']"], {}), "(info['path'])\n", (1206, 1220), False, 'from pathlib import Path\n'), ((1512, 1554), 'fate_flow.db.component_registry.ComponentRegistry.get_default_class_path', 'ComponentRegistry.get_default_class_path', ([], {}), '()\n', (1552, 1554), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((2845, 2878), 'fate_flow.db.component_registry.ComponentRegistry.get_providers', 'ComponentRegistry.get_providers', ([], {}), '()\n', (2876, 2878), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((1732, 1765), 
'fate_flow.db.component_registry.ComponentRegistry.get_providers', 'ComponentRegistry.get_providers', ([], {}), '()\n', (1763, 1765), False, 'from fate_flow.db.component_registry import ComponentRegistry\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from flask import Flask, request from fate_arch.common import log from fate_flow.db.db_models import Task from fate_flow.entity.types import RetCode from fate_flow.operation.job_saver import JobSaver from fate_flow.scheduler.dag_scheduler import DAGScheduler from fate_flow.settings import stat_logger from fate_flow.utils.api_utils import get_json_result manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=RetCode.EXCEPTION_ERROR, retmsg=log.exception_to_trace_string(e)) # apply initiator for control operation @manager.route('/<job_id>/<role>/<party_id>/stop/<stop_status>', methods=['POST']) def stop_job(job_id, role, party_id, stop_status): retcode, retmsg = DAGScheduler.stop_job(job_id=job_id, role=role, party_id=party_id, stop_status=stop_status) return get_json_result(retcode=retcode, retmsg=retmsg) @manager.route('/<job_id>/<role>/<party_id>/rerun', methods=['POST']) def rerun_job(job_id, role, party_id): DAGScheduler.rerun_job(job_id=job_id, initiator_role=role, initiator_party_id=party_id, component_name=request.json.get("component_name")) return get_json_result(retcode=0, retmsg='success') @manager.route('/<job_id>/<component_name>/<task_id>/<task_version>/<role>/<party_id>/report', methods=['POST']) def report_task(job_id, component_name, task_id, task_version, role, party_id): task_info = {} task_info.update(request.json) task_info.update({ "job_id": job_id, "task_id": task_id, "task_version": task_version, "role": role, "party_id": party_id, }) JobSaver.update_task(task_info=task_info) if task_info.get("party_status"): JobSaver.update_status(Task, task_info) return get_json_result(retcode=0, retmsg='success')
[ "fate_flow.operation.job_saver.JobSaver.update_task", "fate_flow.operation.job_saver.JobSaver.update_status", "fate_flow.utils.api_utils.get_json_result", "fate_flow.scheduler.dag_scheduler.DAGScheduler.stop_job", "fate_flow.settings.stat_logger.exception" ]
[((985, 1000), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (990, 1000), False, 'from flask import Flask, request\n'), ((1064, 1088), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1085, 1088), False, 'from fate_flow.settings import stat_logger\n'), ((1390, 1485), 'fate_flow.scheduler.dag_scheduler.DAGScheduler.stop_job', 'DAGScheduler.stop_job', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'stop_status': 'stop_status'}), '(job_id=job_id, role=role, party_id=party_id,\n stop_status=stop_status)\n', (1411, 1485), False, 'from fate_flow.scheduler.dag_scheduler import DAGScheduler\n'), ((1493, 1540), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': 'retcode', 'retmsg': 'retmsg'}), '(retcode=retcode, retmsg=retmsg)\n', (1508, 1540), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1833, 1877), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (1848, 1877), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2305, 2346), 'fate_flow.operation.job_saver.JobSaver.update_task', 'JobSaver.update_task', ([], {'task_info': 'task_info'}), '(task_info=task_info)\n', (2325, 2346), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((2444, 2488), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (2459, 2488), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((2393, 2432), 'fate_flow.operation.job_saver.JobSaver.update_status', 'JobSaver.update_status', (['Task', 'task_info'], {}), '(Task, task_info)\n', (2415, 2432), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((1156, 1188), 'fate_arch.common.log.exception_to_trace_string', 'log.exception_to_trace_string', (['e'], {}), '(e)\n', (1185, 1188), False, 'from fate_arch.common import log\n'), ((1786, 1820), 'flask.request.json.get', 'request.json.get', (['"""component_name"""'], {}), "('component_name')\n", (1802, 1820), False, 'from flask import Flask, request\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy from fate_arch.abc import Components from fate_flow.component_env_utils import provider_utils from fate_flow.entity import ComponentProvider from fate_flow.db.component_registry import ComponentRegistry class RuntimeConfParserUtil(object): @classmethod def get_input_parameters(cls, submit_dict, components=None): return RuntimeConfParserV2.get_input_parameters(submit_dict, components=components) @classmethod def get_job_parameters(cls, submit_dict, conf_version=1): if conf_version == 1: return RuntimeConfParserV1.get_job_parameters(submit_dict) else: return RuntimeConfParserV2.get_job_parameters(submit_dict) @staticmethod def merge_dict(dict1, dict2): merge_ret = {} key_set = dict1.keys() | dict2.keys() for key in key_set: if key in dict1 and key in dict2: val1 = dict1.get(key) val2 = dict2.get(key) if isinstance(val1, dict): merge_ret[key] = RuntimeConfParserUtil.merge_dict(val1, val2) else: merge_ret[key] = val2 elif key in dict1: merge_ret[key] = dict1.get(key) else: merge_ret[key] = dict2.get(key) return merge_ret @staticmethod def generate_predict_conf_template(predict_dsl, train_conf, model_id, model_version): return RuntimeConfParserV2.generate_predict_conf_template(predict_dsl, train_conf, model_id, model_version) @staticmethod def get_module_name(module, role, provider: Components): return provider.get(module, ComponentRegistry.get_provider_components(provider.provider_name, provider.provider_version)).get_run_obj_name(role) @staticmethod def get_component_parameters( provider, runtime_conf, module, alias, redundant_param_check, local_role, local_party_id, parse_user_specified_only, pre_parameters=None ): provider_components = ComponentRegistry.get_provider_components( provider.provider_name, provider.provider_version ) support_roles = provider.get(module, provider_components).get_supported_roles() if runtime_conf["role"] is not None: support_roles = [r for r in runtime_conf["role"] if r in support_roles] role_on_module = copy.deepcopy(runtime_conf["role"]) for role in runtime_conf["role"]: if role not in support_roles: del role_on_module[role] if local_role not in role_on_module: return {} conf = dict() for key, value in runtime_conf.items(): if key not in [ "algorithm_parameters", "role_parameters", "component_parameters", ]: conf[key] = value conf["role"] = role_on_module conf["local"] = runtime_conf.get("local", {}) conf["local"].update({"role": local_role, "party_id": local_party_id}) conf["module"] = module conf["CodePath"] = provider.get(module, provider_components).get_run_obj_name( local_role ) param_class = provider.get(module, provider_components).get_param_obj(alias) role_idx = role_on_module[local_role].index(local_party_id) user_specified_parameters = dict() if pre_parameters: if parse_user_specified_only: user_specified_parameters.update( pre_parameters.get("ComponentParam", {}) ) else: param_class = param_class.update( pre_parameters.get("ComponentParam", {}) ) 
common_parameters = ( runtime_conf.get("component_parameters", {}).get("common", {}).get(alias, {}) ) if parse_user_specified_only: user_specified_parameters.update(common_parameters) else: param_class = param_class.update( common_parameters, not redundant_param_check ) # update role parameters for role_id, role_id_parameters in ( runtime_conf.get("component_parameters", {}) .get("role", {}) .get(local_role, {}) .items() ): if role_id == "all" or str(role_idx) in role_id.split("|"): parameters = role_id_parameters.get(alias, {}) if parse_user_specified_only: user_specified_parameters.update(parameters) else: param_class.update(parameters, not redundant_param_check) if not parse_user_specified_only: conf["ComponentParam"] = param_class.as_dict() param_class.check() else: conf["ComponentParam"] = user_specified_parameters return conf @staticmethod def convert_parameters_v1_to_v2(party_idx, parameter_v1, not_builtin_vars): parameter_v2 = {} for key, values in parameter_v1.items(): # stop here, values support to be a list if key not in not_builtin_vars: parameter_v2[key] = values[party_idx] else: parameter_v2[key] = RuntimeConfParserUtil.convert_parameters_v1_to_v2(party_idx, values, not_builtin_vars) return parameter_v2 @staticmethod def get_v1_role_parameters(provider, component, runtime_conf, dsl): component_role_parameters = dict() if "role_parameters" not in runtime_conf: return component_role_parameters role_parameters = runtime_conf["role_parameters"] module = dsl["components"][component]["module"] if module == "Reader": data_key = dsl["components"][component]["output"]["data"][0] for role, role_params in role_parameters.items(): if not role_params.get("args", {}).get("data", {}).get(data_key): continue component_role_parameters[role] = dict() dataset = role_params["args"]["data"][data_key] for idx, table in enumerate(dataset): component_role_parameters[role][str(idx)] = {component: {"table": table}} else: provider_components = ComponentRegistry.get_provider_components( provider.provider_name, provider.provider_version ) param_class = provider.get(module, provider_components).get_param_obj(component) extract_not_builtin = getattr(param_class, "extract_not_builtin", None) not_builtin_vars = extract_not_builtin() if extract_not_builtin is not None else {} for role, role_params in role_parameters.items(): params = role_params.get(component, {}) if not params: continue component_role_parameters[role] = dict() party_num = len(runtime_conf["role"][role]) for party_idx in range(party_num): party_param = RuntimeConfParserUtil.convert_parameters_v1_to_v2(party_idx, params, not_builtin_vars) component_role_parameters[role][str(party_idx)] = {component: party_param} return component_role_parameters @staticmethod def get_job_providers(dsl, provider_detail): provider_info = {} global_provider_name = None global_provider_version = None if "provider" in dsl: global_provider_msg = dsl["provider"].split("@", -1) if global_provider_msg[0] == "@" or len(global_provider_msg) > 2: raise ValueError("Provider format should be provider_name@provider_version or provider_name, " "@provider_version is not supported") if len(global_provider_msg) == 1: global_provider_name = global_provider_msg[0] else: global_provider_name, global_provider_version = global_provider_msg for component in dsl["components"]: module = dsl["components"][component]["module"] provider = dsl["components"][component].get("provider") name, version = None, None if provider: provider_msg = provider.split("@", -1) if provider[0] == "@" or 
len(provider_msg) > 2: raise ValueError("Provider format should be provider_name@provider_version or provider_name, " "@provider_version is not supported") if len(provider_msg) == 2: name, version = provider.split("@", -1) else: name = provider_msg[0] if not name: if global_provider_name: name = global_provider_name version = global_provider_version if name and name not in provider_detail["components"][module]["support_provider"]: raise ValueError(f"Provider: {name} does not support in {module}, please register") if version and version not in provider_detail["providers"][name]: raise ValueError(f"Provider: {name} version: {version} does not support in {module}, please register") if name and not version: version = RuntimeConfParserUtil.get_component_provider(alias=component, module=module, provider_detail=provider_detail, name=name) elif not name and not version: name, version = RuntimeConfParserUtil.get_component_provider(alias=component, module=module, provider_detail=provider_detail) provider_info.update({component: { "module": module, "provider": { "name": name, "version": version } }}) return provider_info @staticmethod def get_component_provider(alias, module, provider_detail, detect=True, name=None): if module not in provider_detail["components"]: if detect: raise ValueError(f"component {alias}, module {module}'s provider does not exist") else: return None if name is None: name = provider_detail["components"][module]["default_provider"] version = provider_detail["providers"][name]["default"]["version"] return name, version else: if name not in provider_detail["components"][module]["support_provider"]: raise ValueError(f"Provider {name} does not support, please register in fate-flow") version = provider_detail["providers"][name]["default"]["version"] return version @staticmethod def instantiate_component_provider(provider_detail, alias=None, module=None, provider_name=None, provider_version=None, local_role=None, local_party_id=None, detect=True, provider_cache=None, job_parameters=None): if provider_name and provider_version: provider_path = provider_detail["providers"][provider_name][provider_version]["path"] provider = provider_utils.get_provider_interface(ComponentProvider(name=provider_name, version=provider_version, path=provider_path, class_path=ComponentRegistry.get_default_class_path())) if provider_cache is not None: if provider_name not in provider_cache: provider_cache[provider_name] = {} provider_cache[provider_name][provider_version] = provider return provider provider_name, provider_version = RuntimeConfParserUtil.get_component_provider(alias=alias, module=module, provider_detail=provider_detail, local_role=local_role, local_party_id=local_party_id, job_parameters=job_parameters, provider_cache=provider_cache, detect=detect) return RuntimeConfParserUtil.instantiate_component_provider(provider_detail, provider_name=provider_name, provider_version=provider_version) @classmethod def merge_predict_runtime_conf(cls, train_conf, predict_conf): runtime_conf = copy.deepcopy(train_conf) train_role = train_conf.get("role") predict_role = predict_conf.get("role") if len(train_conf) < len(predict_role): raise ValueError(f"Predict roles is {predict_role}, train roles is {train_conf}, " "predict roles should be subset of train role") for role in train_role: if role not in predict_role: del runtime_conf["role"][role] if runtime_conf.get("job_parameters", {}).get("role", {}).get(role): del runtime_conf["job_parameters"]["role"][role] if runtime_conf.get("component_parameters", 
{}).get("role", {}).get(role): del runtime_conf["component_parameters"]["role"][role] continue train_party_ids = train_role[role] predict_party_ids = predict_role[role] diff = False for idx, party_id in enumerate(predict_party_ids): if party_id not in train_party_ids: raise ValueError(f"Predict role: {role} party_id: {party_id} not occurs in training") if train_party_ids[idx] != party_id: diff = True if not diff and len(train_party_ids) == len(predict_party_ids): continue for p_type in ["job_parameters", "component_parameters"]: if not runtime_conf.get(p_type, {}).get("role", {}).get(role): continue conf = runtime_conf[p_type]["role"][role] party_keys = conf.keys() new_conf = {} for party_key in party_keys: party_list = party_key.split("|", -1) new_party_list = [] for party in party_list: party_id = train_party_ids[int(party)] if party_id in predict_party_ids: new_idx = predict_party_ids.index(party_id) new_party_list.append(str(new_idx)) if not new_party_list: continue new_party_key = new_party_list[0] if len(new_party_list) == 1 else "|".join(new_party_list) if new_party_key not in new_conf: new_conf[new_party_key] = {} new_conf[new_party_key].update(conf[party_key]) runtime_conf[p_type]["role"][role] = new_conf runtime_conf = cls.merge_dict(runtime_conf, predict_conf) return runtime_conf class RuntimeConfParserV1(object): @staticmethod def get_job_parameters(submit_dict): ret = {} job_parameters = submit_dict.get("job_parameters", {}) for role in submit_dict["role"]: party_id_list = submit_dict["role"][role] ret[role] = {party_id: copy.deepcopy(job_parameters) for party_id in party_id_list} return ret class RuntimeConfParserV2(object): @classmethod def get_input_parameters(cls, submit_dict, components=None): if submit_dict.get("component_parameters", {}).get("role") is None or components is None: return {} roles = submit_dict["component_parameters"]["role"].keys() if not roles: return {} input_parameters = {"dsl_version": 2} cpn_dict = {} for reader_cpn in components: cpn_dict[reader_cpn] = {} for role in roles: role_parameters = submit_dict["component_parameters"]["role"][role] input_parameters[role] = [copy.deepcopy(cpn_dict)] * len(submit_dict["role"][role]) for idx, parameters in role_parameters.items(): for reader in components: if reader not in parameters: continue if idx == "all": party_id_list = submit_dict["role"][role] for i in range(len(party_id_list)): input_parameters[role][i][reader] = parameters[reader] elif len(idx.split("|")) == 1: input_parameters[role][int(idx)][reader] = parameters[reader] else: id_set = list(map(int, idx.split("|"))) for _id in id_set: input_parameters[role][_id][reader] = parameters[reader] return input_parameters @staticmethod def get_job_parameters(submit_dict): ret = {} job_parameters = submit_dict.get("job_parameters", {}) common_job_parameters = job_parameters.get("common", {}) role_job_parameters = job_parameters.get("role", {}) for role in submit_dict["role"]: party_id_list = submit_dict["role"][role] if not role_job_parameters: ret[role] = {party_id: copy.deepcopy(common_job_parameters) for party_id in party_id_list} continue ret[role] = {} for idx in range(len(party_id_list)): role_ids = role_job_parameters.get(role, {}).keys() parameters = copy.deepcopy(common_job_parameters) for role_id in role_ids: if role_id == "all" or str(idx) in role_id.split("|"): parameters = RuntimeConfParserUtil.merge_dict(parameters, role_job_parameters.get(role, {})[role_id]) ret[role][party_id_list[idx]] = parameters return ret @staticmethod def 
generate_predict_conf_template(predict_dsl, train_conf, model_id, model_version): if not train_conf.get("role") or not train_conf.get("initiator"): raise ValueError("role and initiator should be contain in job's trainconf") predict_conf = dict() predict_conf["dsl_version"] = 2 predict_conf["role"] = train_conf.get("role") predict_conf["initiator"] = train_conf.get("initiator") predict_conf["job_parameters"] = train_conf.get("job_parameters", {}) predict_conf["job_parameters"]["common"].update({"model_id": model_id, "model_version": model_version, "job_type": "predict"}) predict_conf["component_parameters"] = {"role": {}} for role in predict_conf["role"]: if role not in ["guest", "host"]: continue reader_components = [] for module_alias, module_info in predict_dsl.get("components", {}).items(): if module_info["module"] == "Reader": reader_components.append(module_alias) predict_conf["component_parameters"]["role"][role] = dict() fill_template = {} for idx, reader_alias in enumerate(reader_components): fill_template[reader_alias] = {"table": {"name": "name_to_be_filled_" + str(idx), "namespace": "namespace_to_be_filled_" + str(idx)}} for idx in range(len(predict_conf["role"][role])): predict_conf["component_parameters"]["role"][role][str(idx)] = fill_template return predict_conf
[ "fate_flow.db.component_registry.ComponentRegistry.get_default_class_path", "fate_flow.db.component_registry.ComponentRegistry.get_provider_components" ]
[((2700, 2797), 'fate_flow.db.component_registry.ComponentRegistry.get_provider_components', 'ComponentRegistry.get_provider_components', (['provider.provider_name', 'provider.provider_version'], {}), '(provider.provider_name, provider.\n provider_version)\n', (2741, 2797), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((3057, 3092), 'copy.deepcopy', 'copy.deepcopy', (["runtime_conf['role']"], {}), "(runtime_conf['role'])\n", (3070, 3092), False, 'import copy\n'), ((14361, 14386), 'copy.deepcopy', 'copy.deepcopy', (['train_conf'], {}), '(train_conf)\n', (14374, 14386), False, 'import copy\n'), ((7069, 7166), 'fate_flow.db.component_registry.ComponentRegistry.get_provider_components', 'ComponentRegistry.get_provider_components', (['provider.provider_name', 'provider.provider_version'], {}), '(provider.provider_name, provider.\n provider_version)\n', (7110, 7166), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((17286, 17315), 'copy.deepcopy', 'copy.deepcopy', (['job_parameters'], {}), '(job_parameters)\n', (17299, 17315), False, 'import copy\n'), ((19596, 19632), 'copy.deepcopy', 'copy.deepcopy', (['common_job_parameters'], {}), '(common_job_parameters)\n', (19609, 19632), False, 'import copy\n'), ((2284, 2381), 'fate_flow.db.component_registry.ComponentRegistry.get_provider_components', 'ComponentRegistry.get_provider_components', (['provider.provider_name', 'provider.provider_version'], {}), '(provider.provider_name, provider.\n provider_version)\n', (2325, 2381), False, 'from fate_flow.db.component_registry import ComponentRegistry\n'), ((18010, 18033), 'copy.deepcopy', 'copy.deepcopy', (['cpn_dict'], {}), '(cpn_dict)\n', (18023, 18033), False, 'import copy\n'), ((19328, 19364), 'copy.deepcopy', 'copy.deepcopy', (['common_job_parameters'], {}), '(common_job_parameters)\n', (19341, 19364), False, 'import copy\n'), ((12774, 12816), 'fate_flow.db.component_registry.ComponentRegistry.get_default_class_path', 'ComponentRegistry.get_default_class_path', ([], {}), '()\n', (12814, 12816), False, 'from fate_flow.db.component_registry import ComponentRegistry\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json import os import sys import time from arch.api import storage from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip from fate_flow.db.db_models import Job from fate_flow.driver.task_executor import TaskExecutor from fate_flow.entity.runtime_config import RuntimeConfig from fate_flow.settings import API_VERSION, schedule_logger from fate_flow.utils import job_utils from fate_flow.utils.api_utils import federated_api from fate_flow.utils.job_utils import query_task, get_job_dsl_parser from fate_flow.entity.constant_config import JobStatus, TaskStatus class TaskScheduler(object): @staticmethod def distribute_job(job, roles, job_initiator): for role, partys in roles.items(): job.f_role = role for party_id in partys: job.f_party_id = party_id if role == job_initiator['role'] and party_id == job_initiator['party_id']: job.f_is_initiator = 1 else: job.f_is_initiator = 0 federated_api(job_id=job.f_job_id, method='POST', endpoint='/{}/schedule/{}/{}/{}/create'.format( API_VERSION, job.f_job_id, role, party_id), src_party_id=job_initiator['party_id'], dest_party_id=party_id, json_body=job.to_json(), work_mode=job.f_work_mode) @staticmethod def run_job(job_id, initiator_role, initiator_party_id): job_dsl, job_runtime_conf, train_runtime_conf = job_utils.get_job_configuration(job_id=job_id, role=initiator_role, party_id=initiator_party_id) job_parameters = job_runtime_conf.get('job_parameters', {}) job_initiator = job_runtime_conf.get('initiator', {}) dag = get_job_dsl_parser(dsl=job_dsl, runtime_conf=job_runtime_conf, train_runtime_conf=train_runtime_conf) job_args = dag.get_args_input() if not job_initiator: return False storage.init_storage(job_id=job_id, work_mode=RuntimeConfig.WORK_MODE) job = Job() job.f_job_id = job_id job.f_start_time = current_timestamp() job.f_status = JobStatus.RUNNING job.f_update_time = current_timestamp() TaskScheduler.sync_job_status(job_id=job_id, roles=job_runtime_conf['role'], work_mode=job_parameters['work_mode'], initiator_party_id=job_initiator['party_id'], job_info=job.to_json()) top_level_task_status = set() components = dag.get_next_components(None) schedule_logger.info( 'job {} root components is {}'.format(job.f_job_id, [component.get_name() for component in components], None)) for component in components: try: # run a component as task run_status = TaskScheduler.run_component(job_id, job_runtime_conf, job_parameters, job_initiator, job_args, dag, component) except Exception as e: schedule_logger.info(e) run_status = False top_level_task_status.add(run_status) if not run_status: break if len(top_level_task_status) == 2: job.f_status = JobStatus.PARTIAL elif True in top_level_task_status: job.f_status = JobStatus.SUCCESS else: job.f_status = JobStatus.FAILED job.f_end_time = current_timestamp() job.f_elapsed = job.f_end_time - job.f_start_time if job.f_status == JobStatus.SUCCESS: job.f_progress = 100 job.f_update_time = 
current_timestamp() TaskScheduler.sync_job_status(job_id=job_id, roles=job_runtime_conf['role'], work_mode=job_parameters['work_mode'], initiator_party_id=job_initiator['party_id'], job_info=job.to_json()) TaskScheduler.finish_job(job_id=job_id, job_runtime_conf=job_runtime_conf) schedule_logger.info('job {} finished, status is {}'.format(job.f_job_id, job.f_status)) @staticmethod def run_component(job_id, job_runtime_conf, job_parameters, job_initiator, job_args, dag, component): parameters = component.get_role_parameters() component_name = component.get_name() module_name = component.get_module() task_id = job_utils.generate_task_id(job_id=job_id, component_name=component_name) schedule_logger.info('job {} run component {}'.format(job_id, component_name)) for role, partys_parameters in parameters.items(): for party_index in range(len(partys_parameters)): party_parameters = partys_parameters[party_index] if role in job_args: party_job_args = job_args[role][party_index]['args'] else: party_job_args = {} dest_party_id = party_parameters.get('local', {}).get('party_id') federated_api(job_id=job_id, method='POST', endpoint='/{}/schedule/{}/{}/{}/{}/{}/run'.format( API_VERSION, job_id, component_name, task_id, role, dest_party_id), src_party_id=job_initiator['party_id'], dest_party_id=dest_party_id, json_body={'job_parameters': job_parameters, 'job_initiator': job_initiator, 'job_args': party_job_args, 'parameters': party_parameters, 'module_name': module_name, 'input': component.get_input(), 'output': component.get_output(), 'job_server': {'ip': get_lan_ip(), 'http_port': RuntimeConfig.HTTP_PORT}}, work_mode=job_parameters['work_mode']) component_task_status = TaskScheduler.check_task_status(job_id=job_id, component=component) if component_task_status: task_success = True else: task_success = False schedule_logger.info( 'job {} component {} run {}'.format(job_id, component_name, 'success' if task_success else 'failed')) # update progress TaskScheduler.sync_job_status(job_id=job_id, roles=job_runtime_conf['role'], work_mode=job_parameters['work_mode'], initiator_party_id=job_initiator['party_id'], job_info=job_utils.update_job_progress(job_id=job_id, dag=dag, current_task_id=task_id).to_json()) if task_success: next_components = dag.get_next_components(component_name) schedule_logger.info('job {} component {} next components is {}'.format(job_id, component_name, [next_component.get_name() for next_component in next_components])) for next_component in next_components: try: schedule_logger.info( 'job {} check component {} dependencies status'.format(job_id, next_component.get_name())) dependencies_status = TaskScheduler.check_dependencies(job_id=job_id, dag=dag, component=next_component) schedule_logger.info( 'job {} component {} dependencies status is {}'.format(job_id, next_component.get_name(), dependencies_status)) if dependencies_status: run_status = TaskScheduler.run_component(job_id, job_runtime_conf, job_parameters, job_initiator, job_args, dag, next_component) else: run_status = False except Exception as e: schedule_logger.info(e) run_status = False if not run_status: return False return True else: return False @staticmethod def check_dependencies(job_id, dag, component): dependencies = dag.get_dependency().get('dependencies', {}) if not dependencies: return False dependent_component_names = dependencies.get(component.get_name(), []) schedule_logger.info('job {} component {} all dependent component: {}'.format(job_id, component.get_name(), dependent_component_names)) for 
dependent_component_name in dependent_component_names: dependent_component = dag.get_component_info(dependent_component_name) dependent_component_task_status = TaskScheduler.check_task_status(job_id, dependent_component) schedule_logger.info('job {} component {} dependency {} status is {}'.format(job_id, component.get_name(), dependent_component_name, dependent_component_task_status)) if not dependent_component_task_status: # dependency component run failed, break return False else: return True @staticmethod def check_task_status(job_id, component, interval=0.25): task_id = job_utils.generate_task_id(job_id=job_id, component_name=component.get_name()) while True: try: status_collect = set() parameters = component.get_role_parameters() for _role, _partys_parameters in parameters.items(): for _party_parameters in _partys_parameters: _party_id = _party_parameters.get('local', {}).get('party_id') tasks = query_task(job_id=job_id, task_id=task_id, role=_role, party_id=_party_id) if tasks: task_status = tasks[0].f_status else: task_status = 'notRunning' schedule_logger.info( 'job {} component {} run on {} {} status is {}'.format(job_id, component.get_name(), _role, _party_id, task_status)) status_collect.add(task_status) if 'failed' in status_collect: return False elif len(status_collect) == 1 and 'success' in status_collect: return True else: time.sleep(interval) except Exception as e: schedule_logger.exception(e) return False @staticmethod def start_task(job_id, component_name, task_id, role, party_id, task_config): schedule_logger.info( 'job {} {} {} {} task subprocess is ready'.format(job_id, component_name, role, party_id, task_config)) task_process_start_status = False try: task_dir = os.path.join(job_utils.get_job_directory(job_id=job_id), role, party_id, component_name) os.makedirs(task_dir, exist_ok=True) task_config_path = os.path.join(task_dir, 'task_config.json') with open(task_config_path, 'w') as fw: json.dump(task_config, fw) process_cmd = [ 'python3', sys.modules[TaskExecutor.__module__].__file__, '-j', job_id, '-n', component_name, '-t', task_id, '-r', role, '-p', party_id, '-c', task_config_path, '--job_server', '{}:{}'.format(task_config['job_server']['ip'], task_config['job_server']['http_port']), ] task_log_dir = os.path.join(job_utils.get_job_log_directory(job_id=job_id), role, party_id, component_name) schedule_logger.info( 'job {} {} {} {} task subprocess start'.format(job_id, component_name, role, party_id, task_config)) p = job_utils.run_subprocess(config_dir=task_dir, process_cmd=process_cmd, log_dir=task_log_dir) if p: task_process_start_status = True except Exception as e: schedule_logger.exception(e) finally: schedule_logger.info( 'job {} component {} on {} {} start task subprocess {}'.format(job_id, component_name, role, party_id, 'success' if task_process_start_status else 'failed')) @staticmethod def sync_job_status(job_id, roles, work_mode, initiator_party_id, job_info): for role, partys in roles.items(): job_info['f_role'] = role for party_id in partys: job_info['f_party_id'] = party_id federated_api(job_id=job_id, method='POST', endpoint='/{}/schedule/{}/{}/{}/status'.format( API_VERSION, job_id, role, party_id), src_party_id=initiator_party_id, dest_party_id=party_id, json_body=job_info, work_mode=work_mode) @staticmethod def finish_job(job_id, job_runtime_conf): job_parameters = job_runtime_conf['job_parameters'] job_initiator = job_runtime_conf['initiator'] model_id_base64 = base64_encode(job_parameters['model_id']) model_version_base64 = 
base64_encode(job_parameters['model_version']) for role, partys in job_runtime_conf['role'].items(): for party_id in partys: # save pipeline federated_api(job_id=job_id, method='POST', endpoint='/{}/schedule/{}/{}/{}/{}/{}/save/pipeline'.format( API_VERSION, job_id, role, party_id, model_id_base64, model_version_base64 ), src_party_id=job_initiator['party_id'], dest_party_id=party_id, json_body={}, work_mode=job_parameters['work_mode']) # clean federated_api(job_id=job_id, method='POST', endpoint='/{}/schedule/{}/{}/{}/clean'.format( API_VERSION, job_id, role, party_id), src_party_id=job_initiator['party_id'], dest_party_id=party_id, json_body={}, work_mode=job_parameters['work_mode']) @staticmethod def stop_job(job_id): schedule_logger.info('get stop job {} command'.format(job_id)) jobs = job_utils.query_job(job_id=job_id, is_initiator=1) if jobs: initiator_job = jobs[0] job_info = {'f_job_id': job_id, 'f_status': JobStatus.FAILED} roles = json_loads(initiator_job.f_roles) job_work_mode = initiator_job.f_work_mode initiator_party_id = initiator_job.f_party_id # set status first TaskScheduler.sync_job_status(job_id=job_id, roles=roles, initiator_party_id=initiator_party_id, work_mode=job_work_mode, job_info=job_info) for role, partys in roles.items(): for party_id in partys: response = federated_api(job_id=job_id, method='POST', endpoint='/{}/schedule/{}/{}/{}/kill'.format( API_VERSION, job_id, role, party_id), src_party_id=initiator_party_id, dest_party_id=party_id, json_body={'job_initiator': {'party_id': initiator_job.f_party_id, 'role': initiator_job.f_role}}, work_mode=job_work_mode) if response['retcode'] == 0: schedule_logger.info( 'send {} {} kill job {} command successfully'.format(role, party_id, job_id)) else: schedule_logger.info( 'send {} {} kill job {} command failed: {}'.format(role, party_id, job_id, response['retmsg'])) else: schedule_logger.info('send stop job {} command failed'.format(job_id)) raise Exception('can not found job: {}'.format(job_id))
[ "fate_flow.db.db_models.Job", "fate_flow.utils.job_utils.get_job_directory", "fate_flow.settings.schedule_logger.exception", "fate_flow.utils.job_utils.run_subprocess", "fate_flow.utils.job_utils.get_job_dsl_parser", "fate_flow.utils.job_utils.query_job", "fate_flow.settings.schedule_logger.info", "fate_flow.utils.job_utils.get_job_configuration", "fate_flow.utils.job_utils.generate_task_id", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.utils.job_utils.update_job_progress", "fate_flow.utils.job_utils.query_task" ]
[((2396, 2496), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role,\n party_id=initiator_party_id)\n', (2427, 2496), False, 'from fate_flow.utils import job_utils\n'), ((2813, 2918), 'fate_flow.utils.job_utils.get_job_dsl_parser', 'get_job_dsl_parser', ([], {'dsl': 'job_dsl', 'runtime_conf': 'job_runtime_conf', 'train_runtime_conf': 'train_runtime_conf'}), '(dsl=job_dsl, runtime_conf=job_runtime_conf,\n train_runtime_conf=train_runtime_conf)\n', (2831, 2918), False, 'from fate_flow.utils.job_utils import query_task, get_job_dsl_parser\n'), ((3084, 3154), 'arch.api.storage.init_storage', 'storage.init_storage', ([], {'job_id': 'job_id', 'work_mode': 'RuntimeConfig.WORK_MODE'}), '(job_id=job_id, work_mode=RuntimeConfig.WORK_MODE)\n', (3104, 3154), False, 'from arch.api import storage\n'), ((3169, 3174), 'fate_flow.db.db_models.Job', 'Job', ([], {}), '()\n', (3172, 3174), False, 'from fate_flow.db.db_models import Job\n'), ((3232, 3251), 'arch.api.utils.core.current_timestamp', 'current_timestamp', ([], {}), '()\n', (3249, 3251), False, 'from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip\n'), ((3321, 3340), 'arch.api.utils.core.current_timestamp', 'current_timestamp', ([], {}), '()\n', (3338, 3340), False, 'from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip\n'), ((4728, 4747), 'arch.api.utils.core.current_timestamp', 'current_timestamp', ([], {}), '()\n', (4745, 4747), False, 'from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip\n'), ((4913, 4932), 'arch.api.utils.core.current_timestamp', 'current_timestamp', ([], {}), '()\n', (4930, 4932), False, 'from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip\n'), ((5670, 5742), 'fate_flow.utils.job_utils.generate_task_id', 'job_utils.generate_task_id', ([], {'job_id': 'job_id', 'component_name': 'component_name'}), '(job_id=job_id, component_name=component_name)\n', (5696, 5742), False, 'from fate_flow.utils import job_utils\n'), ((15968, 16009), 'arch.api.utils.core.base64_encode', 'base64_encode', (["job_parameters['model_id']"], {}), "(job_parameters['model_id'])\n", (15981, 16009), False, 'from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip\n'), ((16041, 16087), 'arch.api.utils.core.base64_encode', 'base64_encode', (["job_parameters['model_version']"], {}), "(job_parameters['model_version'])\n", (16054, 16087), False, 'from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip\n'), ((17681, 17731), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {'job_id': 'job_id', 'is_initiator': '(1)'}), '(job_id=job_id, is_initiator=1)\n', (17700, 17731), False, 'from fate_flow.utils import job_utils\n'), ((13471, 13507), 'os.makedirs', 'os.makedirs', (['task_dir'], {'exist_ok': '(True)'}), '(task_dir, exist_ok=True)\n', (13482, 13507), False, 'import os\n'), ((13539, 13581), 'os.path.join', 'os.path.join', (['task_dir', '"""task_config.json"""'], {}), "(task_dir, 'task_config.json')\n", (13551, 13581), False, 'import os\n'), ((14400, 14496), 'fate_flow.utils.job_utils.run_subprocess', 'job_utils.run_subprocess', ([], {'config_dir': 'task_dir', 'process_cmd': 'process_cmd', 'log_dir': 'task_log_dir'}), '(config_dir=task_dir, process_cmd=process_cmd,\n 
log_dir=task_log_dir)\n', (14424, 14496), False, 'from fate_flow.utils import job_utils\n'), ((17879, 17912), 'arch.api.utils.core.json_loads', 'json_loads', (['initiator_job.f_roles'], {}), '(initiator_job.f_roles)\n', (17889, 17912), False, 'from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip\n'), ((13383, 13425), 'fate_flow.utils.job_utils.get_job_directory', 'job_utils.get_job_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (13410, 13425), False, 'from fate_flow.utils import job_utils\n'), ((13650, 13676), 'json.dump', 'json.dump', (['task_config', 'fw'], {}), '(task_config, fw)\n', (13659, 13676), False, 'import json\n'), ((14153, 14199), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (14184, 14199), False, 'from fate_flow.utils import job_utils\n'), ((14603, 14631), 'fate_flow.settings.schedule_logger.exception', 'schedule_logger.exception', (['e'], {}), '(e)\n', (14628, 14631), False, 'from fate_flow.settings import API_VERSION, schedule_logger\n'), ((4305, 4328), 'fate_flow.settings.schedule_logger.info', 'schedule_logger.info', (['e'], {}), '(e)\n', (4325, 4328), False, 'from fate_flow.settings import API_VERSION, schedule_logger\n'), ((12987, 13015), 'fate_flow.settings.schedule_logger.exception', 'schedule_logger.exception', (['e'], {}), '(e)\n', (13012, 13015), False, 'from fate_flow.settings import API_VERSION, schedule_logger\n'), ((8212, 8290), 'fate_flow.utils.job_utils.update_job_progress', 'job_utils.update_job_progress', ([], {'job_id': 'job_id', 'dag': 'dag', 'current_task_id': 'task_id'}), '(job_id=job_id, dag=dag, current_task_id=task_id)\n', (8241, 8290), False, 'from fate_flow.utils import job_utils\n'), ((10045, 10068), 'fate_flow.settings.schedule_logger.info', 'schedule_logger.info', (['e'], {}), '(e)\n', (10065, 10068), False, 'from fate_flow.settings import API_VERSION, schedule_logger\n'), ((12098, 12172), 'fate_flow.utils.job_utils.query_task', 'query_task', ([], {'job_id': 'job_id', 'task_id': 'task_id', 'role': '_role', 'party_id': '_party_id'}), '(job_id=job_id, task_id=task_id, role=_role, party_id=_party_id)\n', (12108, 12172), False, 'from fate_flow.utils.job_utils import query_task, get_job_dsl_parser\n'), ((12915, 12935), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (12925, 12935), False, 'import time\n'), ((7413, 7425), 'arch.api.utils.core.get_lan_ip', 'get_lan_ip', ([], {}), '()\n', (7423, 7425), False, 'from arch.api.utils.core import current_timestamp, base64_encode, json_loads, get_lan_ip\n')]
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import io import os import tarfile from flask import Flask, request, send_file from arch.api.utils.core import json_loads from fate_flow.driver.job_controller import JobController from fate_flow.driver.task_scheduler import TaskScheduler from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT from fate_flow.utils import job_utils, detect_utils from fate_flow.utils.api_utils import get_json_result, request_execute_server from fate_flow.entity.constant_config import WorkMode from fate_flow.entity.runtime_config import RuntimeConfig manager = Flask(__name__) @manager.errorhandler(500) def internal_server_error(e): stat_logger.exception(e) return get_json_result(retcode=100, retmsg=str(e)) @manager.route('/submit', methods=['POST']) def submit_job(): work_mode = request.json.get('job_runtime_conf', {}).get('job_parameters', {}).get('work_mode', None) detect_utils.check_config({'work_mode': work_mode}, required_arguments=[('work_mode', (WorkMode.CLUSTER, WorkMode.STANDALONE))]) if work_mode == RuntimeConfig.WORK_MODE: job_id, job_dsl_path, job_runtime_conf_path, model_info, board_url = JobController.submit_job(request.json) return get_json_result(job_id=job_id, data={'job_dsl_path': job_dsl_path, 'job_runtime_conf_path': job_runtime_conf_path, 'model_info': model_info, 'board_url': board_url }) else: if RuntimeConfig.WORK_MODE == WorkMode.CLUSTER and work_mode == WorkMode.STANDALONE: # use cluster standalone job server to execute standalone job return request_execute_server(request=request, execute_host='{}:{}'.format(request.remote_addr, CLUSTER_STANDALONE_JOB_SERVER_PORT)) else: raise Exception('server run on standalone can not support cluster mode job') @manager.route('/stop', methods=['POST']) @job_utils.job_server_routing() def stop_job(): TaskScheduler.stop_job(job_id=request.json.get('job_id', '')) return get_json_result(retcode=0, retmsg='success') @manager.route('/query', methods=['POST']) def query_job(): jobs = job_utils.query_job(**request.json) if not jobs: return get_json_result(retcode=101, retmsg='find job failed') return get_json_result(retcode=0, retmsg='success', data=[job.to_json() for job in jobs]) @manager.route('/config', methods=['POST']) def job_config(): jobs = job_utils.query_job(**request.json) if not jobs: return get_json_result(retcode=101, retmsg='find job failed') else: job = jobs[0] response_data = dict() response_data['job_id'] = job.f_job_id response_data['dsl'] = json_loads(job.f_dsl) response_data['runtime_conf'] = json_loads(job.f_runtime_conf) response_data['train_runtime_conf'] = json_loads(job.f_train_runtime_conf) response_data['model_info'] = {'model_id': response_data['runtime_conf']['job_parameters']['model_id'], 'model_version': response_data['runtime_conf']['job_parameters'][ 'model_version']} return get_json_result(retcode=0, retmsg='success', data=response_data) @manager.route('/log', methods=['get']) 
@job_utils.job_server_routing(307) def job_log(): job_id = request.json.get('job_id', '') memory_file = io.BytesIO() tar = tarfile.open(fileobj=memory_file, mode='w:gz') job_log_dir = job_utils.get_job_log_directory(job_id=job_id) for root, dir, files in os.walk(job_log_dir): for file in files: full_path = os.path.join(root, file) rel_path = os.path.relpath(full_path, job_log_dir) tar.add(full_path, rel_path) tar.close() memory_file.seek(0) return send_file(memory_file, attachment_filename='job_{}_log.tar.gz'.format(job_id), as_attachment=True) @manager.route('/task/query', methods=['POST']) def query_task(): tasks = job_utils.query_task(**request.json) if not tasks: return get_json_result(retcode=101, retmsg='find task failed') return get_json_result(retcode=0, retmsg='success', data=[task.to_json() for task in tasks])
[ "fate_flow.utils.job_utils.job_server_routing", "fate_flow.utils.job_utils.get_job_log_directory", "fate_flow.driver.job_controller.JobController.submit_job", "fate_flow.utils.api_utils.get_json_result", "fate_flow.utils.detect_utils.check_config", "fate_flow.settings.stat_logger.exception", "fate_flow.utils.job_utils.query_job", "fate_flow.utils.job_utils.query_task" ]
[((1188, 1203), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1193, 1203), False, 'from flask import Flask, request, send_file\n'), ((2671, 2701), 'fate_flow.utils.job_utils.job_server_routing', 'job_utils.job_server_routing', ([], {}), '()\n', (2699, 2701), False, 'from fate_flow.utils import job_utils, detect_utils\n'), ((4046, 4079), 'fate_flow.utils.job_utils.job_server_routing', 'job_utils.job_server_routing', (['(307)'], {}), '(307)\n', (4074, 4079), False, 'from fate_flow.utils import job_utils, detect_utils\n'), ((1267, 1291), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1288, 1291), False, 'from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT\n'), ((1521, 1654), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (["{'work_mode': work_mode}"], {'required_arguments': "[('work_mode', (WorkMode.CLUSTER, WorkMode.STANDALONE))]"}), "({'work_mode': work_mode}, required_arguments=[(\n 'work_mode', (WorkMode.CLUSTER, WorkMode.STANDALONE))])\n", (1546, 1654), False, 'from fate_flow.utils import job_utils, detect_utils\n'), ((2795, 2839), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""'}), "(retcode=0, retmsg='success')\n", (2810, 2839), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((2913, 2948), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {}), '(**request.json)\n', (2932, 2948), False, 'from fate_flow.utils import job_utils, detect_utils\n'), ((3205, 3240), 'fate_flow.utils.job_utils.query_job', 'job_utils.query_job', ([], {}), '(**request.json)\n', (3224, 3240), False, 'from fate_flow.utils import job_utils, detect_utils\n'), ((4108, 4138), 'flask.request.json.get', 'request.json.get', (['"""job_id"""', '""""""'], {}), "('job_id', '')\n", (4124, 4138), False, 'from flask import Flask, request, send_file\n'), ((4157, 4169), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (4167, 4169), False, 'import io\n'), ((4180, 4226), 'tarfile.open', 'tarfile.open', ([], {'fileobj': 'memory_file', 'mode': '"""w:gz"""'}), "(fileobj=memory_file, mode='w:gz')\n", (4192, 4226), False, 'import tarfile\n'), ((4245, 4291), 'fate_flow.utils.job_utils.get_job_log_directory', 'job_utils.get_job_log_directory', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (4276, 4291), False, 'from fate_flow.utils import job_utils, detect_utils\n'), ((4320, 4340), 'os.walk', 'os.walk', (['job_log_dir'], {}), '(job_log_dir)\n', (4327, 4340), False, 'import os\n'), ((4752, 4788), 'fate_flow.utils.job_utils.query_task', 'job_utils.query_task', ([], {}), '(**request.json)\n', (4772, 4788), False, 'from fate_flow.utils import job_utils, detect_utils\n'), ((1772, 1810), 'fate_flow.driver.job_controller.JobController.submit_job', 'JobController.submit_job', (['request.json'], {}), '(request.json)\n', (1796, 1810), False, 'from fate_flow.driver.job_controller import JobController\n'), ((1826, 1999), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'job_id': 'job_id', 'data': "{'job_dsl_path': job_dsl_path, 'job_runtime_conf_path':\n job_runtime_conf_path, 'model_info': model_info, 'board_url': board_url}"}), "(job_id=job_id, data={'job_dsl_path': job_dsl_path,\n 'job_runtime_conf_path': job_runtime_conf_path, 'model_info':\n model_info, 'board_url': board_url})\n", (1841, 1999), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((2981, 
3035), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find job failed"""'}), "(retcode=101, retmsg='find job failed')\n", (2996, 3035), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((3273, 3327), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find job failed"""'}), "(retcode=101, retmsg='find job failed')\n", (3288, 3327), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((3469, 3490), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_dsl'], {}), '(job.f_dsl)\n', (3479, 3490), False, 'from arch.api.utils.core import json_loads\n'), ((3531, 3561), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_runtime_conf'], {}), '(job.f_runtime_conf)\n', (3541, 3561), False, 'from arch.api.utils.core import json_loads\n'), ((3608, 3644), 'arch.api.utils.core.json_loads', 'json_loads', (['job.f_train_runtime_conf'], {}), '(job.f_train_runtime_conf)\n', (3618, 3644), False, 'from arch.api.utils.core import json_loads\n'), ((3938, 4002), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0)', 'retmsg': '"""success"""', 'data': 'response_data'}), "(retcode=0, retmsg='success', data=response_data)\n", (3953, 4002), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((4822, 4877), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(101)', 'retmsg': '"""find task failed"""'}), "(retcode=101, retmsg='find task failed')\n", (4837, 4877), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((2752, 2782), 'flask.request.json.get', 'request.json.get', (['"""job_id"""', '""""""'], {}), "('job_id', '')\n", (2768, 2782), False, 'from flask import Flask, request, send_file\n'), ((4393, 4417), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (4405, 4417), False, 'import os\n'), ((4441, 4480), 'os.path.relpath', 'os.path.relpath', (['full_path', 'job_log_dir'], {}), '(full_path, job_log_dir)\n', (4456, 4480), False, 'import os\n'), ((1427, 1467), 'flask.request.json.get', 'request.json.get', (['"""job_runtime_conf"""', '{}'], {}), "('job_runtime_conf', {})\n", (1443, 1467), False, 'from flask import Flask, request, send_file\n')]