Dataset Viewer
| code (string, lengths 1.36k–41.2k) | apis (sequence) | extract_api (string, lengths 328–40.6k) |
|---|---|---|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import grpc
import requests
from grpc._cython import cygrpc
from fate_arch.common.base_utils import json_dumps, json_loads
from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.settings import FATEFLOW_SERVICE_NAME, HEADERS, DEFAULT_REMOTE_REQUEST_TIMEOUT
from fate_flow.settings import IP, GRPC_PORT, stat_logger
from fate_flow.utils.proto_compatibility import basic_meta_pb2
from fate_flow.utils.proto_compatibility import proxy_pb2
from fate_flow.utils.proto_compatibility import proxy_pb2_grpc
import time
import sys
from fate_flow.tests.grpc.xthread import ThreadPoolExecutor
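# Wrap an HTTP-style request (method, URL suffix, JSON body) into a proxy Packet routed from src_party_id to dst_party_id.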
def wrap_grpc_packet(json_body, http_method, url, src_party_id, dst_party_id, job_id=None, overall_timeout=DEFAULT_REMOTE_REQUEST_TIMEOUT):
_src_end_point = basic_meta_pb2.Endpoint(ip=IP, port=GRPC_PORT)
_src = proxy_pb2.Topic(name=job_id, partyId="{}".format(src_party_id), role=FATEFLOW_SERVICE_NAME, callback=_src_end_point)
_dst = proxy_pb2.Topic(name=job_id, partyId="{}".format(dst_party_id), role=FATEFLOW_SERVICE_NAME, callback=None)
_task = proxy_pb2.Task(taskId=job_id)
_command = proxy_pb2.Command(name=FATEFLOW_SERVICE_NAME)
_conf = proxy_pb2.Conf(overallTimeout=overall_timeout)
_meta = proxy_pb2.Metadata(src=_src, dst=_dst, task=_task, command=_command, operator=http_method, conf=_conf)
_data = proxy_pb2.Data(key=url, value=bytes(json_dumps(json_body), 'utf-8'))
return proxy_pb2.Packet(header=_meta, body=_data)
def get_url(_suffix):
return "http://{}:{}/{}".format(RuntimeConfig.JOB_SERVER_HOST, RuntimeConfig.HTTP_PORT, _suffix.lstrip('/'))
class UnaryService(proxy_pb2_grpc.DataTransferServiceServicer):
def unaryCall(self, _request, context):
packet = _request
header = packet.header
_suffix = packet.body.key
param_bytes = packet.body.value
param = bytes.decode(param_bytes)
job_id = header.task.taskId
src = header.src
dst = header.dst
method = header.operator
param_dict = json_loads(param)
param_dict['src_party_id'] = str(src.partyId)
source_routing_header = []
for key, value in context.invocation_metadata():
source_routing_header.append((key, value))
stat_logger.info(f"grpc request routing header: {source_routing_header}")
param = bytes.decode(bytes(json_dumps(param_dict), 'utf-8'))
action = getattr(requests, method.lower(), None)
if action:
print(_suffix)
#resp = action(url=get_url(_suffix), data=param, headers=HEADERS)
else:
pass
#resp_json = resp.json()
resp_json = {"status": "test"}
        # time is already imported at module level; simulate a slow backend response for this test
        print("sleep")
        time.sleep(60)
return wrap_grpc_packet(resp_json, method, _suffix, dst.partyId, src.partyId, job_id)
thread_pool_executor = ThreadPoolExecutor(max_workers=5)
print(f"start grpc server pool on {thread_pool_executor._max_workers} max workers")
server = grpc.server(thread_pool_executor,
options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
(cygrpc.ChannelArgKey.max_receive_message_length, -1)])
proxy_pb2_grpc.add_DataTransferServiceServicer_to_server(UnaryService(), server)
server.add_insecure_port("{}:{}".format("127.0.0.1", 7777))
server.start()
try:
while True:
time.sleep(60 * 60 * 24)
except KeyboardInterrupt:
server.stop(0)
sys.exit(0)
|
[
"fate_flow.utils.proto_compatibility.basic_meta_pb2.Endpoint",
"fate_flow.utils.proto_compatibility.proxy_pb2.Conf",
"fate_flow.tests.grpc.xthread.ThreadPoolExecutor",
"fate_flow.utils.proto_compatibility.proxy_pb2.Command",
"fate_flow.settings.stat_logger.info",
"fate_flow.utils.proto_compatibility.proxy_pb2.Packet",
"fate_flow.utils.proto_compatibility.proxy_pb2.Metadata",
"fate_flow.utils.proto_compatibility.proxy_pb2.Task"
] |
[((3486, 3519), 'fate_flow.tests.grpc.xthread.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(5)'}), '(max_workers=5)\n', (3504, 3519), False, 'from fate_flow.tests.grpc.xthread import ThreadPoolExecutor\n'), ((3613, 3773), 'grpc.server', 'grpc.server', (['thread_pool_executor'], {'options': '[(cygrpc.ChannelArgKey.max_send_message_length, -1), (cygrpc.ChannelArgKey.\n max_receive_message_length, -1)]'}), '(thread_pool_executor, options=[(cygrpc.ChannelArgKey.\n max_send_message_length, -1), (cygrpc.ChannelArgKey.\n max_receive_message_length, -1)])\n', (3624, 3773), False, 'import grpc\n'), ((1381, 1427), 'fate_flow.utils.proto_compatibility.basic_meta_pb2.Endpoint', 'basic_meta_pb2.Endpoint', ([], {'ip': 'IP', 'port': 'GRPC_PORT'}), '(ip=IP, port=GRPC_PORT)\n', (1404, 1427), False, 'from fate_flow.utils.proto_compatibility import basic_meta_pb2\n'), ((1686, 1715), 'fate_flow.utils.proto_compatibility.proxy_pb2.Task', 'proxy_pb2.Task', ([], {'taskId': 'job_id'}), '(taskId=job_id)\n', (1700, 1715), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((1731, 1776), 'fate_flow.utils.proto_compatibility.proxy_pb2.Command', 'proxy_pb2.Command', ([], {'name': 'FATEFLOW_SERVICE_NAME'}), '(name=FATEFLOW_SERVICE_NAME)\n', (1748, 1776), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((1789, 1835), 'fate_flow.utils.proto_compatibility.proxy_pb2.Conf', 'proxy_pb2.Conf', ([], {'overallTimeout': 'overall_timeout'}), '(overallTimeout=overall_timeout)\n', (1803, 1835), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((1848, 1954), 'fate_flow.utils.proto_compatibility.proxy_pb2.Metadata', 'proxy_pb2.Metadata', ([], {'src': '_src', 'dst': '_dst', 'task': '_task', 'command': '_command', 'operator': 'http_method', 'conf': '_conf'}), '(src=_src, dst=_dst, task=_task, command=_command,\n operator=http_method, conf=_conf)\n', (1866, 1954), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((2043, 2085), 'fate_flow.utils.proto_compatibility.proxy_pb2.Packet', 'proxy_pb2.Packet', ([], {'header': '_meta', 'body': '_data'}), '(header=_meta, body=_data)\n', (2059, 2085), False, 'from fate_flow.utils.proto_compatibility import proxy_pb2\n'), ((2646, 2663), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['param'], {}), '(param)\n', (2656, 2663), False, 'from fate_arch.common.base_utils import json_dumps, json_loads\n'), ((2873, 2946), 'fate_flow.settings.stat_logger.info', 'stat_logger.info', (['f"""grpc request routing header: {source_routing_header}"""'], {}), "(f'grpc request routing header: {source_routing_header}')\n", (2889, 2946), False, 'from fate_flow.settings import IP, GRPC_PORT, stat_logger\n'), ((3353, 3367), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (3363, 3367), False, 'import time\n'), ((4002, 4026), 'time.sleep', 'time.sleep', (['(60 * 60 * 24)'], {}), '(60 * 60 * 24)\n', (4012, 4026), False, 'import time\n'), ((4076, 4087), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4084, 4087), False, 'import sys\n'), ((1999, 2020), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['json_body'], {}), '(json_body)\n', (2009, 2020), False, 'from fate_arch.common.base_utils import json_dumps, json_loads\n'), ((2983, 3005), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['param_dict'], {}), '(param_dict)\n', (2993, 3005), False, 'from fate_arch.common.base_utils import json_dumps, json_loads\n')]
|
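For context, a minimal client-side sketch of exercising the test servicer above, assuming the generated `DataTransferServiceStub` exists alongside the servicer and that `wrap_grpc_packet` from the same module is in scope; the payload, party ids, URL suffix, and job id are purely illustrative.

```python
# Hedged sketch: call the local test server started above (127.0.0.1:7777).
import grpc

from fate_flow.utils.proto_compatibility import proxy_pb2_grpc

packet = wrap_grpc_packet(
    json_body={"ping": "pong"},       # illustrative body
    http_method="POST",
    url="/v1/version/get",            # illustrative URL suffix
    src_party_id=9999,
    dst_party_id=10000,
    job_id="test_job_id",             # illustrative job id
)

with grpc.insecure_channel("127.0.0.1:7777") as channel:
    stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
    response = stub.unaryCall(packet)
    print(response.body.key, response.body.value)
```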
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, request
from fate_arch.common import conf_utils
from fate_flow.entity.runtime_config import RuntimeConfig
from fate_flow.settings import stat_logger
from fate_flow.utils.api_utils import get_json_result
from fate_flow.utils.service_utils import ServiceUtils
manager = Flask(__name__)
@manager.errorhandler(500)
def internal_server_error(e):
stat_logger.exception(e)
return get_json_result(retcode=100, retmsg=str(e))
@manager.route('/get', methods=['POST'])
def get_fate_version_info():
    module = request.json.get('module', 'FATE')
    version = RuntimeConfig.get_env(module)
    return get_json_result(data={module: version})
@manager.route('/registry', methods=['POST'])
def service_registry():
update_server = ServiceUtils.register_service(request.json)
return get_json_result(data={"update_server": update_server})
@manager.route('/query', methods=['POST'])
def service_query():
service_info = ServiceUtils.get(request.json.get("service_name"))
return get_json_result(data={"service_info": service_info})
|
[
"fate_flow.utils.service_utils.ServiceUtils.register_service",
"fate_flow.settings.stat_logger.exception",
"fate_flow.utils.api_utils.get_json_result"
] |
[((911, 926), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (916, 926), False, 'from flask import Flask, request\n'), ((990, 1014), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1011, 1014), False, 'from fate_flow.settings import stat_logger\n'), ((1377, 1420), 'fate_flow.utils.service_utils.ServiceUtils.register_service', 'ServiceUtils.register_service', (['request.json'], {}), '(request.json)\n', (1406, 1420), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((1432, 1486), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'update_server': update_server}"}), "(data={'update_server': update_server})\n", (1447, 1486), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1634, 1686), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'data': "{'service_info': service_info}"}), "(data={'service_info': service_info})\n", (1649, 1686), False, 'from fate_flow.utils.api_utils import get_json_result\n'), ((1178, 1212), 'flask.request.json.get', 'request.json.get', (['"""module"""', '"""FATE"""'], {}), "('module', 'FATE')\n", (1194, 1212), False, 'from flask import Flask, request\n'), ((1589, 1621), 'flask.request.json.get', 'request.json.get', (['"""service_name"""'], {}), "('service_name')\n", (1605, 1621), False, 'from flask import Flask, request\n'), ((1247, 1273), 'flask.request.json.get', 'request.json.get', (['"""module"""'], {}), "('module')\n", (1263, 1273), False, 'from flask import Flask, request\n')]
|
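A small usage sketch for the endpoints above, assuming the blueprint is mounted on a locally running fate_flow server; the host, port, and `/v1/version` URL prefix are assumptions rather than something defined in this file.

```python
# Hedged sketch: query the version and service-registry endpoints defined above.
import requests

base = "http://127.0.0.1:9380/v1/version"   # host, port and prefix are assumptions

# "/get": return the version of a module (defaults to FATE).
print(requests.post(f"{base}/get", json={"module": "FATE"}).json())

# "/query": look up a registered service by name (service name is illustrative).
print(requests.post(f"{base}/query", json={"service_name": "fateboard"}).json())
```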
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import time
from fate_arch.common import log, file_utils, EngineType
from fate_arch.storage import StorageEngine, EggRollStorageType
from fate_flow.entity.metric import Metric, MetricMeta
from fate_flow.utils import job_utils, data_utils
from fate_flow.scheduling_apps.client import ControllerClient
from fate_arch import storage
LOGGER = log.getLogger()
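# Upload component: read a local file in chunks and write it into the configured storage engine as a FATE table.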
class Upload(object):
def __init__(self):
self.taskid = ''
self.tracker = None
self.MAX_PARTITIONS = 1024
self.MAX_BYTES = 1024*1024*8
self.parameters = {}
self.table = None
def run(self, component_parameters=None, args=None):
self.parameters = component_parameters["UploadParam"]
LOGGER.info(self.parameters)
LOGGER.info(args)
self.parameters["role"] = component_parameters["role"]
self.parameters["local"] = component_parameters["local"]
storage_engine = self.parameters["storage_engine"]
storage_address = self.parameters["storage_address"]
        # if storage is not set, fall back to the job's storage settings
if not storage_engine:
storage_engine = args["job_parameters"].storage_engine
if not storage_address:
storage_address = args["job_parameters"].engines_address[EngineType.STORAGE]
job_id = self.taskid.split("_")[0]
if not os.path.isabs(self.parameters.get("file", "")):
self.parameters["file"] = os.path.join(file_utils.get_project_base_directory(), self.parameters["file"])
if not os.path.exists(self.parameters["file"]):
raise Exception("%s is not exist, please check the configure" % (self.parameters["file"]))
if not os.path.getsize(self.parameters["file"]):
raise Exception("%s is an empty file" % (self.parameters["file"]))
name, namespace = self.parameters.get("name"), self.parameters.get("namespace")
_namespace, _table_name = self.generate_table_name(self.parameters["file"])
if namespace is None:
namespace = _namespace
if name is None:
name = _table_name
read_head = self.parameters['head']
if read_head == 0:
head = False
elif read_head == 1:
head = True
else:
raise Exception("'head' in conf.json should be 0 or 1")
partitions = self.parameters["partition"]
if partitions <= 0 or partitions >= self.MAX_PARTITIONS:
raise Exception("Error number of partition, it should between %d and %d" % (0, self.MAX_PARTITIONS))
with storage.Session.build(session_id=job_utils.generate_session_id(self.tracker.task_id, self.tracker.task_version, self.tracker.role, self.tracker.party_id, suffix="storage", random_end=True), namespace=namespace, name=name) as storage_session:
if self.parameters.get("destroy", False):
table = storage_session.get_table()
if table:
LOGGER.info(f"destroy table name: {name} namespace: {namespace} engine: {table.get_engine()}")
table.destroy()
else:
LOGGER.info(f"can not found table name: {name} namespace: {namespace}, pass destroy")
address_dict = storage_address.copy()
with storage.Session.build(session_id=job_utils.generate_session_id(self.tracker.task_id, self.tracker.task_version, self.tracker.role, self.tracker.party_id, suffix="storage", random_end=True),
storage_engine=storage_engine, options=self.parameters.get("options")) as storage_session:
if storage_engine in {StorageEngine.EGGROLL, StorageEngine.STANDALONE}:
upload_address = {"name": name, "namespace": namespace, "storage_type": EggRollStorageType.ROLLPAIR_LMDB}
elif storage_engine in {StorageEngine.MYSQL}:
upload_address = {"db": namespace, "name": name}
elif storage_engine in {StorageEngine.HDFS}:
upload_address = {"path": data_utils.default_input_fs_path(name=name, namespace=namespace, prefix=address_dict.get("path_prefix"))}
else:
raise RuntimeError(f"can not support this storage engine: {storage_engine}")
address_dict.update(upload_address)
LOGGER.info(f"upload to {storage_engine} storage, address: {address_dict}")
address = storage.StorageTableMeta.create_address(storage_engine=storage_engine, address_dict=address_dict)
self.parameters["partitions"] = partitions
self.parameters["name"] = name
self.table = storage_session.create_table(address=address, **self.parameters)
data_table_count = self.save_data_table(job_id, name, namespace, head)
self.table.get_meta().update_metas(in_serialized=True)
LOGGER.info("------------load data finish!-----------------")
# rm tmp file
try:
if '{}/fate_upload_tmp'.format(job_id) in self.parameters['file']:
LOGGER.info("remove tmp upload file")
shutil.rmtree(os.path.join(self.parameters["file"].split('tmp')[0], 'tmp'))
        except Exception:
LOGGER.info("remove tmp file failed")
LOGGER.info("file: {}".format(self.parameters["file"]))
LOGGER.info("total data_count: {}".format(data_table_count))
LOGGER.info("table name: {}, table namespace: {}".format(name, namespace))
def set_taskid(self, taskid):
self.taskid = taskid
def set_tracker(self, tracker):
self.tracker = tracker
def save_data_table(self, job_id, dst_table_name, dst_table_namespace, head=True):
input_file = self.parameters["file"]
input_feature_count = self.get_count(input_file)
with open(input_file, 'r') as fin:
lines_count = 0
if head is True:
data_head = fin.readline()
input_feature_count -= 1
self.table.get_meta().update_metas(schema=data_utils.get_header_schema(header_line=data_head, id_delimiter=self.parameters["id_delimiter"]))
n = 0
while True:
data = list()
lines = fin.readlines(self.MAX_BYTES)
if lines:
for line in lines:
values = line.rstrip().split(self.parameters["id_delimiter"])
data.append((values[0], data_utils.list_to_str(values[1:], id_delimiter=self.parameters["id_delimiter"])))
lines_count += len(data)
save_progress = lines_count/input_feature_count*100//1
job_info = {'progress': save_progress, "job_id": job_id, "role": self.parameters["local"]['role'], "party_id": self.parameters["local"]['party_id']}
ControllerClient.update_job(job_info=job_info)
self.table.put_all(data)
if n == 0:
self.table.get_meta().update_metas(part_of_data=data)
else:
table_count = self.table.count()
self.table.get_meta().update_metas(count=table_count, partitions=self.parameters["partition"])
self.tracker.log_output_data_info(data_name='upload',
table_namespace=dst_table_namespace,
table_name=dst_table_name)
self.tracker.log_metric_data(metric_namespace="upload",
metric_name="data_access",
metrics=[Metric("count", table_count)])
self.tracker.set_metric_meta(metric_namespace="upload",
metric_name="data_access",
metric_meta=MetricMeta(name='upload', metric_type='UPLOAD'))
return table_count
n += 1
def get_count(self, input_file):
with open(input_file, 'r', encoding='utf-8') as fp:
count = 0
for line in fp:
count += 1
return count
def generate_table_name(self, input_file_path):
str_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
file_name = input_file_path.split(".")[0]
file_name = file_name.split("/")[-1]
return file_name, str_time
def save_data(self):
return None
def export_model(self):
return None
|
[
"fate_flow.utils.data_utils.get_header_schema",
"fate_flow.utils.data_utils.list_to_str",
"fate_flow.entity.metric.MetricMeta",
"fate_flow.utils.job_utils.generate_session_id",
"fate_flow.entity.metric.Metric",
"fate_flow.scheduling_apps.client.ControllerClient.update_job"
] |
[((981, 996), 'fate_arch.common.log.getLogger', 'log.getLogger', ([], {}), '()\n', (994, 996), False, 'from fate_arch.common import log, file_utils, EngineType\n'), ((2170, 2209), 'os.path.exists', 'os.path.exists', (["self.parameters['file']"], {}), "(self.parameters['file'])\n", (2184, 2209), False, 'import os\n'), ((2329, 2369), 'os.path.getsize', 'os.path.getsize', (["self.parameters['file']"], {}), "(self.parameters['file'])\n", (2344, 2369), False, 'import os\n'), ((5046, 5147), 'fate_arch.storage.StorageTableMeta.create_address', 'storage.StorageTableMeta.create_address', ([], {'storage_engine': 'storage_engine', 'address_dict': 'address_dict'}), '(storage_engine=storage_engine,\n address_dict=address_dict)\n', (5085, 5147), False, 'from fate_arch import storage\n'), ((8975, 8991), 'time.localtime', 'time.localtime', ([], {}), '()\n', (8989, 8991), False, 'import time\n'), ((2089, 2128), 'fate_arch.common.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (2126, 2128), False, 'from fate_arch.common import log, file_utils, EngineType\n'), ((3248, 3413), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', (['self.tracker.task_id', 'self.tracker.task_version', 'self.tracker.role', 'self.tracker.party_id'], {'suffix': '"""storage"""', 'random_end': '(True)'}), "(self.tracker.task_id, self.tracker.\n task_version, self.tracker.role, self.tracker.party_id, suffix=\n 'storage', random_end=True)\n", (3277, 3413), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((3960, 4125), 'fate_flow.utils.job_utils.generate_session_id', 'job_utils.generate_session_id', (['self.tracker.task_id', 'self.tracker.task_version', 'self.tracker.role', 'self.tracker.party_id'], {'suffix': '"""storage"""', 'random_end': '(True)'}), "(self.tracker.task_id, self.tracker.\n task_version, self.tracker.role, self.tracker.party_id, suffix=\n 'storage', random_end=True)\n", (3989, 4125), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((7474, 7520), 'fate_flow.scheduling_apps.client.ControllerClient.update_job', 'ControllerClient.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (7501, 7520), False, 'from fate_flow.scheduling_apps.client import ControllerClient\n'), ((6658, 6760), 'fate_flow.utils.data_utils.get_header_schema', 'data_utils.get_header_schema', ([], {'header_line': 'data_head', 'id_delimiter': "self.parameters['id_delimiter']"}), "(header_line=data_head, id_delimiter=self.\n parameters['id_delimiter'])\n", (6686, 6760), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((8566, 8613), 'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""upload"""', 'metric_type': '"""UPLOAD"""'}), "(name='upload', metric_type='UPLOAD')\n", (8576, 8613), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n'), ((7082, 7167), 'fate_flow.utils.data_utils.list_to_str', 'data_utils.list_to_str', (['values[1:]'], {'id_delimiter': "self.parameters['id_delimiter']"}), "(values[1:], id_delimiter=self.parameters['id_delimiter']\n )\n", (7104, 7167), False, 'from fate_flow.utils import job_utils, data_utils\n'), ((8322, 8350), 'fate_flow.entity.metric.Metric', 'Metric', (['"""count"""', 'table_count'], {}), "('count', table_count)\n", (8328, 8350), False, 'from fate_flow.entity.metric import Metric, MetricMeta\n')]
|
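A hedged sketch of the `component_parameters` structure that `Upload.run()` above reads; the keys mirror the lookups made in the code, while the values and file path are illustrative and the exact schema belongs to FATE's upload configuration.

```python
# Hedged sketch: the shape of the parameters consumed by Upload.run() above.
component_parameters = {
    "UploadParam": {
        "file": "examples/data/breast_hetero_guest.csv",  # illustrative path
        "head": 1,                   # 1 -> the first line is a header
        "partition": 4,              # must lie between 0 and MAX_PARTITIONS
        "id_delimiter": ",",
        "namespace": "experiment",   # optional; derived from the file name if omitted
        "name": "breast_hetero_guest",
        "storage_engine": "",        # empty -> fall back to the job's storage engine
        "storage_address": {},       # empty -> fall back to the job's storage address
        "destroy": False,
    },
    "role": {"local": [0]},
    "local": {"role": "local", "party_id": 0},
}

uploader = Upload()
uploader.set_taskid("202101010000000000000_upload_0")  # illustrative task id
# uploader.set_tracker(tracker)  # a Tracker instance is required before calling run()
# uploader.run(component_parameters=component_parameters, args={"job_parameters": job_parameters})
```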
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
from ruamel import yaml
from datetime import datetime
from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel
from fate_flow.pipelined_model import pipelined_model
from fate_arch.common.base_utils import json_loads, json_dumps
from fate_arch.common.file_utils import get_project_base_directory
from fate_flow.settings import stat_logger
from fate_flow.utils import model_utils
from federatedml.protobuf.model_migrate.model_migrate import model_migration
from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter
def gen_model_file_path(model_id, model_version):
return os.path.join(get_project_base_directory(), "model_local_cache", model_id, model_version)
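# Check that the roles section of the local configuration matches the model's runtime configuration in structure and membership.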
def compare_roles(request_conf_roles: dict, run_time_conf_roles: dict):
if request_conf_roles.keys() == run_time_conf_roles.keys():
verify_format = True
verify_equality = True
for key in request_conf_roles.keys():
verify_format = verify_format and (len(request_conf_roles[key]) == len(run_time_conf_roles[key])) and (isinstance(request_conf_roles[key], list))
request_conf_roles_set = set(str(item) for item in request_conf_roles[key])
run_time_conf_roles_set = set(str(item) for item in run_time_conf_roles[key])
verify_equality = verify_equality and (request_conf_roles_set == run_time_conf_roles_set)
if not verify_format:
raise Exception("The structure of roles data of local configuration is different from "
"model runtime configuration's. Migration aborting.")
else:
return verify_equality
raise Exception("The structure of roles data of local configuration is different from "
"model runtime configuration's. Migration aborting.")
def import_from_files(config: dict):
model = pipelined_model.PipelinedModel(model_id=config["model_id"],
model_version=config["model_version"])
if config['force']:
model.force = True
model.unpack_model(config["file"])
def import_from_db(config: dict):
model_path = gen_model_file_path(config["model_id"], config["model_version"])
if config['force']:
os.rename(model_path, model_path + '_backup_{}'.format(datetime.now().strftime('%Y%m%d%H%M')))
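# Rewrite a trained model's role/party information so it can be reused under new party ids.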
def migration(config_data: dict):
try:
party_model_id = model_utils.gen_party_model_id(model_id=config_data["model_id"],
role=config_data["local"]["role"],
party_id=config_data["local"]["party_id"])
model = pipelined_model.PipelinedModel(model_id=party_model_id,
model_version=config_data["model_version"])
if not model.exists():
raise Exception("Can not found {} {} model local cache".format(config_data["model_id"],
config_data["model_version"]))
with DB.connection_context():
if MLModel.get_or_none(MLModel.f_model_version == config_data["unify_model_version"]):
raise Exception("Unify model version {} has been occupied in database. "
"Please choose another unify model version and try again.".format(
config_data["unify_model_version"]))
model_data = model.collect_models(in_bytes=True)
if "pipeline.pipeline:Pipeline" not in model_data:
raise Exception("Can not found pipeline file in model.")
migrate_model = pipelined_model.PipelinedModel(model_id=model_utils.gen_party_model_id(model_id=model_utils.gen_model_id(config_data["migrate_role"]),
role=config_data["local"]["role"],
party_id=config_data["local"]["migrate_party_id"]),
model_version=config_data["unify_model_version"])
# migrate_model.create_pipelined_model()
shutil.copytree(src=model.model_path, dst=migrate_model.model_path)
pipeline = migrate_model.read_component_model('pipeline', 'pipeline')['Pipeline']
        # Use the pipeline model to collect model data and modify the related inner information of the model
train_runtime_conf = json_loads(pipeline.train_runtime_conf)
train_runtime_conf["role"] = config_data["migrate_role"]
train_runtime_conf["initiator"] = config_data["migrate_initiator"]
adapter = JobRuntimeConfigAdapter(train_runtime_conf)
train_runtime_conf = adapter.update_model_id_version(model_id=model_utils.gen_model_id(train_runtime_conf["role"]),
model_version=migrate_model.model_version)
# update pipeline.pb file
pipeline.train_runtime_conf = json_dumps(train_runtime_conf, byte=True)
pipeline.model_id = bytes(adapter.get_common_parameters().to_dict().get("model_id"), "utf-8")
pipeline.model_version = bytes(adapter.get_common_parameters().to_dict().get("model_version"), "utf-8")
if model_utils.compare_version(pipeline.fate_version, '1.5.0') == 'gt':
pipeline.initiator_role = config_data["migrate_initiator"]['role']
pipeline.initiator_party_id = config_data["migrate_initiator"]['party_id']
# save updated pipeline.pb file
migrate_model.save_pipeline(pipeline)
shutil.copyfile(os.path.join(migrate_model.model_path, "pipeline.pb"),
os.path.join(migrate_model.model_path, "variables", "data", "pipeline", "pipeline", "Pipeline"))
# modify proto
with open(os.path.join(migrate_model.model_path, 'define', 'define_meta.yaml'), 'r') as fin:
define_yaml = yaml.safe_load(fin)
for key, value in define_yaml['model_proto'].items():
if key == 'pipeline':
continue
for v in value.keys():
buffer_obj = migrate_model.read_component_model(key, v)
module_name = define_yaml['component_define'].get(key, {}).get('module_name')
modified_buffer = model_migration(model_contents=buffer_obj,
module_name=module_name,
old_guest_list=config_data['role']['guest'],
new_guest_list=config_data['migrate_role']['guest'],
old_host_list=config_data['role']['host'],
new_host_list=config_data['migrate_role']['host'],
old_arbiter_list=config_data.get('role', {}).get('arbiter', None),
new_arbiter_list=config_data.get('migrate_role', {}).get('arbiter', None))
migrate_model.save_component_model(component_name=key, component_module_name=module_name,
model_alias=v, model_buffers=modified_buffer)
archive_path = migrate_model.packaging_model()
shutil.rmtree(os.path.abspath(migrate_model.model_path))
return (0, f"Migrating model successfully. " \
"The configuration of model has been modified automatically. " \
"New model id is: {}, model version is: {}. " \
"Model files can be found at '{}'.".format(adapter.get_common_parameters().to_dict().get("model_id"),
migrate_model.model_version,
os.path.abspath(archive_path)),
{"model_id": migrate_model.model_id,
"model_version": migrate_model.model_version,
"path": os.path.abspath(archive_path)})
except Exception as e:
stat_logger.exception(e)
return 100, str(e), {}
|
[
"fate_flow.db.db_models.MachineLearningModelInfo.get_or_none",
"fate_flow.utils.config_adapter.JobRuntimeConfigAdapter",
"fate_flow.utils.model_utils.compare_version",
"fate_flow.pipelined_model.pipelined_model.PipelinedModel",
"fate_flow.db.db_models.DB.connection_context",
"fate_flow.settings.stat_logger.exception",
"fate_flow.utils.model_utils.gen_party_model_id",
"fate_flow.utils.model_utils.gen_model_id"
] |
[((2493, 2596), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': "config['model_id']", 'model_version': "config['model_version']"}), "(model_id=config['model_id'], model_version=\n config['model_version'])\n", (2523, 2596), False, 'from fate_flow.pipelined_model import pipelined_model\n'), ((1257, 1285), 'fate_arch.common.file_utils.get_project_base_directory', 'get_project_base_directory', ([], {}), '()\n', (1283, 1285), False, 'from fate_arch.common.file_utils import get_project_base_directory\n'), ((3040, 3187), 'fate_flow.utils.model_utils.gen_party_model_id', 'model_utils.gen_party_model_id', ([], {'model_id': "config_data['model_id']", 'role': "config_data['local']['role']", 'party_id': "config_data['local']['party_id']"}), "(model_id=config_data['model_id'], role=\n config_data['local']['role'], party_id=config_data['local']['party_id'])\n", (3070, 3187), False, 'from fate_flow.utils import model_utils\n'), ((3311, 3415), 'fate_flow.pipelined_model.pipelined_model.PipelinedModel', 'pipelined_model.PipelinedModel', ([], {'model_id': 'party_model_id', 'model_version': "config_data['model_version']"}), "(model_id=party_model_id, model_version=\n config_data['model_version'])\n", (3341, 3415), False, 'from fate_flow.pipelined_model import pipelined_model\n'), ((4863, 4930), 'shutil.copytree', 'shutil.copytree', ([], {'src': 'model.model_path', 'dst': 'migrate_model.model_path'}), '(src=model.model_path, dst=migrate_model.model_path)\n', (4878, 4930), False, 'import shutil\n'), ((5151, 5190), 'fate_arch.common.base_utils.json_loads', 'json_loads', (['pipeline.train_runtime_conf'], {}), '(pipeline.train_runtime_conf)\n', (5161, 5190), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((5350, 5393), 'fate_flow.utils.config_adapter.JobRuntimeConfigAdapter', 'JobRuntimeConfigAdapter', (['train_runtime_conf'], {}), '(train_runtime_conf)\n', (5373, 5393), False, 'from fate_flow.utils.config_adapter import JobRuntimeConfigAdapter\n'), ((5695, 5736), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['train_runtime_conf'], {'byte': '(True)'}), '(train_runtime_conf, byte=True)\n', (5705, 5736), False, 'from fate_arch.common.base_utils import json_loads, json_dumps\n'), ((3708, 3731), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (3729, 3731), False, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((3748, 3835), 'fate_flow.db.db_models.MachineLearningModelInfo.get_or_none', 'MLModel.get_or_none', (["(MLModel.f_model_version == config_data['unify_model_version'])"], {}), "(MLModel.f_model_version == config_data[\n 'unify_model_version'])\n", (3767, 3835), True, 'from fate_flow.db.db_models import DB, MachineLearningModelInfo as MLModel\n'), ((5963, 6022), 'fate_flow.utils.model_utils.compare_version', 'model_utils.compare_version', (['pipeline.fate_version', '"""1.5.0"""'], {}), "(pipeline.fate_version, '1.5.0')\n", (5990, 6022), False, 'from fate_flow.utils import model_utils\n'), ((6309, 6362), 'os.path.join', 'os.path.join', (['migrate_model.model_path', '"""pipeline.pb"""'], {}), "(migrate_model.model_path, 'pipeline.pb')\n", (6321, 6362), False, 'import os\n'), ((6388, 6487), 'os.path.join', 'os.path.join', (['migrate_model.model_path', '"""variables"""', '"""data"""', '"""pipeline"""', '"""pipeline"""', '"""Pipeline"""'], {}), "(migrate_model.model_path, 'variables', 'data', 'pipeline',\n 'pipeline', 'Pipeline')\n", 
(6400, 6487), False, 'import os\n'), ((6636, 6655), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['fin'], {}), '(fin)\n', (6650, 6655), False, 'from ruamel import yaml\n'), ((8046, 8087), 'os.path.abspath', 'os.path.abspath', (['migrate_model.model_path'], {}), '(migrate_model.model_path)\n', (8061, 8087), False, 'import os\n'), ((8806, 8830), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (8827, 8830), False, 'from fate_flow.settings import stat_logger\n'), ((5464, 5516), 'fate_flow.utils.model_utils.gen_model_id', 'model_utils.gen_model_id', (["train_runtime_conf['role']"], {}), "(train_runtime_conf['role'])\n", (5488, 5516), False, 'from fate_flow.utils import model_utils\n'), ((6527, 6595), 'os.path.join', 'os.path.join', (['migrate_model.model_path', '"""define"""', '"""define_meta.yaml"""'], {}), "(migrate_model.model_path, 'define', 'define_meta.yaml')\n", (6539, 6595), False, 'import os\n'), ((8565, 8594), 'os.path.abspath', 'os.path.abspath', (['archive_path'], {}), '(archive_path)\n', (8580, 8594), False, 'import os\n'), ((8738, 8767), 'os.path.abspath', 'os.path.abspath', (['archive_path'], {}), '(archive_path)\n', (8753, 8767), False, 'import os\n'), ((4368, 4421), 'fate_flow.utils.model_utils.gen_model_id', 'model_utils.gen_model_id', (["config_data['migrate_role']"], {}), "(config_data['migrate_role'])\n", (4392, 4421), False, 'from fate_flow.utils import model_utils\n'), ((2930, 2944), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2942, 2944), False, 'from datetime import datetime\n')]
|
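A hedged sketch of the `config_data` dict that `migration()` above consumes; each key corresponds to a lookup in the function, but the ids and party numbers are illustrative.

```python
# Hedged sketch: the configuration shape expected by migration() above.
config_data = {
    "model_id": "guest-9999#host-10000#model",        # illustrative model id
    "model_version": "202101010000000000000",
    "unify_model_version": "202102020000000000000",   # new, unused version for the migrated model
    "local": {"role": "guest", "party_id": 9999, "migrate_party_id": 8888},
    "role": {"guest": [9999], "host": [10000], "arbiter": [10000]},
    "migrate_role": {"guest": [8888], "host": [7777], "arbiter": [7777]},
    "migrate_initiator": {"role": "guest", "party_id": 8888},
}

retcode, retmsg, data = migration(config_data)
```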
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from flask import Flask, request
from arch.api.utils import file_utils
from fate_flow.settings import JOB_MODULE_CONF
from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT
from fate_flow.utils.api_utils import get_json_result, request_execute_server
from fate_flow.utils.job_utils import generate_job_id, get_job_directory, new_runtime_conf, run_subprocess
from fate_flow.utils import detect_utils
from fate_flow.entity.constant_config import WorkMode
from fate_flow.entity.runtime_config import RuntimeConfig
manager = Flask(__name__)
@manager.errorhandler(500)
def internal_server_error(e):
stat_logger.exception(e)
return get_json_result(retcode=100, retmsg=str(e))
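# Entry point for upload/download jobs: validate the request, write a runtime conf, and launch the job as a subprocess.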
@manager.route('/<data_func>', methods=['post'])
def download_upload(data_func):
request_config = request.json
_job_id = generate_job_id()
stat_logger.info('generated job_id {}, body {}'.format(_job_id, request_config))
_job_dir = get_job_directory(_job_id)
os.makedirs(_job_dir, exist_ok=True)
module = data_func
required_arguments = ['work_mode', 'namespace', 'table_name']
if module == 'upload':
required_arguments.extend(['file', 'head', 'partition'])
elif module == 'download':
required_arguments.extend(['output_path'])
else:
        raise Exception('can not support this operation: {}'.format(module))
detect_utils.check_config(request_config, required_arguments=required_arguments)
job_work_mode = request_config['work_mode']
# todo: The current code here is redundant with job_app/submit_job, the next version of this function will be implemented by job_app/submit_job
if job_work_mode != RuntimeConfig.WORK_MODE:
if RuntimeConfig.WORK_MODE == WorkMode.CLUSTER and job_work_mode == WorkMode.STANDALONE:
# use cluster standalone job server to execute standalone job
return request_execute_server(request=request, execute_host='{}:{}'.format(request.remote_addr, CLUSTER_STANDALONE_JOB_SERVER_PORT))
else:
            raise Exception('a server running in standalone mode can not execute cluster mode jobs')
if module == "upload":
if not os.path.isabs(request_config['file']):
request_config["file"] = os.path.join(file_utils.get_project_base_directory(), request_config["file"])
try:
conf_file_path = new_runtime_conf(job_dir=_job_dir, method=data_func, module=module,
role=request_config.get('local', {}).get("role"),
party_id=request_config.get('local', {}).get("party_id", ''))
file_utils.dump_json_conf(request_config, conf_file_path)
progs = ["python3",
os.path.join(file_utils.get_project_base_directory(), JOB_MODULE_CONF[module]["module_path"]),
"-j", _job_id,
"-c", conf_file_path
]
try:
p = run_subprocess(config_dir=_job_dir, process_cmd=progs)
except Exception as e:
stat_logger.exception(e)
p = None
return get_json_result(retcode=(0 if p else 101), job_id=_job_id,
data={'table_name': request_config['table_name'],
'namespace': request_config['namespace'], 'pid': p.pid if p else ''})
except Exception as e:
stat_logger.exception(e)
return get_json_result(retcode=-104, retmsg="failed", job_id=_job_id)
|
[
"fate_flow.utils.job_utils.run_subprocess",
"fate_flow.utils.detect_utils.check_config",
"fate_flow.utils.job_utils.generate_job_id",
"fate_flow.settings.stat_logger.exception",
"fate_flow.utils.api_utils.get_json_result",
"fate_flow.utils.job_utils.get_job_directory"
] |
[((1174, 1189), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1179, 1189), False, 'from flask import Flask, request\n'), ((1253, 1277), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (1274, 1277), False, 'from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT\n'), ((1464, 1481), 'fate_flow.utils.job_utils.generate_job_id', 'generate_job_id', ([], {}), '()\n', (1479, 1481), False, 'from fate_flow.utils.job_utils import generate_job_id, get_job_directory, new_runtime_conf, run_subprocess\n'), ((1582, 1608), 'fate_flow.utils.job_utils.get_job_directory', 'get_job_directory', (['_job_id'], {}), '(_job_id)\n', (1599, 1608), False, 'from fate_flow.utils.job_utils import generate_job_id, get_job_directory, new_runtime_conf, run_subprocess\n'), ((1613, 1649), 'os.makedirs', 'os.makedirs', (['_job_dir'], {'exist_ok': '(True)'}), '(_job_dir, exist_ok=True)\n', (1624, 1649), False, 'import os\n'), ((2004, 2089), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['request_config'], {'required_arguments': 'required_arguments'}), '(request_config, required_arguments=required_arguments\n )\n', (2029, 2089), False, 'from fate_flow.utils import detect_utils\n'), ((3252, 3309), 'arch.api.utils.file_utils.dump_json_conf', 'file_utils.dump_json_conf', (['request_config', 'conf_file_path'], {}), '(request_config, conf_file_path)\n', (3277, 3309), False, 'from arch.api.utils import file_utils\n'), ((3727, 3911), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(0 if p else 101)', 'job_id': '_job_id', 'data': "{'table_name': request_config['table_name'], 'namespace': request_config[\n 'namespace'], 'pid': p.pid if p else ''}"}), "(retcode=0 if p else 101, job_id=_job_id, data={'table_name':\n request_config['table_name'], 'namespace': request_config['namespace'],\n 'pid': p.pid if p else ''})\n", (3742, 3911), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((2792, 2829), 'os.path.isabs', 'os.path.isabs', (["request_config['file']"], {}), "(request_config['file'])\n", (2805, 2829), False, 'import os\n'), ((3568, 3622), 'fate_flow.utils.job_utils.run_subprocess', 'run_subprocess', ([], {'config_dir': '_job_dir', 'process_cmd': 'progs'}), '(config_dir=_job_dir, process_cmd=progs)\n', (3582, 3622), False, 'from fate_flow.utils.job_utils import generate_job_id, get_job_directory, new_runtime_conf, run_subprocess\n'), ((4009, 4033), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (4030, 4033), False, 'from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT\n'), ((4049, 4111), 'fate_flow.utils.api_utils.get_json_result', 'get_json_result', ([], {'retcode': '(-104)', 'retmsg': '"""failed"""', 'job_id': '_job_id'}), "(retcode=-104, retmsg='failed', job_id=_job_id)\n", (4064, 4111), False, 'from fate_flow.utils.api_utils import get_json_result, request_execute_server\n'), ((2881, 2920), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (2918, 2920), False, 'from arch.api.utils import file_utils\n'), ((3368, 3407), 'arch.api.utils.file_utils.get_project_base_directory', 'file_utils.get_project_base_directory', ([], {}), '()\n', (3405, 3407), False, 'from arch.api.utils import file_utils\n'), ((3666, 3690), 'fate_flow.settings.stat_logger.exception', 'stat_logger.exception', (['e'], {}), '(e)\n', (3687, 3690), 
False, 'from fate_flow.settings import stat_logger, CLUSTER_STANDALONE_JOB_SERVER_PORT\n')]
|
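A hedged sketch of posting an upload request to the route above; the required arguments follow the `detect_utils.check_config` call in the code, while the host, port, and `/v1/data` prefix are assumptions about where this blueprint is mounted.

```python
# Hedged sketch: submit an upload job to the download_upload route above.
import requests

request_config = {
    "work_mode": 0,                      # standalone
    "namespace": "experiment",
    "table_name": "breast_hetero_guest",
    "file": "examples/data/breast_hetero_guest.csv",  # illustrative path
    "head": 1,
    "partition": 4,
}
resp = requests.post("http://127.0.0.1:9380/v1/data/upload", json=request_config)
print(resp.json())
```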
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common import EngineType
from fate_arch.common import engine_utils
from fate_arch.common.base_utils import json_dumps, current_timestamp
from fate_arch.computing import ComputingEngine
from fate_flow.controller.task_controller import TaskController
from fate_flow.db.job_default_config import JobDefaultConfig
from fate_flow.db.runtime_config import RuntimeConfig
from fate_flow.entity import RunParameters
from fate_flow.entity.run_status import JobStatus, EndStatus
from fate_flow.entity.types import InputSearchType, WorkerName
from fate_flow.manager.provider_manager import ProviderManager
from fate_flow.manager.resource_manager import ResourceManager
from fate_flow.manager.worker_manager import WorkerManager
from fate_flow.operation.job_saver import JobSaver
from fate_flow.operation.job_tracker import Tracker
from fate_flow.protobuf.python import pipeline_pb2
from fate_flow.settings import USE_AUTHENTICATION, USE_DATA_AUTHENTICATION, ENGINES
from fate_flow.utils import job_utils, schedule_utils, data_utils
from fate_flow.utils.authentication_utils import authentication_check
from fate_flow.utils.authentication_utils import data_authentication_check
from fate_flow.utils.log_utils import schedule_logger
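# JobController builds the per-party job configuration, initializes tasks and trackers, and adapts/validates job parameters.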
class JobController(object):
@classmethod
def create_job(cls, job_id, role, party_id, job_info):
# parse job configuration
dsl = job_info['dsl']
runtime_conf = job_info['runtime_conf']
train_runtime_conf = job_info['train_runtime_conf']
if USE_AUTHENTICATION:
authentication_check(src_role=job_info.get('src_role', None), src_party_id=job_info.get('src_party_id', None),
dsl=dsl, runtime_conf=runtime_conf, role=role, party_id=party_id)
dsl_parser = schedule_utils.get_job_dsl_parser(dsl=dsl,
runtime_conf=runtime_conf,
train_runtime_conf=train_runtime_conf)
job_parameters = dsl_parser.get_job_parameters(runtime_conf)
schedule_logger(job_id).info('job parameters:{}'.format(job_parameters))
dest_user = job_parameters.get(role, {}).get(party_id, {}).get('user', '')
user = {}
src_party_id = int(job_info['src_party_id']) if job_info.get('src_party_id') else 0
src_role = job_info.get('src_role', '')
src_user = job_parameters.get(src_role, {}).get(src_party_id, {}).get('user', '') if src_role else ''
for _role, party_id_item in job_parameters.items():
user[_role] = {}
for _party_id, _parameters in party_id_item.items():
user[_role][_party_id] = _parameters.get("user", "")
schedule_logger(job_id).info('job user:{}'.format(user))
if USE_DATA_AUTHENTICATION:
job_args = dsl_parser.get_args_input()
schedule_logger(job_id).info('job args:{}'.format(job_args))
dataset_dict = cls.get_dataset(False, role, party_id, runtime_conf.get("role"), job_args)
dataset_list = []
if dataset_dict.get(role, {}).get(party_id):
for k, v in dataset_dict[role][party_id].items():
dataset_list.append({"namespace": v.split('.')[0], "table_name": v.split('.')[1]})
data_authentication_check(src_role=job_info.get('src_role'), src_party_id=job_info.get('src_party_id'),
src_user=src_user, dest_user=dest_user, dataset_list=dataset_list)
job_parameters = RunParameters(**job_parameters.get(role, {}).get(party_id, {}))
# save new job into db
if role == job_info["initiator_role"] and party_id == job_info["initiator_party_id"]:
is_initiator = True
else:
is_initiator = False
job_info["status"] = JobStatus.READY
job_info["user_id"] = dest_user
job_info["src_user"] = src_user
job_info["user"] = user
# this party configuration
job_info["role"] = role
job_info["party_id"] = party_id
job_info["is_initiator"] = is_initiator
job_info["progress"] = 0
cls.create_job_parameters_on_party(role=role, party_id=party_id, job_parameters=job_parameters)
# update job parameters on party
job_info["runtime_conf_on_party"]["job_parameters"] = job_parameters.to_dict()
JobSaver.create_job(job_info=job_info)
initialized_result, provider_group = cls.initialize_tasks(job_id=job_id, role=role, party_id=party_id, run_on_this_party=True,
initiator_role=job_info["initiator_role"], initiator_party_id=job_info["initiator_party_id"], job_parameters=job_parameters, dsl_parser=dsl_parser)
for provider_key, group_info in provider_group.items():
for cpn in group_info["components"]:
dsl["components"][cpn]["provider"] = provider_key
roles = job_info['roles']
cls.initialize_job_tracker(job_id=job_id, role=role, party_id=party_id,
job_parameters=job_parameters, roles=roles, is_initiator=is_initiator, dsl_parser=dsl_parser)
job_utils.save_job_conf(job_id=job_id,
role=role,
party_id=party_id,
dsl=dsl,
runtime_conf=runtime_conf,
runtime_conf_on_party=job_info["runtime_conf_on_party"],
train_runtime_conf=train_runtime_conf,
pipeline_dsl=None)
return {"components": initialized_result}
@classmethod
def set_federated_mode(cls, job_parameters: RunParameters):
if not job_parameters.federated_mode:
job_parameters.federated_mode = ENGINES["federated_mode"]
@classmethod
def set_engines(cls, job_parameters: RunParameters, engine_type=None):
engines = engine_utils.get_engines()
if not engine_type:
engine_type = {EngineType.COMPUTING, EngineType.FEDERATION, EngineType.STORAGE}
for k in engine_type:
setattr(job_parameters, f"{k}_engine", engines[k])
@classmethod
def create_common_job_parameters(cls, job_id, initiator_role, common_job_parameters: RunParameters):
JobController.set_federated_mode(job_parameters=common_job_parameters)
JobController.set_engines(job_parameters=common_job_parameters, engine_type={EngineType.COMPUTING})
JobController.fill_default_job_parameters(job_id=job_id, job_parameters=common_job_parameters)
JobController.adapt_job_parameters(role=initiator_role, job_parameters=common_job_parameters, create_initiator_baseline=True)
@classmethod
def create_job_parameters_on_party(cls, role, party_id, job_parameters: RunParameters):
JobController.set_engines(job_parameters=job_parameters)
cls.fill_party_specific_parameters(role=role,
party_id=party_id,
job_parameters=job_parameters)
@classmethod
def fill_party_specific_parameters(cls, role, party_id, job_parameters: RunParameters):
cls.adapt_job_parameters(role=role, job_parameters=job_parameters)
engines_info = cls.get_job_engines_address(job_parameters=job_parameters)
cls.check_parameters(job_parameters=job_parameters,
role=role, party_id=party_id, engines_info=engines_info)
@classmethod
def fill_default_job_parameters(cls, job_id, job_parameters: RunParameters):
keys = {"task_parallelism", "auto_retries", "auto_retry_delay", "federated_status_collect_type"}
for key in keys:
if hasattr(job_parameters, key) and getattr(job_parameters, key) is None:
if hasattr(JobDefaultConfig, key):
setattr(job_parameters, key, getattr(JobDefaultConfig, key))
else:
schedule_logger(job_id).warning(f"can not found {key} job parameter default value from job_default_settings")
@classmethod
def adapt_job_parameters(cls, role, job_parameters: RunParameters, create_initiator_baseline=False):
ResourceManager.adapt_engine_parameters(
role=role, job_parameters=job_parameters, create_initiator_baseline=create_initiator_baseline)
if create_initiator_baseline:
if job_parameters.task_parallelism is None:
job_parameters.task_parallelism = JobDefaultConfig.task_parallelism
if job_parameters.federated_status_collect_type is None:
job_parameters.federated_status_collect_type = JobDefaultConfig.federated_status_collect_type
if create_initiator_baseline and not job_parameters.computing_partitions:
job_parameters.computing_partitions = job_parameters.adaptation_parameters[
"task_cores_per_node"] * job_parameters.adaptation_parameters["task_nodes"]
@classmethod
def get_job_engines_address(cls, job_parameters: RunParameters):
engines_info = {}
engine_list = [
(EngineType.COMPUTING, job_parameters.computing_engine),
(EngineType.FEDERATION, job_parameters.federation_engine),
(EngineType.STORAGE, job_parameters.storage_engine)
]
for engine_type, engine_name in engine_list:
engine_info = ResourceManager.get_engine_registration_info(
engine_type=engine_type, engine_name=engine_name)
job_parameters.engines_address[engine_type] = engine_info.f_engine_config if engine_info else {}
engines_info[engine_type] = engine_info
return engines_info
@classmethod
def check_parameters(cls, job_parameters: RunParameters, role, party_id, engines_info):
status, cores_submit, max_cores_per_job = ResourceManager.check_resource_apply(
job_parameters=job_parameters, role=role, party_id=party_id, engines_info=engines_info)
if not status:
msg = ""
msg2 = "default value is fate_flow/settings.py#DEFAULT_TASK_CORES_PER_NODE, refer fate_flow/examples/simple/simple_job_conf.json"
if job_parameters.computing_engine in {ComputingEngine.EGGROLL, ComputingEngine.STANDALONE}:
msg = "please use task_cores job parameters to set request task cores or you can customize it with eggroll_run job parameters"
elif job_parameters.computing_engine in {ComputingEngine.SPARK}:
msg = "please use task_cores job parameters to set request task cores or you can customize it with spark_run job parameters"
raise RuntimeError(
f"max cores per job is {max_cores_per_job} base on (fate_flow/settings#MAX_CORES_PERCENT_PER_JOB * conf/service_conf.yaml#nodes * conf/service_conf.yaml#cores_per_node), expect {cores_submit} cores, {msg}, {msg2}")
@classmethod
def gen_updated_parameters(cls, job_id, initiator_role, initiator_party_id, input_job_parameters, input_component_parameters):
# todo: check can not update job parameters
job_configuration = job_utils.get_job_configuration(job_id=job_id,
role=initiator_role,
party_id=initiator_party_id)
updated_job_parameters = job_configuration.runtime_conf["job_parameters"]
updated_component_parameters = job_configuration.runtime_conf["component_parameters"]
if input_job_parameters:
if input_job_parameters.get("common"):
common_job_parameters = RunParameters(**input_job_parameters["common"])
cls.create_common_job_parameters(job_id=job_id, initiator_role=initiator_role, common_job_parameters=common_job_parameters)
for attr in {"model_id", "model_version"}:
setattr(common_job_parameters, attr, updated_job_parameters["common"].get(attr))
updated_job_parameters["common"] = common_job_parameters.to_dict()
# not support role
updated_components = set()
if input_component_parameters:
cls.merge_update(input_component_parameters, updated_component_parameters)
return updated_job_parameters, updated_component_parameters, list(updated_components)
@classmethod
def merge_update(cls, inputs: dict, results: dict):
if not isinstance(inputs, dict) or not isinstance(results, dict):
raise ValueError(f"must both dict, but {type(inputs)} inputs and {type(results)} results")
for k, v in inputs.items():
if k not in results:
results[k] = v
elif isinstance(v, dict):
cls.merge_update(v, results[k])
else:
results[k] = v
@classmethod
def update_parameter(cls, job_id, role, party_id, updated_parameters: dict):
job_configuration = job_utils.get_job_configuration(job_id=job_id,
role=role,
party_id=party_id)
job_parameters = updated_parameters.get("job_parameters")
component_parameters = updated_parameters.get("component_parameters")
if job_parameters:
job_configuration.runtime_conf["job_parameters"] = job_parameters
job_parameters = RunParameters(**job_parameters["common"])
cls.create_job_parameters_on_party(role=role,
party_id=party_id,
job_parameters=job_parameters)
job_configuration.runtime_conf_on_party["job_parameters"] = job_parameters.to_dict()
if component_parameters:
job_configuration.runtime_conf["component_parameters"] = component_parameters
job_configuration.runtime_conf_on_party["component_parameters"] = component_parameters
job_info = {}
job_info["job_id"] = job_id
job_info["role"] = role
job_info["party_id"] = party_id
job_info["runtime_conf"] = job_configuration.runtime_conf
job_info["runtime_conf_on_party"] = job_configuration.runtime_conf_on_party
JobSaver.update_job(job_info)
@classmethod
def initialize_task(cls, role, party_id, task_info: dict):
task_info["role"] = role
task_info["party_id"] = party_id
initialized_result, provider_group = cls.initialize_tasks(components=[task_info["component_name"]], **task_info)
return initialized_result
@classmethod
def initialize_tasks(cls, job_id, role, party_id, run_on_this_party, initiator_role, initiator_party_id, job_parameters: RunParameters = None, dsl_parser=None, components: list = None, **kwargs):
common_task_info = {}
common_task_info["job_id"] = job_id
common_task_info["initiator_role"] = initiator_role
common_task_info["initiator_party_id"] = initiator_party_id
common_task_info["role"] = role
common_task_info["party_id"] = party_id
common_task_info["run_on_this_party"] = run_on_this_party
common_task_info["federated_mode"] = kwargs.get("federated_mode", job_parameters.federated_mode if job_parameters else None)
common_task_info["federated_status_collect_type"] = kwargs.get("federated_status_collect_type", job_parameters.federated_status_collect_type if job_parameters else None)
common_task_info["auto_retries"] = kwargs.get("auto_retries", job_parameters.auto_retries if job_parameters else None)
common_task_info["auto_retry_delay"] = kwargs.get("auto_retry_delay", job_parameters.auto_retry_delay if job_parameters else None)
common_task_info["task_version"] = kwargs.get("task_version")
if dsl_parser is None:
dsl_parser = schedule_utils.get_job_dsl_parser_by_job_id(job_id)
provider_group = ProviderManager.get_job_provider_group(dsl_parser=dsl_parser,
components=components)
initialized_result = {}
for group_key, group_info in provider_group.items():
initialized_config = {}
initialized_config.update(group_info)
initialized_config["common_task_info"] = common_task_info
if run_on_this_party:
code, _result = WorkerManager.start_general_worker(worker_name=WorkerName.TASK_INITIALIZER,
job_id=job_id,
role=role,
party_id=party_id,
initialized_config=initialized_config,
run_in_subprocess=False if initialized_config["if_default_provider"] else True)
initialized_result.update(_result)
else:
cls.initialize_task_holder_for_scheduling(role=role,
party_id=party_id,
components=initialized_config["components"],
common_task_info=common_task_info,
provider_info=initialized_config["provider"])
return initialized_result, provider_group
@classmethod
def initialize_task_holder_for_scheduling(cls, role, party_id, components, common_task_info, provider_info):
for component_name in components:
task_info = {}
task_info.update(common_task_info)
task_info["component_name"] = component_name
task_info["component_module"] = ""
task_info["provider_info"] = provider_info
task_info["component_parameters"] = {}
TaskController.create_task(role=role, party_id=party_id,
run_on_this_party=common_task_info["run_on_this_party"],
task_info=task_info)
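    # Create the job tracker, record partner/role information and input datasets, and log the job view.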
@classmethod
def initialize_job_tracker(cls, job_id, role, party_id, job_parameters: RunParameters, roles, is_initiator, dsl_parser):
tracker = Tracker(job_id=job_id, role=role, party_id=party_id,
model_id=job_parameters.model_id,
model_version=job_parameters.model_version,
job_parameters=job_parameters)
if job_parameters.job_type != "predict":
tracker.init_pipeline_model()
partner = {}
show_role = {}
for _role, _role_party in roles.items():
if is_initiator or _role == role:
show_role[_role] = show_role.get(_role, [])
for _party_id in _role_party:
if is_initiator or _party_id == party_id:
show_role[_role].append(_party_id)
if _role != role:
partner[_role] = partner.get(_role, [])
partner[_role].extend(_role_party)
else:
for _party_id in _role_party:
if _party_id != party_id:
partner[_role] = partner.get(_role, [])
partner[_role].append(_party_id)
job_args = dsl_parser.get_args_input()
dataset = cls.get_dataset(
is_initiator, role, party_id, roles, job_args)
tracker.log_job_view(
{'partner': partner, 'dataset': dataset, 'roles': show_role})
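    # Resolve the input dataset of each role/party from the job args; supports both DSL v1 and v2 layouts.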
@classmethod
def get_dataset(cls, is_initiator, role, party_id, roles, job_args):
dataset = {}
dsl_version = 1
if job_args.get('dsl_version'):
if job_args.get('dsl_version') == 2:
dsl_version = 2
for _role, _role_party_args in job_args.items():
if _role == "dsl_version":
continue
if is_initiator or _role == role:
for _party_index in range(len(_role_party_args)):
_party_id = roles[_role][_party_index]
if is_initiator or _party_id == party_id:
dataset[_role] = dataset.get(_role, {})
dataset[_role][_party_id] = dataset[_role].get(
_party_id, {})
if dsl_version == 1:
for _data_type, _data_location in _role_party_args[_party_index]['args']['data'].items():
dataset[_role][_party_id][_data_type] = '{}.{}'.format(
_data_location['namespace'], _data_location['name'])
else:
for key in _role_party_args[_party_index].keys():
for _data_type, _data_location in _role_party_args[_party_index][key].items():
search_type = data_utils.get_input_search_type(parameters=_data_location)
if search_type is InputSearchType.TABLE_INFO:
dataset[_role][_party_id][key] = '{}.{}'.format(_data_location['namespace'], _data_location['name'])
elif search_type is InputSearchType.JOB_COMPONENT_OUTPUT:
dataset[_role][_party_id][key] = '{}.{}.{}'.format(_data_location['job_id'], _data_location['component_name'], _data_location['data_name'])
else:
dataset[_role][_party_id][key] = "unknown"
return dataset
@classmethod
def query_job_input_args(cls, input_data, role, party_id):
min_partition = data_utils.get_input_data_min_partitions(
input_data, role, party_id)
return {'min_input_data_partition': min_partition}
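    # Mark the job as running on this party: set status, start time and any extra info, then persist the update.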
@classmethod
def start_job(cls, job_id, role, party_id, extra_info=None):
schedule_logger(job_id).info(
f"try to start job on {role} {party_id}")
job_info = {
"job_id": job_id,
"role": role,
"party_id": party_id,
"status": JobStatus.RUNNING,
"start_time": current_timestamp()
}
if extra_info:
schedule_logger(job_id).info(f"extra info: {extra_info}")
job_info.update(extra_info)
cls.update_job_status(job_info=job_info)
cls.update_job(job_info=job_info)
schedule_logger(job_id).info(
f"start job on {role} {party_id} successfully")
@classmethod
def update_job(cls, job_info):
"""
Save to local database
:param job_info:
:return:
"""
return JobSaver.update_job(job_info=job_info)
@classmethod
def update_job_status(cls, job_info):
update_status = JobSaver.update_job_status(job_info=job_info)
if update_status and EndStatus.contains(job_info.get("status")):
ResourceManager.return_job_resource(
job_id=job_info["job_id"], role=job_info["role"], party_id=job_info["party_id"])
return update_status
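    # Stop all jobs matching the job id (optionally filtered by role/party) and aggregate the per-job kill results.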
@classmethod
def stop_jobs(cls, job_id, stop_status, role=None, party_id=None):
if role and party_id:
jobs = JobSaver.query_job(
job_id=job_id, role=role, party_id=party_id)
else:
jobs = JobSaver.query_job(job_id=job_id)
kill_status = True
kill_details = {}
for job in jobs:
kill_job_status, kill_job_details = cls.stop_job(
job=job, stop_status=stop_status)
kill_status = kill_status & kill_job_status
kill_details[job_id] = kill_job_details
return kill_status, kill_details
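    # Stop every task of a job in reverse order; update the job status only if all tasks were stopped successfully.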
@classmethod
def stop_job(cls, job, stop_status):
tasks = JobSaver.query_task(
job_id=job.f_job_id, role=job.f_role, party_id=job.f_party_id, reverse=True)
kill_status = True
kill_details = {}
for task in tasks:
kill_task_status = TaskController.stop_task(
task=task, stop_status=stop_status)
kill_status = kill_status & kill_task_status
kill_details[task.f_task_id] = 'success' if kill_task_status else 'failed'
if kill_status:
job_info = job.to_human_model_dict(only_primary_with=["status"])
job_info["status"] = stop_status
JobController.update_job_status(job_info)
return kill_status, kill_details
    # Job status depends on the final operation result and the initiator's calculation
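    # Build a pipeline protobuf from the job dsl/conf and save it as the pipelined model; skipped for predict jobs.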
@classmethod
def save_pipelined_model(cls, job_id, role, party_id):
schedule_logger(job_id).info(f"start to save pipeline model on {role} {party_id}")
job_configuration = job_utils.get_job_configuration(job_id=job_id, role=role,
party_id=party_id)
runtime_conf_on_party = job_configuration.runtime_conf_on_party
job_parameters = runtime_conf_on_party.get('job_parameters', {})
if role in job_parameters.get("assistant_role", []):
return
model_id = job_parameters['model_id']
model_version = job_parameters['model_version']
job_type = job_parameters.get('job_type', '')
roles = runtime_conf_on_party['role']
initiator_role = runtime_conf_on_party['initiator']['role']
initiator_party_id = runtime_conf_on_party['initiator']['party_id']
if job_type == 'predict':
return
dsl_parser = schedule_utils.get_job_dsl_parser(dsl=job_configuration.dsl,
runtime_conf=job_configuration.runtime_conf,
train_runtime_conf=job_configuration.train_runtime_conf)
components_parameters = {}
tasks = JobSaver.query_task(job_id=job_id, role=role, party_id=party_id, only_latest=True)
for task in tasks:
components_parameters[task.f_component_name] = task.f_component_parameters
predict_dsl = schedule_utils.fill_inference_dsl(dsl_parser, origin_inference_dsl=job_configuration.dsl, components_parameters=components_parameters)
pipeline = pipeline_pb2.Pipeline()
pipeline.inference_dsl = json_dumps(predict_dsl, byte=True)
pipeline.train_dsl = json_dumps(job_configuration.dsl, byte=True)
pipeline.train_runtime_conf = json_dumps(job_configuration.runtime_conf, byte=True)
pipeline.fate_version = RuntimeConfig.get_env("FATE")
pipeline.model_id = model_id
pipeline.model_version = model_version
pipeline.parent = True
pipeline.loaded_times = 0
pipeline.roles = json_dumps(roles, byte=True)
pipeline.initiator_role = initiator_role
pipeline.initiator_party_id = initiator_party_id
pipeline.runtime_conf_on_party = json_dumps(
runtime_conf_on_party, byte=True)
pipeline.parent_info = json_dumps({}, byte=True)
tracker = Tracker(job_id=job_id, role=role, party_id=party_id,
model_id=model_id, model_version=model_version, job_parameters=RunParameters(**job_parameters))
tracker.save_pipeline_model(pipeline_buffer_object=pipeline)
if role != 'local':
tracker.save_machine_learning_model_info()
schedule_logger(job_id).info(f"save pipeline on {role} {party_id} successfully")
@classmethod
def clean_job(cls, job_id, role, party_id, roles):
schedule_logger(job_id).info(f"start to clean job on {role} {party_id}")
# todo
schedule_logger(job_id).info(f"job on {role} {party_id} clean done")
|
[
"fate_flow.operation.job_saver.JobSaver.update_job_status",
"fate_flow.utils.data_utils.get_input_data_min_partitions",
"fate_flow.manager.resource_manager.ResourceManager.adapt_engine_parameters",
"fate_flow.manager.resource_manager.ResourceManager.get_engine_registration_info",
"fate_flow.utils.schedule_utils.get_job_dsl_parser_by_job_id",
"fate_flow.manager.resource_manager.ResourceManager.return_job_resource",
"fate_flow.controller.task_controller.TaskController.create_task",
"fate_flow.manager.provider_manager.ProviderManager.get_job_provider_group",
"fate_flow.utils.data_utils.get_input_search_type",
"fate_flow.protobuf.python.pipeline_pb2.Pipeline",
"fate_flow.utils.schedule_utils.fill_inference_dsl",
"fate_flow.utils.schedule_utils.get_job_dsl_parser",
"fate_flow.operation.job_tracker.Tracker",
"fate_flow.utils.job_utils.save_job_conf",
"fate_flow.entity.RunParameters",
"fate_flow.controller.task_controller.TaskController.stop_task",
"fate_flow.operation.job_saver.JobSaver.create_job",
"fate_flow.operation.job_saver.JobSaver.update_job",
"fate_flow.utils.job_utils.get_job_configuration",
"fate_flow.operation.job_saver.JobSaver.query_task",
"fate_flow.operation.job_saver.JobSaver.query_job",
"fate_flow.utils.log_utils.schedule_logger",
"fate_flow.manager.resource_manager.ResourceManager.check_resource_apply",
"fate_flow.db.runtime_config.RuntimeConfig.get_env",
"fate_flow.manager.worker_manager.WorkerManager.start_general_worker"
] |
[((2404, 2516), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'dsl', 'runtime_conf': 'runtime_conf', 'train_runtime_conf': 'train_runtime_conf'}), '(dsl=dsl, runtime_conf=runtime_conf,\n train_runtime_conf=train_runtime_conf)\n', (2437, 2516), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((5030, 5068), 'fate_flow.operation.job_saver.JobSaver.create_job', 'JobSaver.create_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (5049, 5068), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((5850, 6083), 'fate_flow.utils.job_utils.save_job_conf', 'job_utils.save_job_conf', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'dsl': 'dsl', 'runtime_conf': 'runtime_conf', 'runtime_conf_on_party': "job_info['runtime_conf_on_party']", 'train_runtime_conf': 'train_runtime_conf', 'pipeline_dsl': 'None'}), "(job_id=job_id, role=role, party_id=party_id, dsl=\n dsl, runtime_conf=runtime_conf, runtime_conf_on_party=job_info[\n 'runtime_conf_on_party'], train_runtime_conf=train_runtime_conf,\n pipeline_dsl=None)\n", (5873, 6083), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((6653, 6679), 'fate_arch.common.engine_utils.get_engines', 'engine_utils.get_engines', ([], {}), '()\n', (6677, 6679), False, 'from fate_arch.common import engine_utils\n'), ((8948, 9087), 'fate_flow.manager.resource_manager.ResourceManager.adapt_engine_parameters', 'ResourceManager.adapt_engine_parameters', ([], {'role': 'role', 'job_parameters': 'job_parameters', 'create_initiator_baseline': 'create_initiator_baseline'}), '(role=role, job_parameters=\n job_parameters, create_initiator_baseline=create_initiator_baseline)\n', (8987, 9087), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((10606, 10735), 'fate_flow.manager.resource_manager.ResourceManager.check_resource_apply', 'ResourceManager.check_resource_apply', ([], {'job_parameters': 'job_parameters', 'role': 'role', 'party_id': 'party_id', 'engines_info': 'engines_info'}), '(job_parameters=job_parameters, role=\n role, party_id=party_id, engines_info=engines_info)\n', (10642, 10735), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((11888, 11988), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'initiator_role', 'party_id': 'initiator_party_id'}), '(job_id=job_id, role=initiator_role,\n party_id=initiator_party_id)\n', (11919, 11988), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((13735, 13811), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (13766, 13811), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((15062, 15091), 'fate_flow.operation.job_saver.JobSaver.update_job', 'JobSaver.update_job', (['job_info'], {}), '(job_info)\n', (15081, 15091), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((16756, 16845), 'fate_flow.manager.provider_manager.ProviderManager.get_job_provider_group', 'ProviderManager.get_job_provider_group', ([], {'dsl_parser': 'dsl_parser', 'components': 'components'}), '(dsl_parser=dsl_parser, components=\n components)\n', (16794, 16845), False, 'from fate_flow.manager.provider_manager import ProviderManager\n'), ((19203, 19373), 
'fate_flow.operation.job_tracker.Tracker', 'Tracker', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'model_id': 'job_parameters.model_id', 'model_version': 'job_parameters.model_version', 'job_parameters': 'job_parameters'}), '(job_id=job_id, role=role, party_id=party_id, model_id=\n job_parameters.model_id, model_version=job_parameters.model_version,\n job_parameters=job_parameters)\n', (19210, 19373), False, 'from fate_flow.operation.job_tracker import Tracker\n'), ((22724, 22792), 'fate_flow.utils.data_utils.get_input_data_min_partitions', 'data_utils.get_input_data_min_partitions', (['input_data', 'role', 'party_id'], {}), '(input_data, role, party_id)\n', (22764, 22792), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((23735, 23773), 'fate_flow.operation.job_saver.JobSaver.update_job', 'JobSaver.update_job', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (23754, 23773), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((23858, 23903), 'fate_flow.operation.job_saver.JobSaver.update_job_status', 'JobSaver.update_job_status', ([], {'job_info': 'job_info'}), '(job_info=job_info)\n', (23884, 23903), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((24852, 24953), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'job_id': 'job.f_job_id', 'role': 'job.f_role', 'party_id': 'job.f_party_id', 'reverse': '(True)'}), '(job_id=job.f_job_id, role=job.f_role, party_id=job.\n f_party_id, reverse=True)\n', (24871, 24953), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((25815, 25891), 'fate_flow.utils.job_utils.get_job_configuration', 'job_utils.get_job_configuration', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (25846, 25891), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((26597, 26769), 'fate_flow.utils.schedule_utils.get_job_dsl_parser', 'schedule_utils.get_job_dsl_parser', ([], {'dsl': 'job_configuration.dsl', 'runtime_conf': 'job_configuration.runtime_conf', 'train_runtime_conf': 'job_configuration.train_runtime_conf'}), '(dsl=job_configuration.dsl, runtime_conf=\n job_configuration.runtime_conf, train_runtime_conf=job_configuration.\n train_runtime_conf)\n', (26630, 26769), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((26922, 27008), 'fate_flow.operation.job_saver.JobSaver.query_task', 'JobSaver.query_task', ([], {'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'only_latest': '(True)'}), '(job_id=job_id, role=role, party_id=party_id,\n only_latest=True)\n', (26941, 27008), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((27141, 27280), 'fate_flow.utils.schedule_utils.fill_inference_dsl', 'schedule_utils.fill_inference_dsl', (['dsl_parser'], {'origin_inference_dsl': 'job_configuration.dsl', 'components_parameters': 'components_parameters'}), '(dsl_parser, origin_inference_dsl=\n job_configuration.dsl, components_parameters=components_parameters)\n', (27174, 27280), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((27296, 27319), 'fate_flow.protobuf.python.pipeline_pb2.Pipeline', 'pipeline_pb2.Pipeline', ([], {}), '()\n', (27317, 27319), False, 'from fate_flow.protobuf.python import pipeline_pb2\n'), ((27353, 27387), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['predict_dsl'], {'byte': '(True)'}), '(predict_dsl, byte=True)\n', (27363, 27387), 
False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((27417, 27461), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['job_configuration.dsl'], {'byte': '(True)'}), '(job_configuration.dsl, byte=True)\n', (27427, 27461), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((27500, 27553), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['job_configuration.runtime_conf'], {'byte': '(True)'}), '(job_configuration.runtime_conf, byte=True)\n', (27510, 27553), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((27586, 27615), 'fate_flow.db.runtime_config.RuntimeConfig.get_env', 'RuntimeConfig.get_env', (['"""FATE"""'], {}), "('FATE')\n", (27607, 27615), False, 'from fate_flow.db.runtime_config import RuntimeConfig\n'), ((27791, 27819), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['roles'], {'byte': '(True)'}), '(roles, byte=True)\n', (27801, 27819), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((27967, 28011), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['runtime_conf_on_party'], {'byte': '(True)'}), '(runtime_conf_on_party, byte=True)\n', (27977, 28011), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((28056, 28081), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['{}'], {'byte': '(True)'}), '({}, byte=True)\n', (28066, 28081), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((10145, 10243), 'fate_flow.manager.resource_manager.ResourceManager.get_engine_registration_info', 'ResourceManager.get_engine_registration_info', ([], {'engine_type': 'engine_type', 'engine_name': 'engine_name'}), '(engine_type=engine_type,\n engine_name=engine_name)\n', (10189, 10243), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((14210, 14251), 'fate_flow.entity.RunParameters', 'RunParameters', ([], {}), "(**job_parameters['common'])\n", (14223, 14251), False, 'from fate_flow.entity import RunParameters\n'), ((16679, 16730), 'fate_flow.utils.schedule_utils.get_job_dsl_parser_by_job_id', 'schedule_utils.get_job_dsl_parser_by_job_id', (['job_id'], {}), '(job_id)\n', (16722, 16730), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n'), ((18829, 18968), 'fate_flow.controller.task_controller.TaskController.create_task', 'TaskController.create_task', ([], {'role': 'role', 'party_id': 'party_id', 'run_on_this_party': "common_task_info['run_on_this_party']", 'task_info': 'task_info'}), "(role=role, party_id=party_id, run_on_this_party=\n common_task_info['run_on_this_party'], task_info=task_info)\n", (18855, 18968), False, 'from fate_flow.controller.task_controller import TaskController\n'), ((23218, 23237), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (23235, 23237), False, 'from fate_arch.common.base_utils import json_dumps, current_timestamp\n'), ((23989, 24110), 'fate_flow.manager.resource_manager.ResourceManager.return_job_resource', 'ResourceManager.return_job_resource', ([], {'job_id': "job_info['job_id']", 'role': "job_info['role']", 'party_id': "job_info['party_id']"}), "(job_id=job_info['job_id'], role=\n job_info['role'], party_id=job_info['party_id'])\n", (24024, 24110), False, 'from fate_flow.manager.resource_manager import ResourceManager\n'), ((24290, 24353), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id', 
'role': 'role', 'party_id': 'party_id'}), '(job_id=job_id, role=role, party_id=party_id)\n', (24308, 24353), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((24404, 24437), 'fate_flow.operation.job_saver.JobSaver.query_job', 'JobSaver.query_job', ([], {'job_id': 'job_id'}), '(job_id=job_id)\n', (24422, 24437), False, 'from fate_flow.operation.job_saver import JobSaver\n'), ((25073, 25133), 'fate_flow.controller.task_controller.TaskController.stop_task', 'TaskController.stop_task', ([], {'task': 'task', 'stop_status': 'stop_status'}), '(task=task, stop_status=stop_status)\n', (25097, 25133), False, 'from fate_flow.controller.task_controller import TaskController\n'), ((2700, 2723), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (2715, 2723), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3355, 3378), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (3370, 3378), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((12405, 12452), 'fate_flow.entity.RunParameters', 'RunParameters', ([], {}), "(**input_job_parameters['common'])\n", (12418, 12452), False, 'from fate_flow.entity import RunParameters\n'), ((17220, 17473), 'fate_flow.manager.worker_manager.WorkerManager.start_general_worker', 'WorkerManager.start_general_worker', ([], {'worker_name': 'WorkerName.TASK_INITIALIZER', 'job_id': 'job_id', 'role': 'role', 'party_id': 'party_id', 'initialized_config': 'initialized_config', 'run_in_subprocess': "(False if initialized_config['if_default_provider'] else True)"}), "(worker_name=WorkerName.TASK_INITIALIZER,\n job_id=job_id, role=role, party_id=party_id, initialized_config=\n initialized_config, run_in_subprocess=False if initialized_config[\n 'if_default_provider'] else True)\n", (17254, 17473), False, 'from fate_flow.manager.worker_manager import WorkerManager\n'), ((22956, 22979), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (22971, 22979), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((23480, 23503), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (23495, 23503), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((25704, 25727), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (25719, 25727), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((28243, 28274), 'fate_flow.entity.RunParameters', 'RunParameters', ([], {}), '(**job_parameters)\n', (28256, 28274), False, 'from fate_flow.entity import RunParameters\n'), ((28436, 28459), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (28451, 28459), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((28598, 28621), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (28613, 28621), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((28694, 28717), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (28709, 28717), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3511, 3534), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (3526, 3534), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((23283, 23306), 'fate_flow.utils.log_utils.schedule_logger', 
'schedule_logger', (['job_id'], {}), '(job_id)\n', (23298, 23306), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((8707, 8730), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (8722, 8730), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((21914, 21973), 'fate_flow.utils.data_utils.get_input_search_type', 'data_utils.get_input_search_type', ([], {'parameters': '_data_location'}), '(parameters=_data_location)\n', (21946, 21973), False, 'from fate_flow.utils import job_utils, schedule_utils, data_utils\n')]
|
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import errno
import os
import sys
import threading
import typing
from fate_arch.common import file_utils, FederatedMode
from fate_arch.common.base_utils import json_dumps, fate_uuid, current_timestamp
from fate_flow.utils.log_utils import schedule_logger
from fate_flow.db.db_models import DB, Job, Task
from fate_flow.entity import JobConfiguration
from fate_flow.entity.run_status import JobStatus, TaskStatus
from fate_flow.entity import RunParameters
from fate_flow.db.job_default_config import JobDefaultConfig
from fate_flow.settings import FATE_BOARD_DASHBOARD_ENDPOINT
from fate_flow.db.service_registry import ServiceRegistry
from fate_flow.utils import detect_utils, process_utils
from fate_flow.utils import session_utils
from fate_flow.utils.base_utils import get_fate_flow_directory
class JobIdGenerator(object):
_lock = threading.RLock()
def __init__(self, initial_value=0):
self._value = initial_value
self._pre_timestamp = None
self._max = 99999
def next_id(self):
'''
generate next job id with locking
'''
        # todo: generated job ids may be duplicated when multiple instances are deployed
now = datetime.datetime.now()
with JobIdGenerator._lock:
if self._pre_timestamp == now:
if self._value < self._max:
self._value += 1
else:
now += datetime.timedelta(microseconds=1)
self._pre_timestamp = now
self._value = 0
else:
self._pre_timestamp = now
self._value = 0
return "{}{}".format(now.strftime("%Y%m%d%H%M%S%f"), self._value)
job_id_generator = JobIdGenerator()
def generate_job_id():
return job_id_generator.next_id()
def generate_task_id(job_id, component_name):
return '{}_{}'.format(job_id, component_name)
def generate_task_version_id(task_id, task_version):
return "{}_{}".format(task_id, task_version)
def generate_session_id(task_id, task_version, role, party_id, suffix=None, random_end=False):
items = [task_id, str(task_version), role, str(party_id)]
if suffix:
items.append(suffix)
if random_end:
items.append(fate_uuid())
return "_".join(items)
def generate_task_input_data_namespace(task_id, task_version, role, party_id):
return "input_data_{}".format(generate_session_id(task_id=task_id,
task_version=task_version,
role=role,
party_id=party_id))
def get_job_directory(job_id, *args):
return os.path.join(get_fate_flow_directory(), 'jobs', job_id, *args)
def get_job_log_directory(job_id, *args):
return os.path.join(get_fate_flow_directory(), 'logs', job_id, *args)
def get_task_directory(job_id, role, party_id, component_name, task_id, task_version, **kwargs):
return get_job_directory(job_id, role, party_id, component_name, task_id, task_version)
def get_general_worker_directory(worker_name, worker_id, *args):
return os.path.join(get_fate_flow_directory(), worker_name, worker_id, *args)
def get_general_worker_log_directory(worker_name, worker_id, *args):
return os.path.join(get_fate_flow_directory(), 'logs', worker_name, worker_id, *args)
def check_config(config: typing.Dict, required_parameters: typing.List):
for parameter in required_parameters:
if parameter not in config:
return False, 'configuration no {} parameter'.format(parameter)
else:
return True, 'ok'
def check_job_runtime_conf(runtime_conf: typing.Dict):
detect_utils.check_config(runtime_conf, ['initiator', 'role'])
detect_utils.check_config(runtime_conf['initiator'], ['role', 'party_id'])
# deal party id
runtime_conf['initiator']['party_id'] = int(runtime_conf['initiator']['party_id'])
for r in runtime_conf['role'].keys():
for i in range(len(runtime_conf['role'][r])):
runtime_conf['role'][r][i] = int(runtime_conf['role'][r][i])
def runtime_conf_basic(if_local=False):
job_runtime_conf = {
"dsl_version": 2,
"initiator": {},
"job_parameters": {
"common": {
"federated_mode": FederatedMode.SINGLE
},
},
"role": {},
"component_parameters": {}
}
if if_local:
job_runtime_conf["initiator"]["role"] = "local"
job_runtime_conf["initiator"]["party_id"] = 0
job_runtime_conf["role"]["local"] = [0]
return job_runtime_conf
def new_runtime_conf(job_dir, method, module, role, party_id):
if role:
conf_path_dir = os.path.join(job_dir, method, module, role, str(party_id))
else:
conf_path_dir = os.path.join(job_dir, method, module, str(party_id))
os.makedirs(conf_path_dir, exist_ok=True)
return os.path.join(conf_path_dir, 'runtime_conf.json')
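# Write the job dsl and conf files under the job directory and return their paths.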
def save_job_conf(job_id, role, party_id, dsl, runtime_conf, runtime_conf_on_party, train_runtime_conf, pipeline_dsl=None):
path_dict = get_job_conf_path(job_id=job_id, role=role, party_id=party_id)
dump_job_conf(path_dict=path_dict,
dsl=dsl,
runtime_conf=runtime_conf,
runtime_conf_on_party=runtime_conf_on_party,
train_runtime_conf=train_runtime_conf,
pipeline_dsl=pipeline_dsl)
return path_dict
def save_task_using_job_conf(task: Task):
task_dir = get_task_directory(job_id=task.f_job_id,
role=task.f_role,
party_id=task.f_party_id,
component_name=task.f_component_name,
task_id=task.f_task_id,
task_version=str(task.f_task_version))
return save_using_job_conf(task.f_job_id, task.f_role, task.f_party_id, config_dir=task_dir)
def save_using_job_conf(job_id, role, party_id, config_dir):
path_dict = get_job_conf_path(job_id=job_id, role=role, party_id=party_id, specified_dir=config_dir)
job_configuration = get_job_configuration(job_id=job_id,
role=role,
party_id=party_id)
dump_job_conf(path_dict=path_dict,
dsl=job_configuration.dsl,
runtime_conf=job_configuration.runtime_conf,
runtime_conf_on_party=job_configuration.runtime_conf_on_party,
train_runtime_conf=job_configuration.train_runtime_conf,
pipeline_dsl=None)
return path_dict
def dump_job_conf(path_dict, dsl, runtime_conf, runtime_conf_on_party, train_runtime_conf, pipeline_dsl=None):
os.makedirs(os.path.dirname(path_dict.get('dsl_path')), exist_ok=True)
os.makedirs(os.path.dirname(path_dict.get('runtime_conf_on_party_path')), exist_ok=True)
for data, conf_path in [(dsl, path_dict['dsl_path']),
(runtime_conf, path_dict['runtime_conf_path']),
(runtime_conf_on_party, path_dict['runtime_conf_on_party_path']),
(train_runtime_conf, path_dict['train_runtime_conf_path']),
(pipeline_dsl, path_dict['pipeline_dsl_path'])]:
with open(conf_path, 'w+') as f:
f.truncate()
if not data:
data = {}
f.write(json_dumps(data, indent=4))
f.flush()
return path_dict
@DB.connection_context()
def get_job_configuration(job_id, role, party_id) -> JobConfiguration:
jobs = Job.select(Job.f_dsl, Job.f_runtime_conf, Job.f_train_runtime_conf, Job.f_runtime_conf_on_party).where(Job.f_job_id == job_id,
Job.f_role == role,
Job.f_party_id == party_id)
if jobs:
job = jobs[0]
return JobConfiguration(**job.to_human_model_dict())
def get_task_using_job_conf(task_info: dict):
task_dir = get_task_directory(**task_info)
return read_job_conf(task_info["job_id"], task_info["role"], task_info["party_id"], task_dir)
def read_job_conf(job_id, role, party_id, specified_dir=None):
path_dict = get_job_conf_path(job_id=job_id, role=role, party_id=party_id, specified_dir=specified_dir)
conf_dict = {}
for key, path in path_dict.items():
config = file_utils.load_json_conf(path)
conf_dict[key.rstrip("_path")] = config
return JobConfiguration(**conf_dict)
def get_job_conf_path(job_id, role, party_id, specified_dir=None):
conf_dir = get_job_directory(job_id) if not specified_dir else specified_dir
job_dsl_path = os.path.join(conf_dir, 'job_dsl.json')
job_runtime_conf_path = os.path.join(conf_dir, 'job_runtime_conf.json')
if not specified_dir:
job_runtime_conf_on_party_path = os.path.join(conf_dir, role, str(party_id), 'job_runtime_on_party_conf.json')
else:
job_runtime_conf_on_party_path = os.path.join(conf_dir, 'job_runtime_on_party_conf.json')
train_runtime_conf_path = os.path.join(conf_dir, 'train_runtime_conf.json')
pipeline_dsl_path = os.path.join(conf_dir, 'pipeline_dsl.json')
return {'dsl_path': job_dsl_path,
'runtime_conf_path': job_runtime_conf_path,
'runtime_conf_on_party_path': job_runtime_conf_on_party_path,
'train_runtime_conf_path': train_runtime_conf_path,
'pipeline_dsl_path': pipeline_dsl_path}
@DB.connection_context()
def get_upload_job_configuration_summary(upload_tasks: typing.List[Task]):
jobs_run_conf = {}
for task in upload_tasks:
jobs = Job.select(Job.f_job_id, Job.f_runtime_conf_on_party, Job.f_description).where(Job.f_job_id == task.f_job_id)
job = jobs[0]
jobs_run_conf[job.f_job_id] = job.f_runtime_conf_on_party["component_parameters"]["role"]["local"]["0"]["upload_0"]
jobs_run_conf[job.f_job_id]["notes"] = job.f_description
return jobs_run_conf
@DB.connection_context()
def get_job_parameters(job_id, role, party_id):
jobs = Job.select(Job.f_runtime_conf_on_party).where(Job.f_job_id == job_id,
Job.f_role == role,
Job.f_party_id == party_id)
if jobs:
job = jobs[0]
return job.f_runtime_conf_on_party.get("job_parameters")
else:
return {}
@DB.connection_context()
def get_job_dsl(job_id, role, party_id):
jobs = Job.select(Job.f_dsl).where(Job.f_job_id == job_id,
Job.f_role == role,
Job.f_party_id == party_id)
if jobs:
job = jobs[0]
return job.f_dsl
else:
return {}
def job_pipeline_component_name():
return "pipeline"
def job_pipeline_component_module_name():
return "Pipeline"
@DB.connection_context()
def list_job(limit):
if limit > 0:
jobs = Job.select().order_by(Job.f_create_time.desc()).limit(limit)
else:
jobs = Job.select().order_by(Job.f_create_time.desc())
return [job for job in jobs]
@DB.connection_context()
def list_task(limit):
if limit > 0:
tasks = Task.select().order_by(Task.f_create_time.desc()).limit(limit)
else:
tasks = Task.select().order_by(Task.f_create_time.desc())
return [task for task in tasks]
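# Check whether a process with the given pid exists by sending it signal 0.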
def check_job_process(pid):
if pid < 0:
return False
if pid == 0:
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
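# Compare the time elapsed since job creation against its configured (or default) timeout.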
def check_job_is_timeout(job: Job):
job_parameters = job.f_runtime_conf_on_party["job_parameters"]
timeout = job_parameters.get("timeout", JobDefaultConfig.job_timeout)
now_time = current_timestamp()
running_time = (now_time - job.f_create_time)/1000
if running_time > timeout:
schedule_logger(job.f_job_id).info(f'run time {running_time}s timeout')
return True
else:
return False
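# Launch a subprocess that stops or kills the computing/federation/storage sessions of a finished task.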
def start_session_stop(task):
job_parameters = RunParameters(**get_job_parameters(job_id=task.f_job_id, role=task.f_role, party_id=task.f_party_id))
session_manager_id = generate_session_id(task.f_task_id, task.f_task_version, task.f_role, task.f_party_id)
if task.f_status != TaskStatus.WAITING:
schedule_logger(task.f_job_id).info(f'start run subprocess to stop task sessions {session_manager_id}')
else:
schedule_logger(task.f_job_id).info(f'task is waiting, pass stop sessions {session_manager_id}')
return
task_dir = os.path.join(get_job_directory(job_id=task.f_job_id), task.f_role,
task.f_party_id, task.f_component_name, 'session_stop')
os.makedirs(task_dir, exist_ok=True)
process_cmd = [
sys.executable or 'python3',
sys.modules[session_utils.SessionStop.__module__].__file__,
'--session', session_manager_id,
'--computing', job_parameters.computing_engine,
'--federation', job_parameters.federation_engine,
'--storage', job_parameters.storage_engine,
'-c', 'stop' if task.f_status == JobStatus.SUCCESS else 'kill'
]
p = process_utils.run_subprocess(job_id=task.f_job_id, config_dir=task_dir, process_cmd=process_cmd)
p.wait()
p.poll()
def get_timeout(job_id, timeout, runtime_conf, dsl):
try:
if timeout > 0:
schedule_logger(job_id).info(f'setting job timeout {timeout}')
return timeout
else:
default_timeout = job_default_timeout(runtime_conf, dsl)
            schedule_logger(job_id).info(f'setting job timeout {timeout} is not a positive number, using the default timeout {default_timeout}')
return default_timeout
    except Exception:
default_timeout = job_default_timeout(runtime_conf, dsl)
schedule_logger(job_id).info(f'setting job timeout {timeout} is incorrect, using the default timeout {default_timeout}')
return default_timeout
def job_default_timeout(runtime_conf, dsl):
    # to be improved in future versions
timeout = JobDefaultConfig.job_timeout
return timeout
def get_board_url(job_id, role, party_id):
board_url = "http://{}:{}{}".format(
ServiceRegistry.FATEBOARD.get("host"),
ServiceRegistry.FATEBOARD.get("port"),
FATE_BOARD_DASHBOARD_ENDPOINT).format(job_id, role, party_id)
return board_url
|
[
"fate_flow.utils.base_utils.get_fate_flow_directory",
"fate_flow.db.db_models.DB.connection_context",
"fate_flow.db.db_models.Job.select",
"fate_flow.db.db_models.Job.f_create_time.desc",
"fate_flow.db.db_models.Task.select",
"fate_flow.entity.JobConfiguration",
"fate_flow.db.service_registry.ServiceRegistry.FATEBOARD.get",
"fate_flow.utils.log_utils.schedule_logger",
"fate_flow.db.db_models.Task.f_create_time.desc",
"fate_flow.utils.detect_utils.check_config",
"fate_flow.utils.process_utils.run_subprocess"
] |
[((8246, 8269), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (8267, 8269), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((10386, 10409), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (10407, 10409), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((10902, 10925), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (10923, 10925), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11348, 11371), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (11369, 11371), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11818, 11841), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (11839, 11841), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12066, 12089), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (12087, 12089), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((1470, 1487), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1485, 1487), False, 'import threading\n'), ((4348, 4410), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (['runtime_conf', "['initiator', 'role']"], {}), "(runtime_conf, ['initiator', 'role'])\n", (4373, 4410), False, 'from fate_flow.utils import detect_utils, process_utils\n'), ((4415, 4489), 'fate_flow.utils.detect_utils.check_config', 'detect_utils.check_config', (["runtime_conf['initiator']", "['role', 'party_id']"], {}), "(runtime_conf['initiator'], ['role', 'party_id'])\n", (4440, 4489), False, 'from fate_flow.utils import detect_utils, process_utils\n'), ((5533, 5574), 'os.makedirs', 'os.makedirs', (['conf_path_dir'], {'exist_ok': '(True)'}), '(conf_path_dir, exist_ok=True)\n', (5544, 5574), False, 'import os\n'), ((5586, 5634), 'os.path.join', 'os.path.join', (['conf_path_dir', '"""runtime_conf.json"""'], {}), "(conf_path_dir, 'runtime_conf.json')\n", (5598, 5634), False, 'import os\n'), ((9384, 9413), 'fate_flow.entity.JobConfiguration', 'JobConfiguration', ([], {}), '(**conf_dict)\n', (9400, 9413), False, 'from fate_flow.entity import JobConfiguration\n'), ((9583, 9621), 'os.path.join', 'os.path.join', (['conf_dir', '"""job_dsl.json"""'], {}), "(conf_dir, 'job_dsl.json')\n", (9595, 9621), False, 'import os\n'), ((9650, 9697), 'os.path.join', 'os.path.join', (['conf_dir', '"""job_runtime_conf.json"""'], {}), "(conf_dir, 'job_runtime_conf.json')\n", (9662, 9697), False, 'import os\n'), ((9981, 10030), 'os.path.join', 'os.path.join', (['conf_dir', '"""train_runtime_conf.json"""'], {}), "(conf_dir, 'train_runtime_conf.json')\n", (9993, 10030), False, 'import os\n'), ((10055, 10098), 'os.path.join', 'os.path.join', (['conf_dir', '"""pipeline_dsl.json"""'], {}), "(conf_dir, 'pipeline_dsl.json')\n", (10067, 10098), False, 'import os\n'), ((13100, 13119), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (13117, 13119), False, 'from fate_arch.common.base_utils import json_dumps, fate_uuid, current_timestamp\n'), ((14060, 14096), 'os.makedirs', 'os.makedirs', (['task_dir'], {'exist_ok': '(True)'}), '(task_dir, exist_ok=True)\n', (14071, 14096), False, 'import os\n'), ((14514, 14614), 'fate_flow.utils.process_utils.run_subprocess', 'process_utils.run_subprocess', ([], {'job_id': 'task.f_job_id', 'config_dir': 'task_dir', 'process_cmd': 'process_cmd'}), 
'(job_id=task.f_job_id, config_dir=task_dir,\n process_cmd=process_cmd)\n', (14542, 14614), False, 'from fate_flow.utils import detect_utils, process_utils\n'), ((1812, 1835), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1833, 1835), False, 'import datetime\n'), ((3353, 3378), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3376, 3378), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((3471, 3496), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3494, 3496), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((3803, 3828), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3826, 3828), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((3956, 3981), 'fate_flow.utils.base_utils.get_fate_flow_directory', 'get_fate_flow_directory', ([], {}), '()\n', (3979, 3981), False, 'from fate_flow.utils.base_utils import get_fate_flow_directory\n'), ((9293, 9324), 'fate_arch.common.file_utils.load_json_conf', 'file_utils.load_json_conf', (['path'], {}), '(path)\n', (9318, 9324), False, 'from fate_arch.common import file_utils, FederatedMode\n'), ((9894, 9950), 'os.path.join', 'os.path.join', (['conf_dir', '"""job_runtime_on_party_conf.json"""'], {}), "(conf_dir, 'job_runtime_on_party_conf.json')\n", (9906, 9950), False, 'import os\n'), ((12464, 12479), 'os.kill', 'os.kill', (['pid', '(0)'], {}), '(pid, 0)\n', (12471, 12479), False, 'import os\n'), ((2877, 2888), 'fate_arch.common.base_utils.fate_uuid', 'fate_uuid', ([], {}), '()\n', (2886, 2888), False, 'from fate_arch.common.base_utils import json_dumps, fate_uuid, current_timestamp\n'), ((8352, 8453), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_dsl', 'Job.f_runtime_conf', 'Job.f_train_runtime_conf', 'Job.f_runtime_conf_on_party'], {}), '(Job.f_dsl, Job.f_runtime_conf, Job.f_train_runtime_conf, Job.\n f_runtime_conf_on_party)\n', (8362, 8453), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((10985, 11024), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_runtime_conf_on_party'], {}), '(Job.f_runtime_conf_on_party)\n', (10995, 11024), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11424, 11445), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_dsl'], {}), '(Job.f_dsl)\n', (11434, 11445), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12004, 12028), 'fate_flow.db.db_models.Job.f_create_time.desc', 'Job.f_create_time.desc', ([], {}), '()\n', (12026, 12028), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12258, 12283), 'fate_flow.db.db_models.Task.f_create_time.desc', 'Task.f_create_time.desc', ([], {}), '()\n', (12281, 12283), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((8172, 8198), 'fate_arch.common.base_utils.json_dumps', 'json_dumps', (['data'], {'indent': '(4)'}), '(data, indent=4)\n', (8182, 8198), False, 'from fate_arch.common.base_utils import json_dumps, fate_uuid, current_timestamp\n'), ((10553, 10625), 'fate_flow.db.db_models.Job.select', 'Job.select', (['Job.f_job_id', 'Job.f_runtime_conf_on_party', 'Job.f_description'], {}), '(Job.f_job_id, Job.f_runtime_conf_on_party, Job.f_description)\n', (10563, 10625), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((11982, 11994), 'fate_flow.db.db_models.Job.select', 'Job.select', ([], {}), '()\n', (11992, 11994), 
False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12235, 12248), 'fate_flow.db.db_models.Task.select', 'Task.select', ([], {}), '()\n', (12246, 12248), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((13214, 13243), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job.f_job_id'], {}), '(job.f_job_id)\n', (13229, 13243), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((13656, 13686), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (13671, 13686), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((13778, 13808), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['task.f_job_id'], {}), '(task.f_job_id)\n', (13793, 13808), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((15561, 15598), 'fate_flow.db.service_registry.ServiceRegistry.FATEBOARD.get', 'ServiceRegistry.FATEBOARD.get', (['"""host"""'], {}), "('host')\n", (15590, 15598), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((15608, 15645), 'fate_flow.db.service_registry.ServiceRegistry.FATEBOARD.get', 'ServiceRegistry.FATEBOARD.get', (['"""port"""'], {}), "('port')\n", (15637, 15645), False, 'from fate_flow.db.service_registry import ServiceRegistry\n'), ((2044, 2078), 'datetime.timedelta', 'datetime.timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (2062, 2078), False, 'import datetime\n'), ((11918, 11942), 'fate_flow.db.db_models.Job.f_create_time.desc', 'Job.f_create_time.desc', ([], {}), '()\n', (11940, 11942), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12169, 12194), 'fate_flow.db.db_models.Task.f_create_time.desc', 'Task.f_create_time.desc', ([], {}), '()\n', (12192, 12194), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((14737, 14760), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14752, 14760), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((14922, 14945), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (14937, 14945), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((15172, 15195), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['job_id'], {}), '(job_id)\n', (15187, 15195), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((11896, 11908), 'fate_flow.db.db_models.Job.select', 'Job.select', ([], {}), '()\n', (11906, 11908), False, 'from fate_flow.db.db_models import DB, Job, Task\n'), ((12146, 12159), 'fate_flow.db.db_models.Task.select', 'Task.select', ([], {}), '()\n', (12157, 12159), False, 'from fate_flow.db.db_models import DB, Job, Task\n')]
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import requests
from arch.api.utils import file_utils
from flask import jsonify
from flask import Response
from arch.api.utils.log_utils import audit_logger
from fate_flow.entity.constant_config import WorkMode
from fate_flow.settings import DEFAULT_GRPC_OVERALL_TIMEOUT, CHECK_NODES_IDENTITY,\
FATE_MANAGER_GET_NODE_INFO_ENDPOINT, HEADERS, SERVER_CONF_PATH, SERVERS
from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet
from fate_flow.utils.service_utils import ServiceUtils
from fate_flow.entity.runtime_config import RuntimeConfig
def get_json_result(retcode=0, retmsg='success', data=None, job_id=None, meta=None):
result_dict = {"retcode": retcode, "retmsg": retmsg, "data": data, "jobId": job_id, "meta": meta}
response = {}
for key, value in result_dict.items():
if not value and key != "retcode":
continue
else:
response[key] = value
return jsonify(response)
def error_response(response_code, retmsg):
return Response(json.dumps({'retmsg': retmsg, 'retcode': response_code}), status=response_code, mimetype='application/json')
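# Route a federated request: use the local API for party 0 or standalone mode, otherwise send it through the proxy via grpc.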
def federated_api(job_id, method, endpoint, src_party_id, dest_party_id, src_role, json_body, work_mode,
overall_timeout=DEFAULT_GRPC_OVERALL_TIMEOUT):
if int(dest_party_id) == 0:
return local_api(job_id=job_id, method=method, endpoint=endpoint, json_body=json_body)
if work_mode == WorkMode.STANDALONE:
return local_api(job_id=job_id, method=method, endpoint=endpoint, json_body=json_body)
elif work_mode == WorkMode.CLUSTER:
return remote_api(job_id=job_id, method=method, endpoint=endpoint, src_party_id=src_party_id, src_role=src_role,
dest_party_id=dest_party_id, json_body=json_body, overall_timeout=overall_timeout)
else:
raise Exception('{} work mode is not supported'.format(work_mode))
def remote_api(job_id, method, endpoint, src_party_id, dest_party_id, src_role, json_body,
overall_timeout=DEFAULT_GRPC_OVERALL_TIMEOUT):
json_body['src_role'] = src_role
if CHECK_NODES_IDENTITY:
get_node_identity(json_body, src_party_id)
_packet = wrap_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id,
overall_timeout=overall_timeout)
try:
channel, stub = get_proxy_data_channel()
_return = stub.unaryCall(_packet)
audit_logger(job_id).info("grpc api response: {}".format(_return))
channel.close()
json_body = json.loads(_return.body.value)
return json_body
except Exception as e:
tips = ''
if 'Error received from peer' in str(e):
tips = 'Please check if the fate flow server of the other party is started. '
if 'failed to connect to all addresses' in str(e):
tips = 'Please check whether the rollsite service(port: 9370) is started. '
        raise Exception('{}rpc request error: {}'.format(tips, e))
def local_api(method, endpoint, json_body, job_id=None):
try:
url = "http://{}{}".format(RuntimeConfig.JOB_SERVER_HOST, endpoint)
audit_logger(job_id).info('local api request: {}'.format(url))
action = getattr(requests, method.lower(), None)
response = action(url=url, json=json_body, headers=HEADERS)
audit_logger(job_id).info(response.text)
response_json_body = response.json()
audit_logger(job_id).info('local api response: {} {}'.format(endpoint, response_json_body))
return response_json_body
except Exception as e:
raise Exception('local request error: {}'.format(e))
def request_execute_server(request, execute_host):
try:
endpoint = request.base_url.replace(request.host_url, '')
method = request.method
url = "http://{}/{}".format(execute_host, endpoint)
audit_logger().info('sub request: {}'.format(url))
action = getattr(requests, method.lower(), None)
response = action(url=url, json=request.json, headers=HEADERS)
return jsonify(response.json())
except requests.exceptions.ConnectionError as e:
return get_json_result(retcode=999, retmsg='please start fate flow server: {}'.format(execute_host))
except Exception as e:
raise Exception('local request error: {}'.format(e))
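# Ask fatemanager for this node's appKey/appSecret/role and attach them to the request body.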
def get_node_identity(json_body, src_party_id):
params = {
'partyId': int(src_party_id),
'federatedId': file_utils.load_json_conf_real_time(SERVER_CONF_PATH).get(SERVERS).get('fatemanager', {}).get('federatedId')
}
try:
response = requests.post(url="http://{}:{}{}".format(
ServiceUtils.get_item("fatemanager", "host"),
ServiceUtils.get_item("fatemanager", "port"),
FATE_MANAGER_GET_NODE_INFO_ENDPOINT), json=params)
json_body['appKey'] = response.json().get('data').get('appKey')
json_body['appSecret'] = response.json().get('data').get('appSecret')
json_body['_src_role'] = response.json().get('data').get('role')
except Exception as e:
raise Exception('get appkey and secret failed: {}'.format(str(e)))
def forward_api(job_id, method, endpoint, src_party_id, dest_party_id, json_body, role,
overall_timeout=DEFAULT_GRPC_OVERALL_TIMEOUT):
_packet = forward_grpc_packet(json_body, method, endpoint, src_party_id, dest_party_id, job_id=job_id, role=role,
overall_timeout=overall_timeout)
channel, stub = get_proxy_data_channel()
_return = stub.unaryCall(_packet)
channel.close()
json_body = json.loads(_return.body.value)
return json_body
|
[
"fate_flow.utils.grpc_utils.forward_grpc_packet",
"fate_flow.utils.grpc_utils.get_proxy_data_channel",
"fate_flow.utils.service_utils.ServiceUtils.get_item",
"fate_flow.utils.grpc_utils.wrap_grpc_packet"
] |
[((1588, 1605), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (1595, 1605), False, 'from flask import jsonify\n'), ((2856, 2975), 'fate_flow.utils.grpc_utils.wrap_grpc_packet', 'wrap_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id', 'job_id'], {'overall_timeout': 'overall_timeout'}), '(json_body, method, endpoint, src_party_id, dest_party_id,\n job_id, overall_timeout=overall_timeout)\n', (2872, 2975), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet\n'), ((6011, 6151), 'fate_flow.utils.grpc_utils.forward_grpc_packet', 'forward_grpc_packet', (['json_body', 'method', 'endpoint', 'src_party_id', 'dest_party_id'], {'job_id': 'job_id', 'role': 'role', 'overall_timeout': 'overall_timeout'}), '(json_body, method, endpoint, src_party_id,\n dest_party_id, job_id=job_id, role=role, overall_timeout=overall_timeout)\n', (6030, 6151), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet\n'), ((6203, 6227), 'fate_flow.utils.grpc_utils.get_proxy_data_channel', 'get_proxy_data_channel', ([], {}), '()\n', (6225, 6227), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet\n'), ((6302, 6332), 'json.loads', 'json.loads', (['_return.body.value'], {}), '(_return.body.value)\n', (6312, 6332), False, 'import json\n'), ((1671, 1727), 'json.dumps', 'json.dumps', (["{'retmsg': retmsg, 'retcode': response_code}"], {}), "({'retmsg': retmsg, 'retcode': response_code})\n", (1681, 1727), False, 'import json\n'), ((3036, 3060), 'fate_flow.utils.grpc_utils.get_proxy_data_channel', 'get_proxy_data_channel', ([], {}), '()\n', (3058, 3060), False, 'from fate_flow.utils.grpc_utils import wrap_grpc_packet, get_proxy_data_channel, forward_grpc_packet\n'), ((3222, 3252), 'json.loads', 'json.loads', (['_return.body.value'], {}), '(_return.body.value)\n', (3232, 3252), False, 'import json\n'), ((3111, 3131), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (3123, 3131), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((3827, 3847), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (3839, 3847), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((4023, 4043), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (4035, 4043), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((4117, 4137), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', (['job_id'], {}), '(job_id)\n', (4129, 4137), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((4559, 4573), 'arch.api.utils.log_utils.audit_logger', 'audit_logger', ([], {}), '()\n', (4571, 4573), False, 'from arch.api.utils.log_utils import audit_logger\n'), ((5352, 5396), 'fate_flow.utils.service_utils.ServiceUtils.get_item', 'ServiceUtils.get_item', (['"""fatemanager"""', '"""host"""'], {}), "('fatemanager', 'host')\n", (5373, 5396), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((5410, 5454), 'fate_flow.utils.service_utils.ServiceUtils.get_item', 'ServiceUtils.get_item', (['"""fatemanager"""', '"""port"""'], {}), "('fatemanager', 'port')\n", (5431, 5454), False, 'from fate_flow.utils.service_utils import ServiceUtils\n'), ((5154, 5207), 'arch.api.utils.file_utils.load_json_conf_real_time', 'file_utils.load_json_conf_real_time', (['SERVER_CONF_PATH'], {}), 
'(SERVER_CONF_PATH)\n', (5189, 5207), False, 'from arch.api.utils import file_utils\n')]
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64
from fate_flow.utils.log_utils import schedule_logger
from fate_flow.db import db_utils
from fate_flow.db.db_models import (DB, TrackingMetric)
from fate_flow.entity import Metric
from fate_flow.utils import job_utils
class MetricManager:
def __init__(self, job_id: str, role: str, party_id: int,
component_name: str,
task_id: str = None,
task_version: int = None):
self.job_id = job_id
self.role = role
self.party_id = party_id
self.component_name = component_name
self.task_id = task_id
self.task_version = task_version
@DB.connection_context()
def read_metric_data(self, metric_namespace: str, metric_name: str, job_level=False):
metrics = []
for k, v in self.read_metrics_from_db(metric_namespace, metric_name, 1, job_level):
metrics.append(Metric(key=k, value=v))
return metrics
@DB.connection_context()
def insert_metrics_into_db(self, metric_namespace: str, metric_name: str, data_type: int, kv, job_level=False):
try:
model_class = self.get_model_class()
tracking_metric = model_class()
tracking_metric.f_job_id = self.job_id
tracking_metric.f_component_name = (
self.component_name if not job_level else job_utils.job_pipeline_component_name())
tracking_metric.f_task_id = self.task_id
tracking_metric.f_task_version = self.task_version
tracking_metric.f_role = self.role
tracking_metric.f_party_id = self.party_id
tracking_metric.f_metric_namespace = metric_namespace
tracking_metric.f_metric_name = metric_name
tracking_metric.f_type = data_type
default_db_source = tracking_metric.to_dict()
tracking_metric_data_source = []
for k, v in kv:
db_source = default_db_source.copy()
db_source['f_key'] = serialize_b64(k)
db_source['f_value'] = serialize_b64(v)
db_source['f_create_time'] = current_timestamp()
tracking_metric_data_source.append(db_source)
db_utils.bulk_insert_into_db(model_class, tracking_metric_data_source, schedule_logger(self.job_id))
except Exception as e:
schedule_logger(self.job_id).exception(
"An exception where inserted metric {} of metric namespace: {} to database:\n{}".format(
metric_name,
metric_namespace,
e
))
@DB.connection_context()
def read_metrics_from_db(self, metric_namespace: str, metric_name: str, data_type, job_level=False):
metrics = []
try:
tracking_metric_model = self.get_model_class()
tracking_metrics = tracking_metric_model.select(tracking_metric_model.f_key,
tracking_metric_model.f_value).where(
tracking_metric_model.f_job_id == self.job_id,
tracking_metric_model.f_component_name == (
self.component_name if not job_level else job_utils.job_pipeline_component_name()),
tracking_metric_model.f_role == self.role,
tracking_metric_model.f_party_id == self.party_id,
tracking_metric_model.f_metric_namespace == metric_namespace,
tracking_metric_model.f_metric_name == metric_name,
tracking_metric_model.f_type == data_type
)
for tracking_metric in tracking_metrics:
yield deserialize_b64(tracking_metric.f_key), deserialize_b64(tracking_metric.f_value)
except Exception as e:
schedule_logger(self.job_id).exception(e)
raise e
return metrics
@DB.connection_context()
def clean_metrics(self):
tracking_metric_model = self.get_model_class()
operate = tracking_metric_model.delete().where(
tracking_metric_model.f_task_id == self.task_id,
tracking_metric_model.f_task_version == self.task_version,
tracking_metric_model.f_role == self.role,
tracking_metric_model.f_party_id == self.party_id
)
return operate.execute() > 0
@DB.connection_context()
def get_metric_list(self, job_level: bool = False):
metrics = {}
tracking_metric_model = self.get_model_class()
if tracking_metric_model.table_exists():
tracking_metrics = tracking_metric_model.select(
tracking_metric_model.f_metric_namespace,
tracking_metric_model.f_metric_name
).where(
tracking_metric_model.f_job_id == self.job_id,
tracking_metric_model.f_component_name == (self.component_name if not job_level else 'dag'),
tracking_metric_model.f_role == self.role,
tracking_metric_model.f_party_id == self.party_id
).distinct()
for tracking_metric in tracking_metrics:
metrics[tracking_metric.f_metric_namespace] = metrics.get(tracking_metric.f_metric_namespace, [])
metrics[tracking_metric.f_metric_namespace].append(tracking_metric.f_metric_name)
return metrics
@DB.connection_context()
def read_component_metrics(self):
try:
tracking_metric_model = self.get_model_class()
tracking_metrics = tracking_metric_model.select().where(
tracking_metric_model.f_job_id == self.job_id,
tracking_metric_model.f_component_name == self.component_name,
tracking_metric_model.f_role == self.role,
tracking_metric_model.f_party_id == self.party_id,
tracking_metric_model.f_task_version == self.task_version
)
return [tracking_metric for tracking_metric in tracking_metrics]
except Exception as e:
schedule_logger(self.job_id).exception(e)
raise e
@DB.connection_context()
def reload_metric(self, source_metric_manager):
component_metrics = source_metric_manager.read_component_metrics()
for component_metric in component_metrics:
model_class = self.get_model_class()
tracking_metric = model_class()
tracking_metric.f_job_id = self.job_id
tracking_metric.f_component_name = self.component_name
tracking_metric.f_task_id = self.task_id
tracking_metric.f_task_version = self.task_version
tracking_metric.f_role = self.role
tracking_metric.f_party_id = self.party_id
tracking_metric.f_metric_namespace = component_metric.f_metric_namespace
tracking_metric.f_metric_name = component_metric.f_metric_name
tracking_metric.f_type = component_metric.f_type
tracking_metric.f_key = component_metric.f_key
tracking_metric.f_value = component_metric.f_value
tracking_metric.save()
def get_model_class(self):
return db_utils.get_dynamic_db_model(TrackingMetric, self.job_id)
|
[
"fate_flow.utils.job_utils.job_pipeline_component_name",
"fate_flow.db.db_models.DB.connection_context",
"fate_flow.db.db_utils.get_dynamic_db_model",
"fate_flow.utils.log_utils.schedule_logger",
"fate_flow.entity.Metric"
] |
[((1340, 1363), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (1361, 1363), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((1647, 1670), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (1668, 1670), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((3319, 3342), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (3340, 3342), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((4589, 4612), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (4610, 4612), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((5055, 5078), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (5076, 5078), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((6071, 6094), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (6092, 6094), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((6818, 6841), 'fate_flow.db.db_models.DB.connection_context', 'DB.connection_context', ([], {}), '()\n', (6839, 6841), False, 'from fate_flow.db.db_models import DB, TrackingMetric\n'), ((7874, 7932), 'fate_flow.db.db_utils.get_dynamic_db_model', 'db_utils.get_dynamic_db_model', (['TrackingMetric', 'self.job_id'], {}), '(TrackingMetric, self.job_id)\n', (7903, 7932), False, 'from fate_flow.db import db_utils\n'), ((1594, 1616), 'fate_flow.entity.Metric', 'Metric', ([], {'key': 'k', 'value': 'v'}), '(key=k, value=v)\n', (1600, 1616), False, 'from fate_flow.entity import Metric\n'), ((2051, 2090), 'fate_flow.utils.job_utils.job_pipeline_component_name', 'job_utils.job_pipeline_component_name', ([], {}), '()\n', (2088, 2090), False, 'from fate_flow.utils import job_utils\n'), ((2700, 2716), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (['k'], {}), '(k)\n', (2713, 2716), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((2756, 2772), 'fate_arch.common.base_utils.serialize_b64', 'serialize_b64', (['v'], {}), '(v)\n', (2769, 2772), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((2818, 2837), 'fate_arch.common.base_utils.current_timestamp', 'current_timestamp', ([], {}), '()\n', (2835, 2837), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((2983, 3011), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (2998, 3011), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3056, 3084), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (3071, 3084), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((3913, 3952), 'fate_flow.utils.job_utils.job_pipeline_component_name', 'job_utils.job_pipeline_component_name', ([], {}), '()\n', (3950, 3952), False, 'from fate_flow.utils import job_utils\n'), ((4374, 4412), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (['tracking_metric.f_key'], {}), '(tracking_metric.f_key)\n', (4389, 4412), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((4414, 4454), 'fate_arch.common.base_utils.deserialize_b64', 'deserialize_b64', (['tracking_metric.f_value'], {}), 
'(tracking_metric.f_value)\n', (4429, 4454), False, 'from fate_arch.common.base_utils import current_timestamp, serialize_b64, deserialize_b64\n'), ((4498, 4526), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (4513, 4526), False, 'from fate_flow.utils.log_utils import schedule_logger\n'), ((6750, 6778), 'fate_flow.utils.log_utils.schedule_logger', 'schedule_logger', (['self.job_id'], {}), '(self.job_id)\n', (6765, 6778), False, 'from fate_flow.utils.log_utils import schedule_logger\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
# HeteroSecureBoostingGuest
# =============================================================================
import functools
from operator import itemgetter
import numpy as np
from federatedml.tree.tree_core.predict_cache import PredictDataCache
from federatedml.util.io_check import assert_io_num_rows_equal
from numpy import random
from arch.api.utils import log_utils
from fate_flow.entity.metric import Metric
from fate_flow.entity.metric import MetricMeta
from federatedml.feature.binning.quantile_binning import QuantileBinning
from federatedml.feature.fate_element_type import NoneType
from federatedml.loss import FairLoss
from federatedml.loss import HuberLoss
from federatedml.loss import LeastAbsoluteErrorLoss
from federatedml.loss import LeastSquaredErrorLoss
from federatedml.loss import LogCoshLoss
from federatedml.loss import SigmoidBinaryCrossEntropyLoss
from federatedml.loss import SoftmaxCrossEntropyLoss
from federatedml.loss import TweedieLoss
from federatedml.optim.convergence import converge_func_factory
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.feature_binning_param import FeatureBinningParam
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta
from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from federatedml.secureprotol import IterativeAffineEncrypt
from federatedml.secureprotol import PaillierEncrypt
from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
from federatedml.statistic import data_overview
from federatedml.transfer_variable.transfer_class.hetero_secure_boost_transfer_variable import \
HeteroSecureBoostingTreeTransferVariable
from federatedml.tree import BoostingTree
from federatedml.tree import HeteroDecisionTreeGuest
from federatedml.util import consts
from federatedml.util.classify_label_checker import ClassifyLabelChecker
from federatedml.util.classify_label_checker import RegressionLabelChecker
LOGGER = log_utils.getLogger()
class HeteroSecureBoostingTreeGuest(BoostingTree):
def __init__(self):
super(HeteroSecureBoostingTreeGuest, self).__init__()
self.convegence = None
self.y = None
self.F = None
self.predict_F = None
self.data_bin = None
self.loss = None
self.init_score = None
self.classes_dict = {}
self.classes_ = []
self.num_classes = 0
self.classify_target = "binary"
self.feature_num = None
self.encrypter = None
self.grad_and_hess = None
self.tree_dim = 1
self.tree_meta = None
self.trees_ = []
self.history_loss = []
self.bin_split_points = None
self.bin_sparse_points = None
self.encrypted_mode_calculator = None
self.predict_data_cache = PredictDataCache()
self.feature_importances_ = {}
self.role = consts.GUEST
self.transfer_variable = HeteroSecureBoostingTreeTransferVariable()
self.data_alignment_map = {}
def set_loss(self, objective_param):
loss_type = objective_param.objective
params = objective_param.params
LOGGER.info("set objective, objective is {}".format(loss_type))
if self.task_type == consts.CLASSIFICATION:
if loss_type == "cross_entropy":
if self.num_classes == 2:
self.loss = SigmoidBinaryCrossEntropyLoss()
else:
self.loss = SoftmaxCrossEntropyLoss()
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
elif self.task_type == consts.REGRESSION:
if loss_type == "lse":
self.loss = LeastSquaredErrorLoss()
elif loss_type == "lae":
self.loss = LeastAbsoluteErrorLoss()
elif loss_type == "huber":
self.loss = HuberLoss(params[0])
elif loss_type == "fair":
self.loss = FairLoss(params[0])
elif loss_type == "tweedie":
self.loss = TweedieLoss(params[0])
elif loss_type == "log_cosh":
self.loss = LogCoshLoss()
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
else:
raise NotImplementedError("objective %s not supported yet" % (loss_type))
def convert_feature_to_bin(self, data_instance):
LOGGER.info("convert feature to bins")
param_obj = FeatureBinningParam(bin_num=self.bin_num)
if self.use_missing:
binning_obj = QuantileBinning(param_obj, abnormal_list=[NoneType()])
else:
binning_obj = QuantileBinning(param_obj)
binning_obj.fit_split_points(data_instance)
self.data_bin, self.bin_split_points, self.bin_sparse_points = binning_obj.convert_feature_to_bin(data_instance)
LOGGER.info("convert feature to bins over")
def set_y(self):
LOGGER.info("set label from data and check label")
self.y = self.data_bin.mapValues(lambda instance: instance.label)
self.check_label()
def generate_flowid(self, round_num, tree_num):
LOGGER.info("generate flowid, flowid {}".format(self.flowid))
return ".".join(map(str, [self.flowid, round_num, tree_num]))
def check_label(self):
LOGGER.info("check label")
if self.task_type == consts.CLASSIFICATION:
self.num_classes, self.classes_ = ClassifyLabelChecker.validate_label(self.data_bin)
if self.num_classes > 2:
self.classify_target = "multinomial"
self.tree_dim = self.num_classes
range_from_zero = True
for _class in self.classes_:
try:
if _class >= 0 and _class < self.num_classes and isinstance(_class, int):
continue
else:
range_from_zero = False
break
except:
range_from_zero = False
self.classes_ = sorted(self.classes_)
if not range_from_zero:
class_mapping = dict(zip(self.classes_, range(self.num_classes)))
self.y = self.y.mapValues(lambda _class: class_mapping[_class])
else:
RegressionLabelChecker.validate_label(self.data_bin)
self.set_loss(self.objective_param)
def generate_encrypter(self):
LOGGER.info("generate encrypter")
if self.encrypt_param.method.lower() == consts.PAILLIER.lower():
self.encrypter = PaillierEncrypt()
self.encrypter.generate_key(self.encrypt_param.key_length)
elif self.encrypt_param.method.lower() == consts.ITERATIVEAFFINE.lower():
self.encrypter = IterativeAffineEncrypt()
self.encrypter.generate_key(self.encrypt_param.key_length)
else:
raise NotImplementedError("encrypt method not supported yes!!!")
self.encrypted_calculator = EncryptModeCalculator(self.encrypter, self.calculated_mode, self.re_encrypted_rate)
@staticmethod
def accumulate_f(f_val, new_f_val, lr=0.1, idx=0):
f_val[idx] += lr * new_f_val
return f_val
def update_feature_importance(self, tree_feature_importance):
for fid in tree_feature_importance:
if fid not in self.feature_importances_:
self.feature_importances_[fid] = 0
self.feature_importances_[fid] += tree_feature_importance[fid]
def update_f_value(self, new_f=None, tidx=-1, mode="train"):
LOGGER.info("update tree f value, tree idx is {}".format(tidx))
if mode == "train" and self.F is None:
if self.tree_dim > 1:
self.F, self.init_score = self.loss.initialize(self.y, self.tree_dim)
else:
self.F, self.init_score = self.loss.initialize(self.y)
else:
accumulate_f = functools.partial(self.accumulate_f,
lr=self.learning_rate,
idx=tidx)
if mode == "train":
self.F = self.F.join(new_f, accumulate_f)
else:
self.predict_F = self.predict_F.join(new_f, accumulate_f)
def compute_grad_and_hess(self):
LOGGER.info("compute grad and hess")
loss_method = self.loss
if self.task_type == consts.CLASSIFICATION:
self.grad_and_hess = self.y.join(self.F, lambda y, f_val: \
(loss_method.compute_grad(y, loss_method.predict(f_val)), \
loss_method.compute_hess(y, loss_method.predict(f_val))))
else:
self.grad_and_hess = self.y.join(self.F, lambda y, f_val:
(loss_method.compute_grad(y, f_val),
loss_method.compute_hess(y, f_val)))
def compute_loss(self):
LOGGER.info("compute loss")
if self.task_type == consts.CLASSIFICATION:
loss_method = self.loss
y_predict = self.F.mapValues(lambda val: loss_method.predict(val))
loss = loss_method.compute_loss(self.y, y_predict)
elif self.task_type == consts.REGRESSION:
if self.objective_param.objective in ["lse", "lae", "logcosh", "tweedie", "log_cosh", "huber"]:
loss_method = self.loss
loss = loss_method.compute_loss(self.y, self.F)
else:
loss_method = self.loss
y_predict = self.F.mapValues(lambda val: loss_method.predict(val))
loss = loss_method.compute_loss(self.y, y_predict)
return float(loss)
def get_grad_and_hess(self, tree_idx):
LOGGER.info("get grad and hess of tree {}".format(tree_idx))
grad_and_hess_subtree = self.grad_and_hess.mapValues(
lambda grad_and_hess: (grad_and_hess[0][tree_idx], grad_and_hess[1][tree_idx]))
return grad_and_hess_subtree
def check_convergence(self, loss):
LOGGER.info("check convergence")
if self.convegence is None:
self.convegence = converge_func_factory("diff", self.tol)
return self.convegence.is_converge(loss)
def sample_valid_features(self):
LOGGER.info("sample valid features")
if self.feature_num is None:
self.feature_num = self.bin_split_points.shape[0]
choose_feature = random.choice(range(0, self.feature_num), \
max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
valid_features = [False for i in range(self.feature_num)]
for fid in choose_feature:
valid_features[fid] = True
return valid_features
def sync_tree_dim(self):
LOGGER.info("sync tree dim to host")
self.transfer_variable.tree_dim.remote(self.tree_dim,
role=consts.HOST,
idx=-1)
def sync_stop_flag(self, stop_flag, num_round):
LOGGER.info("sync stop flag to host, boost round is {}".format(num_round))
self.transfer_variable.stop_flag.remote(stop_flag,
role=consts.HOST,
idx=-1,
suffix=(num_round,))
def sync_predict_start_round(self, num_round):
LOGGER.info("sync predict start round {}".format(num_round))
self.transfer_variable.predict_start_round.remote(num_round,
role=consts.HOST,
idx=-1)
def fit(self, data_inst, validate_data=None):
LOGGER.info("begin to train secureboosting guest model")
self.gen_feature_fid_mapping(data_inst.schema)
self.validation_strategy = self.init_validation_strategy(data_inst, validate_data)
data_inst = self.data_alignment(data_inst)
self.convert_feature_to_bin(data_inst)
self.set_y()
self.update_f_value()
self.generate_encrypter()
self.sync_tree_dim()
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"unit_name": "iters"}))
for i in range(self.num_trees):
self.compute_grad_and_hess()
for tidx in range(self.tree_dim):
LOGGER.info("start to fit, boost round: {}, tree index: {}".format(i, tidx))
tree_inst = HeteroDecisionTreeGuest(self.tree_param)
tree_inst.set_inputinfo(self.data_bin, self.get_grad_and_hess(tidx), self.bin_split_points,
self.bin_sparse_points)
valid_features = self.sample_valid_features()
tree_inst.set_valid_features(valid_features)
tree_inst.set_encrypter(self.encrypter)
tree_inst.set_encrypted_mode_calculator(self.encrypted_calculator)
tree_inst.set_flowid(self.generate_flowid(i, tidx))
tree_inst.set_host_party_idlist(self.component_properties.host_party_idlist)
tree_inst.set_runtime_idx(self.component_properties.local_partyid)
tree_inst.fit()
tree_meta, tree_param = tree_inst.get_model()
self.trees_.append(tree_param)
if self.tree_meta is None:
self.tree_meta = tree_meta
self.update_f_value(new_f=tree_inst.predict_weights, tidx=tidx)
self.update_feature_importance(tree_inst.get_feature_importance())
loss = self.compute_loss()
self.history_loss.append(loss)
LOGGER.debug("boost round {} loss is {}".format(i, loss))
self.callback_metric("loss",
"train",
[Metric(i, loss)])
if self.validation_strategy:
self.validation_strategy.validate(self, i)
if self.validation_strategy.need_stop():
LOGGER.debug('early stopping triggered')
break
if self.n_iter_no_change is True:
if self.check_convergence(loss):
self.sync_stop_flag(True, i)
LOGGER.debug("check loss convergence on boost round {}".format(i))
break
else:
self.sync_stop_flag(False, i)
LOGGER.debug("history loss is {}".format(self.history_loss))
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"Best": min(self.history_loss)}))
if self.validation_strategy and self.validation_strategy.has_saved_best_model():
self.load_model(self.validation_strategy.cur_best_model)
LOGGER.info("end to train secureboosting guest model")
def predict_f_value(self, data_inst, cache_dataset_key):
LOGGER.debug("predict tree f value, there are {} trees".format(len(self.trees_)))
init_score = self.init_score
last_round = self.predict_data_cache.predict_data_last_round(cache_dataset_key)
rounds = len(self.trees_) // self.tree_dim
if last_round == -1:
self.predict_F = data_inst.mapValues(lambda v: init_score)
else:
LOGGER.debug("hit cache, cached round is {}".format(last_round))
if last_round >= rounds - 1:
LOGGER.debug("predict data cached, rounds is {}, total cached round is {}".format(rounds, last_round))
self.predict_F = self.predict_data_cache.predict_data_at(cache_dataset_key, min(rounds - 1, last_round))
self.sync_predict_start_round(last_round + 1)
for i in range(last_round + 1, rounds):
for tidx in range(self.tree_dim):
LOGGER.info("start to predict, boost round: {}, tree index: {}".format(i, tidx))
tree_inst = HeteroDecisionTreeGuest(self.tree_param)
tree_inst.load_model(self.tree_meta, self.trees_[i * self.tree_dim + tidx])
# tree_inst.set_tree_model(self.trees_[i * self.tree_dim + tidx])
tree_inst.set_flowid(self.generate_flowid(i, tidx))
tree_inst.set_runtime_idx(self.component_properties.local_partyid)
tree_inst.set_host_party_idlist(self.component_properties.host_party_idlist)
predict_data = tree_inst.predict(data_inst)
self.update_f_value(new_f=predict_data, tidx=tidx, mode="predict")
self.predict_data_cache.add_data(cache_dataset_key, self.predict_F)
@assert_io_num_rows_equal
def predict(self, data_inst):
LOGGER.info("start predict")
cache_dataset_key = self.predict_data_cache.get_data_key(data_inst)
if cache_dataset_key in self.data_alignment_map:
data_inst = self.data_alignment_map[cache_dataset_key]
else:
data_inst = self.data_alignment(data_inst)
header = [None] * len(self.feature_name_fid_mapping)
for idx, col in self.feature_name_fid_mapping.items():
header[idx] = col
data_inst = data_overview.header_alignment(data_inst, header)
self.data_alignment_map[cache_dataset_key] = data_inst
self.predict_f_value(data_inst, cache_dataset_key)
if self.task_type == consts.CLASSIFICATION:
loss_method = self.loss
if self.num_classes == 2:
predicts = self.predict_F.mapValues(lambda f: float(loss_method.predict(f)))
else:
predicts = self.predict_F.mapValues(lambda f: loss_method.predict(f).tolist())
elif self.task_type == consts.REGRESSION:
if self.objective_param.objective in ["lse", "lae", "huber", "log_cosh", "fair", "tweedie"]:
predicts = self.predict_F
else:
raise NotImplementedError("objective {} not supported yet".format(self.objective_param.objective))
if self.task_type == consts.CLASSIFICATION:
classes_ = self.classes_
if self.num_classes == 2:
threshold = self.predict_param.threshold
predict_result = data_inst.join(predicts, lambda inst, pred: [inst.label,
classes_[1] if pred > threshold else
classes_[0], pred,
{"0": 1 - pred, "1": pred}])
else:
predict_label = predicts.mapValues(lambda preds: classes_[np.argmax(preds)])
predict_result = data_inst.join(predicts, lambda inst, preds: [inst.label, classes_[np.argmax(preds)],
np.max(preds),
dict(zip(map(str, classes_), preds))])
elif self.task_type == consts.REGRESSION:
predict_result = data_inst.join(predicts, lambda inst, pred: [inst.label, float(pred), float(pred),
{"label": float(pred)}])
else:
raise NotImplementedError("task type {} not supported yet".format(self.task_type))
LOGGER.info("end predict")
return predict_result
def get_feature_importance(self):
return self.feature_importances_
def get_model_meta(self):
model_meta = BoostingTreeModelMeta()
model_meta.tree_meta.CopyFrom(self.tree_meta)
model_meta.learning_rate = self.learning_rate
model_meta.num_trees = self.num_trees
model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective,
param=self.objective_param.params))
model_meta.task_type = self.task_type
# model_meta.tree_dim = self.tree_dim
model_meta.n_iter_no_change = self.n_iter_no_change
model_meta.tol = self.tol
# model_meta.num_classes = self.num_classes
# model_meta.classes_.extend(map(str, self.classes_))
# model_meta.need_run = self.need_run
meta_name = "HeteroSecureBoostingTreeGuestMeta"
return meta_name, model_meta
def set_model_meta(self, model_meta):
self.tree_meta = model_meta.tree_meta
self.learning_rate = model_meta.learning_rate
self.num_trees = model_meta.num_trees
self.bin_num = model_meta.quantile_meta.bin_num
self.objective_param.objective = model_meta.objective_meta.objective
self.objective_param.params = list(model_meta.objective_meta.param)
self.task_type = model_meta.task_type
# self.tree_dim = model_meta.tree_dim
# self.num_classes = model_meta.num_classes
self.n_iter_no_change = model_meta.n_iter_no_change
self.tol = model_meta.tol
# self.classes_ = list(model_meta.classes_)
# self.set_loss(self.objective_param)
def get_model_param(self):
model_param = BoostingTreeModelParam()
model_param.tree_num = len(list(self.trees_))
model_param.tree_dim = self.tree_dim
model_param.trees_.extend(self.trees_)
model_param.init_score.extend(self.init_score)
model_param.losses.extend(self.history_loss)
model_param.classes_.extend(map(str, self.classes_))
model_param.num_classes = self.num_classes
model_param.best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration
feature_importances = list(self.get_feature_importance().items())
feature_importances = sorted(feature_importances, key=itemgetter(1), reverse=True)
feature_importance_param = []
for (sitename, fid), _importance in feature_importances:
feature_importance_param.append(FeatureImportanceInfo(sitename=sitename,
fid=fid,
importance=_importance))
model_param.feature_importances.extend(feature_importance_param)
model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)
param_name = "HeteroSecureBoostingTreeGuestParam"
return param_name, model_param
def set_model_param(self, model_param):
self.trees_ = list(model_param.trees_)
self.init_score = np.array(list(model_param.init_score))
self.history_loss = list(model_param.losses)
self.classes_ = list(map(int, model_param.classes_))
self.tree_dim = model_param.tree_dim
self.num_classes = model_param.num_classes
self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)
def get_metrics_param(self):
if self.task_type == consts.CLASSIFICATION:
if self.num_classes == 2:
return EvaluateParam(eval_type="binary",
pos_label=self.classes_[1], metrics=self.metrics)
else:
return EvaluateParam(eval_type="multi", metrics=self.metrics)
else:
return EvaluateParam(eval_type="regression", metrics=self.metrics)
def export_model(self):
if self.need_cv:
return None
meta_name, meta_protobuf = self.get_model_meta()
param_name, param_protobuf = self.get_model_param()
return {meta_name: meta_protobuf, param_name: param_protobuf}
def load_model(self, model_dict):
model_param = None
model_meta = None
for _, value in model_dict["model"].items():
for model in value:
if model.endswith("Meta"):
model_meta = value[model]
if model.endswith("Param"):
model_param = value[model]
LOGGER.info("load model")
self.set_model_meta(model_meta)
self.set_model_param(model_param)
self.set_loss(self.objective_param)
|
[
"fate_flow.entity.metric.MetricMeta",
"fate_flow.entity.metric.Metric"
] |
[((3203, 3224), 'arch.api.utils.log_utils.getLogger', 'log_utils.getLogger', ([], {}), '()\n', (3222, 3224), False, 'from arch.api.utils import log_utils\n'), ((4045, 4063), 'federatedml.tree.tree_core.predict_cache.PredictDataCache', 'PredictDataCache', ([], {}), '()\n', (4061, 4063), False, 'from federatedml.tree.tree_core.predict_cache import PredictDataCache\n'), ((4171, 4213), 'federatedml.transfer_variable.transfer_class.hetero_secure_boost_transfer_variable.HeteroSecureBoostingTreeTransferVariable', 'HeteroSecureBoostingTreeTransferVariable', ([], {}), '()\n', (4211, 4213), False, 'from federatedml.transfer_variable.transfer_class.hetero_secure_boost_transfer_variable import HeteroSecureBoostingTreeTransferVariable\n'), ((5748, 5789), 'federatedml.param.feature_binning_param.FeatureBinningParam', 'FeatureBinningParam', ([], {'bin_num': 'self.bin_num'}), '(bin_num=self.bin_num)\n', (5767, 5789), False, 'from federatedml.param.feature_binning_param import FeatureBinningParam\n'), ((8294, 8382), 'federatedml.secureprotol.encrypt_mode.EncryptModeCalculator', 'EncryptModeCalculator', (['self.encrypter', 'self.calculated_mode', 'self.re_encrypted_rate'], {}), '(self.encrypter, self.calculated_mode, self.\n re_encrypted_rate)\n', (8315, 8382), False, 'from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator\n'), ((21287, 21310), 'federatedml.protobuf.generated.boosting_tree_model_meta_pb2.BoostingTreeModelMeta', 'BoostingTreeModelMeta', ([], {}), '()\n', (21308, 21310), False, 'from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta\n'), ((22964, 22988), 'federatedml.protobuf.generated.boosting_tree_model_param_pb2.BoostingTreeModelParam', 'BoostingTreeModelParam', ([], {}), '()\n', (22986, 22988), False, 'from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam\n'), ((5941, 5967), 'federatedml.feature.binning.quantile_binning.QuantileBinning', 'QuantileBinning', (['param_obj'], {}), '(param_obj)\n', (5956, 5967), False, 'from federatedml.feature.binning.quantile_binning import QuantileBinning\n'), ((6730, 6780), 'federatedml.util.classify_label_checker.ClassifyLabelChecker.validate_label', 'ClassifyLabelChecker.validate_label', (['self.data_bin'], {}), '(self.data_bin)\n', (6765, 6780), False, 'from federatedml.util.classify_label_checker import ClassifyLabelChecker\n'), ((7593, 7645), 'federatedml.util.classify_label_checker.RegressionLabelChecker.validate_label', 'RegressionLabelChecker.validate_label', (['self.data_bin'], {}), '(self.data_bin)\n', (7630, 7645), False, 'from federatedml.util.classify_label_checker import RegressionLabelChecker\n'), ((7816, 7839), 'federatedml.util.consts.PAILLIER.lower', 'consts.PAILLIER.lower', ([], {}), '()\n', (7837, 7839), False, 'from federatedml.util import consts\n'), ((7870, 7887), 'federatedml.secureprotol.PaillierEncrypt', 'PaillierEncrypt', ([], {}), '()\n', (7885, 7887), False, 'from federatedml.secureprotol import PaillierEncrypt\n'), ((9236, 9305), 'functools.partial', 'functools.partial', (['self.accumulate_f'], {'lr': 'self.learning_rate', 'idx': 'tidx'}), '(self.accumulate_f, lr=self.learning_rate, idx=tidx)\n', (9253, 9305), False, 'import functools\n'), ((11396, 11435), 'federatedml.optim.convergence.converge_func_factory', 'converge_func_factory', (['"""diff"""', 'self.tol'], {}), "('diff', self.tol)\n", (11417, 11435), False, 'from federatedml.optim.convergence import converge_func_factory\n'), ((13575, 13660), 
'fate_flow.entity.metric.MetricMeta', 'MetricMeta', ([], {'name': '"""train"""', 'metric_type': '"""LOSS"""', 'extra_metas': "{'unit_name': 'iters'}"}), "(name='train', metric_type='LOSS', extra_metas={'unit_name': 'iters'}\n )\n", (13585, 13660), False, 'from fate_flow.entity.metric import MetricMeta\n'), ((18840, 18889), 'federatedml.statistic.data_overview.header_alignment', 'data_overview.header_alignment', (['data_inst', 'header'], {}), '(data_inst, header)\n', (18870, 18889), False, 'from federatedml.statistic import data_overview\n'), ((21507, 21541), 'federatedml.protobuf.generated.boosting_tree_model_meta_pb2.QuantileMeta', 'QuantileMeta', ([], {'bin_num': 'self.bin_num'}), '(bin_num=self.bin_num)\n', (21519, 21541), False, 'from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta\n'), ((21586, 21681), 'federatedml.protobuf.generated.boosting_tree_model_meta_pb2.ObjectiveMeta', 'ObjectiveMeta', ([], {'objective': 'self.objective_param.objective', 'param': 'self.objective_param.params'}), '(objective=self.objective_param.objective, param=self.\n objective_param.params)\n', (21599, 21681), False, 'from federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta\n'), ((25100, 25159), 'federatedml.param.evaluation_param.EvaluateParam', 'EvaluateParam', ([], {'eval_type': '"""regression"""', 'metrics': 'self.metrics'}), "(eval_type='regression', metrics=self.metrics)\n", (25113, 25159), False, 'from federatedml.param.evaluation_param import EvaluateParam\n'), ((8009, 8039), 'federatedml.util.consts.ITERATIVEAFFINE.lower', 'consts.ITERATIVEAFFINE.lower', ([], {}), '()\n', (8037, 8039), False, 'from federatedml.util import consts\n'), ((8070, 8094), 'federatedml.secureprotol.IterativeAffineEncrypt', 'IterativeAffineEncrypt', ([], {}), '()\n', (8092, 8094), False, 'from federatedml.secureprotol import IterativeAffineEncrypt\n'), ((13982, 14022), 'federatedml.tree.HeteroDecisionTreeGuest', 'HeteroDecisionTreeGuest', (['self.tree_param'], {}), '(self.tree_param)\n', (14005, 14022), False, 'from federatedml.tree import HeteroDecisionTreeGuest\n'), ((17595, 17635), 'federatedml.tree.HeteroDecisionTreeGuest', 'HeteroDecisionTreeGuest', (['self.tree_param'], {}), '(self.tree_param)\n', (17618, 17635), False, 'from federatedml.tree import HeteroDecisionTreeGuest\n'), ((23614, 23627), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (23624, 23627), False, 'from operator import itemgetter\n'), ((23790, 23863), 'federatedml.protobuf.generated.boosting_tree_model_param_pb2.FeatureImportanceInfo', 'FeatureImportanceInfo', ([], {'sitename': 'sitename', 'fid': 'fid', 'importance': '_importance'}), '(sitename=sitename, fid=fid, importance=_importance)\n', (23811, 23863), False, 'from federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo\n'), ((24850, 24938), 'federatedml.param.evaluation_param.EvaluateParam', 'EvaluateParam', ([], {'eval_type': '"""binary"""', 'pos_label': 'self.classes_[1]', 'metrics': 'self.metrics'}), "(eval_type='binary', pos_label=self.classes_[1], metrics=self.\n metrics)\n", (24863, 24938), False, 'from federatedml.param.evaluation_param import EvaluateParam\n'), ((25012, 25066), 'federatedml.param.evaluation_param.EvaluateParam', 'EvaluateParam', ([], {'eval_type': '"""multi"""', 'metrics': 'self.metrics'}), "(eval_type='multi', metrics=self.metrics)\n", (25025, 25066), False, 'from federatedml.param.evaluation_param import EvaluateParam\n'), ((4622, 4653), 
'federatedml.loss.SigmoidBinaryCrossEntropyLoss', 'SigmoidBinaryCrossEntropyLoss', ([], {}), '()\n', (4651, 4653), False, 'from federatedml.loss import SigmoidBinaryCrossEntropyLoss\n'), ((4708, 4733), 'federatedml.loss.SoftmaxCrossEntropyLoss', 'SoftmaxCrossEntropyLoss', ([], {}), '()\n', (4731, 4733), False, 'from federatedml.loss import SoftmaxCrossEntropyLoss\n'), ((4955, 4978), 'federatedml.loss.LeastSquaredErrorLoss', 'LeastSquaredErrorLoss', ([], {}), '()\n', (4976, 4978), False, 'from federatedml.loss import LeastSquaredErrorLoss\n'), ((15370, 15385), 'fate_flow.entity.metric.Metric', 'Metric', (['i', 'loss'], {}), '(i, loss)\n', (15376, 15385), False, 'from fate_flow.entity.metric import Metric\n'), ((5044, 5068), 'federatedml.loss.LeastAbsoluteErrorLoss', 'LeastAbsoluteErrorLoss', ([], {}), '()\n', (5066, 5068), False, 'from federatedml.loss import LeastAbsoluteErrorLoss\n'), ((5888, 5898), 'federatedml.feature.fate_element_type.NoneType', 'NoneType', ([], {}), '()\n', (5896, 5898), False, 'from federatedml.feature.fate_element_type import NoneType\n'), ((5136, 5156), 'federatedml.loss.HuberLoss', 'HuberLoss', (['params[0]'], {}), '(params[0])\n', (5145, 5156), False, 'from federatedml.loss import HuberLoss\n'), ((20366, 20382), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (20375, 20382), True, 'import numpy as np\n'), ((20583, 20596), 'numpy.max', 'np.max', (['preds'], {}), '(preds)\n', (20589, 20596), True, 'import numpy as np\n'), ((5223, 5242), 'federatedml.loss.FairLoss', 'FairLoss', (['params[0]'], {}), '(params[0])\n', (5231, 5242), False, 'from federatedml.loss import FairLoss\n'), ((20485, 20501), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (20494, 20501), True, 'import numpy as np\n'), ((5312, 5334), 'federatedml.loss.TweedieLoss', 'TweedieLoss', (['params[0]'], {}), '(params[0])\n', (5323, 5334), False, 'from federatedml.loss import TweedieLoss\n'), ((5405, 5418), 'federatedml.loss.LogCoshLoss', 'LogCoshLoss', ([], {}), '()\n', (5416, 5418), False, 'from federatedml.loss import LogCoshLoss\n')]
|
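The extract_api cells above are Python-literal lists of tuples, each pairing what appear to be character offsets into the code cell with the dotted API path, the call name, its arguments, the call text, and the import statement that provides the API. Below is a minimal sketch of how such a cell could be parsed, assuming every entry is the 8-field tuple seen in this preview; the field names, the helper parse_extract_api, and the reading of the boolean flag are assumptions made for illustration, not part of any documented schema.

import ast

def parse_extract_api(cell_text: str):
    """Parse one extract_api cell into a list of per-call records."""
    records = []
    for entry in ast.literal_eval(cell_text):
        # Each entry in the preview appears to be an 8-tuple; unpacking will
        # raise if a row deviates from that shape.
        span, api_path, call_name, call_args, call_text, args_span, flag, import_stmt = entry
        records.append({
            "span": span,                   # (start, end) offsets of the call in the code cell (assumed)
            "api": api_path,                # dotted path, e.g. 'fate_flow.entity.Metric'
            "name": call_name,              # call expression as written, e.g. 'Metric'
            "args": call_args,              # ([positional arg reprs], {keyword arg reprs})
            "call_text": call_text,         # the call's argument text as it appears in the source
            "args_span": args_span,         # second offset pair, assumed to cover the argument text
            "flag": flag,                   # boolean whose meaning is not stated in the preview
            "import": import_stmt.strip(),  # import statement that provides the API
        })
    return records

Under those assumptions, sorted({r["api"] for r in parse_extract_api(cell_text)}) lists the distinct APIs extracted for a row, which can then be compared against the shorter apis column.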