Dataset columns:
  status: stringclasses (1 value)
  repo_name: stringclasses (31 values)
  repo_url: stringclasses (31 values)
  issue_id: int64 (range 1 to 104k)
  title: stringlengths (4 to 369 chars)
  body: stringlengths (0 to 254k chars)
  issue_url: stringlengths (37 to 56 chars)
  pull_url: stringlengths (37 to 54 chars)
  before_fix_sha: stringlengths (40 chars)
  after_fix_sha: stringlengths (40 chars)
  report_datetime: timestamp[us, tz=UTC]
  language: stringclasses (5 values)
  commit_datetime: timestamp[us, tz=UTC]
  updated_file: stringlengths (4 to 188 chars)
  file_content: stringlengths (0 to 5.12M chars)
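Every record below shares issue #5487; the dataset evidently emits one row per file updated by the fixing pull request. As a minimal sketch of how rows of this shape could be represented in code, the following hypothetical Java value type mirrors the columns above (the class name and the use of java.time.Instant for the timestamp columns are assumptions; the dataset only declares the dtypes listed):

    import java.time.Instant;

    // Hypothetical row type for this dataset; field names follow the columns above.
    public record BugFixRow(
            String status,
            String repoName,
            String repoUrl,
            long issueId,
            String title,
            String body,
            String issueUrl,
            String pullUrl,
            String beforeFixSha,
            String afterFixSha,
            Instant reportDatetime,
            String language,
            Instant commitDatetime,
            String updatedFile,
            String fileContent) {
    }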
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5487
title: [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class
body: DolphinScheduler has already removed the data-quality check feature, and, as the configuration files show, the settings for the database that data quality relied on have likewise been removed. Yet the code still contains TaskRecordDao's data-quality queries, and the eamp_hive_log_hd table referenced in SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s' clearly no longer exists in the configured default database. The important abstract class AbstractTask still evaluates TaskRecordDao's data-quality check logic; it is recommended to remove it to keep this important abstract class clean.

    public void after() {
        if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
            // task recor flat : if true , start up qianfan
            if (TaskRecordDao.getTaskRecordFlag()
                    && TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
                AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(),
                        taskExecutionContext.getTaskParams());
                // replace placeholder
                Map<String, Property> paramsMap = ParamUtils.convert(
                        ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                        taskExecutionContext.getDefinedParams(),
                        params.getLocalParametersMap(),
                        CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                        taskExecutionContext.getScheduleTime());
                if (paramsMap != null && !paramsMap.isEmpty() && paramsMap.containsKey("v_proc_date")) {
                    String vProcDate = paramsMap.get("v_proc_date").getValue();
                    if (!StringUtils.isEmpty(vProcDate)) {
                        TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
                        logger.info("task record status : {}", taskRecordState);
                        if (taskRecordState == TaskRecordStatus.FAILURE) {
                            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
                        }
                    }
                }
            }
        } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
            setExitStatusCode(Constants.EXIT_CODE_KILL);
        } else {
            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
        }
    }
issue_url: https://github.com/apache/dolphinscheduler/issues/5487
pull_url: https://github.com/apache/dolphinscheduler/pull/5492
before_fix_sha: 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c
after_fix_sha: bc22ae7c91c9cbd7c971796ba3a45358c2f11864
report_datetime: 2021-05-17T09:46:25Z
language: java
commit_datetime: 2021-05-18T09:00:03Z
updated_file: dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/TaskRecordService.java
file_content:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service; import java.util.Map; /** * task record service */ public interface TaskRecordService { /** * query task record list paging * * @param taskName task name * @param state state * @param sourceTable source table * @param destTable destination table * @param taskDate task date * @param startDate start time * @param endDate end time * @param pageNo page numbere * @param pageSize page size * @param isHistory is history * @return task record list */ Map<String,Object> queryTaskRecordListPaging(boolean isHistory, String taskName, String startDate, String taskDate, String sourceTable, String destTable, String endDate, String state, Integer pageNo, Integer pageSize); }
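Note that the interface's actual parameter order (isHistory, taskName, startDate, taskDate, sourceTable, destTable, endDate, state, pageNo, pageSize) does not match the order in which its Javadoc lists the parameters. A hypothetical invocation — the class and method names below are illustrative, and the argument values are taken from the controller test in a later record — might look like this:

    import java.util.Map;

    import org.apache.dolphinscheduler.api.service.TaskRecordService;

    class TaskRecordQueryExample {

        // Hypothetical invocation; 'service' would be an injected TaskRecordService bean.
        static Map<String, Object> queryOnePage(TaskRecordService service) {
            return service.queryTaskRecordListPaging(
                    false,                 // isHistory: false -> query the current hive log table
                    "taskName",            // taskName (matched with LIKE)
                    "2019-12-16 00:00:00", // startDate
                    "",                    // taskDate
                    "",                    // sourceTable
                    "",                    // destTable
                    "2019-12-17 00:00:00", // endDate
                    "",                    // state
                    1,                     // pageNo
                    30);                   // pageSize
        }
    }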
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5487
title: [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class
body: DolphinScheduler has already removed the data-quality check feature, and, as the configuration files show, the settings for the database that data quality relied on have likewise been removed. Yet the code still contains TaskRecordDao's data-quality queries, and the eamp_hive_log_hd table referenced in SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s' clearly no longer exists in the configured default database. The important abstract class AbstractTask still evaluates TaskRecordDao's data-quality check logic; it is recommended to remove it to keep this important abstract class clean.

    public void after() {
        if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
            // task recor flat : if true , start up qianfan
            if (TaskRecordDao.getTaskRecordFlag()
                    && TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
                AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(),
                        taskExecutionContext.getTaskParams());
                // replace placeholder
                Map<String, Property> paramsMap = ParamUtils.convert(
                        ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                        taskExecutionContext.getDefinedParams(),
                        params.getLocalParametersMap(),
                        CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                        taskExecutionContext.getScheduleTime());
                if (paramsMap != null && !paramsMap.isEmpty() && paramsMap.containsKey("v_proc_date")) {
                    String vProcDate = paramsMap.get("v_proc_date").getValue();
                    if (!StringUtils.isEmpty(vProcDate)) {
                        TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
                        logger.info("task record status : {}", taskRecordState);
                        if (taskRecordState == TaskRecordStatus.FAILURE) {
                            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
                        }
                    }
                }
            }
        } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
            setExitStatusCode(Constants.EXIT_CODE_KILL);
        } else {
            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
        }
    }
issue_url: https://github.com/apache/dolphinscheduler/issues/5487
pull_url: https://github.com/apache/dolphinscheduler/pull/5492
before_fix_sha: 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c
after_fix_sha: bc22ae7c91c9cbd7c971796ba3a45358c2f11864
report_datetime: 2021-05-17T09:46:25Z
language: java
commit_datetime: 2021-05-18T09:00:03Z
updated_file: dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/TaskRecordServiceImpl.java
file_content:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.TASK_RECORD_TABLE_HISTORY_HIVE_LOG; import static org.apache.dolphinscheduler.common.Constants.TASK_RECORD_TABLE_HIVE_LOG; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.TaskRecordService; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.dao.TaskRecordDao; import org.apache.dolphinscheduler.dao.entity.TaskRecord; import java.util.HashMap; import java.util.List; import java.util.Map; import org.springframework.stereotype.Service; /** * task record service impl */ @Service public class TaskRecordServiceImpl extends BaseServiceImpl implements TaskRecordService { /** * query task record list paging * * @param taskName task name * @param state state * @param sourceTable source table * @param destTable destination table * @param taskDate task date * @param startDate start time * @param endDate end time * @param pageNo page numbere * @param pageSize page size * @param isHistory is history * @return task record list */ @Override public Map<String,Object> queryTaskRecordListPaging(boolean isHistory, String taskName, String startDate, String taskDate, String sourceTable, String destTable, String endDate, String state, Integer pageNo, Integer pageSize) { Map<String, Object> result = new HashMap<>(); PageInfo<TaskRecord> pageInfo = new PageInfo<>(pageNo, pageSize); Map<String, String> map = new HashMap<>(); map.put("taskName", taskName); map.put("taskDate", taskDate); map.put("state", state); map.put("sourceTable", sourceTable); map.put("targetTable", destTable); map.put("startTime", startDate); map.put("endTime", endDate); map.put("offset", pageInfo.getStart().toString()); map.put("pageSize", pageInfo.getPageSize().toString()); String table = isHistory ? TASK_RECORD_TABLE_HISTORY_HIVE_LOG : TASK_RECORD_TABLE_HIVE_LOG; int count = TaskRecordDao.countTaskRecord(map, table); List<TaskRecord> recordList = TaskRecordDao.queryAllTaskRecord(map, table); pageInfo.setTotalCount(count); pageInfo.setLists(recordList); result.put(Constants.DATA_LIST, pageInfo); putMsg(result, Status.SUCCESS); return result; } }
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5487
title: [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class
body: DolphinScheduler has already removed the data-quality check feature, and, as the configuration files show, the settings for the database that data quality relied on have likewise been removed. Yet the code still contains TaskRecordDao's data-quality queries, and the eamp_hive_log_hd table referenced in SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s' clearly no longer exists in the configured default database. The important abstract class AbstractTask still evaluates TaskRecordDao's data-quality check logic; it is recommended to remove it to keep this important abstract class clean.

    public void after() {
        if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
            // task recor flat : if true , start up qianfan
            if (TaskRecordDao.getTaskRecordFlag()
                    && TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
                AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(),
                        taskExecutionContext.getTaskParams());
                // replace placeholder
                Map<String, Property> paramsMap = ParamUtils.convert(
                        ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                        taskExecutionContext.getDefinedParams(),
                        params.getLocalParametersMap(),
                        CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                        taskExecutionContext.getScheduleTime());
                if (paramsMap != null && !paramsMap.isEmpty() && paramsMap.containsKey("v_proc_date")) {
                    String vProcDate = paramsMap.get("v_proc_date").getValue();
                    if (!StringUtils.isEmpty(vProcDate)) {
                        TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
                        logger.info("task record status : {}", taskRecordState);
                        if (taskRecordState == TaskRecordStatus.FAILURE) {
                            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
                        }
                    }
                }
            }
        } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
            setExitStatusCode(Constants.EXIT_CODE_KILL);
        } else {
            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
        }
    }
issue_url: https://github.com/apache/dolphinscheduler/issues/5487
pull_url: https://github.com/apache/dolphinscheduler/pull/5492
before_fix_sha: 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c
after_fix_sha: bc22ae7c91c9cbd7c971796ba3a45358c2f11864
report_datetime: 2021-05-17T09:46:25Z
language: java
commit_datetime: 2021-05-18T09:00:03Z
updated_file: dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/TaskRecordControllerTest.java
file_content:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.controller; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.junit.Assert; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.http.MediaType; import org.springframework.test.web.servlet.MvcResult; import org.springframework.util.LinkedMultiValueMap; import org.springframework.util.MultiValueMap; /** * task record controller test */ public class TaskRecordControllerTest extends AbstractControllerTest { private static final Logger logger = LoggerFactory.getLogger(TaskRecordControllerTest.class); @Test public void testQueryTaskRecordListPaging() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("taskName","taskName"); paramsMap.add("state","state"); paramsMap.add("sourceTable",""); paramsMap.add("destTable",""); paramsMap.add("taskDate",""); paramsMap.add("startDate","2019-12-16 00:00:00"); paramsMap.add("endDate","2019-12-17 00:00:00"); paramsMap.add("pageNo","1"); paramsMap.add("pageSize","30"); MvcResult mvcResult = mockMvc.perform(get("/projects/task-record/list-paging") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } @Test public void testQueryHistoryTaskRecordListPaging() throws Exception { MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("taskName","taskName"); paramsMap.add("state","state"); paramsMap.add("sourceTable",""); paramsMap.add("destTable",""); paramsMap.add("taskDate",""); paramsMap.add("startDate","2019-12-16 00:00:00"); paramsMap.add("endDate","2019-12-17 00:00:00"); paramsMap.add("pageNo","1"); paramsMap.add("pageSize","30"); MvcResult mvcResult = mockMvc.perform(get("/projects/task-record/history-list-paging") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); 
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); } }
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5487
title: [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class
body: DolphinScheduler has already removed the data-quality check feature, and, as the configuration files show, the settings for the database that data quality relied on have likewise been removed. Yet the code still contains TaskRecordDao's data-quality queries, and the eamp_hive_log_hd table referenced in SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s' clearly no longer exists in the configured default database. The important abstract class AbstractTask still evaluates TaskRecordDao's data-quality check logic; it is recommended to remove it to keep this important abstract class clean.

    public void after() {
        if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
            // task recor flat : if true , start up qianfan
            if (TaskRecordDao.getTaskRecordFlag()
                    && TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
                AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(),
                        taskExecutionContext.getTaskParams());
                // replace placeholder
                Map<String, Property> paramsMap = ParamUtils.convert(
                        ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                        taskExecutionContext.getDefinedParams(),
                        params.getLocalParametersMap(),
                        CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                        taskExecutionContext.getScheduleTime());
                if (paramsMap != null && !paramsMap.isEmpty() && paramsMap.containsKey("v_proc_date")) {
                    String vProcDate = paramsMap.get("v_proc_date").getValue();
                    if (!StringUtils.isEmpty(vProcDate)) {
                        TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
                        logger.info("task record status : {}", taskRecordState);
                        if (taskRecordState == TaskRecordStatus.FAILURE) {
                            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
                        }
                    }
                }
            }
        } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
            setExitStatusCode(Constants.EXIT_CODE_KILL);
        } else {
            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
        }
    }
issue_url: https://github.com/apache/dolphinscheduler/issues/5487
pull_url: https://github.com/apache/dolphinscheduler/pull/5492
before_fix_sha: 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c
after_fix_sha: bc22ae7c91c9cbd7c971796ba3a45358c2f11864
report_datetime: 2021-05-17T09:46:25Z
language: java
commit_datetime: 2021-05-18T09:00:03Z
updated_file: dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
file_content:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import java.util.regex.Pattern; /** * Constants */ public final class Constants { private Constants() { throw new UnsupportedOperationException("Construct Constants"); } /** * quartz config */ public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId"; public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon"; public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties"; public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class"; public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount"; public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons"; public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority"; public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class"; public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix"; public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered"; public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold"; public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval"; public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock"; public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource"; public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class"; /** * quartz config default value */ public static final String QUARTZ_TABLE_PREFIX = "QRTZ_"; public static final String QUARTZ_MISFIRETHRESHOLD = "60000"; public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000"; public static final String QUARTZ_DATASOURCE = "myDs"; public static final String QUARTZ_THREADCOUNT = "25"; public static final String QUARTZ_THREADPRIORITY = "5"; public static final String QUARTZ_INSTANCENAME = "DolphinScheduler"; public static final String QUARTZ_INSTANCEID = "AUTO"; public static final String 
QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true"; /** * common properties path */ public static final String COMMON_PROPERTIES_PATH = "/common.properties"; /** * fs.defaultFS */ public static final String FS_DEFAULTFS = "fs.defaultFS"; /** * fs s3a endpoint */ public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint"; /** * fs s3a access key */ public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key"; /** * fs s3a secret key */ public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key"; /** * yarn.resourcemanager.ha.rm.ids */ public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids"; public static final String YARN_RESOURCEMANAGER_HA_XX = "xx"; /** * yarn.application.status.address */ public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address"; /** * yarn.job.history.status.address */ public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address"; /** * hdfs configuration * hdfs.root.user */ public static final String HDFS_ROOT_USER = "hdfs.root.user"; /** * hdfs/s3 configuration * resource.upload.path */ public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path"; /** * data basedir path */ public static final String DATA_BASEDIR_PATH = "data.basedir.path"; /** * dolphinscheduler.env.path */ public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path"; /** * environment properties default path */ public static final String ENV_PATH = "env/dolphinscheduler_env.sh"; /** * python home */ public static final String PYTHON_HOME = "PYTHON_HOME"; /** * resource.view.suffixs */ public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs"; public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js"; /** * development.state */ public static final String DEVELOPMENT_STATE = "development.state"; public static final String DEVELOPMENT_STATE_DEFAULT_VALUE = "true"; /** * sudo enable */ public static final String SUDO_ENABLE = "sudo.enable"; /** * string true */ public static final String STRING_TRUE = "true"; /** * string false */ public static final String STRING_FALSE = "false"; /** * resource storage type */ public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type"; /** * MasterServer directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/nodes/master"; /** * WorkerServer directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/nodes/worker"; /** * all servers directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers"; /** * MasterServer lock directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters"; /** * MasterServer failover directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters"; /** * WorkerServer failover directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers"; /** * MasterServer startup failover runing and fault tolerance process */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters"; /** * comma , */ public static final String COMMA = ","; /** * slash / */ public static final String 
SLASH = "/"; /** * COLON : */ public static final String COLON = ":"; /** * SPACE " " */ public static final String SPACE = " "; /** * SINGLE_SLASH / */ public static final String SINGLE_SLASH = "/"; /** * DOUBLE_SLASH // */ public static final String DOUBLE_SLASH = "//"; /** * SINGLE_QUOTES "'" */ public static final String SINGLE_QUOTES = "'"; /** * DOUBLE_QUOTES "\"" */ public static final String DOUBLE_QUOTES = "\""; /** * SEMICOLON ; */ public static final String SEMICOLON = ";"; /** * EQUAL SIGN */ public static final String EQUAL_SIGN = "="; /** * AT SIGN */ public static final String AT_SIGN = "@"; public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg"; public static final String WORKER_RESERVED_MEMORY = "worker.reserved.memory"; public static final String MASTER_MAX_CPULOAD_AVG = "master.max.cpuload.avg"; public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory"; /** * date format of yyyy-MM-dd HH:mm:ss */ public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss"; /** * date format of yyyyMMddHHmmss */ public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss"; /** * date format of yyyyMMddHHmmssSSS */ public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS"; /** * http connect time out */ public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000; /** * http connect request time out */ public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000; /** * httpclient soceket time out */ public static final int SOCKET_TIMEOUT = 60 * 1000; /** * http header */ public static final String HTTP_HEADER_UNKNOWN = "unKnown"; /** * http X-Forwarded-For */ public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For"; /** * http X-Real-IP */ public static final String HTTP_X_REAL_IP = "X-Real-IP"; /** * UTF-8 */ public static final String UTF_8 = "UTF-8"; /** * user name regex */ public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$"); /** * email regex */ public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$"); /** * default display rows */ public static final int DEFAULT_DISPLAY_ROWS = 10; /** * read permission */ public static final int READ_PERMISSION = 2 * 1; /** * write permission */ public static final int WRITE_PERMISSION = 2 * 2; /** * execute permission */ public static final int EXECUTE_PERMISSION = 1; /** * default admin permission */ public static final int DEFAULT_ADMIN_PERMISSION = 7; /** * all permissions */ public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION; /** * max task timeout */ public static final int MAX_TASK_TIMEOUT = 24 * 3600; /** * master cpu load */ public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2; /** * worker cpu load */ public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2; /** * worker host weight */ public static final int DEFAULT_WORKER_HOST_WEIGHT = 100; /** * default log cache rows num,output when reach the number */ public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16; /** * log flush interval?output when reach the interval */ public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000; /** * time unit secong to minutes */ public static final int SEC_2_MINUTES_TIME_UNIT = 60; /*** * * rpc port */ public static final int RPC_PORT = 50051; /*** * alert rpc port */ public static final int ALERT_RPC_PORT = 50052; /** * 
forbid running task */ public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN"; /** * normal running task */ public static final String FLOWNODE_RUN_FLAG_NORMAL = "NORMAL"; /** * datasource configuration path */ public static final String DATASOURCE_PROPERTIES = "/datasource.properties"; public static final String TASK_RECORD_URL = "task.record.datasource.url"; public static final String TASK_RECORD_FLAG = "task.record.flag"; public static final String TASK_RECORD_USER = "task.record.datasource.username"; public static final String TASK_RECORD_PWD = "task.record.datasource.password"; public static final String DEFAULT = "Default"; public static final String USER = "user"; public static final String PASSWORD = "password"; public static final String XXXXXX = "******"; public static final String NULL = "NULL"; public static final String THREAD_NAME_MASTER_SERVER = "Master-Server"; public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server"; public static final String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd"; public static final String TASK_RECORD_TABLE_HISTORY_HIVE_LOG = "eamp_hive_hist_log_hd"; /** * command parameter keys */ public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId"; public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList"; public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId"; public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId"; public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0"; public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId"; public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId"; public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList"; public static final String CMD_PARAM_START_PARAMS = "StartParams"; public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams"; /** * complement data start date */ public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate"; /** * complement data end date */ public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate"; /** * hadoop configuration */ public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE"; public static final String HADOOP_RM_STATE_STANDBY = "STANDBY"; public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port"; /** * data source config */ public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name"; public static final String SPRING_DATASOURCE_URL = "spring.datasource.url"; public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username"; public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password"; public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout"; public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize"; public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle"; public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive"; public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait"; public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis"; public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = 
"spring.datasource.timeBetweenConnectErrorMillis"; public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis"; public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery"; public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle"; public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow"; public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn"; public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements"; public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit"; public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive"; public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize"; public static final String DEVELOPMENT = "development"; public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties"; /** * sleep time */ public static final int SLEEP_TIME_MILLIS = 1000; /** * heartbeat for zk info length */ public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10; public static final int HEARTBEAT_WITH_WEIGHT_FOR_ZOOKEEPER_INFO_LENGTH = 11; /** * jar */ public static final String JAR = "jar"; /** * hadoop */ public static final String HADOOP = "hadoop"; /** * -D <property>=<value> */ public static final String D = "-D"; /** * -D mapreduce.job.name=name */ public static final String MR_NAME = "mapreduce.job.name"; /** * -D mapreduce.job.queuename=queuename */ public static final String MR_QUEUE = "mapreduce.job.queuename"; /** * spark params constant */ public static final String MASTER = "--master"; public static final String DEPLOY_MODE = "--deploy-mode"; /** * --class CLASS_NAME */ public static final String MAIN_CLASS = "--class"; /** * --driver-cores NUM */ public static final String DRIVER_CORES = "--driver-cores"; /** * --driver-memory MEM */ public static final String DRIVER_MEMORY = "--driver-memory"; /** * --num-executors NUM */ public static final String NUM_EXECUTORS = "--num-executors"; /** * --executor-cores NUM */ public static final String EXECUTOR_CORES = "--executor-cores"; /** * --executor-memory MEM */ public static final String EXECUTOR_MEMORY = "--executor-memory"; /** * --name NAME */ public static final String SPARK_NAME = "--name"; /** * --queue QUEUE */ public static final String SPARK_QUEUE = "--queue"; /** * exit code success */ public static final int EXIT_CODE_SUCCESS = 0; /** * exit code kill */ public static final int EXIT_CODE_KILL = 137; /** * exit code failure */ public static final int EXIT_CODE_FAILURE = -1; /** * process or task definition failure */ public static final int DEFINITION_FAILURE = -1; /** * date format of yyyyMMdd */ public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd"; /** * date format of yyyyMMddHHmmss */ public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss"; /** * system date(yyyyMMddHHmmss) */ public static final String PARAMETER_DATETIME = "system.datetime"; /** * system date(yyyymmdd) today */ public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate"; /** * system date(yyyymmdd) yesterday */ public static final String PARAMETER_BUSINESS_DATE = "system.biz.date"; /** * ACCEPTED */ public static final String ACCEPTED = "ACCEPTED"; /** * 
SUCCEEDED */ public static final String SUCCEEDED = "SUCCEEDED"; /** * NEW */ public static final String NEW = "NEW"; /** * NEW_SAVING */ public static final String NEW_SAVING = "NEW_SAVING"; /** * SUBMITTED */ public static final String SUBMITTED = "SUBMITTED"; /** * FAILED */ public static final String FAILED = "FAILED"; /** * KILLED */ public static final String KILLED = "KILLED"; /** * RUNNING */ public static final String RUNNING = "RUNNING"; /** * underline "_" */ public static final String UNDERLINE = "_"; /** * quartz job prifix */ public static final String QUARTZ_JOB_PRIFIX = "job"; /** * quartz job group prifix */ public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup"; /** * projectId */ public static final String PROJECT_ID = "projectId"; /** * processId */ public static final String SCHEDULE_ID = "scheduleId"; /** * schedule */ public static final String SCHEDULE = "schedule"; /** * application regex */ public static final String APPLICATION_REGEX = "application_\\d+_\\d+"; public static final String PID = OSUtils.isWindows() ? "handle" : "pid"; /** * month_begin */ public static final String MONTH_BEGIN = "month_begin"; /** * add_months */ public static final String ADD_MONTHS = "add_months"; /** * month_end */ public static final String MONTH_END = "month_end"; /** * week_begin */ public static final String WEEK_BEGIN = "week_begin"; /** * week_end */ public static final String WEEK_END = "week_end"; /** * timestamp */ public static final String TIMESTAMP = "timestamp"; public static final char SUBTRACT_CHAR = '-'; public static final char ADD_CHAR = '+'; public static final char MULTIPLY_CHAR = '*'; public static final char DIVISION_CHAR = '/'; public static final char LEFT_BRACE_CHAR = '('; public static final char RIGHT_BRACE_CHAR = ')'; public static final String ADD_STRING = "+"; public static final String MULTIPLY_STRING = "*"; public static final String DIVISION_STRING = "/"; public static final String LEFT_BRACE_STRING = "("; public static final char P = 'P'; public static final char N = 'N'; public static final String SUBTRACT_STRING = "-"; public static final String GLOBAL_PARAMS = "globalParams"; public static final String LOCAL_PARAMS = "localParams"; public static final String LOCAL_PARAMS_LIST = "localParamsList"; public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId"; public static final String PROCESS_INSTANCE_STATE = "processInstanceState"; public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance"; public static final String CONDITION_RESULT = "conditionResult"; public static final String DEPENDENCE = "dependence"; public static final String TASK_TYPE = "taskType"; public static final String TASK_LIST = "taskList"; public static final String RWXR_XR_X = "rwxr-xr-x"; public static final String QUEUE = "queue"; public static final String QUEUE_NAME = "queueName"; public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0; public static final int LOG_QUERY_LIMIT = 4096; /** * master/worker server use for zk */ public static final String MASTER_TYPE = "master"; public static final String WORKER_TYPE = "worker"; public static final String DELETE_ZK_OP = "delete"; public static final String ADD_ZK_OP = "add"; public static final String ALIAS = "alias"; public static final String CONTENT = "content"; public static final String DEPENDENT_SPLIT = ":||"; public static final String DEPENDENT_ALL = "ALL"; /** * preview schedule execute count */ public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5; /** * kerberos */ 
public static final String KERBEROS = "kerberos"; /** * kerberos expire time */ public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time"; /** * java.security.krb5.conf */ public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf"; /** * java.security.krb5.conf.path */ public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state"; /** * com.amazonaws.services.s3.enableV4 */ public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4"; /** * loginUserFromKeytab user */ public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username"; /** * default worker group id */ public static final int DEFAULT_WORKER_ID = -1; /** * loginUserFromKeytab path */ public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path"; /** * task log info format */ public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s"; /** * hive conf */ public static final String HIVE_CONF = "hiveconf:"; /** * flink */ public static final String FLINK_YARN_CLUSTER = "yarn-cluster"; public static final String FLINK_RUN_MODE = "-m"; public static final String FLINK_YARN_SLOT = "-ys"; public static final String FLINK_APP_NAME = "-ynm"; public static final String FLINK_QUEUE = "-yqu"; public static final String FLINK_TASK_MANAGE = "-yn"; public static final String FLINK_JOB_MANAGE_MEM = "-yjm"; public static final String FLINK_TASK_MANAGE_MEM = "-ytm"; public static final String FLINK_MAIN_CLASS = "-c"; public static final String FLINK_PARALLELISM = "-p"; public static final String FLINK_SHUTDOWN_ON_ATTACHED_EXIT = "-sae"; public static final int[] NOT_TERMINATED_STATES = new int[] { ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal(), ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(), ExecutionStatus.WAITTING_THREAD.ordinal(), ExecutionStatus.WAITTING_DEPEND.ordinal() }; /** * status */ public static final String STATUS = "status"; /** * message */ public static final String MSG = "msg"; /** * data total */ public static final String COUNT = "count"; /** * page size */ public static final String PAGE_SIZE = "pageSize"; /** * current page no */ public static final String PAGE_NUMBER = "pageNo"; /** * */ public static final String DATA_LIST = "data"; public static final String TOTAL_LIST = "totalList"; public static final String CURRENT_PAGE = "currentPage"; public static final String TOTAL_PAGE = "totalPage"; public static final String TOTAL = "total"; /** * workflow */ public static final String WORKFLOW_LIST = "workFlowList"; public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList"; /** * session user */ public static final String SESSION_USER = "session.user"; public static final String SESSION_ID = "sessionId"; public static final String PASSWORD_DEFAULT = "******"; /** * locale */ public static final String LOCALE_LANGUAGE = "language"; /** * driver */ public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver"; public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver"; public static final String 
ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver"; public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver"; public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver"; public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver"; public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver"; public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver"; /** * database type */ public static final String MYSQL = "MYSQL"; public static final String POSTGRESQL = "POSTGRESQL"; public static final String HIVE = "HIVE"; public static final String SPARK = "SPARK"; public static final String CLICKHOUSE = "CLICKHOUSE"; public static final String ORACLE = "ORACLE"; public static final String SQLSERVER = "SQLSERVER"; public static final String DB2 = "DB2"; public static final String PRESTO = "PRESTO"; /** * jdbc url */ public static final String JDBC_MYSQL = "jdbc:mysql://"; public static final String JDBC_POSTGRESQL = "jdbc:postgresql://"; public static final String JDBC_HIVE_2 = "jdbc:hive2://"; public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://"; public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@"; public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//"; public static final String JDBC_SQLSERVER = "jdbc:sqlserver://"; public static final String JDBC_DB2 = "jdbc:db2://"; public static final String JDBC_PRESTO = "jdbc:presto://"; public static final String ADDRESS = "address"; public static final String DATABASE = "database"; public static final String JDBC_URL = "jdbcUrl"; public static final String PRINCIPAL = "principal"; public static final String OTHER = "other"; public static final String ORACLE_DB_CONNECT_TYPE = "connectType"; public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf"; public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername"; public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath"; /** * session timeout */ public static final int SESSION_TIME_OUT = 7200; public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024; public static final String UDF = "UDF"; public static final String CLASS = "class"; public static final String RECEIVERS = "receivers"; public static final String RECEIVERS_CC = "receiversCc"; /** * dataSource sensitive param */ public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))"; /** * default worker group */ public static final String DEFAULT_WORKER_GROUP = "default"; public static final Integer TASK_INFO_LENGTH = 5; /** * new * schedule time */ public static final String PARAMETER_SHECDULE_TIME = "schedule.time"; /** * authorize writable perm */ public static final int AUTHORIZE_WRITABLE_PERM = 7; /** * authorize readable perm */ public static final int AUTHORIZE_READABLE_PERM = 4; /** * plugin configurations */ public static final String PLUGIN_JAR_SUFFIX = ".jar"; public static final int NORMAL_NODE_STATUS = 0; public static final int ABNORMAL_NODE_STATUS = 1; public static final String START_TIME = "start time"; public static final String END_TIME = "end time"; public static final String START_END_DATE = "startDate,endDate"; /** * system line separator */ public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator"); /** * net system properties */ public static final String DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE = 
"dolphin.scheduler.network.interface.preferred"; public static final String EXCEL_SUFFIX_XLS = ".xls"; /** * datasource encryption salt */ public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*"; public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable"; public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt"; /** * Network IP gets priority, default inner outer */ public static final String NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy"; /** * exec shell scripts */ public static final String SH = "sh"; /** * pstree, get pud and sub pid */ public static final String PSTREE = "pstree"; /** * snow flake, data center id, this id must be greater than 0 and less than 32 */ public static final String SNOW_FLAKE_DATA_CENTER_ID = "data.center.id"; /** * docker & kubernetes */ public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER")); public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT")); }
status: closed
repo_name: apache/dolphinscheduler
repo_url: https://github.com/apache/dolphinscheduler
issue_id: 5487
title: [Improvement][Task] Remove TaskRecordDao And simply the after() in the AbstractTask class
body: DolphinScheduler has already removed the data-quality check feature, and, as the configuration files show, the settings for the database that data quality relied on have likewise been removed. Yet the code still contains TaskRecordDao's data-quality queries, and the eamp_hive_log_hd table referenced in SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s' clearly no longer exists in the configured default database. The important abstract class AbstractTask still evaluates TaskRecordDao's data-quality check logic; it is recommended to remove it to keep this important abstract class clean.

    public void after() {
        if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
            // task recor flat : if true , start up qianfan
            if (TaskRecordDao.getTaskRecordFlag()
                    && TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
                AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(),
                        taskExecutionContext.getTaskParams());
                // replace placeholder
                Map<String, Property> paramsMap = ParamUtils.convert(
                        ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                        taskExecutionContext.getDefinedParams(),
                        params.getLocalParametersMap(),
                        CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                        taskExecutionContext.getScheduleTime());
                if (paramsMap != null && !paramsMap.isEmpty() && paramsMap.containsKey("v_proc_date")) {
                    String vProcDate = paramsMap.get("v_proc_date").getValue();
                    if (!StringUtils.isEmpty(vProcDate)) {
                        TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
                        logger.info("task record status : {}", taskRecordState);
                        if (taskRecordState == TaskRecordStatus.FAILURE) {
                            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
                        }
                    }
                }
            }
        } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
            setExitStatusCode(Constants.EXIT_CODE_KILL);
        } else {
            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
        }
    }
issue_url: https://github.com/apache/dolphinscheduler/issues/5487
pull_url: https://github.com/apache/dolphinscheduler/pull/5492
before_fix_sha: 018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c
after_fix_sha: bc22ae7c91c9cbd7c971796ba3a45358c2f11864
report_datetime: 2021-05-17T09:46:25Z
language: java
commit_datetime: 2021-05-18T09:00:03Z
updated_file: dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/TaskRecordDao.java
file_content:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao; import static org.apache.dolphinscheduler.common.Constants.DATASOURCE_PROPERTIES; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.TaskRecordStatus; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.ConnectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.entity.TaskRecord; import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * task record dao */ public class TaskRecordDao { private static Logger logger = LoggerFactory.getLogger(TaskRecordDao.class.getName()); static { PropertyUtils.loadPropertyFile(DATASOURCE_PROPERTIES); } /** * get task record flag * * @return whether startup taskrecord */ public static boolean getTaskRecordFlag() { return PropertyUtils.getBoolean(Constants.TASK_RECORD_FLAG, false); } /** * create connection * * @return connection */ private static Connection getConn() { if (!getTaskRecordFlag()) { return null; } String driver = "com.mysql.jdbc.Driver"; String url = PropertyUtils.getString(Constants.TASK_RECORD_URL); String username = PropertyUtils.getString(Constants.TASK_RECORD_USER); String password = PropertyUtils.getString(Constants.TASK_RECORD_PWD); Connection conn = null; try { //classLoader,load driver Class.forName(driver); conn = DriverManager.getConnection(url, username, password); } catch (ClassNotFoundException e) { logger.error("Class not found Exception ", e); } catch (SQLException e) { logger.error("SQL Exception ", e); } return conn; } /** * generate where sql string * * @param filterMap filterMap * @return sql string */ private static String getWhereString(Map<String, String> filterMap) { if (filterMap.size() == 0) { return ""; } String result = " where 1=1 "; Object taskName = filterMap.get("taskName"); if (taskName != null && StringUtils.isNotEmpty(taskName.toString())) { result += " and PROC_NAME like concat('%', '" + taskName.toString() + "', '%') "; } Object taskDate = filterMap.get("taskDate"); if (taskDate != null && StringUtils.isNotEmpty(taskDate.toString())) { result += " and PROC_DATE='" + taskDate.toString() + "'"; } Object state = filterMap.get("state"); if (state != null && StringUtils.isNotEmpty(state.toString())) { result += " and NOTE='" + state.toString() + "'"; } Object sourceTable = 
filterMap.get("sourceTable"); if (sourceTable != null && StringUtils.isNotEmpty(sourceTable.toString())) { result += " and SOURCE_TAB like concat('%', '" + sourceTable.toString() + "', '%')"; } Object targetTable = filterMap.get("targetTable"); if (sourceTable != null && StringUtils.isNotEmpty(targetTable.toString())) { result += " and TARGET_TAB like concat('%', '" + targetTable.toString() + "', '%') "; } Object start = filterMap.get("startTime"); if (start != null && StringUtils.isNotEmpty(start.toString())) { result += " and STARTDATE>='" + start.toString() + "'"; } Object end = filterMap.get("endTime"); if (end != null && StringUtils.isNotEmpty(end.toString())) { result += " and ENDDATE>='" + end.toString() + "'"; } return result; } /** * count task record * * @param filterMap filterMap * @param table table * @return task record count */ public static int countTaskRecord(Map<String, String> filterMap, String table) { int count = 0; Connection conn = null; PreparedStatement pstmt = null; ResultSet rs = null; try { conn = getConn(); if (conn == null) { return count; } String sql = String.format("select count(1) as count from %s", table); sql += getWhereString(filterMap); pstmt = conn.prepareStatement(sql); rs = pstmt.executeQuery(); while (rs.next()) { count = rs.getInt("count"); break; } } catch (SQLException e) { logger.error("Exception ", e); } finally { ConnectionUtils.releaseResource(rs, pstmt, conn); } return count; } /** * query task record by filter map paging * * @param filterMap filterMap * @param table table * @return task record list */ public static List<TaskRecord> queryAllTaskRecord(Map<String, String> filterMap, String table) { String sql = String.format("select * from %s", table); sql += getWhereString(filterMap); int offset = Integer.parseInt(filterMap.get("offset")); int pageSize = Integer.parseInt(filterMap.get("pageSize")); sql += String.format(" order by STARTDATE desc limit %d,%d", offset, pageSize); List<TaskRecord> recordList = new ArrayList<>(); try { recordList = getQueryResult(sql); } catch (Exception e) { logger.error("Exception ", e); } return recordList; } /** * convert result set to task record * * @param resultSet resultSet * @return task record * @throws SQLException if error throws SQLException */ private static TaskRecord convertToTaskRecord(ResultSet resultSet) throws SQLException { TaskRecord taskRecord = new TaskRecord(); taskRecord.setId(resultSet.getInt("ID")); taskRecord.setProcId(resultSet.getInt("PROC_ID")); taskRecord.setProcName(resultSet.getString("PROC_NAME")); taskRecord.setProcDate(resultSet.getString("PROC_DATE")); taskRecord.setStartTime(DateUtils.stringToDate(resultSet.getString("STARTDATE"))); taskRecord.setEndTime(DateUtils.stringToDate(resultSet.getString("ENDDATE"))); taskRecord.setResult(resultSet.getString("RESULT")); taskRecord.setDuration(resultSet.getInt("DURATION")); taskRecord.setNote(resultSet.getString("NOTE")); taskRecord.setSchema(resultSet.getString("SCHEMA")); taskRecord.setJobId(resultSet.getString("JOB_ID")); taskRecord.setSourceTab(resultSet.getString("SOURCE_TAB")); taskRecord.setSourceRowCount(resultSet.getLong("SOURCE_ROW_COUNT")); taskRecord.setTargetTab(resultSet.getString("TARGET_TAB")); taskRecord.setTargetRowCount(resultSet.getLong("TARGET_ROW_COUNT")); taskRecord.setErrorCode(resultSet.getString("ERROR_CODE")); return taskRecord; } /** * query task list by select sql * * @param selectSql select sql * @return task record list */ private static List<TaskRecord> getQueryResult(String selectSql) { 
List<TaskRecord> recordList = new ArrayList<>(); Connection conn = null; PreparedStatement pstmt = null; ResultSet rs = null; try { conn = getConn(); if (conn == null) { return recordList; } pstmt = conn.prepareStatement(selectSql); rs = pstmt.executeQuery(); while (rs.next()) { TaskRecord taskRecord = convertToTaskRecord(rs); recordList.add(taskRecord); } } catch (SQLException e) { logger.error("Exception ", e); } finally { ConnectionUtils.releaseResource(rs, pstmt, conn); } return recordList; } /** * according to procname and procdate query task record * * @param procName procName * @param procDate procDate * @return task record status */ public static TaskRecordStatus getTaskRecordState(String procName, String procDate) { String sql = String.format("SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'" , procName, procDate + "%"); List<TaskRecord> taskRecordList = getQueryResult(sql); // contains no record and sql exception if (CollectionUtils.isEmpty(taskRecordList)) { // exception return TaskRecordStatus.EXCEPTION; } else if (taskRecordList.size() > 1) { return TaskRecordStatus.EXCEPTION; } else { TaskRecord taskRecord = taskRecordList.get(0); if (taskRecord == null) { return TaskRecordStatus.EXCEPTION; } Long targetRowCount = taskRecord.getTargetRowCount(); if (targetRowCount <= 0) { return TaskRecordStatus.FAILURE; } else { return TaskRecordStatus.SUCCESS; } } } }
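The DAO above assembles its WHERE clause by concatenating raw filter values into the SQL string (see getWhereString), and getTaskRecordState interpolates procName and procDate directly, which is part of why removing it wholesale is attractive. Purely as an illustration of the parameterized alternative — the class and helper names below are hypothetical, and only two of the filters are shown for brevity — a bound-parameter version of the count query could look like this:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.SQLException;

    public final class TaskRecordCountSketch {

        // Hypothetical helper: counts rows in the given task-record table, binding the
        // optional filters as parameters instead of concatenating their values into SQL.
        static int countTaskRecords(Connection conn, String table,
                                    String taskName, String taskDate) throws SQLException {
            // Table names cannot be bound as parameters; the caller must pass a trusted
            // constant such as Constants.TASK_RECORD_TABLE_HIVE_LOG.
            StringBuilder sql = new StringBuilder("select count(1) as count from " + table + " where 1=1");
            if (taskName != null && !taskName.isEmpty()) {
                sql.append(" and PROC_NAME like concat('%', ?, '%')");
            }
            if (taskDate != null && !taskDate.isEmpty()) {
                sql.append(" and PROC_DATE = ?");
            }
            try (PreparedStatement pstmt = conn.prepareStatement(sql.toString())) {
                int idx = 1;
                if (taskName != null && !taskName.isEmpty()) {
                    pstmt.setString(idx++, taskName);
                }
                if (taskDate != null && !taskDate.isEmpty()) {
                    pstmt.setString(idx++, taskDate);
                }
                try (ResultSet rs = pstmt.executeQuery()) {
                    return rs.next() ? rs.getInt("count") : 0;
                }
            }
        }
    }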
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,487
[Improvement][Task] Remove TaskRecordDao and simplify the after() method in the AbstractTask class
DolphinScheduler has already removed the data quality check, and the related data-quality database settings have likewise been removed from the configuration files. However, the code still contains TaskRecordDao's data quality queries, and the `eamp_hive_log_hd` table referenced in `SELECT * FROM eamp_hive_log_hd WHERE PROC_NAME='%s' and PROC_DATE like '%s'` clearly no longer exists in the configured default database. Yet the important abstract class AbstractTask still carries the TaskRecordDao data quality check logic; it should be removed to keep this core abstract class clean.

public void after() {
    if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
        // task record flag : if true , start the task record check
        if (TaskRecordDao.getTaskRecordFlag() && TaskType.typeIsNormalTask(taskExecutionContext.getTaskType())) {
            AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());

            // replace placeholder
            Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                    taskExecutionContext.getDefinedParams(),
                    params.getLocalParametersMap(),
                    CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                    taskExecutionContext.getScheduleTime());
            if (paramsMap != null && !paramsMap.isEmpty() && paramsMap.containsKey("v_proc_date")) {
                String vProcDate = paramsMap.get("v_proc_date").getValue();
                if (!StringUtils.isEmpty(vProcDate)) {
                    TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
                    logger.info("task record status : {}", taskRecordState);
                    if (taskRecordState == TaskRecordStatus.FAILURE) {
                        setExitStatusCode(Constants.EXIT_CODE_FAILURE);
                    }
                }
            }
        }
    } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
        setExitStatusCode(Constants.EXIT_CODE_KILL);
    } else {
        setExitStatusCode(Constants.EXIT_CODE_FAILURE);
    }
}
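For reference, a minimal sketch of what `after()` could look like once the TaskRecordDao branch is removed, as the issue proposes; the exit-code handling is taken from the original method above and the data-quality block is simply dropped (a sketch, not necessarily the exact code of the linked PR):

```java
// Sketch only: after() with the data-quality (TaskRecordDao) branch removed,
// keeping just the exit-code normalization from the original method.
public void after() {
    if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
        // success: nothing left to do once the task record check is gone
    } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
        setExitStatusCode(Constants.EXIT_CODE_KILL);
    } else {
        setExitStatusCode(Constants.EXIT_CODE_FAILURE);
    }
}
```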
https://github.com/apache/dolphinscheduler/issues/5487
https://github.com/apache/dolphinscheduler/pull/5492
018f5c89f6ee1dbb8259a6036c4beb1874cd3f5c
bc22ae7c91c9cbd7c971796ba3a45358c2f11864
2021-05-17T09:46:25Z
java
2021-05-18T09:00:03Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractTask.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker.task;

import static ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskRecordStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.TaskRecordDao;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ParamUtils;

import java.util.List;
import java.util.Map;

import org.slf4j.Logger;

/**
 * executive task
 */
public abstract class AbstractTask {

    /**
     * varPool string
     */
    protected String varPool;

    /**
     * taskExecutionContext
     **/
    TaskExecutionContext taskExecutionContext;

    /**
     * log record
     */
    protected Logger logger;

    /**
     * SHELL process pid
     */
    protected int processId;

    /**
     * SHELL result string
     */
    protected String resultString;

    /**
     * other resource manager appId , for example : YARN etc
     */
    protected String appIds;

    /**
     * cancel
     */
    protected volatile boolean cancel = false;

    /**
     * exit code
     */
    protected volatile int exitStatusCode = -1;

    /**
     * constructor
     *
     * @param taskExecutionContext taskExecutionContext
     * @param logger logger
     */
    protected AbstractTask(TaskExecutionContext taskExecutionContext, Logger logger) {
        this.taskExecutionContext = taskExecutionContext;
        this.logger = logger;
    }

    /**
     * init task
     *
     * @throws Exception exception
     */
    public void init() throws Exception {
    }

    /**
     * task handle
     *
     * @throws Exception exception
     */
    public abstract void handle() throws Exception;

    /**
     * cancel application
     *
     * @param status status
     * @throws Exception exception
     */
    public void cancelApplication(boolean status) throws Exception {
        this.cancel = status;
    }

    /**
     * log handle
     *
     * @param logs log list
     */
    public void logHandle(List<String> logs) {
        // note that the "new line" is added here to facilitate log parsing
        if (logs.contains(FINALIZE_SESSION_MARKER.toString())) {
            logger.info(FINALIZE_SESSION_MARKER, FINALIZE_SESSION_MARKER.toString());
        } else {
            logger.info(" -> {}", String.join("\n\t", logs));
        }
    }

    public void setVarPool(String varPool) {
        this.varPool = varPool;
    }

    public String getVarPool() {
        return varPool;
    }

    /**
     * get exit status code
     *
     * @return exit status code
     */
    public int getExitStatusCode() {
        return exitStatusCode;
    }

    public void setExitStatusCode(int exitStatusCode) {
        this.exitStatusCode = exitStatusCode;
    }

    public String getAppIds() {
        return appIds;
    }

    public void setAppIds(String appIds) {
        this.appIds = appIds;
    }

    public int getProcessId() {
        return processId;
    }

    public void setProcessId(int processId) {
        this.processId = processId;
    }

    public String getResultString() {
        return resultString;
    }

    public void setResultString(String resultString) {
        this.resultString = resultString;
    }

    /**
     * get task parameters
     *
     * @return AbstractParameters
     */
    public abstract AbstractParameters getParameters();

    /**
     * result processing
     */
    public void after() {
        if (getExitStatusCode() == Constants.EXIT_CODE_SUCCESS) {
            // task record flag : if true , start the task record check
            if (TaskRecordDao.getTaskRecordFlag() && typeIsNormalTask(taskExecutionContext.getTaskType())) {
                AbstractParameters params = TaskParametersUtils.getParameters(taskExecutionContext.getTaskType(), taskExecutionContext.getTaskParams());

                // replace placeholder
                Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                        taskExecutionContext.getDefinedParams(),
                        params.getLocalParametersMap(),
                        CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                        taskExecutionContext.getScheduleTime());
                if (paramsMap != null && !paramsMap.isEmpty()
                        && paramsMap.containsKey("v_proc_date")) {
                    String vProcDate = paramsMap.get("v_proc_date").getValue();
                    if (!StringUtils.isEmpty(vProcDate)) {
                        TaskRecordStatus taskRecordState = TaskRecordDao.getTaskRecordState(taskExecutionContext.getTaskName(), vProcDate);
                        logger.info("task record status : {}", taskRecordState);
                        if (taskRecordState == TaskRecordStatus.FAILURE) {
                            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
                        }
                    }
                }
            }
        } else if (getExitStatusCode() == Constants.EXIT_CODE_KILL) {
            setExitStatusCode(Constants.EXIT_CODE_KILL);
        } else {
            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
        }
    }

    private boolean typeIsNormalTask(String taskType) {
        return !(TaskType.SUB_PROCESS.getDesc().equalsIgnoreCase(taskType) || TaskType.DEPENDENT.getDesc().equalsIgnoreCase(taskType));
    }

    /**
     * get exit status according to exitCode
     *
     * @return exit status
     */
    public ExecutionStatus getExitStatus() {
        ExecutionStatus status;
        switch (getExitStatusCode()) {
            case Constants.EXIT_CODE_SUCCESS:
                status = ExecutionStatus.SUCCESS;
                break;
            case Constants.EXIT_CODE_KILL:
                status = ExecutionStatus.KILL;
                break;
            default:
                status = ExecutionStatus.FAILURE;
                break;
        }
        return status;
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,527
[Improvement][api-server] failed find any kerberos
kerberos.expire.time has already been set to 1, but every few days the resources in the resource center can no longer be downloaded and the error "failed find any kerberos" is reported; they only become usable again after a restart. ![1908dee0e78a739c5a20284aa524c27](https://user-images.githubusercontent.com/18589655/118931587-bde1da80-b936-11eb-94a8-1f1f86c7a003.jpg) It is suggested to improve this so that it remains effective long-term and avoids restarts.
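For context, the `HadoopUtils` class in this record keeps its single instance in a Guava `LoadingCache` whose entries expire after `kerberos.expire.time` hours; once an entry expires, the next lookup rebuilds the instance and performs a fresh Kerberos login. A small self-contained sketch of that refresh pattern (the class and field names here are illustrative, not DolphinScheduler APIs):

```java
import java.util.concurrent.TimeUnit;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

// Illustrative sketch: a cached singleton that is rebuilt after expiry,
// the same pattern HadoopUtils uses to force a periodic Kerberos re-login.
public class ExpiringInstanceDemo {

    private static final LoadingCache<String, ExpiringInstanceDemo> CACHE = CacheBuilder.newBuilder()
            .expireAfterWrite(1, TimeUnit.HOURS) // e.g. kerberos.expire.time
            .build(new CacheLoader<String, ExpiringInstanceDemo>() {
                @Override
                public ExpiringInstanceDemo load(String key) {
                    // in HadoopUtils this is where loginUserFromKeytab(...) runs again
                    return new ExpiringInstanceDemo();
                }
            });

    public static ExpiringInstanceDemo getInstance() {
        // after expiry, this call transparently builds a fresh instance
        return CACHE.getUnchecked("KEY");
    }
}
```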
https://github.com/apache/dolphinscheduler/issues/5527
https://github.com/apache/dolphinscheduler/pull/5533
46660b58ed82d76904f26c5b869f3aa96e50727a
9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86
2021-05-20T06:44:32Z
java
2021-05-23T15:43:59Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/CommonUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.utils;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResUploadType;

import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * common utils
 */
public class CommonUtils {

    private static final Logger logger = LoggerFactory.getLogger(CommonUtils.class);

    private static final Base64 BASE64 = new Base64();

    private CommonUtils() {
        throw new UnsupportedOperationException("Construct CommonUtils");
    }

    /**
     * @return get the path of system environment variables
     */
    public static String getSystemEnvPath() {
        String envPath = PropertyUtils.getString(Constants.DOLPHINSCHEDULER_ENV_PATH);
        if (StringUtils.isEmpty(envPath)) {
            URL envDefaultPath = CommonUtils.class.getClassLoader().getResource(Constants.ENV_PATH);

            if (envDefaultPath != null) {
                envPath = envDefaultPath.getPath();
                logger.debug("env path :{}", envPath);
            } else {
                envPath = "/etc/profile";
            }
        }

        return envPath;
    }

    /**
     * @return is develop mode
     */
    public static boolean isDevelopMode() {
        return PropertyUtils.getBoolean(Constants.DEVELOPMENT_STATE, true);
    }

    /**
     * @return sudo enable
     */
    public static boolean isSudoEnable() {
        return PropertyUtils.getBoolean(Constants.SUDO_ENABLE, true);
    }

    /**
     * if upload resource is HDFS and kerberos startup is true , else false
     *
     * @return true if upload resource is HDFS and kerberos startup
     */
    public static boolean getKerberosStartupState() {
        String resUploadStartupType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
        ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
        Boolean kerberosStartupState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false);
        return resUploadType == ResUploadType.HDFS && kerberosStartupState;
    }

    /**
     * load kerberos configuration
     *
     * @throws Exception errors
     */
    public static void loadKerberosConf() throws Exception {
        loadKerberosConf(PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH),
                PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
                PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
    }

    /**
     * load kerberos configuration
     *
     * @param javaSecurityKrb5Conf javaSecurityKrb5Conf
     * @param loginUserKeytabUsername loginUserKeytabUsername
     * @param loginUserKeytabPath loginUserKeytabPath
     * @throws IOException errors
     */
    public static void loadKerberosConf(String javaSecurityKrb5Conf, String loginUserKeytabUsername, String loginUserKeytabPath) throws IOException {
        if (CommonUtils.getKerberosStartupState()) {
            System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF, StringUtils.defaultIfBlank(javaSecurityKrb5Conf, PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH)));
            Configuration configuration = new Configuration();
            configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, Constants.KERBEROS);
            UserGroupInformation.setConfiguration(configuration);
            UserGroupInformation.loginUserFromKeytab(StringUtils.defaultIfBlank(loginUserKeytabUsername, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME)),
                    StringUtils.defaultIfBlank(loginUserKeytabPath, PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH)));
        }
    }

    /**
     * encode password
     */
    public static String encodePassword(String password) {
        if (StringUtils.isEmpty(password)) {
            return StringUtils.EMPTY;
        }
        // if encryption is not turned on, return directly
        boolean encryptionEnable = PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE, false);
        if (!encryptionEnable) {
            return password;
        }

        // Using Base64 + salt to process password
        String salt = PropertyUtils.getString(Constants.DATASOURCE_ENCRYPTION_SALT, Constants.DATASOURCE_ENCRYPTION_SALT_DEFAULT);
        String passwordWithSalt = salt + new String(BASE64.encode(password.getBytes(StandardCharsets.UTF_8)));
        return new String(BASE64.encode(passwordWithSalt.getBytes(StandardCharsets.UTF_8)));
    }

    /**
     * decode password
     */
    public static String decodePassword(String password) {
        if (StringUtils.isEmpty(password)) {
            return StringUtils.EMPTY;
        }

        // if encryption is not turned on, return directly
        boolean encryptionEnable = PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE, false);
        if (!encryptionEnable) {
            return password;
        }

        // Using Base64 + salt to process password
        String salt = PropertyUtils.getString(Constants.DATASOURCE_ENCRYPTION_SALT, Constants.DATASOURCE_ENCRYPTION_SALT_DEFAULT);
        String passwordWithSalt = new String(BASE64.decode(password), StandardCharsets.UTF_8);
        if (!passwordWithSalt.startsWith(salt)) {
            logger.warn("There is a password and salt mismatch: {} ", password);
            return password;
        }
        return new String(BASE64.decode(passwordWithSalt.substring(salt.length())), StandardCharsets.UTF_8);
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,527
[Improvement][api-server] failed find any kerberos
kerberos.expire.time has already been set to 1, but every few days the resources in the resource center can no longer be downloaded and the error "failed find any kerberos" is reported; they only become usable again after a restart. ![1908dee0e78a739c5a20284aa524c27](https://user-images.githubusercontent.com/18589655/118931587-bde1da80-b936-11eb-94a8-1f1f86c7a003.jpg) It is suggested to improve this so that it remains effective long-term and avoids restarts.
https://github.com/apache/dolphinscheduler/issues/5527
https://github.com/apache/dolphinscheduler/pull/5533
46660b58ed82d76904f26c5b869f3aa96e50727a
9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86
2021-05-20T06:44:32Z
java
2021-05-23T15:43:59Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.utils;

import static org.apache.dolphinscheduler.common.Constants.RESOURCE_UPLOAD_PATH;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.exception.BaseException;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;

import java.io.BufferedReader;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

/**
 * hadoop utils
 * single instance
 */
public class HadoopUtils implements Closeable {

    private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);

    private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);

    public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
    public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
    public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
    public static final String jobHistoryAddress = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);

    private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";

    private static final LoadingCache<String, HadoopUtils> cache = CacheBuilder
            .newBuilder()
            .expireAfterWrite(PropertyUtils.getInt(Constants.KERBEROS_EXPIRE_TIME, 2), TimeUnit.HOURS)
            .build(new CacheLoader<String, HadoopUtils>() {
                @Override
                public HadoopUtils load(String key) throws Exception {
                    return new HadoopUtils();
                }
            });

    private static volatile boolean yarnEnabled = false;

    private Configuration configuration;
    private FileSystem fs;

    private HadoopUtils() {
        init();
        initHdfsPath();
    }

    public static HadoopUtils getInstance() {
        return cache.getUnchecked(HADOOP_UTILS_KEY);
    }

    /**
     * init dolphinscheduler root path in hdfs
     */
    private void initHdfsPath() {
        Path path = new Path(resourceUploadPath);

        try {
            if (!fs.exists(path)) {
                fs.mkdirs(path);
            }
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * init hadoop configuration
     */
    private void init() {
        try {
            configuration = new HdfsConfiguration();

            String resourceStorageType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
            ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);

            if (resUploadType == ResUploadType.HDFS) {
                if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) {
                    System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
                            PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
                    configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
                    hdfsUser = "";
                    UserGroupInformation.setConfiguration(configuration);
                    UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
                            PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
                }

                String defaultFS = configuration.get(Constants.FS_DEFAULTFS);
                // first get key from core-site.xml hdfs-site.xml ,if null ,then try to get from properties file
                // the default is the local file system
                if (defaultFS.startsWith("file")) {
                    String defaultFSProp = PropertyUtils.getString(Constants.FS_DEFAULTFS);
                    if (StringUtils.isNotBlank(defaultFSProp)) {
                        Map<String, String> fsRelatedProps = PropertyUtils.getPrefixedProperties("fs.");
                        configuration.set(Constants.FS_DEFAULTFS, defaultFSProp);
                        fsRelatedProps.forEach((key, value) -> configuration.set(key, value));
                    } else {
                        logger.error("property:{} can not to be empty, please set!", Constants.FS_DEFAULTFS);
                        throw new RuntimeException(
                                String.format("property: %s can not to be empty, please set!", Constants.FS_DEFAULTFS)
                        );
                    }
                } else {
                    logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", Constants.FS_DEFAULTFS, defaultFS);
                }

                if (fs == null) {
                    if (StringUtils.isNotEmpty(hdfsUser)) {
                        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
                        ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
                            @Override
                            public Boolean run() throws Exception {
                                fs = FileSystem.get(configuration);
                                return true;
                            }
                        });
                    } else {
                        logger.warn("hdfs.root.user is not set value!");
                        fs = FileSystem.get(configuration);
                    }
                }
            } else if (resUploadType == ResUploadType.S3) {
                System.setProperty(Constants.AWS_S3_V4, Constants.STRING_TRUE);
                configuration.set(Constants.FS_DEFAULTFS, PropertyUtils.getString(Constants.FS_DEFAULTFS));
                configuration.set(Constants.FS_S3A_ENDPOINT, PropertyUtils.getString(Constants.FS_S3A_ENDPOINT));
                configuration.set(Constants.FS_S3A_ACCESS_KEY, PropertyUtils.getString(Constants.FS_S3A_ACCESS_KEY));
                configuration.set(Constants.FS_S3A_SECRET_KEY, PropertyUtils.getString(Constants.FS_S3A_SECRET_KEY));
                fs = FileSystem.get(configuration);
            }
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }

    /**
     * @return Configuration
     */
    public Configuration getConfiguration() {
        return configuration;
    }

    /**
     * get application url
     *
     * @param applicationId application id
     * @return url of application
     */
    public String getApplicationUrl(String applicationId) throws Exception {
        /**
         * if rmHaIds contains xx, it signs not use resourcemanager
         * otherwise:
         * if rmHaIds is empty, single resourcemanager enabled
         * if rmHaIds not empty: resourcemanager HA enabled
         */

        yarnEnabled = true;
        String appUrl = StringUtils.isEmpty(rmHaIds) ? appAddress : getAppAddress(appAddress, rmHaIds);
        if (StringUtils.isBlank(appUrl)) {
            throw new BaseException("yarn application url generation failed");
        }
        if (logger.isDebugEnabled()) {
            logger.debug("yarn application url:{}, applicationId:{}", appUrl, applicationId);
        }
        String activeResourceManagerPort = String.valueOf(PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088));
        return String.format(appUrl, activeResourceManagerPort, applicationId);
    }

    public String getJobHistoryUrl(String applicationId) {
        // eg:application_1587475402360_712719 -> job_1587475402360_712719
        String jobId = applicationId.replace("application", "job");
        return String.format(jobHistoryAddress, jobId);
    }

    /**
     * cat file on hdfs
     *
     * @param hdfsFilePath hdfs file path
     * @return byte[] byte array
     * @throws IOException errors
     */
    public byte[] catFile(String hdfsFilePath) throws IOException {

        if (StringUtils.isBlank(hdfsFilePath)) {
            logger.error("hdfs file path:{} is blank", hdfsFilePath);
            return new byte[0];
        }

        try (FSDataInputStream fsDataInputStream = fs.open(new Path(hdfsFilePath))) {
            return IOUtils.toByteArray(fsDataInputStream);
        }
    }

    /**
     * cat file on hdfs
     *
     * @param hdfsFilePath hdfs file path
     * @param skipLineNums skip line numbers
     * @param limit read how many lines
     * @return content of file
     * @throws IOException errors
     */
    public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {

        if (StringUtils.isBlank(hdfsFilePath)) {
            logger.error("hdfs file path:{} is blank", hdfsFilePath);
            return Collections.emptyList();
        }

        try (FSDataInputStream in = fs.open(new Path(hdfsFilePath))) {
            BufferedReader br = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
            Stream<String> stream = br.lines().skip(skipLineNums).limit(limit);
            return stream.collect(Collectors.toList());
        }
    }

    /**
     * make the given file and all non-existent parents into
     * directories. Has the semantics of Unix 'mkdir -p'.
     * Existence of the directory hierarchy is not an error.
     *
     * @param hdfsPath path to create
     * @return mkdir result
     * @throws IOException errors
     */
    public boolean mkdir(String hdfsPath) throws IOException {
        return fs.mkdirs(new Path(hdfsPath));
    }

    /**
     * copy files between FileSystems
     *
     * @param srcPath source hdfs path
     * @param dstPath destination hdfs path
     * @param deleteSource whether to delete the src
     * @param overwrite whether to overwrite an existing file
     * @return if success or not
     * @throws IOException errors
     */
    public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException {
        return FileUtil.copy(fs, new Path(srcPath), fs, new Path(dstPath), deleteSource, overwrite, fs.getConf());
    }

    /**
     * the src file is on the local disk. Add it to FS at
     * the given dst name.
     *
     * @param srcFile local file
     * @param dstHdfsPath destination hdfs path
     * @param deleteSource whether to delete the src
     * @param overwrite whether to overwrite an existing file
     * @return if success or not
     * @throws IOException errors
     */
    public boolean copyLocalToHdfs(String srcFile, String dstHdfsPath, boolean deleteSource, boolean overwrite) throws IOException {
        Path srcPath = new Path(srcFile);
        Path dstPath = new Path(dstHdfsPath);

        fs.copyFromLocalFile(deleteSource, overwrite, srcPath, dstPath);

        return true;
    }

    /**
     * copy hdfs file to local
     *
     * @param srcHdfsFilePath source hdfs file path
     * @param dstFile destination file
     * @param deleteSource delete source
     * @param overwrite overwrite
     * @return result of copy hdfs file to local
     * @throws IOException errors
     */
    public boolean copyHdfsToLocal(String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException {
        Path srcPath = new Path(srcHdfsFilePath);
        File dstPath = new File(dstFile);

        if (dstPath.exists()) {
            if (dstPath.isFile()) {
                if (overwrite) {
                    Files.delete(dstPath.toPath());
                }
            } else {
                logger.error("destination file must be a file");
            }
        }

        if (!dstPath.getParentFile().exists()) {
            dstPath.getParentFile().mkdirs();
        }

        return FileUtil.copy(fs, srcPath, dstPath, deleteSource, fs.getConf());
    }

    /**
     * delete a file
     *
     * @param hdfsFilePath the path to delete.
     * @param recursive if path is a directory and set to
     * true, the directory is deleted else throws an exception. In
     * case of a file the recursive can be set to either true or false.
     * @return true if delete is successful else false.
     * @throws IOException errors
     */
    public boolean delete(String hdfsFilePath, boolean recursive) throws IOException {
        return fs.delete(new Path(hdfsFilePath), recursive);
    }

    /**
     * check if exists
     *
     * @param hdfsFilePath source file path
     * @return result of exists or not
     * @throws IOException errors
     */
    public boolean exists(String hdfsFilePath) throws IOException {
        return fs.exists(new Path(hdfsFilePath));
    }

    /**
     * Gets a list of files in the directory
     *
     * @param filePath file path
     * @return {@link FileStatus} file status
     * @throws Exception errors
     */
    public FileStatus[] listFileStatus(String filePath) throws Exception {
        try {
            return fs.listStatus(new Path(filePath));
        } catch (IOException e) {
            logger.error("Get file list exception", e);
            throw new Exception("Get file list exception", e);
        }
    }

    /**
     * Renames Path src to Path dst. Can take place on local fs
     * or remote DFS.
     *
     * @param src path to be renamed
     * @param dst new path after rename
     * @return true if rename is successful
     * @throws IOException on failure
     */
    public boolean rename(String src, String dst) throws IOException {
        return fs.rename(new Path(src), new Path(dst));
    }

    /**
     * hadoop resourcemanager enabled or not
     *
     * @return result
     */
    public boolean isYarnEnabled() {
        return yarnEnabled;
    }

    /**
     * get the state of an application
     *
     * @param applicationId application id
     * @return the return may be null or there may be other parse exceptions
     */
    public ExecutionStatus getApplicationStatus(String applicationId) throws Exception {
        if (StringUtils.isEmpty(applicationId)) {
            return null;
        }

        String result = Constants.FAILED;
        String applicationUrl = getApplicationUrl(applicationId);
        if (logger.isDebugEnabled()) {
            logger.debug("generate yarn application url, applicationUrl={}", applicationUrl);
        }

        String responseContent = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)
                ? KerberosHttpClient.get(applicationUrl)
                : HttpUtils.get(applicationUrl);
        if (responseContent != null) {
            ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
            if (!jsonObject.has("app")) {
                return ExecutionStatus.FAILURE;
            }
            result = jsonObject.path("app").path("finalStatus").asText();
        } else {
            // may be in job history
            String jobHistoryUrl = getJobHistoryUrl(applicationId);
            if (logger.isDebugEnabled()) {
                logger.debug("generate yarn job history application url, jobHistoryUrl={}", jobHistoryUrl);
            }
            responseContent = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)
                    ? KerberosHttpClient.get(jobHistoryUrl)
                    : HttpUtils.get(jobHistoryUrl);

            if (null != responseContent) {
                ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
                if (!jsonObject.has("job")) {
                    return ExecutionStatus.FAILURE;
                }
                result = jsonObject.path("job").path("state").asText();
            } else {
                return ExecutionStatus.FAILURE;
            }
        }

        switch (result) {
            case Constants.ACCEPTED:
                return ExecutionStatus.SUBMITTED_SUCCESS;
            case Constants.SUCCEEDED:
                return ExecutionStatus.SUCCESS;
            case Constants.NEW:
            case Constants.NEW_SAVING:
            case Constants.SUBMITTED:
            case Constants.FAILED:
                return ExecutionStatus.FAILURE;
            case Constants.KILLED:
                return ExecutionStatus.KILL;
            case Constants.RUNNING:
            default:
                return ExecutionStatus.RUNNING_EXECUTION;
        }
    }

    /**
     * get data hdfs path
     *
     * @return data hdfs path
     */
    public static String getHdfsDataBasePath() {
        if ("/".equals(resourceUploadPath)) {
            // if basepath is configured to /, the generated url may be //default/resources (with extra leading /)
            return "";
        } else {
            return resourceUploadPath;
        }
    }

    /**
     * hdfs resource dir
     *
     * @param tenantCode tenant code
     * @param resourceType resource type
     * @return hdfs resource dir
     */
    public static String getHdfsDir(ResourceType resourceType, String tenantCode) {
        String hdfsDir = "";
        if (resourceType.equals(ResourceType.FILE)) {
            hdfsDir = getHdfsResDir(tenantCode);
        } else if (resourceType.equals(ResourceType.UDF)) {
            hdfsDir = getHdfsUdfDir(tenantCode);
        }
        return hdfsDir;
    }

    /**
     * hdfs resource dir
     *
     * @param tenantCode tenant code
     * @return hdfs resource dir
     */
    public static String getHdfsResDir(String tenantCode) {
        return String.format("%s/resources", getHdfsTenantDir(tenantCode));
    }

    /**
     * hdfs user dir
     *
     * @param tenantCode tenant code
     * @param userId user id
     * @return hdfs resource dir
     */
    public static String getHdfsUserDir(String tenantCode, int userId) {
        return String.format("%s/home/%d", getHdfsTenantDir(tenantCode), userId);
    }

    /**
     * hdfs udf dir
     *
     * @param tenantCode tenant code
     * @return get udf dir on hdfs
     */
    public static String getHdfsUdfDir(String tenantCode) {
        return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
    }

    /**
     * get hdfs file name
     *
     * @param resourceType resource type
     * @param tenantCode tenant code
     * @param fileName file name
     * @return hdfs file name
     */
    public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
        if (fileName.startsWith("/")) {
            fileName = fileName.replaceFirst("/", "");
        }
        return String.format("%s/%s", getHdfsDir(resourceType, tenantCode), fileName);
    }

    /**
     * get absolute path and name for resource file on hdfs
     *
     * @param tenantCode tenant code
     * @param fileName file name
     * @return get absolute path and name for file on hdfs
     */
    public static String getHdfsResourceFileName(String tenantCode, String fileName) {
        if (fileName.startsWith("/")) {
            fileName = fileName.replaceFirst("/", "");
        }
        return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
    }

    /**
     * get absolute path and name for udf file on hdfs
     *
     * @param tenantCode tenant code
     * @param fileName file name
     * @return get absolute path and name for udf file on hdfs
     */
    public static String getHdfsUdfFileName(String tenantCode, String fileName) {
        if (fileName.startsWith("/")) {
            fileName = fileName.replaceFirst("/", "");
        }
        return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
    }

    /**
     * @param tenantCode tenant code
     * @return file directory of tenants on hdfs
     */
    public static String getHdfsTenantDir(String tenantCode) {
        return String.format("%s/%s", getHdfsDataBasePath(), tenantCode);
    }

    /**
     * getAppAddress
     *
     * @param appAddress app address
     * @param rmHa resource manager ha
     * @return app address
     */
    public static String getAppAddress(String appAddress, String rmHa) {

        // get active ResourceManager
        String activeRM = YarnHAAdminUtils.getAcitveRMName(rmHa);

        if (StringUtils.isEmpty(activeRM)) {
            return null;
        }

        String[] split1 = appAddress.split(Constants.DOUBLE_SLASH);

        if (split1.length != 2) {
            return null;
        }

        String start = split1[0] + Constants.DOUBLE_SLASH;
        String[] split2 = split1[1].split(Constants.COLON);

        if (split2.length != 2) {
            return null;
        }

        String end = Constants.COLON + split2[1];

        return start + activeRM + end;
    }

    @Override
    public void close() throws IOException {
        if (fs != null) {
            try {
                fs.close();
            } catch (IOException e) {
                logger.error("Close HadoopUtils instance failed", e);
                throw new IOException("Close HadoopUtils instance failed", e);
            }
        }
    }

    /**
     * yarn ha admin utils
     */
    private static final class YarnHAAdminUtils extends RMAdminCLI {

        /**
         * get active resourcemanager
         */
        public static String getAcitveRMName(String rmIds) {

            String[] rmIdArr = rmIds.split(Constants.COMMA);

            int activeResourceManagerPort = PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088);

            String yarnUrl = "http://%s:" + activeResourceManagerPort + "/ws/v1/cluster/info";

            try {
                /**
                 * send http get request to rm
                 */
                for (String rmId : rmIdArr) {
                    String state = getRMState(String.format(yarnUrl, rmId));
                    if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
                        return rmId;
                    }
                }
            } catch (Exception e) {
                logger.error("yarn ha application url generation failed, message:{}", e.getMessage());
            }
            return null;
        }

        /**
         * get ResourceManager state
         */
        public static String getRMState(String url) {

            String retStr = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)
                    ? KerberosHttpClient.get(url)
                    : HttpUtils.get(url);

            if (StringUtils.isEmpty(retStr)) {
                return null;
            }
            // to json
            ObjectNode jsonObject = JSONUtils.parseObject(retStr);

            // get ResourceManager state
            if (!jsonObject.has("clusterInfo")) {
                return null;
            }
            return jsonObject.get("clusterInfo").path("haState").asText();
        }
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,527
[Improvement][api-server] failed find any kerberos
kerberos.expire.time has already been set to 1, but every few days the resources in the resource center can no longer be downloaded and the error "failed find any kerberos" is reported; they only become usable again after a restart. ![1908dee0e78a739c5a20284aa524c27](https://user-images.githubusercontent.com/18589655/118931587-bde1da80-b936-11eb-94a8-1f1f86c7a003.jpg) It is suggested to improve this so that it remains effective long-term and avoids restarts.
https://github.com/apache/dolphinscheduler/issues/5527
https://github.com/apache/dolphinscheduler/pull/5533
46660b58ed82d76904f26c5b869f3aa96e50727a
9ba4ffbe48ca9cfeaaceeef2d346235e5e39dd86
2021-05-20T06:44:32Z
java
2021-05-23T15:43:59Z
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/CommonUtilsTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.utils;

import org.apache.dolphinscheduler.common.Constants;

import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.InetAddress;
import java.net.UnknownHostException;

/**
 * configuration test
 */
public class CommonUtilsTest {

    private static final Logger logger = LoggerFactory.getLogger(CommonUtilsTest.class);

    @Test
    public void getSystemEnvPath() {
        logger.info(CommonUtils.getSystemEnvPath());
        Assert.assertTrue(true);
    }

    @Test
    public void isDevelopMode() {
        logger.info("develop mode: {}", CommonUtils.isDevelopMode());
        Assert.assertTrue(true);
    }

    @Test
    public void getKerberosStartupState() {
        logger.info("kerberos startup state: {}", CommonUtils.getKerberosStartupState());
        Assert.assertTrue(true);
    }

    @Test
    public void loadKerberosConf() {
        try {
            CommonUtils.loadKerberosConf();
            Assert.assertTrue(true);
        } catch (Exception e) {
            Assert.fail("load Kerberos Conf failed");
        }
    }

    @Test
    public void getHdfsDataBasePath() {
        logger.info(HadoopUtils.getHdfsDataBasePath());
        Assert.assertTrue(true);
    }

    @Test
    public void getDownloadFilename() {
        logger.info(FileUtils.getDownloadFilename("a.txt"));
        Assert.assertTrue(true);
    }

    @Test
    public void getUploadFilename() {
        logger.info(FileUtils.getUploadFilename("1234", "a.txt"));
        Assert.assertTrue(true);
    }

    @Test
    public void getHdfsDir() {
        logger.info(HadoopUtils.getHdfsResDir("1234"));
        Assert.assertTrue(true);
    }

    @Test
    public void test() {
        InetAddress ip = null;
        try {
            ip = InetAddress.getLocalHost();
            logger.info(ip.getHostAddress());
        } catch (UnknownHostException e) {
            e.printStackTrace();
        }
        Assert.assertTrue(true);
    }

    @Test
    public void encodePassword() {
        PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "true");

        Assert.assertEquals("", CommonUtils.encodePassword(""));
        Assert.assertEquals("IUAjJCVeJipNVEl6TkRVMg==", CommonUtils.encodePassword("123456"));
        Assert.assertEquals("IUAjJCVeJipJVkZCV2xoVFYwQT0=", CommonUtils.encodePassword("!QAZXSW@"));
        Assert.assertEquals("IUAjJCVeJipOV1JtWjJWeUtFQT0=", CommonUtils.encodePassword("5dfger(@"));

        PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "false");

        Assert.assertEquals("", CommonUtils.encodePassword(""));
        Assert.assertEquals("123456", CommonUtils.encodePassword("123456"));
        Assert.assertEquals("!QAZXSW@", CommonUtils.encodePassword("!QAZXSW@"));
        Assert.assertEquals("5dfger(@", CommonUtils.encodePassword("5dfger(@"));
    }

    @Test
    public void decodePassword() {
        PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "true");

        Assert.assertEquals("", CommonUtils.decodePassword(""));
        Assert.assertEquals("123456", CommonUtils.decodePassword("IUAjJCVeJipNVEl6TkRVMg=="));
        Assert.assertEquals("!QAZXSW@", CommonUtils.decodePassword("IUAjJCVeJipJVkZCV2xoVFYwQT0="));
        Assert.assertEquals("5dfger(@", CommonUtils.decodePassword("IUAjJCVeJipOV1JtWjJWeUtFQT0="));

        PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE, "false");

        Assert.assertEquals("", CommonUtils.decodePassword(""));
        Assert.assertEquals("123456", CommonUtils.decodePassword("123456"));
        Assert.assertEquals("!QAZXSW@", CommonUtils.decodePassword("!QAZXSW@"));
        Assert.assertEquals("5dfger(@", CommonUtils.decodePassword("5dfger(@"));
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,210
[Improvement][Server] Must restart master if ZK reconnects
**Describe the question** If ZooKeeper disconnects, the master server stops working, and when ZooKeeper reconnects we have to restart the master server; it cannot recover automatically. This is because when ZooKeeper reconnects, the treeCache produces a NODE_REMOVED event; `ZKMasterClient` receives this event and adds the server to the dead nodes. When the `HeartBeatTask` finds the master on the dead path, it stops the MasterServer. **What are the current deficiencies and the benefits of improvement** Improves server availability. **Which version of DolphinScheduler:** -[1.3.6-preview]
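The fix in the registry classes of this record hooks Curator's connection-state listener so that the ephemeral registration node is re-created when the session comes back, instead of the server staying on the dead-node path. A self-contained sketch of that idea (`ReRegisterOnReconnect` and `watchAndReRegister` are illustrative names, not DolphinScheduler APIs):

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.zookeeper.CreateMode;

// Illustrative sketch: re-create the ephemeral node after a reconnect so the
// server does not remain "dead" once ZooKeeper recovers.
public class ReRegisterOnReconnect {

    public static void watchAndReRegister(CuratorFramework client, String nodePath) {
        client.getConnectionStateListenable().addListener((c, newState) -> {
            if (newState == ConnectionState.RECONNECTED || newState == ConnectionState.SUSPENDED) {
                try {
                    // the old session's ephemeral node is gone; put it back
                    c.create().creatingParentsIfNeeded()
                            .withMode(CreateMode.EPHEMERAL)
                            .forPath(nodePath, new byte[0]);
                } catch (Exception e) {
                    // a NodeExistsException here just means the node survived or
                    // another listener re-created it first; both are fine
                }
            }
        });
    }
}
```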
https://github.com/apache/dolphinscheduler/issues/5210
https://github.com/apache/dolphinscheduler/pull/5211
9d0c816cee102edbba2ac080f483c8a73a0b7b30
842c5400e605a8b8eb0d8fdc78701f10222063fd
2021-04-04T14:23:48Z
java
2021-05-24T21:03:29Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistry.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.registry;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.registry.HeartBeatTask;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;

import org.apache.curator.framework.state.ConnectionState;

import java.util.Date;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import com.google.common.collect.Sets;

/**
 * master registry
 */
@Service
public class MasterRegistry {

    private final Logger logger = LoggerFactory.getLogger(MasterRegistry.class);

    /**
     * zookeeper registry center
     */
    @Autowired
    private ZookeeperRegistryCenter zookeeperRegistryCenter;

    /**
     * master config
     */
    @Autowired
    private MasterConfig masterConfig;

    /**
     * heartbeat executor
     */
    private ScheduledExecutorService heartBeatExecutor;

    /**
     * master start time
     */
    private String startTime;

    @PostConstruct
    public void init() {
        this.startTime = DateUtils.dateToString(new Date());
        this.heartBeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HeartBeatExecutor"));
    }

    /**
     * registry
     */
    public void registry() {
        String address = NetUtils.getAddr(masterConfig.getListenPort());
        String localNodePath = getMasterPath();
        zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(localNodePath, "");
        zookeeperRegistryCenter.getRegisterOperator().getZkClient().getConnectionStateListenable().addListener(
            (client, newState) -> {
                if (newState == ConnectionState.LOST) {
                    logger.error("master : {} connection lost from zookeeper", address);
                } else if (newState == ConnectionState.RECONNECTED) {
                    logger.info("master : {} reconnected to zookeeper", address);
                    zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(localNodePath, "");
                } else if (newState == ConnectionState.SUSPENDED) {
                    logger.warn("master : {} connection SUSPENDED ", address);
                    zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(localNodePath, "");
                }
            });
        int masterHeartbeatInterval = masterConfig.getMasterHeartbeatInterval();
        HeartBeatTask heartBeatTask = new HeartBeatTask(startTime,
                masterConfig.getMasterMaxCpuloadAvg(),
                masterConfig.getMasterReservedMemory(),
                Sets.newHashSet(getMasterPath()),
                Constants.MASTER_TYPE,
                zookeeperRegistryCenter);

        this.heartBeatExecutor.scheduleAtFixedRate(heartBeatTask, masterHeartbeatInterval, masterHeartbeatInterval, TimeUnit.SECONDS);
        logger.info("master node : {} registry to ZK successfully with heartBeatInterval : {}s", address, masterHeartbeatInterval);
    }

    /**
     * remove registry info
     */
    public void unRegistry() {
        String address = getLocalAddress();
        String localNodePath = getMasterPath();
        zookeeperRegistryCenter.getRegisterOperator().remove(localNodePath);
        logger.info("master node : {} unRegistry to ZK.", address);
        heartBeatExecutor.shutdown();
        logger.info("heartbeat executor shutdown");
    }

    /**
     * get master path
     */
    public String getMasterPath() {
        String address = getLocalAddress();
        return this.zookeeperRegistryCenter.getMasterPath() + "/" + address;
    }

    /**
     * get local address
     */
    private String getLocalAddress() {
        return NetUtils.getAddr(masterConfig.getListenPort());
    }

    /**
     * get zookeeper registry center
     * @return ZookeeperRegistryCenter
     */
    public ZookeeperRegistryCenter getZookeeperRegistryCenter() {
        return zookeeperRegistryCenter;
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,210
[Improvement][Server] Must restart master if ZK reconnects
**Describe the question** If ZooKeeper disconnects, the master server stops working, and when ZooKeeper reconnects we have to restart the master server; it cannot recover automatically. This is because when ZooKeeper reconnects, the treeCache produces a NODE_REMOVED event; `ZKMasterClient` receives this event and adds the server to the dead nodes. When the `HeartBeatTask` finds the master on the dead path, it stops the MasterServer. **What are the current deficiencies and the benefits of improvement** Improves server availability. **Which version of DolphinScheduler:** -[1.3.6-preview]
https://github.com/apache/dolphinscheduler/issues/5210
https://github.com/apache/dolphinscheduler/pull/5211
9d0c816cee102edbba2ac080f483c8a73a0b7b30
842c5400e605a8b8eb0d8fdc78701f10222063fd
2021-04-04T14:23:48Z
java
2021-05-24T21:03:29Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistry.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker.registry;

import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.SLASH;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.apache.dolphinscheduler.server.registry.HeartBeatTask;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;

import org.apache.curator.framework.state.ConnectionState;

import java.util.Date;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import javax.annotation.PostConstruct;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import com.google.common.collect.Sets;

/**
 * worker registry
 */
@Service
public class WorkerRegistry {

    private final Logger logger = LoggerFactory.getLogger(WorkerRegistry.class);

    /**
     * zookeeper registry center
     */
    @Autowired
    private ZookeeperRegistryCenter zookeeperRegistryCenter;

    /**
     * worker config
     */
    @Autowired
    private WorkerConfig workerConfig;

    /**
     * heartbeat executor
     */
    private ScheduledExecutorService heartBeatExecutor;

    /**
     * worker start time
     */
    private String startTime;

    private Set<String> workerGroups;

    @PostConstruct
    public void init() {
        this.workerGroups = workerConfig.getWorkerGroups();
        this.startTime = DateUtils.dateToString(new Date());
        this.heartBeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HeartBeatExecutor"));
    }

    /**
     * get zookeeper registry center
     * @return ZookeeperRegistryCenter
     */
    public ZookeeperRegistryCenter getZookeeperRegistryCenter() {
        return zookeeperRegistryCenter;
    }

    /**
     * registry
     */
    public void registry() {
        String address = NetUtils.getAddr(workerConfig.getListenPort());
        Set<String> workerZkPaths = getWorkerZkPaths();
        int workerHeartbeatInterval = workerConfig.getWorkerHeartbeatInterval();

        for (String workerZKPath : workerZkPaths) {
            zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(workerZKPath, "");
            zookeeperRegistryCenter.getRegisterOperator().getZkClient().getConnectionStateListenable().addListener(
                (client, newState) -> {
                    if (newState == ConnectionState.LOST) {
                        logger.error("worker : {} connection lost from zookeeper", address);
                    } else if (newState == ConnectionState.RECONNECTED) {
                        logger.info("worker : {} reconnected to zookeeper", address);
                        zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(workerZKPath, "");
                    } else if (newState == ConnectionState.SUSPENDED) {
                        logger.warn("worker : {} connection SUSPENDED ", address);
                        zookeeperRegistryCenter.getRegisterOperator().persistEphemeral(workerZKPath, "");
                    }
                });
            logger.info("worker node : {} registry to ZK {} successfully", address, workerZKPath);
        }

        HeartBeatTask heartBeatTask = new HeartBeatTask(startTime,
                workerConfig.getWorkerMaxCpuloadAvg(),
                workerConfig.getWorkerReservedMemory(),
                workerConfig.getHostWeight(),
                workerZkPaths,
                Constants.WORKER_TYPE,
                zookeeperRegistryCenter);

        this.heartBeatExecutor.scheduleAtFixedRate(heartBeatTask, workerHeartbeatInterval, workerHeartbeatInterval, TimeUnit.SECONDS);
        logger.info("worker node : {} heartbeat interval {} s", address, workerHeartbeatInterval);
    }

    /**
     * remove registry info
     */
    public void unRegistry() {
        String address = getLocalAddress();
        Set<String> workerZkPaths = getWorkerZkPaths();
        for (String workerZkPath : workerZkPaths) {
            zookeeperRegistryCenter.getRegisterOperator().remove(workerZkPath);
            logger.info("worker node : {} unRegistry from ZK {}.", address, workerZkPath);
        }
        this.heartBeatExecutor.shutdownNow();
        logger.info("heartbeat executor shutdown");
    }

    /**
     * get worker path
     */
    public Set<String> getWorkerZkPaths() {
        Set<String> workerZkPaths = Sets.newHashSet();

        String address = getLocalAddress();
        String workerZkPathPrefix = this.zookeeperRegistryCenter.getWorkerPath();

        for (String workGroup : this.workerGroups) {
            StringJoiner workerZkPathJoiner = new StringJoiner(SLASH);
            workerZkPathJoiner.add(workerZkPathPrefix);
            if (StringUtils.isEmpty(workGroup)) {
                workGroup = DEFAULT_WORKER_GROUP;
            }
            // trim and lower case is need
            workerZkPathJoiner.add(workGroup.trim().toLowerCase());
            workerZkPathJoiner.add(address);
            workerZkPaths.add(workerZkPathJoiner.toString());
        }
        return workerZkPaths;
    }

    /**
     * get local address
     */
    private String getLocalAddress() {
        return NetUtils.getAddr(workerConfig.getListenPort());
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,210
[Improvement][Server] Must restart master if ZK reconnects
**Describe the question** If ZooKeeper disconnects, the master server stops working, and when ZooKeeper reconnects we have to restart the master server; it cannot recover automatically. This is because when ZooKeeper reconnects, the treeCache produces a NODE_REMOVED event; `ZKMasterClient` receives this event and adds the server to the dead nodes. When the `HeartBeatTask` finds the master on the dead path, it stops the MasterServer. **What are the current deficiencies and the benefits of improvement** Improves server availability. **Which version of DolphinScheduler:** -[1.3.6-preview]
https://github.com/apache/dolphinscheduler/issues/5210
https://github.com/apache/dolphinscheduler/pull/5211
9d0c816cee102edbba2ac080f483c8a73a0b7b30
842c5400e605a8b8eb0d8fdc78701f10222063fd
2021-04-04T14:23:48Z
java
2021-05-24T21:03:29Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/ZookeeperOperator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.service.zk;

import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull;

import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.service.exceptions.ServiceException;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.api.ACLProvider;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.curator.retry.ExponentialBackoffRetry;
import org.apache.curator.utils.CloseableUtils;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

/**
 * zk base operator
 */
@Component
public class ZookeeperOperator implements InitializingBean {

    private final Logger logger = LoggerFactory.getLogger(ZookeeperOperator.class);

    @Autowired
    private ZookeeperConfig zookeeperConfig;

    protected CuratorFramework zkClient;

    @Override
    public void afterPropertiesSet() {
        this.zkClient = buildClient();
        initStateListener();
        treeCacheStart();
    }

    /**
     * this method is for sub class,
     */
    protected void registerListener() {
        // Used by sub class
    }

    protected void treeCacheStart() {
        // Used by sub class
    }

    public void initStateListener() {
        checkNotNull(zkClient);

        zkClient.getConnectionStateListenable().addListener((client, newState) -> {
            if (newState == ConnectionState.LOST) {
                logger.error("connection lost from zookeeper");
            } else if (newState == ConnectionState.RECONNECTED) {
                logger.info("reconnected to zookeeper");
            } else if (newState == ConnectionState.SUSPENDED) {
                logger.warn("connection SUSPENDED to zookeeper");
            }
        });
    }

    private CuratorFramework buildClient() {
        logger.info("zookeeper registry center init, server lists is: {}.", zookeeperConfig.getServerList());

        CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder()
                .ensembleProvider(new DefaultEnsembleProvider(checkNotNull(zookeeperConfig.getServerList(), "zookeeper quorum can't be null")))
                .retryPolicy(new ExponentialBackoffRetry(zookeeperConfig.getBaseSleepTimeMs(), zookeeperConfig.getMaxRetries(), zookeeperConfig.getMaxSleepMs()));

        //these has default value
        if (0 != zookeeperConfig.getSessionTimeoutMs()) {
            builder.sessionTimeoutMs(zookeeperConfig.getSessionTimeoutMs());
        }
        if (0 != zookeeperConfig.getConnectionTimeoutMs()) {
            builder.connectionTimeoutMs(zookeeperConfig.getConnectionTimeoutMs());
        }
        if (StringUtils.isNotBlank(zookeeperConfig.getDigest())) {
            builder.authorization("digest", zookeeperConfig.getDigest().getBytes(StandardCharsets.UTF_8)).aclProvider(new ACLProvider() {

                @Override
                public List<ACL> getDefaultAcl() {
                    return ZooDefs.Ids.CREATOR_ALL_ACL;
                }

                @Override
                public List<ACL> getAclForPath(final String path) {
                    return ZooDefs.Ids.CREATOR_ALL_ACL;
                }
            });
        }
        zkClient = builder.build();
        zkClient.start();
        try {
            zkClient.blockUntilConnected();
        } catch (final Exception ex) {
            throw new ServiceException(ex);
        }
        return zkClient;
    }

    public String get(final String key) {
        try {
            return new String(zkClient.getData().forPath(key), StandardCharsets.UTF_8);
        } catch (Exception ex) {
            logger.error("get key : {}", key, ex);
        }
        return null;
    }

    public List<String> getChildrenKeys(final String key) {
        try {
            return zkClient.getChildren().forPath(key);
        } catch (NoNodeException ex) {
            return new ArrayList<>();
        } catch (InterruptedException ex) {
            logger.error("getChildrenKeys key : {} InterruptedException", key);
            throw new IllegalStateException(ex);
        } catch (Exception ex) {
            logger.error("getChildrenKeys key : {}", key, ex);
            throw new ServiceException(ex);
        }
    }

    public boolean hasChildren(final String key) {
        Stat stat;
        try {
            stat = zkClient.checkExists().forPath(key);
            return stat.getNumChildren() >= 1;
        } catch (Exception ex) {
            throw new IllegalStateException(ex);
        }
    }

    public boolean isExisted(final String key) {
        try {
            return zkClient.checkExists().forPath(key) != null;
        } catch (Exception ex) {
            logger.error("isExisted key : {}", key, ex);
        }
        return false;
    }

    public void persist(final String key, final String value) {
        try {
            if (!isExisted(key)) {
                zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(key, value.getBytes(StandardCharsets.UTF_8));
            } else {
                update(key, value);
            }
        } catch (Exception ex) {
            logger.error("persist key : {} , value : {}", key, value, ex);
        }
    }

    public void update(final String key, final String value) {
        try {
            zkClient.inTransaction().check().forPath(key).and().setData().forPath(key, value.getBytes(StandardCharsets.UTF_8)).and().commit();
        } catch (Exception ex) {
            logger.error("update key : {} , value : {}", key, value, ex);
        }
    }

    public void persistEphemeral(final String key, final String value) {
        try {
            if (isExisted(key)) {
                try {
                    zkClient.delete().deletingChildrenIfNeeded().forPath(key);
                } catch (NoNodeException ignore) {
                    //NOP
                }
            }
            zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(key, value.getBytes(StandardCharsets.UTF_8));
        } catch (final Exception ex) {
            logger.error("persistEphemeral key : {} , value : {}", key, value, ex);
        }
    }

    public void persistEphemeral(String key, String value, boolean overwrite) {
        try {
            if (overwrite) {
                persistEphemeral(key, value);
            } else {
                if (!isExisted(key)) {
                    zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(key, value.getBytes(StandardCharsets.UTF_8));
                }
            }
        } catch (final Exception ex) {
            logger.error("persistEphemeral key : {} , value : {}, overwrite : {}", key, value, overwrite, ex);
        }
    }

    public void persistEphemeralSequential(final String key, String value) {
        try {
            zkClient.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL_SEQUENTIAL).forPath(key, value.getBytes(StandardCharsets.UTF_8));
        } catch (final Exception ex) {
            logger.error("persistEphemeralSequential key : {}", key, ex);
        }
    }

    public void remove(final String key) {
        try {
            if (isExisted(key)) {
                zkClient.delete().deletingChildrenIfNeeded().forPath(key);
            }
        } catch (NoNodeException ignore) {
            //NOP
        } catch (final Exception ex) {
            logger.error("remove key : {}", key, ex);
        }
    }

    public CuratorFramework getZkClient() {
        return zkClient;
    }

    public ZookeeperConfig getZookeeperConfig() {
        return zookeeperConfig;
    }

    public void close() {
        CloseableUtils.closeQuietly(zkClient);
    }
}
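A minimal usage sketch of the operator above, using only the methods shown; the node path and payload are hypothetical, and the Spring context is assumed to have already run `afterPropertiesSet()`:

```java
import java.util.List;

import org.springframework.context.ApplicationContext;

public class ZookeeperOperatorUsageSketch {

    // Register this server under an ephemeral node and read the registry back.
    public static void registerAndRead(ApplicationContext context) {
        // ZookeeperOperator is a Spring @Component, so afterPropertiesSet() has
        // already built and connected the Curator client by the time we get it.
        ZookeeperOperator operator = context.getBean(ZookeeperOperator.class);

        String nodePath = "/dolphinscheduler/masters/192.168.xx.xx:5678"; // hypothetical path
        operator.persistEphemeral(nodePath, "heartbeat-payload", true);   // overwrite a stale node if present

        // Ephemeral nodes vanish when the ZooKeeper session dies; that disappearance
        // is exactly the event the down-listeners in issue 5,525 below react to.
        String value = operator.get(nodePath);
        List<String> masters = operator.getChildrenKeys("/dolphinscheduler/masters");
        System.out.println("registered " + value + ", current masters: " + masters);
    }
}
```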
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,525
server down will send repetitive messages
Each master and worker listens on ZooKeeper for other master or worker services going down and sends a notification when one is detected, but every monitoring service sends its own message, so the alerts are duplicated. For example, with 3 masters and 3 workers, if one of the masters goes down, each of the remaining five services inserts an alert record into the database. I don't have a good plan to fix this issue yet; should we consider the impact on normal version iterations?
https://github.com/apache/dolphinscheduler/issues/5525
https://github.com/apache/dolphinscheduler/pull/5529
60af52fb2bbf5f0fcab072024f44b01d85a8d620
f8ecb536b71d6f33b71c73930832b62890b84ea1
2021-05-19T06:41:24Z
java
2021-06-01T02:21:46Z
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.dao;

import org.apache.dolphinscheduler.common.enums.AlertEvent;
import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.common.enums.AlertWarnLevel;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.datasource.ConnectionFactory;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessAlertContent;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ServerAlertContent;
import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper;
import org.apache.dolphinscheduler.dao.mapper.AlertMapper;
import org.apache.dolphinscheduler.dao.mapper.AlertPluginInstanceMapper;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class AlertDao extends AbstractBaseDao {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    @Autowired
    private AlertMapper alertMapper;

    @Autowired
    private AlertPluginInstanceMapper alertPluginInstanceMapper;

    @Autowired
    private AlertGroupMapper alertGroupMapper;

    @Override
    protected void init() {
        alertMapper = ConnectionFactory.getInstance().getMapper(AlertMapper.class);
        alertPluginInstanceMapper = ConnectionFactory.getInstance().getMapper(AlertPluginInstanceMapper.class);
        alertGroupMapper = ConnectionFactory.getInstance().getMapper(AlertGroupMapper.class);
    }

    /**
     * insert alert
     *
     * @param alert alert
     * @return add alert result
     */
    public int addAlert(Alert alert) {
        return alertMapper.insert(alert);
    }

    /**
     * update alert
     *
     * @param alertStatus alertStatus
     * @param log log
     * @param id id
     * @return update alert result
     */
    public int updateAlert(AlertStatus alertStatus, String log, int id) {
        Alert alert = alertMapper.selectById(id);
        alert.setAlertStatus(alertStatus);
        alert.setUpdateTime(new Date());
        alert.setLog(log);
        return alertMapper.updateById(alert);
    }

    /**
     * MasterServer or WorkerServer stoped
     *
     * @param alertGroupId alertGroupId
     * @param host host
     * @param serverType serverType
     */
    public void sendServerStopedAlert(int alertGroupId, String host, String serverType) {
        Alert alert = new Alert();
        List<ServerAlertContent> serverAlertContents = new ArrayList<>(1);
        ServerAlertContent serverStopAlertContent = ServerAlertContent.newBuilder()
                .type(serverType).host(host).event(AlertEvent.SERVER_DOWN).warningLevel(AlertWarnLevel.SERIOUS)
                .build();
        serverAlertContents.add(serverStopAlertContent);
        String content = JSONUtils.toJsonString(serverAlertContents);
        alert.setTitle("Fault tolerance warning");
        saveTaskTimeoutAlert(alert, content, alertGroupId);
    }

    /**
     * process time out alert
     *
     * @param processInstance processInstance
     * @param processDefinition processDefinition
     */
    public void sendProcessTimeoutAlert(ProcessInstance processInstance, ProcessDefinition processDefinition) {
        int alertGroupId = processInstance.getWarningGroupId();
        Alert alert = new Alert();
        List<ProcessAlertContent> processAlertContentList = new ArrayList<>(1);
        ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder()
                .processId(processInstance.getId())
                .processName(processInstance.getName())
                .event(AlertEvent.TIME_OUT)
                .warningLevel(AlertWarnLevel.MIDDLE)
                .build();
        processAlertContentList.add(processAlertContent);
        String content = JSONUtils.toJsonString(processAlertContentList);
        alert.setTitle("Process Timeout Warn");
        saveTaskTimeoutAlert(alert, content, alertGroupId);
    }

    private void saveTaskTimeoutAlert(Alert alert, String content, int alertGroupId) {
        alert.setAlertGroupId(alertGroupId);
        alert.setContent(content);
        alert.setCreateTime(new Date());
        alert.setUpdateTime(new Date());
        alertMapper.insert(alert);
    }

    /**
     * task timeout warn
     *
     * @param alertGroupId alertGroupId
     * @param processInstanceId processInstanceId
     * @param processInstanceName processInstanceName
     * @param taskId taskId
     * @param taskName taskName
     */
    public void sendTaskTimeoutAlert(int alertGroupId, int processInstanceId, String processInstanceName,
                                     int taskId, String taskName) {
        Alert alert = new Alert();
        List<ProcessAlertContent> processAlertContentList = new ArrayList<>(1);
        ProcessAlertContent processAlertContent = ProcessAlertContent.newBuilder()
                .processId(processInstanceId)
                .processName(processInstanceName)
                .taskId(taskId)
                .taskName(taskName)
                .event(AlertEvent.TIME_OUT)
                .warningLevel(AlertWarnLevel.MIDDLE)
                .build();
        processAlertContentList.add(processAlertContent);
        String content = JSONUtils.toJsonString(processAlertContentList);
        alert.setTitle("Task Timeout Warn");
        saveTaskTimeoutAlert(alert, content, alertGroupId);
    }

    /**
     * list the alert information of waiting to be executed
     *
     * @return alert list
     */
    public List<Alert> listWaitExecutionAlert() {
        return alertMapper.listAlertByStatus(AlertStatus.WAIT_EXECUTION);
    }

    /**
     * for test
     *
     * @return AlertMapper
     */
    public AlertMapper getAlertMapper() {
        return alertMapper;
    }

    /**
     * list all alert plugin instance by alert group id
     *
     * @param alertGroupId alert group id
     * @return AlertPluginInstance list
     */
    public List<AlertPluginInstance> listInstanceByAlertGroupId(int alertGroupId) {
        String alertInstanceIdsParam = alertGroupMapper.queryAlertGroupInstanceIdsById(alertGroupId);
        if (StringUtils.isNotBlank(alertInstanceIdsParam)) {
            String[] idsArray = alertInstanceIdsParam.split(",");
            List<Integer> ids = Arrays.stream(idsArray)
                    .map(s -> Integer.parseInt(s.trim()))
                    .collect(Collectors.toList());
            return alertPluginInstanceMapper.queryByIds(ids);
        }
        return null;
    }

    public AlertPluginInstanceMapper getAlertPluginInstanceMapper() {
        return alertPluginInstanceMapper;
    }

    public void setAlertPluginInstanceMapper(AlertPluginInstanceMapper alertPluginInstanceMapper) {
        this.alertPluginInstanceMapper = alertPluginInstanceMapper;
    }

    public AlertGroupMapper getAlertGroupMapper() {
        return alertGroupMapper;
    }

    public void setAlertGroupMapper(AlertGroupMapper alertGroupMapper) {
        this.alertGroupMapper = alertGroupMapper;
    }
}
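Issue 5,525 stems from `sendServerStopedAlert` inserting unconditionally: every surviving server that sees the ZooKeeper node vanish writes its own row. One possible guard is sketched below; the MyBatis-Plus `QueryWrapper` count is an illustration, not necessarily the fix merged in PR #5529:

```java
// Hypothetical variant of AlertDao#sendServerStopedAlert with a duplicate check.
// Additional imports this sketch assumes:
//   import java.util.Collections;
//   import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
public void sendServerStopedAlert(int alertGroupId, String host, String serverType) {
    ServerAlertContent serverStopAlertContent = ServerAlertContent.newBuilder()
            .type(serverType).host(host)
            .event(AlertEvent.SERVER_DOWN)
            .warningLevel(AlertWarnLevel.SERIOUS)
            .build();
    String content = JSONUtils.toJsonString(Collections.singletonList(serverStopAlertContent));

    // Every surviving master/worker observes the same node disappear; only the first
    // observer should insert. Column names match the baseSql in AlertMapper.xml below.
    // A unique key on (title, content) would also close the remaining race window.
    Integer existing = alertMapper.selectCount(new QueryWrapper<Alert>()
            .eq("title", "Fault tolerance warning")
            .eq("content", content));
    if (existing != null && existing > 0) {
        return;
    }

    Alert alert = new Alert();
    alert.setTitle("Fault tolerance warning");
    saveTaskTimeoutAlert(alert, content, alertGroupId);
}
```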
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,525
server down will send repetitive messages
Each master and worker listens on ZooKeeper for other master or worker services going down and sends a notification when one is detected, but every monitoring service sends its own message, so the alerts are duplicated. For example, with 3 masters and 3 workers, if one of the masters goes down, each of the remaining five services inserts an alert record into the database. I don't have a good plan to fix this issue yet; should we consider the impact on normal version iterations?
https://github.com/apache/dolphinscheduler/issues/5525
https://github.com/apache/dolphinscheduler/pull/5529
60af52fb2bbf5f0fcab072024f44b01d85a8d620
f8ecb536b71d6f33b71c73930832b62890b84ea1
2021-05-19T06:41:24Z
java
2021-06-01T02:21:46Z
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AlertMapper.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.dao.mapper;

import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.dao.entity.Alert;

import com.baomidou.mybatisplus.core.mapper.BaseMapper;

import org.apache.ibatis.annotations.Param;

import java.util.List;

/**
 * alert mapper interface
 */
public interface AlertMapper extends BaseMapper<Alert> {

    /**
     * list alert by status
     * @param alertStatus alertStatus
     * @return alert list
     */
    List<Alert> listAlertByStatus(@Param("alertStatus") AlertStatus alertStatus);
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,525
server down will send repetitive messages
Each master and worker listens on ZooKeeper for other master or worker services going down and sends a notification when one is detected, but every monitoring service sends its own message, so the alerts are duplicated. For example, with 3 masters and 3 workers, if one of the masters goes down, each of the remaining five services inserts an alert record into the database. I don't have a good plan to fix this issue yet; should we consider the impact on normal version iterations?
https://github.com/apache/dolphinscheduler/issues/5525
https://github.com/apache/dolphinscheduler/pull/5529
60af52fb2bbf5f0fcab072024f44b01d85a8d620
f8ecb536b71d6f33b71c73930832b62890b84ea1
2021-05-19T06:41:24Z
java
2021-06-01T02:21:46Z
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/AlertMapper.xml
<?xml version="1.0" encoding="UTF-8" ?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertMapper">
    <sql id="baseSql">
        id, title, content, alert_status, log, alertgroup_id, create_time, update_time
    </sql>
    <select id="listAlertByStatus" resultType="org.apache.dolphinscheduler.dao.entity.Alert">
        select
        <include refid="baseSql"/>
        from t_ds_alert
        where alert_status = #{alertStatus}
    </select>
</mapper>
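For context, `listAlertByStatus` backs a simple polling loop in the alert server: fetch rows in `WAIT_EXECUTION`, dispatch them, then flip the status via `updateAlert`. A hedged sketch of that loop follows; `dispatch` is a hypothetical stand-in for the real alert-plugin call, and the `EXECUTION_SUCCESS` / `EXECUTION_FAILURE` enum values are assumptions here:

```java
import java.util.List;

import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.entity.Alert;

// Hedged sketch of the dispatch loop that this mapper supports.
public class AlertDispatchLoopSketch {

    public void pollAndSend(AlertDao alertDao) {
        // Rows inserted by AlertDao start in WAIT_EXECUTION (alert_status column above).
        List<Alert> waiting = alertDao.listWaitExecutionAlert();
        for (Alert alert : waiting) {
            boolean ok = dispatch(alert);
            // Assumed enum values; mark the row so it is not picked up again.
            AlertStatus next = ok ? AlertStatus.EXECUTION_SUCCESS : AlertStatus.EXECUTION_FAILURE;
            alertDao.updateAlert(next, ok ? "send success" : "send failed", alert.getId());
        }
    }

    private boolean dispatch(Alert alert) {
        // Placeholder: hand the alert content to the configured alert plugins.
        return true;
    }
}
```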
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,525
server down will send repetitive messages
Each master and worker listens on ZooKeeper for other master or worker services going down and sends a notification when one is detected, but every monitoring service sends its own message, so the alerts are duplicated. For example, with 3 masters and 3 workers, if one of the masters goes down, each of the remaining five services inserts an alert record into the database. I don't have a good plan to fix this issue yet; should we consider the impact on normal version iterations?
https://github.com/apache/dolphinscheduler/issues/5525
https://github.com/apache/dolphinscheduler/pull/5529
60af52fb2bbf5f0fcab072024f44b01d85a8d620
f8ecb536b71d6f33b71c73930832b62890b84ea1
2021-05-19T06:41:24Z
java
2021-06-01T02:21:46Z
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/AlertDaoTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.dao;

import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.dao.entity.Alert;

import java.util.List;

import org.junit.Assert;
import org.junit.Test;

public class AlertDaoTest {

    @Test
    public void testAlertDao() {
        AlertDao alertDao = DaoFactory.getDaoInstance(AlertDao.class);
        Alert alert = new Alert();
        alert.setTitle("Mysql Exception");
        alert.setContent("[\"alarm time:2018-02-05\", \"service name:MYSQL_ALTER\", \"alarm name:MYSQL_ALTER_DUMP\", "
                + "\"get the alarm exception.!,interface error,exception information:timed out\", \"request address:http://blog.csdn.net/dreamInTheWorld/article/details/78539286\"]");
        alert.setAlertGroupId(1);
        alert.setAlertStatus(AlertStatus.WAIT_EXECUTION);
        alertDao.addAlert(alert);

        List<Alert> alerts = alertDao.listWaitExecutionAlert();
        Assert.assertNotNull(alerts);
        Assert.assertNotEquals(0, alerts.size());
    }
}
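If the deduplication guard sketched earlier were adopted, a companion test in the style of `AlertDaoTest` above could pin the behavior down. This is a sketch against the hypothetical guard, not the project's actual test for PR #5529:

```java
// Hypothetical JUnit sketch: two observers reporting the same master going down
// should leave exactly one alert row once a deduplication guard is in place.
@Test
public void testServerStopedAlertNotDuplicated() {
    AlertDao alertDao = DaoFactory.getDaoInstance(AlertDao.class);

    // Two surviving services report the same host (assumes an empty t_ds_alert table).
    alertDao.sendServerStopedAlert(1, "192.168.xx.xx:5678", "MASTER");
    alertDao.sendServerStopedAlert(1, "192.168.xx.xx:5678", "MASTER");

    // With deduplication, the second call is a no-op, so only one row waits for execution.
    List<Alert> alerts = alertDao.listWaitExecutionAlert();
    Assert.assertEquals(1, alerts.size());
}
```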
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,570
[Bug][Worker] worker.groups in worker.properties is still commented after installation in 1.3.6
**To Reproduce** 1. Run `install.sh` 2. Observe that `worker.groups` in `worker.properties` is still commented out **Expected behavior** `worker.groups` is uncommented and set to the configured worker groups during installation **Which version of Dolphin Scheduler:** -[dev] -[1.3.6]
https://github.com/apache/dolphinscheduler/issues/5570
https://github.com/apache/dolphinscheduler/pull/5571
f8ecb536b71d6f33b71c73930832b62890b84ea1
a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57
2021-06-01T13:15:58Z
java
2021-06-02T03:21:01Z
docker/build/conf/dolphinscheduler/datasource.properties.tpl
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# db
spring.datasource.driver-class-name=${DATABASE_DRIVER}
spring.datasource.url=jdbc:${DATABASE_TYPE}://${DATABASE_HOST}:${DATABASE_PORT}/${DATABASE_DATABASE}${DATABASE_PARAMS:+?${DATABASE_PARAMS}}
spring.datasource.username=${DATABASE_USERNAME}
spring.datasource.password=${DATABASE_PASSWORD}

# postgresql
#spring.datasource.driver-class-name=org.postgresql.Driver
#spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
#spring.datasource.username=root
#spring.datasource.password=root

# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
#spring.datasource.username=ds_user
#spring.datasource.password=dolphinscheduler

# connection configuration
#spring.datasource.initialSize=5
# min connection number
#spring.datasource.minIdle=5
# max connection number
#spring.datasource.maxActive=50

# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
#spring.datasource.maxWait=60000

# milliseconds for check to close free connections
#spring.datasource.timeBetweenEvictionRunsMillis=60000

# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
#spring.datasource.timeBetweenConnectErrorMillis=60000

# the longest time a connection remains idle without being evicted, in milliseconds
#spring.datasource.minEvictableIdleTimeMillis=300000

# the SQL used to check whether the connection is valid requires a query statement. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
#spring.datasource.validationQuery=SELECT 1

# check whether the connection is valid for timeout, in seconds
#spring.datasource.validationQueryTimeout=3

# when applying for a connection, if it is detected that the connection is idle longer than timeBetweenEvictionRunsMillis,
# validationQuery is performed to check whether the connection is valid
#spring.datasource.testWhileIdle=true

# execute validation to check if the connection is valid when applying for a connection
#spring.datasource.testOnBorrow=true

# execute validation to check if the connection is valid when the connection is returned
#spring.datasource.testOnReturn=false
#spring.datasource.defaultAutoCommit=true
#spring.datasource.keepAlive=true

# open PSCache, specify count PSCache for every connection
#spring.datasource.poolPreparedStatements=true
#spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,570
[Bug][Worker] worker.groups in worker.properties is still commented after installation in 1.3.6
**To Reproduce** 1. Run `install.sh` 2. Observe that `worker.groups` in `worker.properties` is still commented out **Expected behavior** `worker.groups` is uncommented and set to the configured worker groups during installation **Which version of Dolphin Scheduler:** -[dev] -[1.3.6]
https://github.com/apache/dolphinscheduler/issues/5570
https://github.com/apache/dolphinscheduler/pull/5571
f8ecb536b71d6f33b71c73930832b62890b84ea1
a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57
2021-06-01T13:15:58Z
java
2021-06-02T03:21:01Z
dolphinscheduler-dao/src/main/resources/datasource.properties
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# postgresql
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
spring.datasource.username=root
spring.datasource.password=root

# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
#spring.datasource.username=ds_user
#spring.datasource.password=dolphinscheduler

# connection configuration
#spring.datasource.initialSize=5
# min connection number
#spring.datasource.minIdle=5
# max connection number
#spring.datasource.maxActive=50

# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
#spring.datasource.maxWait=60000

# milliseconds for check to close free connections
#spring.datasource.timeBetweenEvictionRunsMillis=60000

# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
#spring.datasource.timeBetweenConnectErrorMillis=60000

# the longest time a connection remains idle without being evicted, in milliseconds
#spring.datasource.minEvictableIdleTimeMillis=300000

# the SQL used to check whether the connection is valid requires a query statement. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
#spring.datasource.validationQuery=SELECT 1

# check whether the connection is valid for timeout, in seconds
#spring.datasource.validationQueryTimeout=3

# when applying for a connection, if it is detected that the connection is idle longer than timeBetweenEvictionRunsMillis,
# validationQuery is performed to check whether the connection is valid
#spring.datasource.testWhileIdle=true

# execute validation to check if the connection is valid when applying for a connection
#spring.datasource.testOnBorrow=true

# execute validation to check if the connection is valid when the connection is returned
#spring.datasource.testOnReturn=false
#spring.datasource.defaultAutoCommit=true
#spring.datasource.keepAlive=true

# open PSCache, specify count PSCache for every connection
#spring.datasource.poolPreparedStatements=true
#spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,570
[Bug][Worker] worker.groups in worker.properties is still commented after installation in 1.3.6
**To Reproduce** 1. Run `install.sh` 2. Observe that `worker.groups` in `worker.properties` is still commented out **Expected behavior** `worker.groups` is uncommented and set to the configured worker groups during installation **Which version of Dolphin Scheduler:** -[dev] -[1.3.6]
https://github.com/apache/dolphinscheduler/issues/5570
https://github.com/apache/dolphinscheduler/pull/5571
f8ecb536b71d6f33b71c73930832b62890b84ea1
a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57
2021-06-01T13:15:58Z
java
2021-06-02T03:21:01Z
dolphinscheduler-server/src/main/resources/config/install_config.conf
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# NOTICE: If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, Please escape, for example, `[` escape to `\[`

# postgresql or mysql
dbtype="mysql"

# db config
# db address and port
dbhost="192.168.xx.xx:3306"

# db username
username="xx"

# db password
# NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[`
password="xx"

# database name
dbname="dolphinscheduler"

# zk cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"

# zk root directory
zkRoot="/dolphinscheduler"

# Note: the target installation path for dolphinscheduler, please not config as the same as the current path (pwd)
installPath="/data1_1T/dolphinscheduler"

# deployment user
# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
deployUser="dolphinscheduler"

# alert config
# alert plugin dir
# Note: find and load the Alert Plugin Jar from this dir.
alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert"

# resource storage type: HDFS, S3, NONE
resourceStorageType="NONE"

# if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
# Note,s3 be sure to create the root directory /dolphinscheduler
defaultFS="hdfs://mycluster:8020"

# if resourceStorageType is S3, the following three configuration is required, otherwise please ignore
s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx"

# resourcemanager port, the default value is 8088 if not specified
resourceManagerHttpAddressPort=8088

# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarnHaIps="192.168.xx.xx,192.168.xx.xx"

# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
singleYarnIp="yarnIp1"

# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
resourceUploadPath="/dolphinscheduler"

# who have permissions to create directory under HDFS/S3 root path
# Note: if kerberos is enabled, please config hdfsRootUser=
hdfsRootUser="hdfs"

# kerberos config
# whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore
kerberosStartUp="false"
# kdc krb5 config file path
krb5ConfPath="$installPath/conf/krb5.conf"
# keytab username
keytabUserName="[email protected]"
# username keytab path
keytabPath="$installPath/conf/hdfs.headless.keytab"

# api server port
apiServerPort="12345"

# install hosts
# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
ips="ds1,ds2,ds3,ds4,ds5"

# ssh port, default 22
# Note: if ssh port is not default, modify here
sshPort="22"

# run master machine
# Note: list of hosts hostname for deploying master
masters="ds1,ds2"

# run worker machine
# note: need to write the worker group name of each worker, the default value is "default"
workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default"

# run alert machine
# note: list of machine hostnames for deploying alert server
alertServer="ds3"

# run api machine
# note: list of machine hostnames for deploying api server
apiServers="ds1"
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,570
[Bug][Worker] worker.groups in worker.properties is still commented after installation in 1.3.6
**To Reproduce** 1. Run `install.sh` 2. See error **Expected behavior** Bug fixed **Screenshots** If applicable, add screenshots to help explain your problem. **Which version of Dolphin Scheduler:** -[dev] -[1.3.6] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe about your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5570
https://github.com/apache/dolphinscheduler/pull/5571
f8ecb536b71d6f33b71c73930832b62890b84ea1
a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57
2021-06-01T13:15:58Z
java
2021-06-02T03:21:01Z
dolphinscheduler-service/src/main/resources/zookeeper.properties
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
zookeeper.quorum=localhost:2181

# dolphinscheduler root directory
#zookeeper.dolphinscheduler.root=/dolphinscheduler

# dolphinscheduler failover directory
#zookeeper.session.timeout=60000
#zookeeper.connection.timeout=30000
#zookeeper.retry.base.sleep=100
#zookeeper.retry.max.sleep=30000
#zookeeper.retry.maxtime=10
#zookeeper.max.wait.time=10000
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,570
[Bug][Worker] worker.groups in worker.properties is still commented after installation in 1.3.6
**To Reproduce** 1. Run `install.sh` 2. Observe that `worker.groups` in `worker.properties` is still commented out **Expected behavior** `worker.groups` is uncommented and set to the configured worker groups during installation **Which version of Dolphin Scheduler:** -[dev] -[1.3.6]
https://github.com/apache/dolphinscheduler/issues/5570
https://github.com/apache/dolphinscheduler/pull/5571
f8ecb536b71d6f33b71c73930832b62890b84ea1
a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57
2021-06-01T13:15:58Z
java
2021-06-02T03:21:01Z
install.sh
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

workDir=`dirname $0`
workDir=`cd ${workDir};pwd`

source ${workDir}/conf/config/install_config.conf

# 1.replace file
echo "1.replace file"

txt=""
if [[ "$OSTYPE" == "darwin"* ]]; then
    # Mac OSX
    txt="''"
fi

datasourceDriverClassname="com.mysql.jdbc.Driver"
if [ $dbtype == "postgresql" ];then
    datasourceDriverClassname="org.postgresql.Driver"
fi
sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=${datasourceDriverClassname}#g" conf/datasource.properties
sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:${dbtype}://${dbhost}/${dbname}?characterEncoding=UTF-8\&allowMultiQueries=true#g" conf/datasource.properties
sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/datasource.properties
sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${password}#g" conf/datasource.properties

sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${defaultFS}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.endpoint.*#fs.s3a.endpoint=${s3Endpoint}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.access.key.*#fs.s3a.access.key=${s3AccessKey}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.secret.key.*#fs.s3a.secret.key=${s3SecretKey}#g" conf/common.properties
sed -i ${txt} "s#resource.manager.httpaddress.port.*#resource.manager.httpaddress.port=${resourceManagerHttpAddressPort}#g" conf/common.properties
sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common.properties
sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:%s/ws/v1/cluster/apps/%s#g" conf/common.properties
sed -i ${txt} "s#yarn.job.history.status.address.*#yarn.job.history.status.address=http://${singleYarnIp}:19888/ws/v1/history/mapreduce/jobs/%s#g" conf/common.properties
sed -i ${txt} "s#hdfs.root.user.*#hdfs.root.user=${hdfsRootUser}#g" conf/common.properties
sed -i ${txt} "s#resource.upload.path.*#resource.upload.path=${resourceUploadPath}#g" conf/common.properties
sed -i ${txt} "s#resource.storage.type.*#resource.storage.type=${resourceStorageType}#g" conf/common.properties
sed -i ${txt} "s#hadoop.security.authentication.startup.state.*#hadoop.security.authentication.startup.state=${kerberosStartUp}#g" conf/common.properties
sed -i ${txt} "s#java.security.krb5.conf.path.*#java.security.krb5.conf.path=${krb5ConfPath}#g" conf/common.properties
sed -i ${txt} "s#login.user.keytab.username.*#login.user.keytab.username=${keytabUserName}#g" conf/common.properties
sed -i ${txt} "s#login.user.keytab.path.*#login.user.keytab.path=${keytabPath}#g" conf/common.properties

sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/zookeeper.properties
sed -i ${txt} "s#\#zookeeper.dolphinscheduler.root.*#zookeeper.dolphinscheduler.root=${zkRoot}#g" conf/zookeeper.properties

sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application-api.properties

sed -i ${txt} "s#\#alert.plugin.dir.*#alert.plugin.dir=${alertPluginDir}#g" conf/alert.properties
sed -i ${txt} "s#\#alert.listen.host.*#alert.listen.host=${alertServer}#g" conf/worker.properties

# 2.create directory
echo "2.create directory"

if [ ! -d $installPath ];then
    sudo mkdir -p $installPath
    sudo chown -R $deployUser:$deployUser $installPath
fi

# 3.scp resources
echo "3.scp resources"
sh ${workDir}/script/scp-hosts.sh
if [ $? -eq 0 ]
then
    echo 'scp copy completed'
else
    echo 'scp copy failed to exit'
    exit 1
fi

# 4.stop server
echo "4.stop server"
sh ${workDir}/script/stop-all.sh

# 5.delete zk node
echo "5.delete zk node"
sh ${workDir}/script/remove-zk-node.sh $zkRoot

# 6.startup
echo "6.startup"
sh ${workDir}/script/start-all.sh
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,570
[Bug][Worker] worker.groups in worker.properties is still commented after installation in 1.3.6
**To Reproduce** 1. Run `install.sh` 2. See error **Expected behavior** Bug fixed **Screenshots** If applicable, add screenshots to help explain your problem. **Which version of Dolphin Scheduler:** -[dev] -[1.3.6] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe about your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5570
https://github.com/apache/dolphinscheduler/pull/5571
f8ecb536b71d6f33b71c73930832b62890b84ea1
a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57
2021-06-01T13:15:58Z
java
2021-06-02T03:21:01Z
script/scp-hosts.sh
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

workDir=`dirname $0`
workDir=`cd ${workDir};pwd`

source $workDir/../conf/config/install_config.conf

txt=""
if [[ "$OSTYPE" == "darwin"* ]]; then
    # Mac OSX
    txt="''"
fi

declare -A workersGroupMap=()

workersGroup=(${workers//,/ })
for workerGroup in ${workersGroup[@]}
do
    echo $workerGroup;
    worker=`echo $workerGroup|awk -F':' '{print $1}'`
    groupsName=`echo $workerGroup|awk -F':' '{print $2}'`
    workersGroupMap+=([$worker]=$groupsName)
done

hostsArr=(${ips//,/ })
for host in ${hostsArr[@]}
do
    if ! ssh -p $sshPort $host test -e $installPath; then
        ssh -p $sshPort $host "sudo mkdir -p $installPath; sudo chown -R $deployUser:$deployUser $installPath"
    fi

    echo "scp dirs to $host/$installPath starting"
    ssh -p $sshPort $host "cd $installPath/; rm -rf bin/ conf/ lib/ script/ sql/ ui/"

    for dsDir in bin conf lib script sql ui install.sh
    do
        # if worker in workersGroupMap
        if [[ "${workersGroupMap[${host}]}" ]] && [[ "${dsDir}" == "conf" ]]; then
            sed -i ${txt} "s:.*worker.groups.*:worker.groups=${workersGroupMap[${host}]}:g" ${dsDir}/worker.properties
        fi

        echo "start to scp $dsDir to $host/$installPath"
        scp -P $sshPort -r $workDir/../$dsDir $host:$installPath
    done

    echo "scp dirs to $host/$installPath complete"
done
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,569
[Bug][dolphinscheduler-api] verify process definition name fails
**Describe the bug** When spaces are used in a process definition name, the uniqueness check passes when it should fail. **To Reproduce** Steps to reproduce the behavior, for example: 1. Create a process and edit a simple task; 2. Save the process definition name as “new_process”; 3. Create a new process and edit a simple task; 4. Save the process definition name as “ new_process” (note the leading space); this time the name verification also succeeds; 5. Two process definitions with effectively the same name are now generated. **Which version of Dolphin Scheduler:** -[1.3.5] **Requirement or improvement** - Name verification can be fixed by trimming leading and trailing spaces from the name before checking.
https://github.com/apache/dolphinscheduler/issues/5569
https://github.com/apache/dolphinscheduler/pull/5574
a5a0c7c5f8885b31e18bbf3e2d8567104ba38b57
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
2021-06-01T11:46:21Z
java
2021-06-02T04:01:01Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID;

import org.apache.dolphinscheduler.api.dto.ProcessMeta;
import org.apache.dolphinscheduler.api.dto.treeview.Instance;
import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionService;
import org.apache.dolphinscheduler.api.service.ProcessInstanceService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.FileUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.exportprocess.ProcessAddTaskParam;
import org.apache.dolphinscheduler.api.utils.exportprocess.TaskNodeParamFactory;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils;
import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException;
import org.apache.dolphinscheduler.common.utils.StreamUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.service.permission.PermissionCheck;
import org.apache.dolphinscheduler.service.process.ProcessService;

import org.apache.commons.collections.map.HashedMap;

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.ImmutableMap;

/**
 * process definition service impl
 */
@Service
public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService {

    private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class);

    private static final String PROCESSDEFINITIONCODE = "processDefinitionCode";

    private static final String RELEASESTATE = "releaseState";

    private static final String TASKS = "tasks";

    @Autowired
    private ProjectMapper projectMapper;

    @Autowired
    private ProjectService projectService;

    @Autowired
    private UserMapper userMapper;

    @Autowired
    private ProcessDefinitionLogMapper processDefinitionLogMapper;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;

    @Autowired
    private ProcessInstanceService processInstanceService;

    @Autowired
    private TaskInstanceMapper taskInstanceMapper;

    @Autowired
    private ScheduleMapper scheduleMapper;

    @Autowired
    private ProcessService processService;

    @Autowired
    private ProcessTaskRelationMapper processTaskRelationMapper;

    @Autowired
    TaskDefinitionLogMapper taskDefinitionLogMapper;

    @Autowired
    private SchedulerService schedulerService;

    /**
     * create process definition
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processDefinitionName process definition name
     * @param processDefinitionJson process definition json
     * @param desc description
     * @param locations locations for nodes
     * @param connects connects for nodes
     * @return create result code
     */
    @Override
    @Transactional(rollbackFor = Exception.class)
    public Map<String, Object> createProcessDefinition(User loginUser,
                                                       String projectName,
                                                       String processDefinitionName,
                                                       String processDefinitionJson,
                                                       String desc,
                                                       String locations,
                                                       String connects) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);
        // check project auth
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            return checkResult;
        }

        ProcessDefinition processDefinition = new ProcessDefinition();
        ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
        Map<String, Object> checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
        if (checkProcessJson.get(Constants.STATUS) != Status.SUCCESS) {
            return checkProcessJson;
        }
        try {
            long processDefinitionCode = SnowFlakeUtils.getInstance().nextId();
            processDefinition.setCode(processDefinitionCode);
            processDefinition.setVersion(1);
        } catch (SnowFlakeException e) {
            putMsg(result, Status.CREATE_PROCESS_DEFINITION);
            return result;
        }
        int saveResult = processService.saveProcessDefinition(loginUser, project, processDefinitionName, desc,
                locations, connects, processData, processDefinition, true);
        if (saveResult > 0) {
            putMsg(result, Status.SUCCESS);
            // return processDefinition object with ID
            result.put(Constants.DATA_LIST, processDefinition.getId());
        } else {
            putMsg(result, Status.CREATE_PROCESS_DEFINITION);
        }
        return result;
    }

    /**
     * query process definition list
     *
     * @param loginUser login user
     * @param projectName project name
     * @return definition list
     */
    @Override
    public Map<String, Object> queryProcessDefinitionList(User loginUser, String projectName) {
        HashMap<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            return checkResult;
        }
        List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(project.getCode());
        resourceList.forEach(processDefinition -> {
            ProcessData processData = processService.genProcessData(processDefinition);
            processDefinition.setProcessDefinitionJson(JSONUtils.toJsonString(processData));
        });
        result.put(Constants.DATA_LIST, resourceList);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process definition list paging
     *
     * @param loginUser login user
     * @param projectName project name
     * @param searchVal search value
     * @param pageNo page number
     * @param pageSize page size
     * @param userId user id
     * @return process definition page
     */
    @Override
    public Map<String, Object> queryProcessDefinitionListPaging(User loginUser, String projectName, String searchVal, Integer pageNo, Integer pageSize, Integer userId) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            return checkResult;
        }

        Page<ProcessDefinition> page = new Page<>(pageNo, pageSize);
        IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging(
                page, searchVal, userId, project.getCode(), isAdmin(loginUser));

        List<ProcessDefinition> records = processDefinitionIPage.getRecords();
        for (ProcessDefinition pd : records) {
            ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryMaxVersionDefinitionLog(pd.getCode());
            int operator = processDefinitionLog.getOperator();
            User user = userMapper.selectById(operator);
            pd.setModifyBy(user.getUserName());
            pd.setProjectId(project.getId());
        }
        processDefinitionIPage.setRecords(records);
        PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize);
        pageInfo.setTotalCount((int) processDefinitionIPage.getTotal());
        pageInfo.setLists(records);
        result.put(Constants.DATA_LIST, pageInfo);
        putMsg(result, Status.SUCCESS);

        return result;
    }

    /**
     * query datail of process definition
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processId process definition id
     * @return process definition detail
     */
    @Override
    public Map<String, Object> queryProcessDefinitionById(User loginUser, String projectName, Integer processId) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            return checkResult;
        }

        ProcessDefinition processDefinition = processDefinitionMapper.selectById(processId);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
        } else {
            ProcessData processData = processService.genProcessData(processDefinition);
            processDefinition.setProcessDefinitionJson(JSONUtils.toJsonString(processData));
            result.put(Constants.DATA_LIST, processDefinition);
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    @Override
    public Map<String, Object> queryProcessDefinitionByName(User loginUser, String projectName, String processDefinitionName) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            return checkResult;
        }

        ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(project.getCode(), processDefinitionName);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionName);
        } else {
            ProcessData processData = processService.genProcessData(processDefinition);
            processDefinition.setProcessDefinitionJson(JSONUtils.toJsonString(processData));
            result.put(Constants.DATA_LIST, processDefinition);
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    /**
     * update process definition
     *
     * @param loginUser login user
     * @param projectName project name
     * @param name process definition name
     * @param id process definition id
     * @param processDefinitionJson process definition json
     * @param desc description
     * @param locations locations for nodes
     * @param connects connects for nodes
     * @return update result code
     */
    @Override
    public Map<String, Object> updateProcessDefinition(User loginUser, String projectName, int id, String name,
                                                       String processDefinitionJson, String desc,
                                                       String locations, String connects) {
        Map<String, Object> result = new HashMap<>();

        Project project = projectMapper.queryByName(projectName);
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            return checkResult;
        }

        ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
        Map<String, Object> checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
        if ((checkProcessJson.get(Constants.STATUS) != Status.SUCCESS)) {
            return checkProcessJson;
        }

        // TODO processDefinitionMapper.queryByCode
        ProcessDefinition processDefinition = processService.findProcessDefineById(id);
        // check process definition exists
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, id);
            return result;
        }
        if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
            // online can not permit edit
            putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName());
            return result;
        }
        if (!name.equals(processDefinition.getName())) {
            // check whether the new process define name exist
            ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
            if (definition != null) {
                putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
                return result;
            }
        }

        ProcessData newProcessData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
        int saveResult = processService.saveProcessDefinition(loginUser, project, name, desc,
                locations, connects, newProcessData, processDefinition, true);
        if (saveResult > 0) {
            putMsg(result, Status.SUCCESS);
            result.put(Constants.DATA_LIST, processDefinition);
        } else {
            putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
        }
        return result;
    }

    /**
     * verify process definition name unique
     *
     * @param loginUser login user
     * @param projectName project name
     * @param name name
     * @return true if process definition name not exists, otherwise false
     */
    @Override
    public Map<String, Object> verifyProcessDefinitionName(User loginUser, String projectName, String name) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
        if (processDefinition == null) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
        }
        return result;
    }

    /**
     * delete process definition by id
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processDefinitionId process definition id
     * @return delete result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> deleteProcessDefinitionById(User loginUser, String projectName, Integer processDefinitionId) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.selectById(processDefinitionId);
        // TODO: replace id to code
        // ProcessDefinition processDefinition = processDefineMapper.selectByCode(processDefinitionCode);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionId);
            return result;
        }

        // Determine if the login user is the owner of the process definition
        if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
            putMsg(result, Status.USER_NO_OPERATION_PERM);
            return result;
        }

        // check process definition is already online
        if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
            putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, processDefinitionId);
            return result;
        }
        // check process instances is already running
        List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES);
        if (CollectionUtils.isNotEmpty(processInstances)) {
            putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_ID_FAIL, processInstances.size());
            return result;
        }

        // get the timing according to the process definition
        List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
        if (!schedules.isEmpty() && schedules.size() > 1) {
            logger.warn("scheduler num is {},Greater than 1", schedules.size());
            putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
            return result;
        } else if (schedules.size() == 1) {
            Schedule schedule = schedules.get(0);
            if (schedule.getReleaseState() == ReleaseState.OFFLINE) {
                scheduleMapper.deleteById(schedule.getId());
            } else if (schedule.getReleaseState() == ReleaseState.ONLINE) {
                putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
                return result;
            }
        }

        int delete = processDefinitionMapper.deleteById(processDefinitionId);
        processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode());
        if (delete > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
        }
        return result;
    }

    /**
     * release process definition: online / offline
     *
     * @param loginUser login user
     * @param projectName project name
     * @param id process definition id
     * @param releaseState release state
     * @return release result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> releaseProcessDefinition(User loginUser, String projectName, int id, ReleaseState releaseState) {
        HashMap<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }

        // check state
        if (null == releaseState) {
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
            return result;
        }

        ProcessDefinition processDefinition = processDefinitionMapper.selectById(id);

        switch (releaseState) {
            case ONLINE:
                // To check resources whether they are already cancel authorized or deleted
                String resourceIds = processDefinition.getResourceIds();
                if (StringUtils.isNotBlank(resourceIds)) {
                    Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new);
                    PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger);
                    try {
                        permissionCheck.checkPermission();
                    } catch (Exception e) {
                        logger.error(e.getMessage(), e);
                        putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE);
                        return result;
                    }
                }

                processDefinition.setReleaseState(releaseState);
                processDefinitionMapper.updateById(processDefinition);
                break;
            case OFFLINE:
                processDefinition.setReleaseState(releaseState);
                processDefinitionMapper.updateById(processDefinition);
                List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray(
                        new int[]{processDefinition.getId()}
                );

                for (Schedule schedule : scheduleList) {
                    logger.info("set schedule offline, project id: {}, schedule id: {}, process definition id: {}", project.getId(), schedule.getId(), id);
                    // set status
                    schedule.setReleaseState(ReleaseState.OFFLINE);
                    scheduleMapper.updateById(schedule);
                    schedulerService.deleteSchedule(project.getId(), schedule.getId());
                }
                break;
            default:
                putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
                return result;
        }

        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * batch export process definition by ids
     */
    @Override
    public void batchExportProcessDefinitionByIds(User loginUser, String projectName, String processDefinitionIds, HttpServletResponse response) {
        if (StringUtils.isEmpty(processDefinitionIds)) {
            return;
        }

        //export project info
        Project project = projectMapper.queryByName(projectName);

        //check user access for project
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            return;
        }

        List<ProcessMeta> processDefinitionList = getProcessDefinitionList(processDefinitionIds);

        if (CollectionUtils.isNotEmpty(processDefinitionList)) {
            downloadProcessDefinitionFile(response, processDefinitionList);
        }
    }

    /**
     * get process definition list by ids
     */
    private List<ProcessMeta> getProcessDefinitionList(String processDefinitionIds) {
        String[] processDefinitionIdArray = processDefinitionIds.split(",");
        List<ProcessMeta> processDefinitionList = new ArrayList<>();
        for (String strProcessDefinitionId : processDefinitionIdArray) {
            //get workflow info
            int processDefinitionId = Integer.parseInt(strProcessDefinitionId);
            ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineId(processDefinitionId);
            processDefinitionList.add(exportProcessMetaData(processDefinition));
        }

        return processDefinitionList;
    }

    /**
     * download the process definition file
     */
    private void downloadProcessDefinitionFile(HttpServletResponse response, List<ProcessMeta> processDefinitionList) {
        response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
        BufferedOutputStream buff = null;
        ServletOutputStream out = null;
        try {
            out = response.getOutputStream();
            buff = new BufferedOutputStream(out);
            buff.write(JSONUtils.toJsonString(processDefinitionList).getBytes(StandardCharsets.UTF_8));
            buff.flush();
            buff.close();
        } catch (IOException e) {
            logger.warn("export process fail", e);
        } finally {
            if (null != buff) {
                try {
                    buff.close();
                } catch (Exception e) {
                    logger.warn("export process buffer not close", e);
                }
            }
            if (null != out) {
                try {
                    out.close();
                } catch (Exception e) {
                    logger.warn("export process output stream not close", e);
                }
            }
        }
    }

    /**
     * get export process metadata string
     *
     * @param processDefinition process definition
     * @return export process metadata string
     */
    public ProcessMeta exportProcessMetaData(ProcessDefinition processDefinition) {
        ProcessData processData = processService.genProcessData(processDefinition);

        //correct task param which has data source or dependent param
        addExportTaskNodeSpecialParam(processData);

        //export process metadata
        ProcessMeta exportProcessMeta = new ProcessMeta();
        exportProcessMeta.setProjectName(processDefinition.getProjectName());
        exportProcessMeta.setProcessDefinitionName(processDefinition.getName());
exportProcessMeta.setProcessDefinitionJson(JSONUtils.toJsonString(processService.genProcessData(processDefinition))); exportProcessMeta.setProcessDefinitionDescription(processDefinition.getDescription()); exportProcessMeta.setProcessDefinitionLocations(processDefinition.getLocations()); exportProcessMeta.setProcessDefinitionConnects(processDefinition.getConnects()); //schedule info List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinition.getId()); if (!schedules.isEmpty()) { Schedule schedule = schedules.get(0); exportProcessMeta.setScheduleWarningType(schedule.getWarningType().toString()); exportProcessMeta.setScheduleWarningGroupId(schedule.getWarningGroupId()); exportProcessMeta.setScheduleStartTime(DateUtils.dateToString(schedule.getStartTime())); exportProcessMeta.setScheduleEndTime(DateUtils.dateToString(schedule.getEndTime())); exportProcessMeta.setScheduleCrontab(schedule.getCrontab()); exportProcessMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy())); exportProcessMeta.setScheduleReleaseState(String.valueOf(ReleaseState.OFFLINE)); exportProcessMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority())); exportProcessMeta.setScheduleWorkerGroupName(schedule.getWorkerGroup()); } //create workflow json file return exportProcessMeta; } /** * correct task param which has datasource or dependent * * @param processData process data * @return correct processDefinitionJson */ private void addExportTaskNodeSpecialParam(ProcessData processData) { List<TaskNode> taskNodeList = processData.getTasks(); List<TaskNode> tmpNodeList = new ArrayList<>(); for (TaskNode taskNode : taskNodeList) { ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskNode.getType()); JsonNode jsonNode = JSONUtils.toJsonNode(taskNode); if (null != addTaskParam) { addTaskParam.addExportSpecialParam(jsonNode); } tmpNodeList.add(JSONUtils.parseObject(jsonNode.toString(), TaskNode.class)); } processData.setTasks(tmpNodeList); } /** * check task if has sub process * * @param taskType task type * @return if task has sub process return true else false */ private boolean checkTaskHasSubProcess(String taskType) { return taskType.equals(TaskType.SUB_PROCESS.getDesc()); } /** * import process definition * * @param loginUser login user * @param file process metadata json file * @param currentProjectName current project name * @return import process */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> importProcessDefinition(User loginUser, MultipartFile file, String currentProjectName) { Map<String, Object> result = new HashMap<>(); String processMetaJson = FileUtils.file2String(file); List<ProcessMeta> processMetaList = JSONUtils.toList(processMetaJson, ProcessMeta.class); //check file content if (CollectionUtils.isEmpty(processMetaList)) { putMsg(result, Status.DATA_IS_NULL, "fileContent"); return result; } for (ProcessMeta processMeta : processMetaList) { if (!checkAndImportProcessDefinition(loginUser, currentProjectName, result, processMeta)) { return result; } } return result; } /** * check and import process definition */ private boolean checkAndImportProcessDefinition(User loginUser, String currentProjectName, Map<String, Object> result, ProcessMeta processMeta) { if (!checkImportanceParams(processMeta, result)) { return false; } //deal with process name String processDefinitionName = processMeta.getProcessDefinitionName(); //use currentProjectName to query Project 
targetProject = projectMapper.queryByName(currentProjectName); if (null != targetProject) { processDefinitionName = recursionProcessDefinitionName(targetProject.getCode(), processDefinitionName, 1); } //unique check Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, currentProjectName, processDefinitionName); Status status = (Status) checkResult.get(Constants.STATUS); if (Status.SUCCESS.equals(status)) { putMsg(result, Status.SUCCESS); } else { result.putAll(checkResult); return false; } // get create process result Map<String, Object> createProcessResult = getCreateProcessResult(loginUser, currentProjectName, result, processMeta, processDefinitionName, addImportTaskNodeParam(loginUser, processMeta.getProcessDefinitionJson(), targetProject)); if (createProcessResult == null) { return false; } //create process definition Integer processDefinitionId = Objects.isNull(createProcessResult.get(Constants.DATA_LIST)) ? null : Integer.parseInt(createProcessResult.get(Constants.DATA_LIST).toString()); //scheduler param return getImportProcessScheduleResult(loginUser, currentProjectName, result, processMeta, processDefinitionName, processDefinitionId); } /** * get create process result */ private Map<String, Object> getCreateProcessResult(User loginUser, String currentProjectName, Map<String, Object> result, ProcessMeta processMeta, String processDefinitionName, String importProcessParam) { Map<String, Object> createProcessResult = null; try { createProcessResult = createProcessDefinition(loginUser , currentProjectName, processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp(), importProcessParam, processMeta.getProcessDefinitionDescription(), processMeta.getProcessDefinitionLocations(), processMeta.getProcessDefinitionConnects()); putMsg(result, Status.SUCCESS); } catch (Exception e) { logger.error("import process meta json data: {}", e.getMessage(), e); putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR); } return createProcessResult; } /** * get import process schedule result */ private boolean getImportProcessScheduleResult(User loginUser, String currentProjectName, Map<String, Object> result, ProcessMeta processMeta, String processDefinitionName, Integer processDefinitionId) { if (null != processMeta.getScheduleCrontab() && null != processDefinitionId) { int scheduleInsert = importProcessSchedule(loginUser, currentProjectName, processMeta, processDefinitionName, processDefinitionId); if (0 == scheduleInsert) { putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR); return false; } } return true; } /** * check importance params */ private boolean checkImportanceParams(ProcessMeta processMeta, Map<String, Object> result) { if (StringUtils.isEmpty(processMeta.getProjectName())) { putMsg(result, Status.DATA_IS_NULL, "projectName"); return false; } if (StringUtils.isEmpty(processMeta.getProcessDefinitionName())) { putMsg(result, Status.DATA_IS_NULL, "processDefinitionName"); return false; } if (StringUtils.isEmpty(processMeta.getProcessDefinitionJson())) { putMsg(result, Status.DATA_IS_NULL, "processDefinitionJson"); return false; } return true; } /** * import process add special task param * * @param loginUser login user * @param processDefinitionJson process definition json * @param targetProject target project * @return import process param */ private String addImportTaskNodeParam(User loginUser, String processDefinitionJson, Project targetProject) { ObjectNode jsonObject = JSONUtils.parseObject(processDefinitionJson); ArrayNode jsonArray = (ArrayNode) 
jsonObject.get(TASKS); //add sql and dependent param for (int i = 0; i < jsonArray.size(); i++) { JsonNode taskNode = jsonArray.path(i); String taskType = taskNode.path("type").asText(); ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskType); if (null != addTaskParam) { addTaskParam.addImportSpecialParam(taskNode); } } //recursive sub-process parameter correction map key for old process code value for new process code Map<Long, Long> subProcessCodeMap = new HashMap<>(); List<Object> subProcessList = StreamUtils.asStream(jsonArray.elements()) .filter(elem -> checkTaskHasSubProcess(JSONUtils.parseObject(elem.toString()).path("type").asText())) .collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(subProcessList)) { importSubProcess(loginUser, targetProject, jsonArray, subProcessCodeMap); } jsonObject.set(TASKS, jsonArray); return jsonObject.toString(); } /** * import process schedule * * @param loginUser login user * @param currentProjectName current project name * @param processMeta process meta data * @param processDefinitionName process definition name * @param processDefinitionId process definition id * @return insert schedule flag */ public int importProcessSchedule(User loginUser, String currentProjectName, ProcessMeta processMeta, String processDefinitionName, Integer processDefinitionId) { Date now = new Date(); Schedule scheduleObj = new Schedule(); scheduleObj.setProjectName(currentProjectName); scheduleObj.setProcessDefinitionId(processDefinitionId); scheduleObj.setProcessDefinitionName(processDefinitionName); scheduleObj.setCreateTime(now); scheduleObj.setUpdateTime(now); scheduleObj.setUserId(loginUser.getId()); scheduleObj.setUserName(loginUser.getUserName()); scheduleObj.setCrontab(processMeta.getScheduleCrontab()); if (null != processMeta.getScheduleStartTime()) { scheduleObj.setStartTime(DateUtils.stringToDate(processMeta.getScheduleStartTime())); } if (null != processMeta.getScheduleEndTime()) { scheduleObj.setEndTime(DateUtils.stringToDate(processMeta.getScheduleEndTime())); } if (null != processMeta.getScheduleWarningType()) { scheduleObj.setWarningType(WarningType.valueOf(processMeta.getScheduleWarningType())); } if (null != processMeta.getScheduleWarningGroupId()) { scheduleObj.setWarningGroupId(processMeta.getScheduleWarningGroupId()); } if (null != processMeta.getScheduleFailureStrategy()) { scheduleObj.setFailureStrategy(FailureStrategy.valueOf(processMeta.getScheduleFailureStrategy())); } if (null != processMeta.getScheduleReleaseState()) { scheduleObj.setReleaseState(ReleaseState.valueOf(processMeta.getScheduleReleaseState())); } if (null != processMeta.getScheduleProcessInstancePriority()) { scheduleObj.setProcessInstancePriority(Priority.valueOf(processMeta.getScheduleProcessInstancePriority())); } if (null != processMeta.getScheduleWorkerGroupName()) { scheduleObj.setWorkerGroup(processMeta.getScheduleWorkerGroupName()); } return scheduleMapper.insert(scheduleObj); } /** * check import process has sub process * recursion create sub process * * @param loginUser login user * @param targetProject target project * @param jsonArray process task array * @param subProcessCodeMap correct sub process id map */ private void importSubProcess(User loginUser, Project targetProject, ArrayNode jsonArray, Map<Long, Long> subProcessCodeMap) { for (int i = 0; i < jsonArray.size(); i++) { ObjectNode taskNode = (ObjectNode) jsonArray.path(i); String taskType = taskNode.path("type").asText(); if (!checkTaskHasSubProcess(taskType)) { continue; } //get 
sub process info ObjectNode subParams = (ObjectNode) taskNode.path("params"); Long subProcessCode = subParams.path(PROCESSDEFINITIONCODE).asLong(); ProcessDefinition subProcess = processDefinitionMapper.queryByCode(subProcessCode); //check is sub process exist in db if (null == subProcess) { continue; } String subProcessJson = JSONUtils.toJsonString(processService.genProcessData(subProcess)); //check current project has sub process ProcessDefinition currentProjectSubProcess = processDefinitionMapper.queryByDefineName(targetProject.getCode(), subProcess.getName()); if (null == currentProjectSubProcess) { ArrayNode subJsonArray = (ArrayNode) JSONUtils.parseObject(subProcessJson).get(TASKS); List<Object> subProcessList = StreamUtils.asStream(subJsonArray.elements()) .filter(item -> checkTaskHasSubProcess(JSONUtils.parseObject(item.toString()).path("type").asText())) .collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(subProcessList)) { importSubProcess(loginUser, targetProject, subJsonArray, subProcessCodeMap); //sub process processId correct if (!subProcessCodeMap.isEmpty()) { for (Map.Entry<Long, Long> entry : subProcessCodeMap.entrySet()) { String oldSubProcessCode = "\"processDefinitionCode\":" + entry.getKey(); String newSubProcessCode = "\"processDefinitionCode\":" + entry.getValue(); subProcessJson = subProcessJson.replaceAll(oldSubProcessCode, newSubProcessCode); } subProcessCodeMap.clear(); } } try { createProcessDefinition(loginUser , targetProject.getName(), subProcess.getName(), subProcessJson, subProcess.getDescription(), subProcess.getLocations(), subProcess.getConnects()); logger.info("create sub process, project: {}, process name: {}", targetProject.getName(), subProcess.getName()); } catch (Exception e) { logger.error("import process meta json data: {}", e.getMessage(), e); } //modify task node ProcessDefinition newSubProcessDefine = processDefinitionMapper.queryByDefineName(subProcess.getCode(), subProcess.getName()); if (null != newSubProcessDefine) { subProcessCodeMap.put(subProcessCode, newSubProcessDefine.getCode()); subParams.put(PROCESSDEFINITIONCODE, newSubProcessDefine.getId()); taskNode.set("params", subParams); } } } } /** * check the process definition node meets the specifications * * @param processData process data * @param processDefinitionJson process definition json * @return check result code */ @Override public Map<String, Object> checkProcessNodeList(ProcessData processData, String processDefinitionJson) { Map<String, Object> result = new HashMap<>(); try { if (processData == null) { logger.error("process data is null"); putMsg(result, Status.DATA_IS_NOT_VALID, processDefinitionJson); return result; } // Check whether the task node is normal List<TaskNode> taskNodes = processData.getTasks(); if (CollectionUtils.isEmpty(taskNodes)) { logger.error("process node info is empty"); putMsg(result, Status.PROCESS_DAG_IS_EMPTY); return result; } // check has cycle if (graphHasCycle(taskNodes)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the process definition json is normal for (TaskNode taskNode : taskNodes) { if (!CheckUtils.checkTaskNodeParameters(taskNode)) { logger.error("task node {} parameter invalid", taskNode.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName()); return result; } // check extra params CheckUtils.checkOtherParams(taskNode.getExtras()); } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, 
Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * get task node details based on process definition * * @param defineCode define code * @return task node list */ public Map<String, Object> getTaskNodeListByDefinitionCode(Long defineCode) { Map<String, Object> result = new HashMap<>(); ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(defineCode); if (processDefinition == null) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, defineCode); return result; } ProcessData processData = processService.genProcessData(processDefinition); //process data check if (null == processData) { logger.error("process data is null"); putMsg(result, Status.DATA_IS_NOT_VALID, JSONUtils.toJsonString(processData)); return result; } List<TaskNode> taskNodeList = (processData.getTasks() == null) ? new ArrayList<>() : processData.getTasks(); result.put(Constants.DATA_LIST, taskNodeList); putMsg(result, Status.SUCCESS); return result; } /** * get task node details based on process definition * * @param defineCodeList define code list * @return task node list */ @Override public Map<String, Object> getTaskNodeListByDefinitionCodeList(String defineCodeList) { Map<String, Object> result = new HashMap<>(); Map<Integer, List<TaskNode>> taskNodeMap = new HashMap<>(); String[] codeArr = defineCodeList.split(","); List<Long> codeList = new ArrayList<>(); for (String definitionCode : codeArr) { codeList.add(Long.parseLong(definitionCode)); } List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(codeList); if (CollectionUtils.isEmpty(processDefinitionList)) { logger.info("process definition not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, defineCodeList); return result; } for (ProcessDefinition processDefinition : processDefinitionList) { ProcessData processData = processService.genProcessData(processDefinition); List<TaskNode> taskNodeList = (processData.getTasks() == null) ? 
new ArrayList<>() : processData.getTasks(); taskNodeMap.put(processDefinition.getId(), taskNodeList); } result.put(Constants.DATA_LIST, taskNodeMap); putMsg(result, Status.SUCCESS); return result; } /** * query process definition all by project id * * @param projectId project id * @return process definitions in the project */ @Override public Map<String, Object> queryProcessDefinitionAllByProjectId(Integer projectId) { HashMap<String, Object> result = new HashMap<>(); Project project = projectMapper.selectById(projectId); List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(project.getCode()); resourceList.forEach(processDefinition -> { ProcessData processData = processService.genProcessData(processDefinition); processDefinition.setProcessDefinitionJson(JSONUtils.toJsonString(processData)); }); result.put(Constants.DATA_LIST, resourceList); putMsg(result, Status.SUCCESS); return result; } /** * Encapsulates the TreeView structure * * @param processId process definition id * @param limit limit * @return tree view json data * @throws Exception exception */ @Override public Map<String, Object> viewTree(Integer processId, Integer limit) throws Exception { Map<String, Object> result = new HashMap<>(); ProcessDefinition processDefinition = processDefinitionMapper.selectById(processId); if (null == processDefinition) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinition); return result; } DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition); /** * nodes that are running */ Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>(); /** * nodes that are waiting to run */ Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>(); /** * List of process instances */ List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(processDefinition.getCode(), limit); List<TaskDefinitionLog> taskDefinitionList = processService.queryTaskDefinitionList(processDefinition.getCode(), processDefinition.getVersion()); Map<Long, TaskDefinition> taskDefinitionMap = new HashedMap(); taskDefinitionList.forEach(taskDefinitionLog -> taskDefinitionMap.put(taskDefinitionLog.getCode(), taskDefinitionLog)); for (ProcessInstance processInstance : processInstanceList) { processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime())); } if (limit > processInstanceList.size()) { limit = processInstanceList.size(); } TreeViewDto parentTreeViewDto = new TreeViewDto(); parentTreeViewDto.setName("DAG"); parentTreeViewDto.setType(""); // Specify the process definition, because it is a TreeView for a process definition for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); Date endTime = processInstance.getEndTime() == null ?
new Date() : processInstance.getEndTime(); parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), "", processInstance.getState().toString() , processInstance.getStartTime(), endTime, processInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime()))); } List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>(); parentTreeViewDtoList.add(parentTreeViewDto); // Here is the encapsulation task instance for (String startNode : dag.getBeginNode()) { runningNodeMap.put(startNode, parentTreeViewDtoList); } while (Stopper.isRunning()) { Set<String> postNodeList = null; Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator(); while (iter.hasNext()) { Map.Entry<String, List<TreeViewDto>> en = iter.next(); String nodeName = en.getKey(); parentTreeViewDtoList = en.getValue(); TreeViewDto treeViewDto = new TreeViewDto(); treeViewDto.setName(nodeName); TaskNode taskNode = dag.getNode(nodeName); treeViewDto.setType(taskNode.getType()); //set treeViewDto instances for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName); if (taskInstance == null) { treeViewDto.getInstances().add(new Instance(-1, "not running", "null")); } else { Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime(); Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime(); int subProcessId = 0; /** * if process is sub process, the return sub id, or sub id=0 */ if (taskInstance.isSubProcess()) { TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode()); subProcessId = Integer.parseInt(JSONUtils.parseObject( taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_ID).asText()); } treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskType(), taskInstance.getState().toString() , taskInstance.getStartTime(), taskInstance.getEndTime(), taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId)); } } for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) { pTreeViewDto.getChildren().add(treeViewDto); } postNodeList = dag.getSubsequentNodes(nodeName); if (CollectionUtils.isNotEmpty(postNodeList)) { for (String nextNodeName : postNodeList) { List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName); if (CollectionUtils.isEmpty(treeViewDtoList)) { treeViewDtoList = new ArrayList<>(); } treeViewDtoList.add(treeViewDto); waitingRunningNodeMap.put(nextNodeName, treeViewDtoList); } } runningNodeMap.remove(nodeName); } if (waitingRunningNodeMap.size() == 0) { break; } else { runningNodeMap.putAll(waitingRunningNodeMap); waitingRunningNodeMap.clear(); } } result.put(Constants.DATA_LIST, parentTreeViewDto); result.put(Constants.STATUS, Status.SUCCESS); result.put(Constants.MSG, Status.SUCCESS.getMsg()); return result; } /** * whether the graph has a ring * * @param taskNodeResponseList task node response list * @return if graph has cycle flag */ private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) { DAG<String, TaskNode, String> graph = new DAG<>(); // Fill the vertices for (TaskNode taskNodeResponse : taskNodeResponseList) { graph.addNode(taskNodeResponse.getName(), taskNodeResponse); } // Fill edge relations for (TaskNode 
taskNodeResponse : taskNodeResponseList) { List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class); if (CollectionUtils.isNotEmpty(preTasks)) { for (String preTask : preTasks) { if (!graph.addEdge(preTask, taskNodeResponse.getName())) { return true; } } } } return graph.hasCycle(); } private String recursionProcessDefinitionName(Long projectCode, String processDefinitionName, int num) { ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName); if (processDefinition != null) { if (num > 1) { String str = processDefinitionName.substring(0, processDefinitionName.length() - 3); processDefinitionName = str + "(" + num + ")"; } else { processDefinitionName = processDefinition.getName() + "(" + num + ")"; } } else { return processDefinitionName; } return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1); } private Map<String, Object> copyProcessDefinition(User loginUser, Integer processId, Project targetProject) throws JsonProcessingException { Map<String, Object> result = new HashMap<>(); String currentTimeStamp = DateUtils.getCurrentTimeStamp(); ProcessDefinition processDefinition = processDefinitionMapper.selectById(processId); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId); return result; } else { ProcessData processData = processService.genProcessData(processDefinition); List<TaskNode> taskNodeList = processData.getTasks(); String locations = processDefinition.getLocations(); ObjectNode locationsJN = JSONUtils.parseObject(locations); for (TaskNode taskNode : taskNodeList) { String suffix = "_copy_" + currentTimeStamp; String id = taskNode.getId(); String newName = locationsJN.path(id).path("name").asText() + suffix; ((ObjectNode) locationsJN.get(id)).put("name", newName); List<String> depList = taskNode.getDepList(); List<String> newDepList = depList.stream() .map(s -> s + suffix) .collect(Collectors.toList()); taskNode.setDepList(newDepList); taskNode.setName(taskNode.getName() + suffix); taskNode.setCode(0L); } processData.setTasks(taskNodeList); String processDefinitionJson = JSONUtils.toJsonString(processData); return createProcessDefinition( loginUser, targetProject.getName(), processDefinition.getName() + "_copy_" + currentTimeStamp, processDefinitionJson, processDefinition.getDescription(), locationsJN.toString(), processDefinition.getConnects()); } } /** * batch copy process definition * * @param loginUser loginUser * @param projectName projectName * @param processDefinitionIds processDefinitionIds * @param targetProjectId targetProjectId */ @Override public Map<String, Object> batchCopyProcessDefinition(User loginUser, String projectName, String processDefinitionIds, int targetProjectId) { Map<String, Object> result = new HashMap<>(); List<String> failedProcessList = new ArrayList<>(); if (StringUtils.isEmpty(processDefinitionIds)) { putMsg(result, Status.PROCESS_DEFINITION_IDS_IS_EMPTY, processDefinitionIds); return result; } //check src project auth Map<String, Object> checkResult = checkProjectAndAuth(loginUser, projectName); if (checkResult != null) { return checkResult; } Project targetProject = projectMapper.queryDetailById(targetProjectId); if (targetProject == null) { putMsg(result, Status.PROJECT_NOT_FOUNT, targetProjectId); return result; } if (!(targetProject.getName()).equals(projectName)) { Map<String, Object> checkTargetProjectResult = checkProjectAndAuth(loginUser, targetProject.getName()); if 
(checkTargetProjectResult != null) { return checkTargetProjectResult; } } String[] processDefinitionIdList = processDefinitionIds.split(Constants.COMMA); doBatchCopyProcessDefinition(loginUser, targetProject, failedProcessList, processDefinitionIdList); checkBatchOperateResult(projectName, targetProject.getName(), result, failedProcessList, true); return result; } /** * batch move process definition * * @param loginUser loginUser * @param projectName projectName * @param processDefinitionIds processDefinitionIds * @param targetProjectId targetProjectId */ @Override public Map<String, Object> batchMoveProcessDefinition(User loginUser, String projectName, String processDefinitionIds, int targetProjectId) { Map<String, Object> result = new HashMap<>(); List<String> failedProcessList = new ArrayList<>(); //check src project auth Map<String, Object> checkResult = checkProjectAndAuth(loginUser, projectName); if (checkResult != null) { return checkResult; } if (StringUtils.isEmpty(processDefinitionIds)) { putMsg(result, Status.PROCESS_DEFINITION_IDS_IS_EMPTY, processDefinitionIds); return result; } Project targetProject = projectMapper.queryDetailById(targetProjectId); if (targetProject == null) { putMsg(result, Status.PROJECT_NOT_FOUNT, targetProjectId); return result; } if (!(targetProject.getName()).equals(projectName)) { Map<String, Object> checkTargetProjectResult = checkProjectAndAuth(loginUser, targetProject.getName()); if (checkTargetProjectResult != null) { return checkTargetProjectResult; } } Integer[] definitionIds = Arrays.stream(processDefinitionIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryDefinitionListByIdList(definitionIds); for (ProcessDefinition processDefinition : processDefinitionList) { ProcessDefinitionLog processDefinitionLog = moveProcessDefinition(loginUser, targetProject.getCode(), processDefinition, result, failedProcessList); if (processDefinitionLog != null) { moveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinitionLog); } } checkBatchOperateResult(projectName, targetProject.getName(), result, failedProcessList, false); return result; } private ProcessDefinitionLog moveProcessDefinition(User loginUser, Long targetProjectCode, ProcessDefinition processDefinition, Map<String, Object> result, List<String> failedProcessList) { try { Integer version = processDefinitionLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); processDefinitionLog.setVersion(version == null || version == 0 ? 
1 : version + 1); processDefinitionLog.setProjectCode(targetProjectCode); processDefinitionLog.setOperator(loginUser.getId()); Date now = new Date(); processDefinitionLog.setOperateTime(now); processDefinitionLog.setUpdateTime(now); processDefinitionLog.setCreateTime(now); int update = processDefinitionMapper.updateById(processDefinitionLog); int insertLog = processDefinitionLogMapper.insert(processDefinitionLog); if ((insertLog & update) > 0) { putMsg(result, Status.SUCCESS); } else { failedProcessList.add(processDefinition.getId() + "[" + processDefinition.getName() + "]"); putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); } return processDefinitionLog; } catch (Exception e) { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); failedProcessList.add(processDefinition.getId() + "[" + processDefinition.getName() + "]"); logger.error("move processDefinition error: {}", e.getMessage(), e); } return null; } private void moveTaskRelation(User loginUser, Long projectCode, ProcessDefinitionLog processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(projectCode, processDefinition.getCode()); } Date now = new Date(); for (ProcessTaskRelation processTaskRelation : processTaskRelationList) { processTaskRelation.setProjectCode(processDefinition.getProjectCode()); processTaskRelation.setProcessDefinitionVersion(processDefinition.getVersion()); processTaskRelation.setCreateTime(now); processTaskRelation.setUpdateTime(now); processService.saveTaskRelation(loginUser, processTaskRelation); } } /** * switch the defined process definition version * * @param loginUser login user * @param projectName project name * @param processDefinitionId process definition id * @param version the version the user wants to switch to * @return switch process definition version result code */ @Override public Map<String, Object> switchProcessDefinitionVersion(User loginUser, String projectName , int processDefinitionId, long version) { Map<String, Object> result = new HashMap<>(); Project project = projectMapper.queryByName(projectName); // check project auth Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { return checkResult; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineId(processDefinitionId); if (Objects.isNull(processDefinition)) { putMsg(result , Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR , processDefinitionId); return result; } ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper .queryByDefinitionCodeAndVersion(processDefinition.getCode(), version); if (Objects.isNull(processDefinitionLog)) { putMsg(result , Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR , processDefinition.getCode() , version); return result; } int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog); if (switchVersion > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR); } return result; } /** * batch copy process definition * * @param loginUser loginUser * @param targetProject targetProject * @param failedProcessList failedProcessList * @param processDefinitionIdList processDefinitionIdList */ private
void doBatchCopyProcessDefinition(User loginUser, Project targetProject, List<String> failedProcessList, String[] processDefinitionIdList) { for (String processDefinitionId : processDefinitionIdList) { try { Map<String, Object> copyProcessDefinitionResult = copyProcessDefinition(loginUser, Integer.valueOf(processDefinitionId), targetProject); if (!Status.SUCCESS.equals(copyProcessDefinitionResult.get(Constants.STATUS))) { setFailedProcessList(failedProcessList, processDefinitionId); logger.error((String) copyProcessDefinitionResult.get(Constants.MSG)); } } catch (Exception e) { setFailedProcessList(failedProcessList, processDefinitionId); logger.error("copy processDefinition error: {}", e.getMessage(), e); } } } /** * set failed processList * * @param failedProcessList failedProcessList * @param processDefinitionId processDefinitionId */ private void setFailedProcessList(List<String> failedProcessList, String processDefinitionId) { ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineId(Integer.parseInt(processDefinitionId)); if (processDefinition != null) { failedProcessList.add(processDefinitionId + "[" + processDefinition.getName() + "]"); } else { failedProcessList.add(processDefinitionId + "[null]"); } } /** * check project and auth * * @param loginUser loginUser * @param projectName projectName */ private Map<String, Object> checkProjectAndAuth(User loginUser, String projectName) { Project project = projectMapper.queryByName(projectName); //check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { return checkResult; } return null; } /** * check batch operate result * * @param srcProjectName srcProjectName * @param targetProjectName targetProjectName * @param result result * @param failedProcessList failedProcessList * @param isCopy isCopy */ private void checkBatchOperateResult(String srcProjectName, String targetProjectName, Map<String, Object> result, List<String> failedProcessList, boolean isCopy) { if (!failedProcessList.isEmpty()) { if (isCopy) { putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectName, targetProjectName, String.join(",", failedProcessList)); } else { putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectName, targetProjectName, String.join(",", failedProcessList)); } } else { putMsg(result, Status.SUCCESS); } } /** * check has associated process definition * * @param processDefinitionId process definition id * @param version version * @return The query result has a specific process definition return true */ @Override public boolean checkHasAssociatedProcessDefinition(int processDefinitionId, long version) { Integer hasAssociatedDefinitionId = processDefinitionMapper.queryHasAssociatedDefinitionByIdAndVersion(processDefinitionId, version); return Objects.nonNull(hasAssociatedDefinitionId); } /** * query the pagination versions info by one certain process definition code * * @param loginUser login user info to check auth * @param projectName process definition project name * @param pageNo page number * @param pageSize page size * @param processDefinitionCode process definition code * @return the pagination process definition versions info of the certain process definition */ @Override public Map<String, Object> queryProcessDefinitionVersions(User loginUser, String projectName, int pageNo, int pageSize, long processDefinitionCode) { Map<String, 
Object> result = new HashMap<>(); // check if pageNo or pageSize is less than 1 if (pageNo <= 0 || pageSize <= 0) { putMsg(result , Status.QUERY_PROCESS_DEFINITION_VERSIONS_PAGE_NO_OR_PAGE_SIZE_LESS_THAN_1_ERROR , pageNo , pageSize); return result; } Project project = projectMapper.queryByName(projectName); // check project auth Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { return checkResult; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode); PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize); Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, processDefinition.getCode()); List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords(); ProcessData processData = processService.genProcessData(processDefinition); processDefinition.setProcessDefinitionJson(JSONUtils.toJsonString(processData)); pageInfo.setLists(processDefinitionLogs); pageInfo.setTotalCount((int) processDefinitionVersionsPaging.getTotal()); return ImmutableMap.of( Constants.MSG, Status.SUCCESS.getMsg() , Constants.STATUS, Status.SUCCESS , Constants.DATA_LIST, pageInfo); } /** * delete one certain process definition by version number and process definition id * * @param loginUser login user info to check auth * @param projectName process definition project name * @param processDefinitionId process definition id * @param version version number * @return delete result code */ @Override public Map<String, Object> deleteByProcessDefinitionIdAndVersion(User loginUser, String projectName, int processDefinitionId, long version) { Map<String, Object> result = new HashMap<>(); Project project = projectMapper.queryByName(projectName); // check project auth Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { return checkResult; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineId(processDefinitionId); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionId); } else { processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(processDefinition.getCode(), version); putMsg(result, Status.SUCCESS); } return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,581
[Bug][Mysql] Specific key was too long, max key length is 767 bytes for varchar(256) in some mysql with innodb_large_prefix=OFF
**To Reproduce**
On MySQL servers running with `innodb_large_prefix=OFF`, installing DolphinScheduler fails with `Specified key was too long; max key length is 767 bytes` (MySQL error 1071).

**Expected behavior**
The schema scripts install cleanly regardless of the `innodb_large_prefix` setting.

**Screenshots**
![image](https://user-images.githubusercontent.com/4902714/120445701-75f19780-c3bb-11eb-9f36-68628724c818.png)

**Which version of Dolphin Scheduler:**
- [1.3.6]
- [dev]
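For illustration, here is a minimal sketch of the failure and the two usual workarounds; the `t_demo_*` table and key names are hypothetical, not part of DolphinScheduler's schema. Under the `utf8` charset MySQL reserves 3 bytes per character in index keys, and with `innodb_large_prefix=OFF` InnoDB caps a key at 767 bytes, so a key over `varchar(256)` needs 256 * 3 = 768 bytes and is rejected, while `varchar(255)` fits at 765 bytes.

```sql
-- Reproduces error 1071 when innodb_large_prefix=OFF:
-- 256 chars * 3 bytes/char = 768 bytes > 767-byte key limit
CREATE TABLE t_demo_fail (
  `name` varchar(256) NOT NULL,
  UNIQUE KEY `t_demo_fail_name_UN` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- Workaround 1: shorten the indexed column (255 * 3 = 765 <= 767)
CREATE TABLE t_demo_short (
  `name` varchar(255) NOT NULL,
  UNIQUE KEY `t_demo_short_name_UN` (`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- Workaround 2: index only a prefix of the column; note that a
-- unique prefix index enforces uniqueness on the prefix only
CREATE TABLE t_demo_prefix (
  `name` varchar(256) NOT NULL,
  UNIQUE KEY `t_demo_prefix_name_UN` (`name`(255))
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
```

These statements only demonstrate the mechanism; the linked pull request (#5582) applies the actual change to the shipped `sql/dolphinscheduler_mysql.sql` schema below.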
https://github.com/apache/dolphinscheduler/issues/5581
https://github.com/apache/dolphinscheduler/pull/5582
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
2021-06-02T08:04:01Z
java
2021-06-04T01:55:42Z
sql/dolphinscheduler_mysql.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET FOREIGN_KEY_CHECKS=0; -- ---------------------------- -- Table structure for QRTZ_BLOB_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`; CREATE TABLE `QRTZ_BLOB_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `BLOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_BLOB_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CALENDARS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CALENDARS`; CREATE TABLE `QRTZ_CALENDARS` ( `SCHED_NAME` varchar(120) NOT NULL, `CALENDAR_NAME` varchar(200) NOT NULL, `CALENDAR` blob NOT NULL, PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CALENDARS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CRON_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`; CREATE TABLE `QRTZ_CRON_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `CRON_EXPRESSION` varchar(120) NOT NULL, `TIME_ZONE_ID` varchar(80) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CRON_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_FIRED_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`; CREATE TABLE `QRTZ_FIRED_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `ENTRY_ID` varchar(200) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `FIRED_TIME` bigint(13) NOT NULL, `SCHED_TIME` bigint(13) NOT NULL, `PRIORITY` int(11) NOT NULL, `STATE` varchar(16) NOT NULL, `JOB_NAME` varchar(200) DEFAULT NULL, `JOB_GROUP` varchar(200) DEFAULT NULL, `IS_NONCONCURRENT` varchar(1) DEFAULT NULL, `REQUESTS_RECOVERY` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`), KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`), KEY 
`IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_FIRED_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_JOB_DETAILS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`; CREATE TABLE `QRTZ_JOB_DETAILS` ( `SCHED_NAME` varchar(120) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `JOB_CLASS_NAME` varchar(250) NOT NULL, `IS_DURABLE` varchar(1) NOT NULL, `IS_NONCONCURRENT` varchar(1) NOT NULL, `IS_UPDATE_DATA` varchar(1) NOT NULL, `REQUESTS_RECOVERY` varchar(1) NOT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_JOB_DETAILS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_LOCKS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_LOCKS`; CREATE TABLE `QRTZ_LOCKS` ( `SCHED_NAME` varchar(120) NOT NULL, `LOCK_NAME` varchar(40) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_LOCKS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`; CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SCHEDULER_STATE -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`; CREATE TABLE `QRTZ_SCHEDULER_STATE` ( `SCHED_NAME` varchar(120) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `LAST_CHECKIN_TIME` bigint(13) NOT NULL, `CHECKIN_INTERVAL` bigint(13) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SCHEDULER_STATE -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPLE_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`; CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `REPEAT_COUNT` bigint(7) NOT NULL, `REPEAT_INTERVAL` bigint(12) NOT NULL, `TIMES_TRIGGERED` bigint(10) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPLE_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPROP_TRIGGERS -- 
---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`; CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `STR_PROP_1` varchar(512) DEFAULT NULL, `STR_PROP_2` varchar(512) DEFAULT NULL, `STR_PROP_3` varchar(512) DEFAULT NULL, `INT_PROP_1` int(11) DEFAULT NULL, `INT_PROP_2` int(11) DEFAULT NULL, `LONG_PROP_1` bigint(20) DEFAULT NULL, `LONG_PROP_2` bigint(20) DEFAULT NULL, `DEC_PROP_1` decimal(13,4) DEFAULT NULL, `DEC_PROP_2` decimal(13,4) DEFAULT NULL, `BOOL_PROP_1` varchar(1) DEFAULT NULL, `BOOL_PROP_2` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPROP_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_TRIGGERS`; CREATE TABLE `QRTZ_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `NEXT_FIRE_TIME` bigint(13) DEFAULT NULL, `PREV_FIRE_TIME` bigint(13) DEFAULT NULL, `PRIORITY` int(11) DEFAULT NULL, `TRIGGER_STATE` varchar(16) NOT NULL, `TRIGGER_TYPE` varchar(8) NOT NULL, `START_TIME` bigint(13) NOT NULL, `END_TIME` bigint(13) DEFAULT NULL, `CALENDAR_NAME` varchar(200) DEFAULT NULL, `MISFIRE_INSTR` smallint(2) DEFAULT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`), KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_access_token -- ---------------------------- DROP TABLE IF EXISTS `t_ds_access_token`; CREATE TABLE `t_ds_access_token` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `token` varchar(64) DEFAULT NULL COMMENT 'token', `expire_time` datetime DEFAULT NULL COMMENT 'end time of token ', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB 
AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_access_token -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alert -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert`; CREATE TABLE `t_ds_alert` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `title` varchar(64) DEFAULT NULL COMMENT 'title', `content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)', `alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed', `log` text COMMENT 'log', `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alert -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alertgroup -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alertgroup`; CREATE TABLE `t_ds_alertgroup`( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids', `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id', `group_name` varchar(255) DEFAULT NULL COMMENT 'group name', `description` varchar(255) DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_alertgroup_name_UN` (`group_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_command`; CREATE TABLE `t_ds_command` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread', `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_datasource -- 
---------------------------- DROP TABLE IF EXISTS `t_ds_datasource`; CREATE TABLE `t_ds_datasource` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(64) NOT NULL COMMENT 'data source name', `note` varchar(256) DEFAULT NULL COMMENT 'description', `type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark', `user_id` int(11) NOT NULL COMMENT 'the creator id', `connection_params` text NOT NULL COMMENT 'json connection params', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_datasource_name_UN` (`name`, `type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_datasource -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_error_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_error_command`; CREATE TABLE `t_ds_error_command` ( `id` int(11) NOT NULL COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `message` text COMMENT 'message', PRIMARY KEY (`id`) USING BTREE ) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC; -- ---------------------------- -- Records of t_ds_error_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition`; CREATE TABLE `t_ds_process_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(255) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `connects` text COMMENT 'Node connection information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out, unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`), UNIQUE KEY `process_unique` (`name`,`project_code`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- 
---------------------------- -- Records of t_ds_process_definition -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition_log`; CREATE TABLE `t_ds_process_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `connects` text COMMENT 'Node connection information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_definition`; CREATE TABLE `t_ds_task_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT NULL COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` longtext COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`), UNIQUE KEY `task_unique` (`name`,`project_code`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_definition_log`; CREATE TABLE `t_ds_task_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT 
COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT NULL COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` text COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation`; CREATE TABLE `t_ds_process_task_relation` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation_log`; CREATE TABLE `t_ds_process_task_relation_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 
'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_instance`; CREATE TABLE `t_ds_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'process instance name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process definition version', `process_definition_code` bigint(20) not NULL COMMENT 'process definition code', `state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance', `start_time` datetime DEFAULT NULL COMMENT 'process instance start time', `end_time` datetime DEFAULT NULL COMMENT 'process instance end time', `run_times` int(11) DEFAULT NULL COMMENT 'process instance run times', `host` varchar(135) DEFAULT NULL COMMENT 'process instance host', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes', `max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if success', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `command_start_time` datetime DEFAULT NULL COMMENT 'command start time', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT '1' COMMENT 'flag', `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process', `executor_id` int(11) NOT NULL COMMENT 'executor id', `history_cmd` text COMMENT 'history commands of process instance operation', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 
0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `var_pool` longtext COMMENT 'var_pool', PRIMARY KEY (`id`), KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE, KEY `start_time_index` (`start_time`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_project -- ---------------------------- DROP TABLE IF EXISTS `t_ds_project`; CREATE TABLE `t_ds_project` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(100) DEFAULT NULL COMMENT 'project name', `code` bigint(20) NOT NULL COMMENT 'encoding', `description` varchar(200) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'creator id', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_project -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_queue -- ---------------------------- DROP TABLE IF EXISTS `t_ds_queue`; CREATE TABLE `t_ds_queue` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name', `queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_queue -- ---------------------------- INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null); -- ---------------------------- -- Table structure for t_ds_relation_datasource_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_datasource_user`; CREATE TABLE `t_ds_relation_datasource_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `datasource_id` int(11) DEFAULT NULL COMMENT 'data source id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_datasource_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_process_instance`; CREATE TABLE `t_ds_relation_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id', `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id', `process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_process_instance -- ---------------------------- -- ---------------------------- -- Table 
structure for t_ds_relation_project_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_project_user`; CREATE TABLE `t_ds_relation_project_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `project_id` int(11) DEFAULT NULL COMMENT 'project id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_project_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_resources_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_resources_user`; CREATE TABLE `t_ds_relation_resources_user` ( `id` int(11) NOT NULL AUTO_INCREMENT, `user_id` int(11) NOT NULL COMMENT 'user id', `resources_id` int(11) DEFAULT NULL COMMENT 'resource id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_resources_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_udfs_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_udfs_user`; CREATE TABLE `t_ds_relation_udfs_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'userid', `udf_id` int(11) DEFAULT NULL COMMENT 'udf id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_resources -- ---------------------------- DROP TABLE IF EXISTS `t_ds_resources`; CREATE TABLE `t_ds_resources` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alias` varchar(64) DEFAULT NULL COMMENT 'alias', `file_name` varchar(64) DEFAULT NULL COMMENT 'file name', `description` varchar(256) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'user id', `type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF', `size` bigint(20) DEFAULT NULL COMMENT 'resource size', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `pid` int(11) DEFAULT NULL, `full_name` varchar(64) DEFAULT NULL, `is_directory` tinyint(4) DEFAULT NULL, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_resources -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_schedules -- ---------------------------- DROP TABLE IF EXISTS `t_ds_schedules`; CREATE TABLE `t_ds_schedules` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_id` int(11) NOT NULL COMMENT 'process definition id', `start_time` datetime NOT NULL COMMENT 'start time', `end_time` datetime NOT NULL COMMENT 'end time', `timezone_id` varchar(40) DEFAULT NULL COMMENT 'timezoneId', `crontab` varchar(256) NOT NULL COMMENT 'crontab description', 
`failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue', `user_id` int(11) NOT NULL COMMENT 'user id', `release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ', `warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(256) DEFAULT '' COMMENT 'worker group id', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_schedules -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_session -- ---------------------------- DROP TABLE IF EXISTS `t_ds_session`; CREATE TABLE `t_ds_session` ( `id` varchar(64) NOT NULL COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `ip` varchar(45) DEFAULT NULL COMMENT 'ip', `last_login_time` datetime DEFAULT NULL COMMENT 'last login time', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_session -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_task_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_instance`; CREATE TABLE `t_ds_task_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'task name', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_code` bigint(20) NOT NULL COMMENT 'task definition code', `task_definition_version` int(11) DEFAULT NULL COMMENT 'task definition version', `process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id', `state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `submit_time` datetime DEFAULT NULL COMMENT 'task submit time', `start_time` datetime DEFAULT NULL COMMENT 'task start time', `end_time` datetime DEFAULT NULL COMMENT 'task end time', `host` varchar(135) DEFAULT NULL COMMENT 'host of task running on', `execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host', `log_path` varchar(200) DEFAULT NULL COMMENT 'task log path', `alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert', `retry_times` int(4) DEFAULT '0' COMMENT 'task retry times', `pid` int(4) DEFAULT NULL COMMENT 'pid of task', `app_link` text COMMENT 'yarn app id', `task_params` text COMMENT 'job custom parameters', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ', `max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times', `task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `executor_id` int(11) DEFAULT NULL, `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time', `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time', `var_pool` longtext COMMENT 'var_pool', PRIMARY 
KEY (`id`), KEY `process_instance_id` (`process_instance_id`) USING BTREE, CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_task_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_tenant -- ---------------------------- DROP TABLE IF EXISTS `t_ds_tenant`; CREATE TABLE `t_ds_tenant` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code', `description` varchar(256) DEFAULT NULL, `queue_id` int(11) DEFAULT NULL COMMENT 'queue id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_tenant -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_udfs -- ---------------------------- DROP TABLE IF EXISTS `t_ds_udfs`; CREATE TABLE `t_ds_udfs` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `func_name` varchar(100) NOT NULL COMMENT 'UDF function name', `class_name` varchar(255) NOT NULL COMMENT 'class of udf', `type` tinyint(4) NOT NULL COMMENT 'Udf function type', `arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types', `database` varchar(255) DEFAULT NULL COMMENT 'data base', `description` varchar(255) DEFAULT NULL, `resource_id` int(11) NOT NULL COMMENT 'resource id', `resource_name` varchar(255) NOT NULL COMMENT 'resource name', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_udfs -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_user`; CREATE TABLE `t_ds_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id', `user_name` varchar(64) DEFAULT NULL COMMENT 'user name', `user_password` varchar(64) DEFAULT NULL COMMENT 'user password', `user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user', `email` varchar(64) DEFAULT NULL COMMENT 'email', `phone` varchar(11) DEFAULT NULL COMMENT 'phone', `tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `queue` varchar(64) DEFAULT NULL COMMENT 'queue', `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable', PRIMARY KEY (`id`), UNIQUE KEY `user_name_unique` (`user_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_worker_group -- ---------------------------- DROP TABLE IF EXISTS `t_ds_worker_group`; CREATE TABLE `t_ds_worker_group` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `name` varchar(256) NOT NULL COMMENT 'worker group name', `addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. 
split by [,]', `create_time` datetime NULL DEFAULT NULL COMMENT 'create time', `update_time` datetime NULL DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `name_unique` (`name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_worker_group -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_version -- ---------------------------- DROP TABLE IF EXISTS `t_ds_version`; CREATE TABLE `t_ds_version` ( `id` int(11) NOT NULL AUTO_INCREMENT, `version` varchar(200) NOT NULL, PRIMARY KEY (`id`), UNIQUE KEY `version_UNIQUE` (`version`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version'; -- ---------------------------- -- Records of t_ds_version -- ---------------------------- INSERT INTO `t_ds_version` VALUES ('1', '1.4.0'); -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- ---------------------------- -- Records of t_ds_user -- ---------------------------- INSERT INTO `t_ds_user` VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', '[email protected]', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1); -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); DROP TABLE IF EXISTS `t_ds_plugin_define`; CREATE TABLE `t_ds_plugin_define` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email', `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
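The alert tables above link an alert group to its plugin instances through the comma-separated alert_instance_ids column (seeded with "1,2" by the default-data insert) rather than through a foreign key. A minimal query sketch of how such a list can be resolved; the query is illustrative only and not part of the shipped schema:
-- Illustrative only: resolve the plugin instances behind an alert group by
-- matching each instance id inside the comma-separated alert_instance_ids list.
SELECT g.group_name, i.instance_name
FROM t_ds_alertgroup g
JOIN t_ds_alert_plugin_instance i
  ON FIND_IN_SET(i.id, g.alert_instance_ids) > 0
WHERE g.id = 1;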
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,581
[Bug][MySQL] Specified key was too long, max key length is 767 bytes for varchar(256) in some MySQL with innodb_large_prefix=OFF
**To Reproduce** On a MySQL server with `innodb_large_prefix=OFF`, the error `Specified key was too long; max key length is 767 bytes` occurs when installing DolphinScheduler. **Expected behavior** The schema installs without the key-length error. **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/120445701-75f19780-c3bb-11eb-9f36-68628724c818.png) **Which version of Dolphin Scheduler:** -[1.3.6] -[dev]
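A minimal reproduction sketch (assuming a MySQL 5.6-style server with `innodb_large_prefix=OFF` and the default `utf8` charset; the table names below are illustrative, not part of the DolphinScheduler schema). Since `utf8` reserves 3 bytes per character, a unique key on `varchar(256)` needs 256 × 3 = 768 bytes, one byte over the 767-byte InnoDB index limit, while `varchar(255)` fits at 765 bytes:

```sql
SHOW VARIABLES LIKE 'innodb_large_prefix';  -- OFF reproduces the failure

-- 256 chars * 3 bytes = 768 bytes > 767, so this CREATE TABLE is rejected:
CREATE TABLE repro_key_length (
  name varchar(256) NOT NULL,
  UNIQUE KEY name_unique (name)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ERROR 1071 (42000): Specified key was too long; max key length is 767 bytes

-- varchar(255) (765 bytes) stays under the limit and succeeds:
CREATE TABLE repro_key_length_ok (
  name varchar(255) NOT NULL,
  UNIQUE KEY name_unique (name)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
```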
https://github.com/apache/dolphinscheduler/issues/5581
https://github.com/apache/dolphinscheduler/pull/5582
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
2021-06-02T08:04:01Z
java
2021-06-04T01:55:42Z
sql/dolphinscheduler_postgre.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; DROP TABLE IF EXISTS QRTZ_LOCKS; DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; DROP TABLE IF EXISTS QRTZ_TRIGGERS; DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; DROP TABLE IF EXISTS QRTZ_CALENDARS; CREATE TABLE QRTZ_JOB_DETAILS( SCHED_NAME character varying(120) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, JOB_CLASS_NAME character varying(250) NOT NULL, IS_DURABLE boolean NOT NULL, IS_NONCONCURRENT boolean NOT NULL, IS_UPDATE_DATA boolean NOT NULL, REQUESTS_RECOVERY boolean NOT NULL, JOB_DATA bytea NULL); alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, NEXT_FIRE_TIME BIGINT NULL, PREV_FIRE_TIME BIGINT NULL, PRIORITY INTEGER NULL, TRIGGER_STATE character varying(16) NOT NULL, TRIGGER_TYPE character varying(8) NOT NULL, START_TIME BIGINT NOT NULL, END_TIME BIGINT NULL, CALENDAR_NAME character varying(200) NULL, MISFIRE_INSTR SMALLINT NULL, JOB_DATA bytea NULL) ; alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, REPEAT_COUNT BIGINT NOT NULL, REPEAT_INTERVAL BIGINT NOT NULL, TIMES_TRIGGERED BIGINT NOT NULL) ; alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, CRON_EXPRESSION character varying(120) NOT NULL, TIME_ZONE_ID character varying(80)) ; alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, STR_PROP_1 character varying(512) NULL, STR_PROP_2 character varying(512) NULL, STR_PROP_3 character varying(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, 
BOOL_PROP_1 boolean NULL, BOOL_PROP_2 boolean NULL) ; alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, BLOB_DATA bytea NULL) ; alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME character varying(120) NOT NULL, CALENDAR_NAME character varying(200) NOT NULL, CALENDAR bytea NOT NULL) ; alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME); CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL) ; alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, ENTRY_ID character varying(200) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, FIRED_TIME BIGINT NOT NULL, SCHED_TIME BIGINT NOT NULL, PRIORITY INTEGER NOT NULL, STATE character varying(16) NOT NULL, JOB_NAME character varying(200) NULL, JOB_GROUP character varying(200) NULL, IS_NONCONCURRENT boolean NULL, REQUESTS_RECOVERY boolean NULL) ; alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID); CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME character varying(120) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, LAST_CHECKIN_TIME BIGINT NOT NULL, CHECKIN_INTERVAL BIGINT NOT NULL) ; alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME); CREATE TABLE QRTZ_LOCKS ( SCHED_NAME character varying(120) NOT NULL, LOCK_NAME character varying(40) NOT NULL) ; alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME); CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME); CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME); CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_T_G ON 
QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); -- -- Table structure for table t_ds_access_token -- DROP TABLE IF EXISTS t_ds_access_token; CREATE TABLE t_ds_access_token ( id int NOT NULL , user_id int DEFAULT NULL , token varchar(64) DEFAULT NULL , expire_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alert -- DROP TABLE IF EXISTS t_ds_alert; CREATE TABLE t_ds_alert ( id int NOT NULL , title varchar(64) DEFAULT NULL , content text , alert_status int DEFAULT '0' , log text , alertgroup_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alertgroup -- DROP TABLE IF EXISTS t_ds_alertgroup; CREATE TABLE t_ds_alertgroup( id int NOT NULL, alert_instance_ids varchar (255) DEFAULT NULL, create_user_id int4 DEFAULT NULL, group_name varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name) ) ; -- -- Table structure for table t_ds_command -- DROP TABLE IF EXISTS t_ds_command; CREATE TABLE t_ds_command ( id int NOT NULL , command_type int DEFAULT NULL , process_definition_id int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , executor_id int DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_datasource -- DROP TABLE IF EXISTS t_ds_datasource; CREATE TABLE t_ds_datasource ( id int NOT NULL , name varchar(64) NOT NULL , note varchar(256) DEFAULT NULL , type int NOT NULL , user_id int NOT NULL , connection_params text NOT NULL , create_time timestamp NOT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id), CONSTRAINT t_ds_datasource_name_UN UNIQUE (name, type) ) ; -- -- Table structure for table t_ds_error_command -- DROP TABLE IF EXISTS t_ds_error_command; CREATE TABLE t_ds_error_command ( id int NOT NULL , command_type int DEFAULT NULL , executor_id int DEFAULT NULL , process_definition_id int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), message text , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_master_server -- -- -- Table structure for table t_ds_process_definition -- DROP TABLE IF EXISTS t_ds_process_definition; CREATE TABLE t_ds_process_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , connects text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT 
NULL , PRIMARY KEY (id) , CONSTRAINT process_definition_unique UNIQUE (name, project_code) ) ; create index process_definition_index on t_ds_process_definition (code,id); DROP TABLE IF EXISTS t_ds_process_definition_log; CREATE TABLE t_ds_process_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , connects text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_task_definition; CREATE TABLE t_ds_task_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT task_definition_unique UNIQUE (name, project_code) ) ; create index task_definition_index on t_ds_task_definition (project_code,id); DROP TABLE IF EXISTS t_ds_task_definition_log; CREATE TABLE t_ds_task_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids varchar(255) DEFAULT NULL , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_process_task_relation; CREATE TABLE t_ds_process_task_relation ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , condition_params text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_process_task_relation_log; CREATE TABLE t_ds_process_task_relation_log ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , 
condition_params text , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_process_instance -- DROP TABLE IF EXISTS t_ds_process_instance; CREATE TABLE t_ds_process_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , process_definition_code bigint DEFAULT NULL , state int DEFAULT NULL , recovery int DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , run_times int DEFAULT NULL , host varchar(135) DEFAULT NULL , command_type int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , max_try_times int DEFAULT '0' , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , command_start_time timestamp DEFAULT NULL , global_params text , process_instance_json text , flag int DEFAULT '1' , update_time timestamp NULL , is_sub_process int DEFAULT '0' , executor_id int NOT NULL , history_cmd text , dependence_schedule_times text , process_instance_priority int DEFAULT NULL , worker_group varchar(64) , timeout int DEFAULT '0' , tenant_id int NOT NULL DEFAULT '-1' , var_pool text , PRIMARY KEY (id) ) ; create index process_instance_index on t_ds_process_instance (process_definition_code,id); create index start_time_index on t_ds_process_instance (start_time); -- -- Table structure for table t_ds_project -- DROP TABLE IF EXISTS t_ds_project; CREATE TABLE t_ds_project ( id int NOT NULL , name varchar(100) DEFAULT NULL , code bigint NOT NULL, description varchar(200) DEFAULT NULL , user_id int DEFAULT NULL , flag int DEFAULT '1' , create_time timestamp DEFAULT CURRENT_TIMESTAMP , update_time timestamp DEFAULT CURRENT_TIMESTAMP , PRIMARY KEY (id) ) ; create index user_id_index on t_ds_project (user_id); -- -- Table structure for table t_ds_queue -- DROP TABLE IF EXISTS t_ds_queue; CREATE TABLE t_ds_queue ( id int NOT NULL , queue_name varchar(64) DEFAULT NULL , queue varchar(64) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_relation_datasource_user -- DROP TABLE IF EXISTS t_ds_relation_datasource_user; CREATE TABLE t_ds_relation_datasource_user ( id int NOT NULL , user_id int NOT NULL , datasource_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_relation_process_instance -- DROP TABLE IF EXISTS t_ds_relation_process_instance; CREATE TABLE t_ds_relation_process_instance ( id int NOT NULL , parent_process_instance_id int DEFAULT NULL , parent_task_instance_id int DEFAULT NULL , process_instance_id int DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_project_user -- DROP TABLE IF EXISTS t_ds_relation_project_user; CREATE TABLE t_ds_relation_project_user ( id int NOT NULL , user_id int NOT NULL , project_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index relation_project_user_id_index on t_ds_relation_project_user (user_id); -- -- Table structure for table t_ds_relation_resources_user -- DROP TABLE IF EXISTS t_ds_relation_resources_user; CREATE TABLE t_ds_relation_resources_user ( id int NOT NULL , user_id int NOT NULL 
, resources_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_udfs_user -- DROP TABLE IF EXISTS t_ds_relation_udfs_user; CREATE TABLE t_ds_relation_udfs_user ( id int NOT NULL , user_id int NOT NULL , udf_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_resources -- DROP TABLE IF EXISTS t_ds_resources; CREATE TABLE t_ds_resources ( id int NOT NULL , alias varchar(64) DEFAULT NULL , file_name varchar(64) DEFAULT NULL , description varchar(256) DEFAULT NULL , user_id int DEFAULT NULL , type int DEFAULT NULL , size bigint DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , pid int, full_name varchar(64), is_directory int, PRIMARY KEY (id), CONSTRAINT t_ds_resources_un UNIQUE (full_name, type) ) ; -- -- Table structure for table t_ds_schedules -- DROP TABLE IF EXISTS t_ds_schedules; CREATE TABLE t_ds_schedules ( id int NOT NULL , process_definition_id int NOT NULL , start_time timestamp NOT NULL , end_time timestamp NOT NULL , timezone_id varchar(40) default NULL , crontab varchar(256) NOT NULL , failure_strategy int NOT NULL , user_id int NOT NULL , release_state int NOT NULL , warning_type int NOT NULL , warning_group_id int DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_session -- DROP TABLE IF EXISTS t_ds_session; CREATE TABLE t_ds_session ( id varchar(64) NOT NULL , user_id int DEFAULT NULL , ip varchar(45) DEFAULT NULL , last_login_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_task_instance -- DROP TABLE IF EXISTS t_ds_task_instance; CREATE TABLE t_ds_task_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_code bigint NOT NULL, task_definition_version int DEFAULT NULL , process_instance_id int DEFAULT NULL , state int DEFAULT NULL , submit_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , host varchar(135) DEFAULT NULL , execute_path varchar(200) DEFAULT NULL , log_path varchar(200) DEFAULT NULL , alert_flag int DEFAULT NULL , retry_times int DEFAULT '0' , pid int DEFAULT NULL , app_link text , task_params text , flag int DEFAULT '1' , retry_interval int DEFAULT NULL , max_retry_times int DEFAULT NULL , task_instance_priority int DEFAULT NULL , worker_group varchar(64), executor_id int DEFAULT NULL , first_submit_time timestamp DEFAULT NULL , delay_time int DEFAULT '0' , var_pool text , PRIMARY KEY (id), CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE ) ; -- -- Table structure for table t_ds_tenant -- DROP TABLE IF EXISTS t_ds_tenant; CREATE TABLE t_ds_tenant ( id int NOT NULL , tenant_code varchar(64) DEFAULT NULL , description varchar(256) DEFAULT NULL , queue_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_udfs -- DROP TABLE IF EXISTS t_ds_udfs; CREATE TABLE t_ds_udfs ( id int NOT NULL , user_id int NOT NULL , func_name varchar(100) NOT NULL , class_name varchar(255) NOT NULL , type int NOT NULL , arg_types 
varchar(255) DEFAULT NULL , database varchar(255) DEFAULT NULL , description varchar(255) DEFAULT NULL , resource_id int NOT NULL , resource_name varchar(255) NOT NULL , create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_user -- DROP TABLE IF EXISTS t_ds_user; CREATE TABLE t_ds_user ( id int NOT NULL , user_name varchar(64) DEFAULT NULL , user_password varchar(64) DEFAULT NULL , user_type int DEFAULT NULL , email varchar(64) DEFAULT NULL , phone varchar(11) DEFAULT NULL , tenant_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , queue varchar(64) DEFAULT NULL , state int DEFAULT 1 , PRIMARY KEY (id) ); comment on column t_ds_user.state is 'state 0:disable 1:enable'; -- -- Table structure for table t_ds_version -- DROP TABLE IF EXISTS t_ds_version; CREATE TABLE t_ds_version ( id int NOT NULL , version varchar(200) NOT NULL, PRIMARY KEY (id) ) ; create index version_index on t_ds_version(version); -- -- Table structure for table t_ds_worker_group -- DROP TABLE IF EXISTS t_ds_worker_group; CREATE TABLE t_ds_worker_group ( id bigint NOT NULL , name varchar(256) NOT NULL , addr_list text DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT name_unique UNIQUE (name) ) ; -- -- Table structure for table t_ds_worker_server -- DROP TABLE IF EXISTS t_ds_worker_server; CREATE TABLE t_ds_worker_server ( id int NOT NULL , host varchar(45) DEFAULT NULL , port int DEFAULT NULL , zk_directory varchar(64) DEFAULT NULL , res_info varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , last_heartbeat_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence; CREATE SEQUENCE t_ds_access_token_id_sequence; ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence; CREATE SEQUENCE t_ds_alert_id_sequence; ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence; CREATE SEQUENCE t_ds_alertgroup_id_sequence; ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_command_id_sequence; CREATE SEQUENCE t_ds_command_id_sequence; ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence; CREATE SEQUENCE t_ds_datasource_id_sequence; ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence; CREATE SEQUENCE t_ds_process_definition_id_sequence; ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_log_id_sequence; CREATE SEQUENCE t_ds_process_definition_log_id_sequence; ALTER TABLE t_ds_process_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_id_sequence; CREATE SEQUENCE t_ds_task_definition_id_sequence; ALTER TABLE t_ds_task_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_log_id_sequence; CREATE SEQUENCE t_ds_task_definition_log_id_sequence; ALTER TABLE t_ds_task_definition_log ALTER COLUMN id SET 
DEFAULT NEXTVAL('t_ds_task_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_id_sequence; ALTER TABLE t_ds_process_task_relation ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_log_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_log_id_sequence; ALTER TABLE t_ds_process_task_relation_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence; CREATE SEQUENCE t_ds_process_instance_id_sequence; ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_project_id_sequence; CREATE SEQUENCE t_ds_project_id_sequence; ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence; CREATE SEQUENCE t_ds_queue_id_sequence; ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence; CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence; ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence; CREATE SEQUENCE t_ds_relation_process_instance_id_sequence; ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence; CREATE SEQUENCE t_ds_relation_project_user_id_sequence; ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence; CREATE SEQUENCE t_ds_relation_resources_user_id_sequence; ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence; CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence; ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence; CREATE SEQUENCE t_ds_resources_id_sequence; ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence; CREATE SEQUENCE t_ds_schedules_id_sequence; ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence; CREATE SEQUENCE t_ds_task_instance_id_sequence; ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence; CREATE SEQUENCE t_ds_tenant_id_sequence; ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence; CREATE SEQUENCE t_ds_udfs_id_sequence; ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_user_id_sequence; CREATE SEQUENCE t_ds_user_id_sequence; ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_version_id_sequence; CREATE SEQUENCE t_ds_version_id_sequence; ALTER TABLE 
t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence; CREATE SEQUENCE t_ds_worker_group_id_sequence; ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence; CREATE SEQUENCE t_ds_worker_server_id_sequence; ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence'); -- Records of t_ds_user, user : admin , password : dolphinscheduler123 INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', '[email protected]', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22'); -- Records of t_ds_alertgroup, default admin warning group INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- Records of t_ds_queue,default queue name : default INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time) VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33'); -- Records of t_ds_version, version : 1.4.0 INSERT INTO t_ds_version(version) VALUES ('1.4.0'); -- -- Table structure for table t_ds_plugin_define -- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type) ); -- -- Table structure for table t_ds_alert_plugin_instance -- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) );
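For reference, the `serial` type used by the newer tables above (t_ds_plugin_define, t_ds_alert_plugin_instance) is PostgreSQL shorthand for the explicit sequence-plus-default pattern the script applies to the older tables. A minimal sketch of the equivalence, with a hypothetical table name:

```sql
-- "serial" shorthand:
CREATE TABLE example_table (id serial NOT NULL PRIMARY KEY);
-- ...is roughly equivalent to the explicit pattern used above:
-- CREATE SEQUENCE example_table_id_sequence;
-- CREATE TABLE example_table (
--     id int NOT NULL DEFAULT NEXTVAL('example_table_id_sequence'),
--     PRIMARY KEY (id)
-- );
```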
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,581
[Bug][Mysql] Specified key was too long, max key length is 767 bytes for varchar(256) in some MySQL with innodb_large_prefix=OFF
**To Reproduce** In some MySQL instances with `innodb_large_prefix=OFF`, the error `Specified key was too long, max key length is 767 bytes` occurs when installing DolphinScheduler **Expected behavior** Bug fixed **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/120445701-75f19780-c3bb-11eb-9f36-68628724c818.png) **Which version of Dolphin Scheduler:** -[1.3.6] -[dev] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5581
https://github.com/apache/dolphinscheduler/pull/5582
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
2021-06-02T08:04:01Z
java
2021-06-04T01:55:42Z
sql/upgrade/1.3.6_schema/mysql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); -- uc_dolphin_T_t_ds_worker_group_R_ip_list drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_worker_group_R_ip_list; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_worker_group_R_ip_list() BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_worker_group' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='ip_list') THEN ALTER TABLE t_ds_worker_group CHANGE COLUMN `ip_list` `addr_list` text; ALTER TABLE t_ds_worker_group MODIFY COLUMN `name` varchar(256) NOT NULL; ALTER TABLE t_ds_worker_group ADD UNIQUE KEY `name_unique` (`name`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_worker_group_R_ip_list; DROP PROCEDURE uc_dolphin_T_t_ds_worker_group_R_ip_list; -- uc_dolphin_T_qrtz_fired_triggers_R_entry_id drop PROCEDURE if EXISTS uc_dolphin_T_qrtz_fired_triggers_R_entry_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_qrtz_fired_triggers_R_entry_id() BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='QRTZ_FIRED_TRIGGERS' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='entry_id') THEN ALTER TABLE QRTZ_FIRED_TRIGGERS MODIFY COLUMN `entry_id` varchar(200); END IF; END; d// delimiter ; CALL uc_dolphin_T_qrtz_fired_triggers_R_entry_id; DROP PROCEDURE uc_dolphin_T_qrtz_fired_triggers_R_entry_id;
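For context on the failure this record describes, the arithmetic behind the 767-byte limit, plus one plausible remediation, is sketched below; the column change shown is illustrative and not necessarily the exact change the linked PR applies:

```sql
-- With innodb_large_prefix=OFF, an InnoDB index key may use at most 767 bytes.
--   utf8    charset: varchar(256) -> 256 * 3 bytes = 768  (over the limit)
--                    varchar(255) -> 255 * 3 bytes = 765  (fits)
--   utf8mb4 charset: varchar(191) -> 191 * 4 bytes = 764  (fits)
-- One remediation: shrink the indexed column below the limit.
ALTER TABLE t_ds_worker_group MODIFY COLUMN `name` varchar(255) NOT NULL;
-- Alternative (MySQL 5.6/5.7): allow long prefixes instead of shrinking,
-- which requires a DYNAMIC or COMPRESSED row format:
-- SET GLOBAL innodb_large_prefix = ON;
-- ALTER TABLE t_ds_worker_group ROW_FORMAT=DYNAMIC;
```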
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,581
[Bug][Mysql] Specified key was too long, max key length is 767 bytes for varchar(256) in some MySQL with innodb_large_prefix=OFF
**To Reproduce** In some MySQL instances with `innodb_large_prefix=OFF`, the error `Specified key was too long, max key length is 767 bytes` occurs when installing DolphinScheduler **Expected behavior** Bug fixed **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/120445701-75f19780-c3bb-11eb-9f36-68628724c818.png) **Which version of Dolphin Scheduler:** -[1.3.6] -[dev] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5581
https://github.com/apache/dolphinscheduler/pull/5582
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
2021-06-02T08:04:01Z
java
2021-06-04T01:55:42Z
sql/upgrade/1.3.6_schema/postgresql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ -- uc_dolphin_T_t_ds_worker_group_A_ip_list delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_worker_group_A_ip_list() RETURNS void AS $$ BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_worker_group' AND COLUMN_NAME ='ip_list') THEN ALTER TABLE t_ds_worker_group RENAME ip_list TO addr_list; ALTER TABLE t_ds_worker_group ALTER COLUMN addr_list type text; ALTER TABLE t_ds_worker_group ALTER COLUMN name type varchar(256), ALTER COLUMN name SET NOT NULL; ALTER TABLE t_ds_worker_group ADD CONSTRAINT name_unique UNIQUE (name); END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_worker_group_A_ip_list(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_worker_group_A_ip_list(); -- uc_dolphin_T_qrtz_fired_triggers_A_entry_id delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_qrtz_fired_triggers_A_entry_id() RETURNS void AS $$ BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='qrtz_fired_triggers' AND COLUMN_NAME ='entry_id') THEN ALTER TABLE qrtz_fired_triggers ALTER COLUMN entry_id type varchar(200); END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_qrtz_fired_triggers_A_entry_id(); DROP FUNCTION IF EXISTS uc_dolphin_T_qrtz_fired_triggers_A_entry_id(); -- Add foreign key constraints for t_ds_task_instance -- delimiter ; ALTER TABLE t_ds_task_instance ADD CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE;
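Both upgrade scripts above share one idea worth calling out: every ALTER is guarded by an information_schema lookup so the migration is idempotent and can be re-run safely. A minimal sketch of that guard in PostgreSQL, with hypothetical table and column names:

```sql
DO $$
BEGIN
    -- only apply the rename if the old column still exists
    IF EXISTS (SELECT 1 FROM information_schema.columns
               WHERE table_name = 'example_table'
                 AND column_name = 'old_col') THEN
        ALTER TABLE example_table RENAME COLUMN old_col TO new_col;
    END IF;
END;
$$;
```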
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,581
[Bug][Mysql] Specified key was too long, max key length is 767 bytes for varchar(256) in some MySQL with innodb_large_prefix=OFF
**To Reproduce** In some MySQL instances with `innodb_large_prefix=OFF`, the error `Specified key was too long, max key length is 767 bytes` occurs when installing DolphinScheduler **Expected behavior** Bug fixed **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/120445701-75f19780-c3bb-11eb-9f36-68628724c818.png) **Which version of Dolphin Scheduler:** -[1.3.6] -[dev] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5581
https://github.com/apache/dolphinscheduler/pull/5582
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
2021-06-02T08:04:01Z
java
2021-06-04T01:55:42Z
sql/upgrade/1.3.7_schema/mysql/dolphinscheduler_ddl.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,581
[Bug][Mysql] Specified key was too long, max key length is 767 bytes for varchar(256) in some MySQL with innodb_large_prefix=OFF
**To Reproduce** In some MySQL instances with `innodb_large_prefix=OFF`, the error `Specified key was too long, max key length is 767 bytes` occurs when installing DolphinScheduler **Expected behavior** Bug fixed **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/120445701-75f19780-c3bb-11eb-9f36-68628724c818.png) **Which version of Dolphin Scheduler:** -[1.3.6] -[dev] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5581
https://github.com/apache/dolphinscheduler/pull/5582
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
2021-06-02T08:04:01Z
java
2021-06-04T01:55:42Z
sql/upgrade/1.3.7_schema/mysql/dolphinscheduler_dml.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,581
[Bug][Mysql] Specified key was too long, max key length is 767 bytes for varchar(256) in some MySQL with innodb_large_prefix=OFF
**To Reproduce** In some MySQL instances with `innodb_large_prefix=OFF`, the error `Specified key was too long, max key length is 767 bytes` occurs when installing DolphinScheduler **Expected behavior** Bug fixed **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/120445701-75f19780-c3bb-11eb-9f36-68628724c818.png) **Which version of Dolphin Scheduler:** -[1.3.6] -[dev] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5581
https://github.com/apache/dolphinscheduler/pull/5582
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
2021-06-02T08:04:01Z
java
2021-06-04T01:55:42Z
sql/upgrade/1.3.7_schema/postgresql/dolphinscheduler_ddl.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,581
[Bug][Mysql] Specified key was too long, max key length is 767 bytes for varchar(256) in some MySQL with innodb_large_prefix=OFF
**To Reproduce** In some MySQL instances with `innodb_large_prefix=OFF`, the error `Specified key was too long, max key length is 767 bytes` occurs when installing DolphinScheduler **Expected behavior** Bug fixed **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/120445701-75f19780-c3bb-11eb-9f36-68628724c818.png) **Which version of Dolphin Scheduler:** -[1.3.6] -[dev] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5581
https://github.com/apache/dolphinscheduler/pull/5582
cc9e5d5d34fcf2279b267cca7df37a9e80eeba07
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
2021-06-02T08:04:01Z
java
2021-06-04T01:55:42Z
sql/upgrade/1.3.7_schema/postgresql/dolphinscheduler_dml.sql
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,586
[Question] the cluster hangs up due to failure to obtain a JDBC Connection
I cannot see any master or worker on the Monitor page when the backend process instance is still alive on the ds server. ```[ERROR] 2021-06-02 08:53:16.176 org.apache.dolphinscheduler.server.master.registry.ServerNodeManager:[230] - WorkerGroupListener capture data change and get data failed org.mybatis.spring.MyBatisSystemException: nested exception is org.apache.ibatis.exceptions.PersistenceException: ### Error updating database. Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 ### The error may exist in org/apache/dolphinscheduler/dao/mapper/AlertMapper.java (best guess) ### The error may involve org.apache.dolphinscheduler.dao.mapper.AlertMapper.insert ### The error occurred while executing an update ### Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:78) at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440) at com.sun.proxy.$Proxy84.insert(Unknown Source) at org.mybatis.spring.SqlSessionTemplate.insert(SqlSessionTemplate.java:271) at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:58) at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61) at com.sun.proxy.$Proxy108.insert(Unknown Source) at org.apache.dolphinscheduler.dao.AlertDao.saveTaskTimeoutAlert(AlertDao.java:135) at org.apache.dolphinscheduler.dao.AlertDao.sendServerStopedAlert(AlertDao.java:102) at org.apache.dolphinscheduler.server.master.registry.ServerNodeManager$WorkerGroupNodeListener.dataChanged(ServerNodeManager.java:225) at org.apache.dolphinscheduler.service.zk.AbstractListener.childEvent(AbstractListener.java:32) at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:760) at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:754) at org.apache.curator.framework.listen.ListenerContainer$1.run(ListenerContainer.java:100) at org.apache.curator.shaded.com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30) at org.apache.curator.framework.listen.ListenerContainer.forEach(ListenerContainer.java:92) at org.apache.curator.framework.recipes.cache.TreeCache.callListeners(TreeCache.java:753) at org.apache.curator.framework.recipes.cache.TreeCache.access$1900(TreeCache.java:75) at org.apache.curator.framework.recipes.cache.TreeCache$4.run(TreeCache.java:865) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at `java.lang.Thread.run(Thread.java:748)` Caused by: org.apache.ibatis.exceptions.PersistenceException: ### Error updating database. 
Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 ### The error may exist in org/apache/dolphinscheduler/dao/mapper/AlertMapper.java (best guess) ### The error may involve org.apache.dolphinscheduler.dao.mapper.AlertMapper.insert ### The error occurred while executing an update ```
https://github.com/apache/dolphinscheduler/issues/5586
https://github.com/apache/dolphinscheduler/pull/5588
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
75be09735a29469ef5169550239c65a5a27af3ba
2021-06-03T01:39:35Z
java
2021-06-04T05:27:18Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.IStoppable; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.remote.NettyRemotingServer; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.config.NettyServerConfig; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.processor.TaskAckProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskKillResponseProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor; import org.apache.dolphinscheduler.server.master.runner.MasterSchedulerService; import org.apache.dolphinscheduler.server.master.zk.ZKMasterClient; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.quartz.QuartzExecutors; import javax.annotation.PostConstruct; import org.quartz.SchedulerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.WebApplicationType; import org.springframework.boot.builder.SpringApplicationBuilder; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.FilterType; import org.springframework.transaction.annotation.EnableTransactionManagement; /** * master server */ @ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = { @ComponentScan.Filter(type = FilterType.REGEX, pattern = { "org.apache.dolphinscheduler.server.worker.*", "org.apache.dolphinscheduler.server.monitor.*", "org.apache.dolphinscheduler.server.log.*" }) }) @EnableTransactionManagement public class MasterServer implements IStoppable { /** * logger of MasterServer */ private static final Logger logger = LoggerFactory.getLogger(MasterServer.class); /** * master config */ @Autowired private MasterConfig masterConfig; /** * spring application context * only use it for initialization */ @Autowired private SpringApplicationContext springApplicationContext; /** * netty remote server */ private NettyRemotingServer nettyRemotingServer; /** * zk master client */ @Autowired private ZKMasterClient zkMasterClient; /** * scheduler service */ @Autowired private MasterSchedulerService masterSchedulerService; /** * master server startup, not use web service * * @param args arguments */ public static void main(String[] args) { Thread.currentThread().setName(Constants.THREAD_NAME_MASTER_SERVER); new SpringApplicationBuilder(MasterServer.class).web(WebApplicationType.NONE).run(args); } /** * run master 
server */ @PostConstruct public void run() { // init remoting server NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(masterConfig.getListenPort()); this.nettyRemotingServer = new NettyRemotingServer(serverConfig); this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_RESPONSE, new TaskResponseProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, new TaskAckProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_RESPONSE, new TaskKillResponseProcessor()); this.nettyRemotingServer.start(); // self tolerant this.zkMasterClient.start(); this.zkMasterClient.setStoppable(this); // scheduler start this.masterSchedulerService.start(); // start QuartzExecutors // what system should do if exception try { logger.info("start Quartz server..."); QuartzExecutors.getInstance().start(); } catch (Exception e) { try { QuartzExecutors.getInstance().shutdown(); } catch (SchedulerException e1) { logger.error("QuartzExecutors shutdown failed : " + e1.getMessage(), e1); } logger.error("start Quartz failed", e); } /** * register hooks, which are called before the process exits */ Runtime.getRuntime().addShutdownHook(new Thread(() -> { if (Stopper.isRunning()) { close("shutdownHook"); } })); } /** * gracefully close * * @param cause close cause */ public void close(String cause) { try { // execute only once if (Stopper.isStopped()) { return; } logger.info("master server is stopping ..., cause : {}", cause); // set stop signal is true Stopper.stop(); try { // thread sleep 3 seconds for thread quietly stop Thread.sleep(3000L); } catch (Exception e) { logger.warn("thread sleep exception ", e); } // close this.masterSchedulerService.close(); this.nettyRemotingServer.close(); this.zkMasterClient.close(); // close quartz try { QuartzExecutors.getInstance().shutdown(); logger.info("Quartz service stopped"); } catch (Exception e) { logger.warn("Quartz service stopped exception:{}", e.getMessage()); } } catch (Exception e) { logger.error("master server stop exception ", e); } } @Override public void stop(String cause) { close(cause); } }
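The stack trace in this record shows a registry listener writing an alert through AlertDao after the Spring context, and with it the Druid DataSource, has already been closed during shutdown. A minimal sketch of one defensive pattern, reusing the Stopper flag shown in MasterServer above (this is an illustration, not necessarily the linked PR's fix; alertAction is a hypothetical stand-in for the listener's real DAO call):

```java
import org.apache.dolphinscheduler.common.thread.Stopper;

/**
 * Illustrative guard: registry callbacks can fire while the JVM is shutting
 * down, after the DataSource is closed. Skipping DAO work once the global
 * stop flag is set avoids the "dataSource already closed" failure.
 */
public class ShutdownAwareAlert {
    public static void sendIfRunning(Runnable alertAction) {
        if (Stopper.isRunning()) {
            alertAction.run(); // safe: the server has not begun shutting down
        }
    }
}
```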
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,586
[Question] the cluster hangs up due to failure to obtain a JDBC Connection
I cannot see any master or worker on the Monitor page when the backend process instance is still alive on the ds server. ```[ERROR] 2021-06-02 08:53:16.176 org.apache.dolphinscheduler.server.master.registry.ServerNodeManager:[230] - WorkerGroupListener capture data change and get data failed org.mybatis.spring.MyBatisSystemException: nested exception is org.apache.ibatis.exceptions.PersistenceException: ### Error updating database. Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 ### The error may exist in org/apache/dolphinscheduler/dao/mapper/AlertMapper.java (best guess) ### The error may involve org.apache.dolphinscheduler.dao.mapper.AlertMapper.insert ### The error occurred while executing an update ### Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:78) at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440) at com.sun.proxy.$Proxy84.insert(Unknown Source) at org.mybatis.spring.SqlSessionTemplate.insert(SqlSessionTemplate.java:271) at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:58) at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61) at com.sun.proxy.$Proxy108.insert(Unknown Source) at org.apache.dolphinscheduler.dao.AlertDao.saveTaskTimeoutAlert(AlertDao.java:135) at org.apache.dolphinscheduler.dao.AlertDao.sendServerStopedAlert(AlertDao.java:102) at org.apache.dolphinscheduler.server.master.registry.ServerNodeManager$WorkerGroupNodeListener.dataChanged(ServerNodeManager.java:225) at org.apache.dolphinscheduler.service.zk.AbstractListener.childEvent(AbstractListener.java:32) at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:760) at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:754) at org.apache.curator.framework.listen.ListenerContainer$1.run(ListenerContainer.java:100) at org.apache.curator.shaded.com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30) at org.apache.curator.framework.listen.ListenerContainer.forEach(ListenerContainer.java:92) at org.apache.curator.framework.recipes.cache.TreeCache.callListeners(TreeCache.java:753) at org.apache.curator.framework.recipes.cache.TreeCache.access$1900(TreeCache.java:75) at org.apache.curator.framework.recipes.cache.TreeCache$4.run(TreeCache.java:865) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at `java.lang.Thread.run(Thread.java:748)` Caused by: org.apache.ibatis.exceptions.PersistenceException: ### Error updating database. 
Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 ### The error may exist in org/apache/dolphinscheduler/dao/mapper/AlertMapper.java (best guess) ### The error may involve org.apache.dolphinscheduler.dao.mapper.AlertMapper.insert ### The error occurred while executing an update ```
https://github.com/apache/dolphinscheduler/issues/5586
https://github.com/apache/dolphinscheduler/pull/5588
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
75be09735a29469ef5169550239c65a5a27af3ba
2021-06-03T01:39:35Z
java
2021-06-04T05:27:18Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/bean/SpringApplicationContext.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.bean; import org.springframework.beans.BeansException; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; import org.springframework.stereotype.Component; @Component public class SpringApplicationContext implements ApplicationContextAware { private static ApplicationContext applicationContext; @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { SpringApplicationContext.applicationContext = applicationContext; } public static <T> T getBean(Class<T> requiredType) { return applicationContext.getBean(requiredType); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,586
[Question] the cluster hangs up due to failure to obtain a JDBC Connection
I cannot see any master or worker on the Monitor page when the backend process instance is still alive on the ds server. ```[ERROR] 2021-06-02 08:53:16.176 org.apache.dolphinscheduler.server.master.registry.ServerNodeManager:[230] - WorkerGroupListener capture data change and get data failed org.mybatis.spring.MyBatisSystemException: nested exception is org.apache.ibatis.exceptions.PersistenceException: ### Error updating database. Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 ### The error may exist in org/apache/dolphinscheduler/dao/mapper/AlertMapper.java (best guess) ### The error may involve org.apache.dolphinscheduler.dao.mapper.AlertMapper.insert ### The error occurred while executing an update ### Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:78) at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440) at com.sun.proxy.$Proxy84.insert(Unknown Source) at org.mybatis.spring.SqlSessionTemplate.insert(SqlSessionTemplate.java:271) at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:58) at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61) at com.sun.proxy.$Proxy108.insert(Unknown Source) at org.apache.dolphinscheduler.dao.AlertDao.saveTaskTimeoutAlert(AlertDao.java:135) at org.apache.dolphinscheduler.dao.AlertDao.sendServerStopedAlert(AlertDao.java:102) at org.apache.dolphinscheduler.server.master.registry.ServerNodeManager$WorkerGroupNodeListener.dataChanged(ServerNodeManager.java:225) at org.apache.dolphinscheduler.service.zk.AbstractListener.childEvent(AbstractListener.java:32) at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:760) at org.apache.curator.framework.recipes.cache.TreeCache$2.apply(TreeCache.java:754) at org.apache.curator.framework.listen.ListenerContainer$1.run(ListenerContainer.java:100) at org.apache.curator.shaded.com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30) at org.apache.curator.framework.listen.ListenerContainer.forEach(ListenerContainer.java:92) at org.apache.curator.framework.recipes.cache.TreeCache.callListeners(TreeCache.java:753) at org.apache.curator.framework.recipes.cache.TreeCache.access$1900(TreeCache.java:75) at org.apache.curator.framework.recipes.cache.TreeCache$4.run(TreeCache.java:865) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at `java.lang.Thread.run(Thread.java:748)` Caused by: org.apache.ibatis.exceptions.PersistenceException: ### Error updating database. 
Cause: org.springframework.jdbc.CannotGetJdbcConnectionException: Failed to obtain JDBC Connection; nested exception is com.alibaba.druid.pool.DataSourceClosedException: dataSource already closed at Wed Jun 02 08:52:57 CST 2021 ### The error may exist in org/apache/dolphinscheduler/dao/mapper/AlertMapper.java (best guess) ### The error may involve org.apache.dolphinscheduler.dao.mapper.AlertMapper.insert ### The error occurred while executing an update ```
https://github.com/apache/dolphinscheduler/issues/5586
https://github.com/apache/dolphinscheduler/pull/5588
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
75be09735a29469ef5169550239c65a5a27af3ba
2021-06-03T01:39:35Z
java
2021-06-04T05:27:18Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/CuratorZookeeperClient.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.zk; import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.service.exceptions.ServiceException; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.framework.api.ACLProvider; import org.apache.curator.framework.state.ConnectionState; import org.apache.curator.retry.ExponentialBackoffRetry; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; import java.nio.charset.StandardCharsets; import java.util.List; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; /** * Shared Curator zookeeper client */ @Component public class CuratorZookeeperClient implements InitializingBean { private final Logger logger = LoggerFactory.getLogger(CuratorZookeeperClient.class); @Autowired private ZookeeperConfig zookeeperConfig; private CuratorFramework zkClient; @Override public void afterPropertiesSet() throws Exception { this.zkClient = buildClient(); initStateLister(); } private CuratorFramework buildClient() { logger.info("zookeeper registry center init, server lists is: [{}]", zookeeperConfig.getServerList()); CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder() .ensembleProvider(new DefaultEnsembleProvider(checkNotNull(zookeeperConfig.getServerList(), "zookeeper quorum can't be null"))) .retryPolicy(new ExponentialBackoffRetry(zookeeperConfig.getBaseSleepTimeMs(), zookeeperConfig.getMaxRetries(), zookeeperConfig.getMaxSleepMs())); //these has default value if (0 != zookeeperConfig.getSessionTimeoutMs()) { builder.sessionTimeoutMs(zookeeperConfig.getSessionTimeoutMs()); } if (0 != zookeeperConfig.getConnectionTimeoutMs()) { builder.connectionTimeoutMs(zookeeperConfig.getConnectionTimeoutMs()); } if (StringUtils.isNotBlank(zookeeperConfig.getDigest())) { builder.authorization("digest", zookeeperConfig.getDigest().getBytes(StandardCharsets.UTF_8)).aclProvider(new ACLProvider() { @Override public List<ACL> getDefaultAcl() { return ZooDefs.Ids.CREATOR_ALL_ACL; } @Override public List<ACL> getAclForPath(final String path) { return ZooDefs.Ids.CREATOR_ALL_ACL; } }); } zkClient = builder.build(); zkClient.start(); try { logger.info("trying to connect zookeeper server list:{}", zookeeperConfig.getServerList()); zkClient.blockUntilConnected(30, TimeUnit.SECONDS); } catch (final Exception ex) { throw new ServiceException(ex); } return zkClient; 
} public void initStateLister() { checkNotNull(zkClient); zkClient.getConnectionStateListenable().addListener((client, newState) -> { if (newState == ConnectionState.LOST) { logger.error("connection lost from zookeeper"); } else if (newState == ConnectionState.RECONNECTED) { logger.info("reconnected to zookeeper"); } else if (newState == ConnectionState.SUSPENDED) { logger.warn("connection SUSPENDED to zookeeper"); } else if (newState == ConnectionState.CONNECTED) { logger.info("connected to zookeeper server list:[{}]", zookeeperConfig.getServerList()); } }); } public ZookeeperConfig getZookeeperConfig() { return zookeeperConfig; } public void setZookeeperConfig(ZookeeperConfig zookeeperConfig) { this.zookeeperConfig = zookeeperConfig; } public CuratorFramework getZkClient() { return zkClient; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,559
[Bug][Master Server] Master Server was shut down but the process is still in the system
The Master Server was shut down, but the process is still in the system when I restart the zookeeper cluster. Version: 1.3.6. 1. Stop all the zookeeper servers, then start them again. 2. Run the jps command; the Master Server process can still be found. 3. The dolphinscheduler-master log shows: scheduler DolphinScheduler_$_slave.. shutdown complete. Quartz service stopped, and halt all task Quartz service stopped
https://github.com/apache/dolphinscheduler/issues/5559
https://github.com/apache/dolphinscheduler/pull/5588
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
75be09735a29469ef5169550239c65a5a27af3ba
2021-05-31T09:40:24Z
java
2021-06-04T05:27:18Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/MasterServer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.IStoppable; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.remote.NettyRemotingServer; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.config.NettyServerConfig; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.processor.TaskAckProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskKillResponseProcessor; import org.apache.dolphinscheduler.server.master.processor.TaskResponseProcessor; import org.apache.dolphinscheduler.server.master.runner.MasterSchedulerService; import org.apache.dolphinscheduler.server.master.zk.ZKMasterClient; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.quartz.QuartzExecutors; import javax.annotation.PostConstruct; import org.quartz.SchedulerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.WebApplicationType; import org.springframework.boot.builder.SpringApplicationBuilder; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.FilterType; import org.springframework.transaction.annotation.EnableTransactionManagement; /** * master server */ @ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = { @ComponentScan.Filter(type = FilterType.REGEX, pattern = { "org.apache.dolphinscheduler.server.worker.*", "org.apache.dolphinscheduler.server.monitor.*", "org.apache.dolphinscheduler.server.log.*" }) }) @EnableTransactionManagement public class MasterServer implements IStoppable { /** * logger of MasterServer */ private static final Logger logger = LoggerFactory.getLogger(MasterServer.class); /** * master config */ @Autowired private MasterConfig masterConfig; /** * spring application context * only use it for initialization */ @Autowired private SpringApplicationContext springApplicationContext; /** * netty remote server */ private NettyRemotingServer nettyRemotingServer; /** * zk master client */ @Autowired private ZKMasterClient zkMasterClient; /** * scheduler service */ @Autowired private MasterSchedulerService masterSchedulerService; /** * master server startup, not use web service * * @param args arguments */ public static void main(String[] args) { Thread.currentThread().setName(Constants.THREAD_NAME_MASTER_SERVER); new SpringApplicationBuilder(MasterServer.class).web(WebApplicationType.NONE).run(args); } /** * run master 
server */ @PostConstruct public void run() { // init remoting server NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(masterConfig.getListenPort()); this.nettyRemotingServer = new NettyRemotingServer(serverConfig); this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_RESPONSE, new TaskResponseProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_ACK, new TaskAckProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_RESPONSE, new TaskKillResponseProcessor()); this.nettyRemotingServer.start(); // self tolerant this.zkMasterClient.start(); this.zkMasterClient.setStoppable(this); // scheduler start this.masterSchedulerService.start(); // start QuartzExecutors // what system should do if exception try { logger.info("start Quartz server..."); QuartzExecutors.getInstance().start(); } catch (Exception e) { try { QuartzExecutors.getInstance().shutdown(); } catch (SchedulerException e1) { logger.error("QuartzExecutors shutdown failed : " + e1.getMessage(), e1); } logger.error("start Quartz failed", e); } /** * register hooks, which are called before the process exits */ Runtime.getRuntime().addShutdownHook(new Thread(() -> { if (Stopper.isRunning()) { close("shutdownHook"); } })); } /** * gracefully close * * @param cause close cause */ public void close(String cause) { try { // execute only once if (Stopper.isStopped()) { return; } logger.info("master server is stopping ..., cause : {}", cause); // set stop signal is true Stopper.stop(); try { // thread sleep 3 seconds for thread quietly stop Thread.sleep(3000L); } catch (Exception e) { logger.warn("thread sleep exception ", e); } // close this.masterSchedulerService.close(); this.nettyRemotingServer.close(); this.zkMasterClient.close(); // close quartz try { QuartzExecutors.getInstance().shutdown(); logger.info("Quartz service stopped"); } catch (Exception e) { logger.warn("Quartz service stopped exception:{}", e.getMessage()); } } catch (Exception e) { logger.error("master server stop exception ", e); } } @Override public void stop(String cause) { close(cause); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,559
[Bug][Master Server] Master Server was shut down but the process is still in the system
The Master Server was shut down, but the process is still in the system when I restart the zookeeper cluster. Version: 1.3.6. 1. Stop all the zookeeper servers, then start them again. 2. Run the jps command; the Master Server process can still be found. 3. The dolphinscheduler-master log shows: scheduler DolphinScheduler_$_slave.. shutdown complete. Quartz service stopped, and halt all task Quartz service stopped
https://github.com/apache/dolphinscheduler/issues/5559
https://github.com/apache/dolphinscheduler/pull/5588
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
75be09735a29469ef5169550239c65a5a27af3ba
2021-05-31T09:40:24Z
java
2021-06-04T05:27:18Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/bean/SpringApplicationContext.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.bean; import org.springframework.beans.BeansException; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; import org.springframework.stereotype.Component; @Component public class SpringApplicationContext implements ApplicationContextAware { private static ApplicationContext applicationContext; @Override public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { SpringApplicationContext.applicationContext = applicationContext; } public static <T> T getBean(Class<T> requiredType) { return applicationContext.getBean(requiredType); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,559
[Bug][Master Server] Master Server was shut down but the process is still in the system
The Master Server was shut down, but the process is still in the system when I restart the zookeeper cluster. Version: 1.3.6. 1. Stop all the zookeeper servers, then start them again. 2. Run the jps command; the Master Server process can still be found. 3. The dolphinscheduler-master log shows: scheduler DolphinScheduler_$_slave.. shutdown complete. Quartz service stopped, and halt all task Quartz service stopped
https://github.com/apache/dolphinscheduler/issues/5559
https://github.com/apache/dolphinscheduler/pull/5588
87604b7a3df17dcfc5cc9087340d06b0d8930ddc
75be09735a29469ef5169550239c65a5a27af3ba
2021-05-31T09:40:24Z
java
2021-06-04T05:27:18Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/CuratorZookeeperClient.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.zk; import static org.apache.dolphinscheduler.common.utils.Preconditions.checkNotNull; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.service.exceptions.ServiceException; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.framework.api.ACLProvider; import org.apache.curator.framework.state.ConnectionState; import org.apache.curator.retry.ExponentialBackoffRetry; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; import java.nio.charset.StandardCharsets; import java.util.List; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; /** * Shared Curator zookeeper client */ @Component public class CuratorZookeeperClient implements InitializingBean { private final Logger logger = LoggerFactory.getLogger(CuratorZookeeperClient.class); @Autowired private ZookeeperConfig zookeeperConfig; private CuratorFramework zkClient; @Override public void afterPropertiesSet() throws Exception { this.zkClient = buildClient(); initStateLister(); } private CuratorFramework buildClient() { logger.info("zookeeper registry center init, server lists is: [{}]", zookeeperConfig.getServerList()); CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder() .ensembleProvider(new DefaultEnsembleProvider(checkNotNull(zookeeperConfig.getServerList(), "zookeeper quorum can't be null"))) .retryPolicy(new ExponentialBackoffRetry(zookeeperConfig.getBaseSleepTimeMs(), zookeeperConfig.getMaxRetries(), zookeeperConfig.getMaxSleepMs())); //these has default value if (0 != zookeeperConfig.getSessionTimeoutMs()) { builder.sessionTimeoutMs(zookeeperConfig.getSessionTimeoutMs()); } if (0 != zookeeperConfig.getConnectionTimeoutMs()) { builder.connectionTimeoutMs(zookeeperConfig.getConnectionTimeoutMs()); } if (StringUtils.isNotBlank(zookeeperConfig.getDigest())) { builder.authorization("digest", zookeeperConfig.getDigest().getBytes(StandardCharsets.UTF_8)).aclProvider(new ACLProvider() { @Override public List<ACL> getDefaultAcl() { return ZooDefs.Ids.CREATOR_ALL_ACL; } @Override public List<ACL> getAclForPath(final String path) { return ZooDefs.Ids.CREATOR_ALL_ACL; } }); } zkClient = builder.build(); zkClient.start(); try { logger.info("trying to connect zookeeper server list:{}", zookeeperConfig.getServerList()); zkClient.blockUntilConnected(30, TimeUnit.SECONDS); } catch (final Exception ex) { throw new ServiceException(ex); } return zkClient; 
} public void initStateLister() { checkNotNull(zkClient); zkClient.getConnectionStateListenable().addListener((client, newState) -> { if (newState == ConnectionState.LOST) { logger.error("connection lost from zookeeper"); } else if (newState == ConnectionState.RECONNECTED) { logger.info("reconnected to zookeeper"); } else if (newState == ConnectionState.SUSPENDED) { logger.warn("connection SUSPENDED to zookeeper"); } else if (newState == ConnectionState.CONNECTED) { logger.info("connected to zookeeper server list:[{}]", zookeeperConfig.getServerList()); } }); } public ZookeeperConfig getZookeeperConfig() { return zookeeperConfig; } public void setZookeeperConfig(ZookeeperConfig zookeeperConfig) { this.zookeeperConfig = zookeeperConfig; } public CuratorFramework getZkClient() { return zkClient; } }
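Note how initStateLister in the client above only logs ConnectionState.LOST; that is consistent with the symptom in this record, where the master keeps running after the ZooKeeper cluster restarts. A minimal sketch of escalating a lost session into the server's stop path, reusing the IStoppable contract MasterServer implements (an assumption about the fix direction, not a quote of the linked PR):

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.state.ConnectionState;
import org.apache.dolphinscheduler.common.IStoppable;

/**
 * Illustrative escalation: instead of only logging a lost ZooKeeper session,
 * hand it to the server's IStoppable so the graceful close() path runs and
 * the process actually exits rather than lingering detached.
 */
public class LostSessionEscalation {
    public static void register(CuratorFramework zkClient, IStoppable stoppable) {
        zkClient.getConnectionStateListenable().addListener((client, newState) -> {
            if (newState == ConnectionState.LOST) {
                stoppable.stop("zookeeper connection lost");
            }
        });
    }
}
```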
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,468
[Bug][Common] Obtaining IP is incorrect
**To Reproduce** In some scenarios, obtaining IP is incorrect **Expected behavior** Bug fixed **Screenshots** If applicable, add screenshots to help explain your problem. **Scenario 1: IP is null** ![image](https://user-images.githubusercontent.com/4902714/118223662-5f3edb80-b4b4-11eb-9046-3565f6fe1a2c.png) **Scenario 2: IP incorrect order** ![image](https://user-images.githubusercontent.com/4902714/118223277-92cd3600-b4b3-11eb-920c-46c3bfe1cc2f.png) ![image](https://user-images.githubusercontent.com/4902714/118223548-230b7b00-b4b4-11eb-8775-57abc66abf41.png) **Scenario 3: IP is 127.0.0.1** ![image](https://user-images.githubusercontent.com/4902714/118223722-7e3d6d80-b4b4-11eb-8cec-a3d408be41a3.png) ![image](https://user-images.githubusercontent.com/4902714/118223725-81d0f480-b4b4-11eb-95aa-0c1a0add8560.png) **Scenario 4: IP is 0.0.0.0.0.0** ![image](https://user-images.githubusercontent.com/4902714/119919935-07b56b00-bf9e-11eb-9972-00d73be4f381.png) ![image](https://user-images.githubusercontent.com/4902714/119919948-0e43e280-bf9e-11eb-8947-9eccfac17b47.png) ![image](https://user-images.githubusercontent.com/4902714/119920051-3895a000-bf9e-11eb-87dc-efc7d6181d64.png) **Which version of Dolphin Scheduler:** -[1.3.x] -[dev] **Additional context** Add any other context about the problem here. **Requirement or improvement** - Please describe about your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5468
https://github.com/apache/dolphinscheduler/pull/5594
75be09735a29469ef5169550239c65a5a27af3ba
281b5aea6b85df86b279eb3377ff6851c560bcbd
2021-05-14T05:04:32Z
java
2021-06-07T14:12:49Z
docker/build/conf/dolphinscheduler/common.properties.tpl
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# user data local directory path, please make sure the directory exists and have read write permissions
data.basedir.path=${DATA_BASEDIR_PATH}

# resource storage type: HDFS, S3, NONE
resource.storage.type=${RESOURCE_STORAGE_TYPE}

# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
resource.upload.path=${RESOURCE_UPLOAD_PATH}

# whether to startup kerberos
hadoop.security.authentication.startup.state=${HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE}

# java.security.krb5.conf path
java.security.krb5.conf.path=${JAVA_SECURITY_KRB5_CONF_PATH}

# login user from keytab username
login.user.keytab.username=${LOGIN_USER_KEYTAB_USERNAME}

# login user from keytab path
login.user.keytab.path=${LOGIN_USER_KEYTAB_PATH}

# kerberos expire time, the unit is hour
kerberos.expire.time=${KERBEROS_EXPIRE_TIME}

# resource view suffixs
#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js

# if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
hdfs.root.user=${HDFS_ROOT_USER}

# if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
fs.defaultFS=${FS_DEFAULT_FS}

# if resource.storage.type=S3, s3 endpoint
fs.s3a.endpoint=${FS_S3A_ENDPOINT}

# if resource.storage.type=S3, s3 access key
fs.s3a.access.key=${FS_S3A_ACCESS_KEY}

# if resource.storage.type=S3, s3 secret key
fs.s3a.secret.key=${FS_S3A_SECRET_KEY}

# resourcemanager port, the default value is 8088 if not specified
resource.manager.httpaddress.port=${RESOURCE_MANAGER_HTTPADDRESS_PORT}

# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarn.resourcemanager.ha.rm.ids=${YARN_RESOURCEMANAGER_HA_RM_IDS}

# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; if resourcemanager is single, you only need to replace ds1 with the actual resourcemanager hostname
yarn.application.status.address=${YARN_APPLICATION_STATUS_ADDRESS}

# job history status url when application number threshold is reached (default 10000, maybe it was set to 1000)
yarn.job.history.status.address=${YARN_JOB_HISTORY_STATUS_ADDRESS}

# datasource encryption enable
datasource.encryption.enable=${DATASOURCE_ENCRYPTION_ENABLE}

# datasource encryption salt
datasource.encryption.salt=${DATASOURCE_ENCRYPTION_SALT}

# use sudo or not; if set true, the executing user is the tenant user and the deploy user needs sudo permissions; if set false, the executing user is the deploy user and doesn't need sudo permissions
sudo.enable=${SUDO_ENABLE}

# network IP gets priority, default: inner outer
#dolphin.scheduler.network.priority.strategy=default

# system env path
#dolphinscheduler.env.path=env/dolphinscheduler_env.sh

# development state
development.state=false
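This copy of `common.properties` is a template: every value is a `${NAME}` placeholder expected to be filled from an environment variable, presumably by a container entrypoint before the server starts. A rough, standalone Java stand-in for that substitution step (the `EnvSubst` class name and the exact placeholder convention are illustrative assumptions, not project code):

```
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Minimal envsubst-style renderer: replaces ${NAME} with the value of the
// NAME environment variable, or the empty string when it is unset.
public class EnvSubst {
    private static final Pattern VAR = Pattern.compile("\\$\\{([A-Z0-9_]+)}");

    public static String render(String template) {
        Matcher m = VAR.matcher(template);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
            String value = System.getenv(m.group(1));
            m.appendReplacement(out, Matcher.quoteReplacement(value == null ? "" : value));
        }
        m.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(render("resource.storage.type=${RESOURCE_STORAGE_TYPE}"));
    }
}
```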
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,468
[Bug][Common] Obtaining IP is incorrect
**To Reproduce**
In some scenarios, the obtained IP is incorrect.

**Expected behavior**
Bug fixed.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Scenario 1: IP is null**
![image](https://user-images.githubusercontent.com/4902714/118223662-5f3edb80-b4b4-11eb-9046-3565f6fe1a2c.png)

**Scenario 2: IP in incorrect order**
![image](https://user-images.githubusercontent.com/4902714/118223277-92cd3600-b4b3-11eb-920c-46c3bfe1cc2f.png)
![image](https://user-images.githubusercontent.com/4902714/118223548-230b7b00-b4b4-11eb-8775-57abc66abf41.png)

**Scenario 3: IP is 127.0.0.1**
![image](https://user-images.githubusercontent.com/4902714/118223722-7e3d6d80-b4b4-11eb-8cec-a3d408be41a3.png)
![image](https://user-images.githubusercontent.com/4902714/118223725-81d0f480-b4b4-11eb-95aa-0c1a0add8560.png)

**Scenario 4: IP is 0.0.0.0.0.0**
![image](https://user-images.githubusercontent.com/4902714/119919935-07b56b00-bf9e-11eb-9972-00d73be4f381.png)
![image](https://user-images.githubusercontent.com/4902714/119919948-0e43e280-bf9e-11eb-8947-9eccfac17b47.png)
![image](https://user-images.githubusercontent.com/4902714/119920051-3895a000-bf9e-11eb-87dc-efc7d6181d64.png)

**Which version of Dolphin Scheduler:**
- [1.3.x]
- [dev]

**Additional context**
Add any other context about the problem here.

**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
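One way "Scenario 3" can arise in plain Java, independent of any DolphinScheduler code: on hosts whose hostname maps to 127.0.0.1 in /etc/hosts, `InetAddress.getLocalHost()` resolves to the loopback address, so anything that trusts it directly reports 127.0.0.1. A minimal probe (illustrative only, not part of the codebase):

```
import java.net.InetAddress;

public class LocalHostProbe {
    public static void main(String[] args) throws Exception {
        // Prints 127.0.0.1 with loopback=true on hosts whose hostname
        // resolves to the loopback interface.
        InetAddress local = InetAddress.getLocalHost();
        System.out.println(local.getHostAddress() + " (loopback=" + local.isLoopbackAddress() + ")");
    }
}
```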
https://github.com/apache/dolphinscheduler/issues/5468
https://github.com/apache/dolphinscheduler/pull/5594
75be09735a29469ef5169550239c65a5a27af3ba
281b5aea6b85df86b279eb3377ff6851c560bcbd
2021-05-14T05:04:32Z
java
2021-06-07T14:12:49Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import java.util.regex.Pattern; /** * Constants */ public final class Constants { private Constants() { throw new UnsupportedOperationException("Construct Constants"); } /** * quartz config */ public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId"; public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon"; public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties"; public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class"; public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount"; public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons"; public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority"; public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class"; public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix"; public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered"; public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold"; public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval"; public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock"; public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource"; public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class"; /** * quartz config default value */ public static final String QUARTZ_TABLE_PREFIX = "QRTZ_"; public static final String QUARTZ_MISFIRETHRESHOLD = "60000"; public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000"; public static final String QUARTZ_DATASOURCE = "myDs"; public static final String QUARTZ_THREADCOUNT = "25"; public static final String QUARTZ_THREADPRIORITY = "5"; public static final String QUARTZ_INSTANCENAME = "DolphinScheduler"; public static final String QUARTZ_INSTANCEID = "AUTO"; public static final String 
QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true"; /** * common properties path */ public static final String COMMON_PROPERTIES_PATH = "/common.properties"; /** * fs.defaultFS */ public static final String FS_DEFAULTFS = "fs.defaultFS"; /** * fs s3a endpoint */ public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint"; /** * fs s3a access key */ public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key"; /** * fs s3a secret key */ public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key"; /** * hadoop configuration */ public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE"; public static final String HADOOP_RM_STATE_STANDBY = "STANDBY"; public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port"; /** * yarn.resourcemanager.ha.rm.ids */ public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids"; /** * yarn.application.status.address */ public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address"; /** * yarn.job.history.status.address */ public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address"; /** * hdfs configuration * hdfs.root.user */ public static final String HDFS_ROOT_USER = "hdfs.root.user"; /** * hdfs/s3 configuration * resource.upload.path */ public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path"; /** * data basedir path */ public static final String DATA_BASEDIR_PATH = "data.basedir.path"; /** * dolphinscheduler.env.path */ public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path"; /** * environment properties default path */ public static final String ENV_PATH = "env/dolphinscheduler_env.sh"; /** * python home */ public static final String PYTHON_HOME = "PYTHON_HOME"; /** * resource.view.suffixs */ public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs"; public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js"; /** * development.state */ public static final String DEVELOPMENT_STATE = "development.state"; /** * sudo enable */ public static final String SUDO_ENABLE = "sudo.enable"; /** * string true */ public static final String STRING_TRUE = "true"; /** * string false */ public static final String STRING_FALSE = "false"; /** * resource storage type */ public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type"; /** * MasterServer directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/nodes/master"; /** * WorkerServer directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/nodes/worker"; /** * all servers directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers"; /** * MasterServer lock directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters"; /** * MasterServer failover directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters"; /** * WorkerServer failover directory registered in zookeeper */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers"; /** * MasterServer startup failover runing and fault tolerance process */ public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = 
"/lock/failover/startup-masters"; /** * comma , */ public static final String COMMA = ","; /** * slash / */ public static final String SLASH = "/"; /** * COLON : */ public static final String COLON = ":"; /** * SPACE " " */ public static final String SPACE = " "; /** * SINGLE_SLASH / */ public static final String SINGLE_SLASH = "/"; /** * DOUBLE_SLASH // */ public static final String DOUBLE_SLASH = "//"; /** * SINGLE_QUOTES "'" */ public static final String SINGLE_QUOTES = "'"; /** * DOUBLE_QUOTES "\"" */ public static final String DOUBLE_QUOTES = "\""; /** * SEMICOLON ; */ public static final String SEMICOLON = ";"; /** * EQUAL SIGN */ public static final String EQUAL_SIGN = "="; /** * AT SIGN */ public static final String AT_SIGN = "@"; /** * date format of yyyy-MM-dd HH:mm:ss */ public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss"; /** * date format of yyyyMMddHHmmss */ public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss"; /** * date format of yyyyMMddHHmmssSSS */ public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS"; /** * http connect time out */ public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000; /** * http connect request time out */ public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000; /** * httpclient soceket time out */ public static final int SOCKET_TIMEOUT = 60 * 1000; /** * http header */ public static final String HTTP_HEADER_UNKNOWN = "unKnown"; /** * http X-Forwarded-For */ public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For"; /** * http X-Real-IP */ public static final String HTTP_X_REAL_IP = "X-Real-IP"; /** * UTF-8 */ public static final String UTF_8 = "UTF-8"; /** * user name regex */ public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$"); /** * email regex */ public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$"); /** * default display rows */ public static final int DEFAULT_DISPLAY_ROWS = 10; /** * read permission */ public static final int READ_PERMISSION = 2 * 1; /** * write permission */ public static final int WRITE_PERMISSION = 2 * 2; /** * execute permission */ public static final int EXECUTE_PERMISSION = 1; /** * default admin permission */ public static final int DEFAULT_ADMIN_PERMISSION = 7; /** * all permissions */ public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION; /** * max task timeout */ public static final int MAX_TASK_TIMEOUT = 24 * 3600; /** * master cpu load */ public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2; /** * worker cpu load */ public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2; /** * worker host weight */ public static final int DEFAULT_WORKER_HOST_WEIGHT = 100; /** * default log cache rows num,output when reach the number */ public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16; /** * log flush interval?output when reach the interval */ public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000; /** * time unit secong to minutes */ public static final int SEC_2_MINUTES_TIME_UNIT = 60; /*** * * rpc port */ public static final int RPC_PORT = 50051; /*** * alert rpc port */ public static final int ALERT_RPC_PORT = 50052; /** * forbid running task */ public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN"; /** * normal running task */ public static final String FLOWNODE_RUN_FLAG_NORMAL = 
"NORMAL"; /** * datasource configuration path */ public static final String DATASOURCE_PROPERTIES = "/datasource.properties"; public static final String DEFAULT = "Default"; public static final String USER = "user"; public static final String PASSWORD = "password"; public static final String XXXXXX = "******"; public static final String NULL = "NULL"; public static final String THREAD_NAME_MASTER_SERVER = "Master-Server"; public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server"; /** * command parameter keys */ public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId"; public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList"; public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId"; public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId"; public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0"; public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId"; public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId"; public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList"; public static final String CMD_PARAM_START_PARAMS = "StartParams"; public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams"; /** * complement data start date */ public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate"; /** * complement data end date */ public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate"; /** * data source config */ public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name"; public static final String SPRING_DATASOURCE_URL = "spring.datasource.url"; public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username"; public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password"; public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout"; public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize"; public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle"; public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive"; public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait"; public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis"; public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis"; public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis"; public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery"; public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle"; public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow"; public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn"; public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements"; public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit"; public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive"; public static final String 
SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize"; public static final String DEVELOPMENT = "development"; public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties"; /** * sleep time */ public static final int SLEEP_TIME_MILLIS = 1000; /** * heartbeat for zk info length */ public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10; public static final int HEARTBEAT_WITH_WEIGHT_FOR_ZOOKEEPER_INFO_LENGTH = 11; /** * jar */ public static final String JAR = "jar"; /** * hadoop */ public static final String HADOOP = "hadoop"; /** * -D <property>=<value> */ public static final String D = "-D"; /** * -D mapreduce.job.name=name */ public static final String MR_NAME = "mapreduce.job.name"; /** * -D mapreduce.job.queuename=queuename */ public static final String MR_QUEUE = "mapreduce.job.queuename"; /** * spark params constant */ public static final String MASTER = "--master"; public static final String DEPLOY_MODE = "--deploy-mode"; /** * --class CLASS_NAME */ public static final String MAIN_CLASS = "--class"; /** * --driver-cores NUM */ public static final String DRIVER_CORES = "--driver-cores"; /** * --driver-memory MEM */ public static final String DRIVER_MEMORY = "--driver-memory"; /** * --num-executors NUM */ public static final String NUM_EXECUTORS = "--num-executors"; /** * --executor-cores NUM */ public static final String EXECUTOR_CORES = "--executor-cores"; /** * --executor-memory MEM */ public static final String EXECUTOR_MEMORY = "--executor-memory"; /** * --name NAME */ public static final String SPARK_NAME = "--name"; /** * --queue QUEUE */ public static final String SPARK_QUEUE = "--queue"; /** * exit code success */ public static final int EXIT_CODE_SUCCESS = 0; /** * exit code kill */ public static final int EXIT_CODE_KILL = 137; /** * exit code failure */ public static final int EXIT_CODE_FAILURE = -1; /** * process or task definition failure */ public static final int DEFINITION_FAILURE = -1; /** * date format of yyyyMMdd */ public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd"; /** * date format of yyyyMMddHHmmss */ public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss"; /** * system date(yyyyMMddHHmmss) */ public static final String PARAMETER_DATETIME = "system.datetime"; /** * system date(yyyymmdd) today */ public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate"; /** * system date(yyyymmdd) yesterday */ public static final String PARAMETER_BUSINESS_DATE = "system.biz.date"; /** * ACCEPTED */ public static final String ACCEPTED = "ACCEPTED"; /** * SUCCEEDED */ public static final String SUCCEEDED = "SUCCEEDED"; /** * NEW */ public static final String NEW = "NEW"; /** * NEW_SAVING */ public static final String NEW_SAVING = "NEW_SAVING"; /** * SUBMITTED */ public static final String SUBMITTED = "SUBMITTED"; /** * FAILED */ public static final String FAILED = "FAILED"; /** * KILLED */ public static final String KILLED = "KILLED"; /** * RUNNING */ public static final String RUNNING = "RUNNING"; /** * underline "_" */ public static final String UNDERLINE = "_"; /** * quartz job prifix */ public static final String QUARTZ_JOB_PRIFIX = "job"; /** * quartz job group prifix */ public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup"; /** * projectId */ public static final String PROJECT_ID = "projectId"; /** * processId */ public static final String SCHEDULE_ID = "scheduleId"; /** * schedule */ public static final String SCHEDULE = "schedule"; 
/** * application regex */ public static final String APPLICATION_REGEX = "application_\\d+_\\d+"; public static final String PID = OSUtils.isWindows() ? "handle" : "pid"; /** * month_begin */ public static final String MONTH_BEGIN = "month_begin"; /** * add_months */ public static final String ADD_MONTHS = "add_months"; /** * month_end */ public static final String MONTH_END = "month_end"; /** * week_begin */ public static final String WEEK_BEGIN = "week_begin"; /** * week_end */ public static final String WEEK_END = "week_end"; /** * timestamp */ public static final String TIMESTAMP = "timestamp"; public static final char SUBTRACT_CHAR = '-'; public static final char ADD_CHAR = '+'; public static final char MULTIPLY_CHAR = '*'; public static final char DIVISION_CHAR = '/'; public static final char LEFT_BRACE_CHAR = '('; public static final char RIGHT_BRACE_CHAR = ')'; public static final String ADD_STRING = "+"; public static final String MULTIPLY_STRING = "*"; public static final String DIVISION_STRING = "/"; public static final String LEFT_BRACE_STRING = "("; public static final char P = 'P'; public static final char N = 'N'; public static final String SUBTRACT_STRING = "-"; public static final String GLOBAL_PARAMS = "globalParams"; public static final String LOCAL_PARAMS = "localParams"; public static final String LOCAL_PARAMS_LIST = "localParamsList"; public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId"; public static final String PROCESS_INSTANCE_STATE = "processInstanceState"; public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance"; public static final String CONDITION_RESULT = "conditionResult"; public static final String DEPENDENCE = "dependence"; public static final String TASK_TYPE = "taskType"; public static final String TASK_LIST = "taskList"; public static final String RWXR_XR_X = "rwxr-xr-x"; public static final String QUEUE = "queue"; public static final String QUEUE_NAME = "queueName"; public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0; public static final int LOG_QUERY_LIMIT = 4096; /** * master/worker server use for zk */ public static final String MASTER_TYPE = "master"; public static final String WORKER_TYPE = "worker"; public static final String DELETE_ZK_OP = "delete"; public static final String ADD_ZK_OP = "add"; public static final String ALIAS = "alias"; public static final String CONTENT = "content"; public static final String DEPENDENT_SPLIT = ":||"; public static final String DEPENDENT_ALL = "ALL"; /** * preview schedule execute count */ public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5; /** * kerberos */ public static final String KERBEROS = "kerberos"; /** * kerberos expire time */ public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time"; /** * java.security.krb5.conf */ public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf"; /** * java.security.krb5.conf.path */ public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state"; /** * com.amazonaws.services.s3.enableV4 */ public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4"; /** * loginUserFromKeytab user */ public static final String LOGIN_USER_KEY_TAB_USERNAME = 
"login.user.keytab.username"; /** * loginUserFromKeytab path */ public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path"; /** * task log info format */ public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s"; /** * hive conf */ public static final String HIVE_CONF = "hiveconf:"; /** * flink */ public static final String FLINK_YARN_CLUSTER = "yarn-cluster"; public static final String FLINK_RUN_MODE = "-m"; public static final String FLINK_YARN_SLOT = "-ys"; public static final String FLINK_APP_NAME = "-ynm"; public static final String FLINK_QUEUE = "-yqu"; public static final String FLINK_TASK_MANAGE = "-yn"; public static final String FLINK_JOB_MANAGE_MEM = "-yjm"; public static final String FLINK_TASK_MANAGE_MEM = "-ytm"; public static final String FLINK_MAIN_CLASS = "-c"; public static final String FLINK_PARALLELISM = "-p"; public static final String FLINK_SHUTDOWN_ON_ATTACHED_EXIT = "-sae"; public static final int[] NOT_TERMINATED_STATES = new int[] { ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal(), ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(), ExecutionStatus.WAITTING_THREAD.ordinal(), ExecutionStatus.WAITTING_DEPEND.ordinal() }; /** * status */ public static final String STATUS = "status"; /** * message */ public static final String MSG = "msg"; /** * data total */ public static final String COUNT = "count"; /** * page size */ public static final String PAGE_SIZE = "pageSize"; /** * current page no */ public static final String PAGE_NUMBER = "pageNo"; /** * */ public static final String DATA_LIST = "data"; public static final String TOTAL_LIST = "totalList"; public static final String CURRENT_PAGE = "currentPage"; public static final String TOTAL_PAGE = "totalPage"; public static final String TOTAL = "total"; /** * workflow */ public static final String WORKFLOW_LIST = "workFlowList"; public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList"; /** * session user */ public static final String SESSION_USER = "session.user"; public static final String SESSION_ID = "sessionId"; public static final String PASSWORD_DEFAULT = "******"; /** * locale */ public static final String LOCALE_LANGUAGE = "language"; /** * driver */ public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver"; public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver"; public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver"; public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver"; public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver"; public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver"; public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver"; public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver"; /** * database type */ public static final String MYSQL = "MYSQL"; public static final String POSTGRESQL = "POSTGRESQL"; public static final String HIVE = "HIVE"; public static final String SPARK = "SPARK"; public static final String CLICKHOUSE = "CLICKHOUSE"; public static final String ORACLE = "ORACLE"; public static final String SQLSERVER = "SQLSERVER"; public static final String DB2 = "DB2"; public static final String PRESTO = "PRESTO"; /** * jdbc url */ public static 
final String JDBC_MYSQL = "jdbc:mysql://"; public static final String JDBC_POSTGRESQL = "jdbc:postgresql://"; public static final String JDBC_HIVE_2 = "jdbc:hive2://"; public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://"; public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@"; public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//"; public static final String JDBC_SQLSERVER = "jdbc:sqlserver://"; public static final String JDBC_DB2 = "jdbc:db2://"; public static final String JDBC_PRESTO = "jdbc:presto://"; public static final String ADDRESS = "address"; public static final String DATABASE = "database"; public static final String JDBC_URL = "jdbcUrl"; public static final String PRINCIPAL = "principal"; public static final String OTHER = "other"; public static final String ORACLE_DB_CONNECT_TYPE = "connectType"; public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf"; public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername"; public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath"; /** * session timeout */ public static final int SESSION_TIME_OUT = 7200; public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024; public static final String UDF = "UDF"; public static final String CLASS = "class"; public static final String RECEIVERS = "receivers"; public static final String RECEIVERS_CC = "receiversCc"; /** * dataSource sensitive param */ public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))"; /** * default worker group */ public static final String DEFAULT_WORKER_GROUP = "default"; public static final Integer TASK_INFO_LENGTH = 5; /** * new * schedule time */ public static final String PARAMETER_SHECDULE_TIME = "schedule.time"; /** * authorize writable perm */ public static final int AUTHORIZE_WRITABLE_PERM = 7; /** * authorize readable perm */ public static final int AUTHORIZE_READABLE_PERM = 4; /** * plugin configurations */ public static final String PLUGIN_JAR_SUFFIX = ".jar"; public static final int NORMAL_NODE_STATUS = 0; public static final int ABNORMAL_NODE_STATUS = 1; public static final String START_TIME = "start time"; public static final String END_TIME = "end time"; public static final String START_END_DATE = "startDate,endDate"; /** * system line separator */ public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator"); /** * net system properties */ public static final String DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE = "dolphin.scheduler.network.interface.preferred"; public static final String EXCEL_SUFFIX_XLS = ".xls"; /** * datasource encryption salt */ public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*"; public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable"; public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt"; /** * Network IP gets priority, default inner outer */ public static final String NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy"; /** * exec shell scripts */ public static final String SH = "sh"; /** * pstree, get pud and sub pid */ public static final String PSTREE = "pstree"; /** * snow flake, data center id, this id must be greater than 0 and less than 32 */ public static final String SNOW_FLAKE_DATA_CENTER_ID = "data.center.id"; /** * docker & kubernetes */ public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER")); public static final boolean 
KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT")); }
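The `DOCKER_MODE` and `KUBERNETES_MODE` flags at the end of the file are computed once, at class-load time, from environment variables. A standalone sketch of the same detection logic (`ModeProbe` is a hypothetical name; the checks mirror the constants above):

```
public class ModeProbe {
    private static boolean notEmpty(String s) {
        return s != null && !s.isEmpty();
    }

    public static void main(String[] args) {
        // Same conditions as Constants.DOCKER_MODE and Constants.KUBERNETES_MODE.
        boolean docker = notEmpty(System.getenv("DOCKER"));
        boolean kubernetes = notEmpty(System.getenv("KUBERNETES_SERVICE_HOST"))
                && notEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));
        System.out.println("docker=" + docker + ", kubernetes=" + kubernetes);
    }
}
```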
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,468
[Bug][Common] Obtaining IP is incorrect
**To Reproduce**
In some scenarios, the obtained IP is incorrect.

**Expected behavior**
Bug fixed.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Scenario 1: IP is null**
![image](https://user-images.githubusercontent.com/4902714/118223662-5f3edb80-b4b4-11eb-9046-3565f6fe1a2c.png)

**Scenario 2: IP in incorrect order**
![image](https://user-images.githubusercontent.com/4902714/118223277-92cd3600-b4b3-11eb-920c-46c3bfe1cc2f.png)
![image](https://user-images.githubusercontent.com/4902714/118223548-230b7b00-b4b4-11eb-8775-57abc66abf41.png)

**Scenario 3: IP is 127.0.0.1**
![image](https://user-images.githubusercontent.com/4902714/118223722-7e3d6d80-b4b4-11eb-8cec-a3d408be41a3.png)
![image](https://user-images.githubusercontent.com/4902714/118223725-81d0f480-b4b4-11eb-95aa-0c1a0add8560.png)

**Scenario 4: IP is 0.0.0.0.0.0**
![image](https://user-images.githubusercontent.com/4902714/119919935-07b56b00-bf9e-11eb-9972-00d73be4f381.png)
![image](https://user-images.githubusercontent.com/4902714/119919948-0e43e280-bf9e-11eb-8947-9eccfac17b47.png)
![image](https://user-images.githubusercontent.com/4902714/119920051-3895a000-bf9e-11eb-87dc-efc7d6181d64.png)

**Which version of Dolphin Scheduler:**
- [1.3.x]
- [dev]

**Additional context**
Add any other context about the problem here.

**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5468
https://github.com/apache/dolphinscheduler/pull/5594
75be09735a29469ef5169550239c65a5a27af3ba
281b5aea6b85df86b279eb3377ff6851c560bcbd
2021-05-14T05:04:32Z
java
2021-06-07T14:12:49Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/NetUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.utils; import static org.apache.dolphinscheduler.common.Constants.DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE; import static java.util.Collections.emptyList; import org.apache.dolphinscheduler.common.Constants; import java.io.IOException; import java.net.Inet6Address; import java.net.InetAddress; import java.net.NetworkInterface; import java.net.SocketException; import java.net.UnknownHostException; import java.util.Enumeration; import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.Optional; import java.util.regex.Pattern; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * NetUtils */ public class NetUtils { private static final Pattern IP_PATTERN = Pattern.compile("\\d{1,3}(\\.\\d{1,3}){3,5}$"); private static final String NETWORK_PRIORITY_DEFAULT = "default"; private static final String NETWORK_PRIORITY_INNER = "inner"; private static final String NETWORK_PRIORITY_OUTER = "outer"; private static final Logger logger = LoggerFactory.getLogger(NetUtils.class); private static InetAddress LOCAL_ADDRESS = null; private static volatile String HOST_ADDRESS; private NetUtils() { throw new UnsupportedOperationException("Construct NetUtils"); } /** * get addr like host:port * @return addr */ public static String getAddr(String host, int port) { return String.format("%s:%d", host, port); } /** * get addr like host:port * @return addr */ public static String getAddr(int port) { return getAddr(getHost(), port); } /** * get host * @return host */ public static String getHost(InetAddress inetAddress) { if (inetAddress != null) { if (Constants.KUBERNETES_MODE) { String canonicalHost = inetAddress.getCanonicalHostName(); String[] items = canonicalHost.split("\\."); if (items.length == 6 && "svc".equals(items[3])) { return String.format("%s.%s", items[0], items[1]); } return canonicalHost; } return inetAddress.getHostAddress(); } return null; } public static String getHost() { if (HOST_ADDRESS != null) { return HOST_ADDRESS; } InetAddress address = getLocalAddress(); if (address != null) { HOST_ADDRESS = getHost(address); return HOST_ADDRESS; } return Constants.KUBERNETES_MODE ? 
"localhost" : "127.0.0.1"; } private static InetAddress getLocalAddress() { if (null != LOCAL_ADDRESS) { return LOCAL_ADDRESS; } return getLocalAddress0(); } /** * Find first valid IP from local network card * * @return first valid local IP */ private static synchronized InetAddress getLocalAddress0() { if (null != LOCAL_ADDRESS) { return LOCAL_ADDRESS; } InetAddress localAddress = null; try { NetworkInterface networkInterface = findNetworkInterface(); if (networkInterface != null) { Enumeration<InetAddress> addresses = networkInterface.getInetAddresses(); while (addresses.hasMoreElements()) { Optional<InetAddress> addressOp = toValidAddress(addresses.nextElement()); if (addressOp.isPresent()) { try { if (addressOp.get().isReachable(100)) { LOCAL_ADDRESS = addressOp.get(); return LOCAL_ADDRESS; } } catch (IOException e) { logger.warn("test address id reachable io exception", e); } } } } localAddress = InetAddress.getLocalHost(); } catch (UnknownHostException e) { logger.warn("InetAddress get LocalHost exception", e); } Optional<InetAddress> addressOp = toValidAddress(localAddress); if (addressOp.isPresent()) { LOCAL_ADDRESS = addressOp.get(); } return LOCAL_ADDRESS; } private static Optional<InetAddress> toValidAddress(InetAddress address) { if (address instanceof Inet6Address) { Inet6Address v6Address = (Inet6Address) address; if (isPreferIPV6Address()) { return Optional.ofNullable(normalizeV6Address(v6Address)); } } if (isValidV4Address(address)) { return Optional.of(address); } return Optional.empty(); } private static InetAddress normalizeV6Address(Inet6Address address) { String addr = address.getHostAddress(); int i = addr.lastIndexOf('%'); if (i > 0) { try { return InetAddress.getByName(addr.substring(0, i) + '%' + address.getScopeId()); } catch (UnknownHostException e) { logger.debug("Unknown IPV6 address: ", e); } } return address; } public static boolean isValidV4Address(InetAddress address) { if (address == null || address.isLoopbackAddress()) { return false; } String name = address.getHostAddress(); return (name != null && IP_PATTERN.matcher(name).matches() && !address.isAnyLocalAddress() && !address.isLoopbackAddress()); } /** * Check if an ipv6 address * * @return true if it is reachable */ private static boolean isPreferIPV6Address() { return Boolean.getBoolean("java.net.preferIPv6Addresses"); } /** * Get the suitable {@link NetworkInterface} * * @return If no {@link NetworkInterface} is available , return <code>null</code> */ private static NetworkInterface findNetworkInterface() { List<NetworkInterface> validNetworkInterfaces = emptyList(); try { validNetworkInterfaces = getValidNetworkInterfaces(); } catch (SocketException e) { logger.warn("ValidNetworkInterfaces exception", e); } NetworkInterface result = null; // Try to specify config NetWork Interface for (NetworkInterface networkInterface : validNetworkInterfaces) { if (isSpecifyNetworkInterface(networkInterface)) { result = networkInterface; break; } } if (null != result) { return result; } return findAddress(validNetworkInterfaces); } /** * Get the valid {@link NetworkInterface network interfaces} * * @throws SocketException SocketException if an I/O error occurs. 
*/ private static List<NetworkInterface> getValidNetworkInterfaces() throws SocketException { List<NetworkInterface> validNetworkInterfaces = new LinkedList<>(); Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces(); while (interfaces.hasMoreElements()) { NetworkInterface networkInterface = interfaces.nextElement(); if (ignoreNetworkInterface(networkInterface)) { // ignore continue; } validNetworkInterfaces.add(networkInterface); } return validNetworkInterfaces; } /** * @param networkInterface {@link NetworkInterface} * @return if the specified {@link NetworkInterface} should be ignored, return <code>true</code> * @throws SocketException SocketException if an I/O error occurs. */ public static boolean ignoreNetworkInterface(NetworkInterface networkInterface) throws SocketException { return networkInterface == null || networkInterface.isLoopback() || networkInterface.isVirtual() || !networkInterface.isUp(); } private static boolean isSpecifyNetworkInterface(NetworkInterface networkInterface) { String preferredNetworkInterface = System.getProperty(DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE); return Objects.equals(networkInterface.getDisplayName(), preferredNetworkInterface); } private static NetworkInterface findAddress(List<NetworkInterface> validNetworkInterfaces) { if (validNetworkInterfaces.isEmpty()) { return null; } String networkPriority = PropertyUtils.getString(Constants.NETWORK_PRIORITY_STRATEGY, NETWORK_PRIORITY_DEFAULT); if (NETWORK_PRIORITY_DEFAULT.equalsIgnoreCase(networkPriority)) { return findAddressByDefaultPolicy(validNetworkInterfaces); } else if (NETWORK_PRIORITY_INNER.equalsIgnoreCase(networkPriority)) { return findInnerAddress(validNetworkInterfaces); } else if (NETWORK_PRIORITY_OUTER.equalsIgnoreCase(networkPriority)) { return findOuterAddress(validNetworkInterfaces); } else { logger.error("There is no matching network card acquisition policy!"); return null; } } private static NetworkInterface findAddressByDefaultPolicy(List<NetworkInterface> validNetworkInterfaces) { NetworkInterface networkInterface; networkInterface = findInnerAddress(validNetworkInterfaces); if (networkInterface == null) { networkInterface = findOuterAddress(validNetworkInterfaces); if (networkInterface == null) { networkInterface = validNetworkInterfaces.get(0); } } return networkInterface; } /** * Get the Intranet IP * * @return If no {@link NetworkInterface} is available , return <code>null</code> */ private static NetworkInterface findInnerAddress(List<NetworkInterface> validNetworkInterfaces) { NetworkInterface networkInterface = null; for (NetworkInterface ni : validNetworkInterfaces) { Enumeration<InetAddress> address = ni.getInetAddresses(); while (address.hasMoreElements()) { InetAddress ip = address.nextElement(); if (ip.isSiteLocalAddress() && !ip.isLoopbackAddress()) { networkInterface = ni; } } } return networkInterface; } private static NetworkInterface findOuterAddress(List<NetworkInterface> validNetworkInterfaces) { NetworkInterface networkInterface = null; for (NetworkInterface ni : validNetworkInterfaces) { Enumeration<InetAddress> address = ni.getInetAddresses(); while (address.hasMoreElements()) { InetAddress ip = address.nextElement(); if (!ip.isSiteLocalAddress() && !ip.isLoopbackAddress()) { networkInterface = ni; } } } return networkInterface; } }
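Taken together, the patched `NetUtils` exposes two knobs: the `dolphin.scheduler.network.interface.preferred` JVM system property pins a specific NIC, and the `dolphin.scheduler.network.priority.strategy` key in `common.properties` selects among the `default`, `inner`, and `outer` strategies. A hedged usage sketch, assuming the dolphinscheduler-common module is on the classpath (`NetUtilsDemo` and the port number are made up for illustration):

```
import org.apache.dolphinscheduler.common.utils.NetUtils;

// Run with e.g.:
//   java -Ddolphin.scheduler.network.interface.preferred=eth0 NetUtilsDemo
public class NetUtilsDemo {
    public static void main(String[] args) {
        // Falls back to 127.0.0.1 (or "localhost" under Kubernetes) when no
        // valid network interface is found.
        System.out.println(NetUtils.getHost());
        // host:port form built by getAddr.
        System.out.println(NetUtils.getAddr(5678));
    }
}
```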
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,468
[Bug][Common] Obtaining IP is incorrect
**To Reproduce**
In some scenarios, the obtained IP is incorrect.

**Expected behavior**
Bug fixed.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Scenario 1: IP is null**
![image](https://user-images.githubusercontent.com/4902714/118223662-5f3edb80-b4b4-11eb-9046-3565f6fe1a2c.png)

**Scenario 2: IP in incorrect order**
![image](https://user-images.githubusercontent.com/4902714/118223277-92cd3600-b4b3-11eb-920c-46c3bfe1cc2f.png)
![image](https://user-images.githubusercontent.com/4902714/118223548-230b7b00-b4b4-11eb-8775-57abc66abf41.png)

**Scenario 3: IP is 127.0.0.1**
![image](https://user-images.githubusercontent.com/4902714/118223722-7e3d6d80-b4b4-11eb-8cec-a3d408be41a3.png)
![image](https://user-images.githubusercontent.com/4902714/118223725-81d0f480-b4b4-11eb-95aa-0c1a0add8560.png)

**Scenario 4: IP is 0.0.0.0.0.0**
![image](https://user-images.githubusercontent.com/4902714/119919935-07b56b00-bf9e-11eb-9972-00d73be4f381.png)
![image](https://user-images.githubusercontent.com/4902714/119919948-0e43e280-bf9e-11eb-8947-9eccfac17b47.png)
![image](https://user-images.githubusercontent.com/4902714/119920051-3895a000-bf9e-11eb-87dc-efc7d6181d64.png)

**Which version of Dolphin Scheduler:**
- [1.3.x]
- [dev]

**Additional context**
Add any other context about the problem here.

**Requirement or improvement**
- Please describe your requirements or improvement suggestions.
https://github.com/apache/dolphinscheduler/issues/5468
https://github.com/apache/dolphinscheduler/pull/5594
75be09735a29469ef5169550239c65a5a27af3ba
281b5aea6b85df86b279eb3377ff6851c560bcbd
2021-05-14T05:04:32Z
java
2021-06-07T14:12:49Z
dolphinscheduler-common/src/main/resources/common.properties
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# user data local directory path, please make sure the directory exists and have read write permissions
data.basedir.path=/tmp/dolphinscheduler

# resource storage type: HDFS, S3, NONE
resource.storage.type=NONE

# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
resource.upload.path=/dolphinscheduler

# whether to startup kerberos
hadoop.security.authentication.startup.state=false

# java.security.krb5.conf path
java.security.krb5.conf.path=/opt/krb5.conf

# login user from keytab username
login.user.keytab.username=[email protected]

# login user from keytab path
login.user.keytab.path=/opt/hdfs.headless.keytab

# kerberos expire time, the unit is hour
kerberos.expire.time=2

# resource view suffixs
#resource.view.suffixs=txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js

# if resource.storage.type=HDFS, the user must have the permission to create directories under the HDFS root path
hdfs.root.user=hdfs

# if resource.storage.type=S3, the value like: s3a://dolphinscheduler; if resource.storage.type=HDFS and namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
fs.defaultFS=hdfs://mycluster:8020

# if resource.storage.type=S3, s3 endpoint
fs.s3a.endpoint=http://192.168.xx.xx:9010

# if resource.storage.type=S3, s3 access key
fs.s3a.access.key=A3DXS30FO22544RE

# if resource.storage.type=S3, s3 secret key
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK

# resourcemanager port, the default value is 8088 if not specified
resource.manager.httpaddress.port=8088

# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx

# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; if resourcemanager is single, you only need to replace ds1 with the actual resourcemanager hostname
yarn.application.status.address=http://ds1:%s/ws/v1/cluster/apps/%s

# job history status url when application number threshold is reached (default 10000, maybe it was set to 1000)
yarn.job.history.status.address=http://ds1:19888/ws/v1/history/mapreduce/jobs/%s

# datasource encryption enable
datasource.encryption.enable=false

# datasource encryption salt
datasource.encryption.salt=!@#$%^&*

# use sudo or not; if set true, the executing user is the tenant user and the deploy user needs sudo permissions; if set false, the executing user is the deploy user and doesn't need sudo permissions
sudo.enable=true

# network IP gets priority, default: inner outer
#dolphin.scheduler.network.priority.strategy=default

# system env path
#dolphinscheduler.env.path=env/dolphinscheduler_env.sh

# development state
development.state=false
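The keys above are ordinary `java.util.Properties` entries. A minimal standalone probe (plain `Properties`, not the project's `PropertyUtils`; the class name is made up) that reads one of them with a fallback default:

```
import java.io.InputStream;
import java.util.Properties;

public class CommonPropertiesProbe {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Expects common.properties at the classpath root, as in the project.
        try (InputStream in = CommonPropertiesProbe.class.getResourceAsStream("/common.properties")) {
            if (in != null) {
                props.load(in);
            }
        }
        // The key is commented out in the file above, so the fallback applies.
        System.out.println(props.getProperty("dolphin.scheduler.network.priority.strategy", "default"));
    }
}
```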
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,583
[Bug][SQL] org.postgresql.util.PSQLException: ERROR: column "group_name" named in key does not exist
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Run DolphinScheduler with PostgreSQL
2. Click on the class 'CreateDolphinScheduler'
3. See the error

**Expected behavior**
Bug fixed.

**Screenshots**
```
16:55:01.862 [main] INFO org.apache.dolphinscheduler.common.utils.ScriptRunner - sql: DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id()
16:55:01.869 [main] INFO org.apache.dolphinscheduler.common.utils.ScriptRunner - -- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName
16:55:01.869 [main] INFO org.apache.dolphinscheduler.common.utils.ScriptRunner - sql: CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes WHERE relname='t_ds_alertgroup' AND indexrelname ='t_ds_alertgroup_name_UN') THEN ALTER TABLE t_ds_process_definition ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name); END IF; END; $$ LANGUAGE plpgsql;
16:55:01.882 [main] INFO org.apache.dolphinscheduler.common.utils.ScriptRunner - sql: SELECT uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName()
16:55:01.904 [main] ERROR org.apache.dolphinscheduler.common.utils.ScriptRunner - SQLException
org.postgresql.util.PSQLException: ERROR: column "group_name" named in key does not exist
  Where: SQL statement "ALTER TABLE t_ds_process_definition ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name)"
  PL/pgSQL function uc_dolphin_t_t_ds_alertgroup_a_add_un_groupname() line 1 at SQL statement
    at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
    at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
    at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
    at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
    at org.postgresql.jdbc.PgStatement.executeWithFlags(PgStatement.java:307)
    at org.postgresql.jdbc.PgStatement.executeCachedSql(PgStatement.java:293)
    at org.postgresql.jdbc.PgStatement.executeWithFlags(PgStatement.java:270)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:266)
    at com.alibaba.druid.pool.DruidPooledStatement.execute(DruidPooledStatement.java:633)
    at org.apache.dolphinscheduler.common.utils.ScriptRunner.runScript(ScriptRunner.java:145)
    at org.apache.dolphinscheduler.common.utils.ScriptRunner.runScript(ScriptRunner.java:75)
    at org.apache.dolphinscheduler.dao.upgrade.UpgradeDao.upgradeDolphinSchedulerDDL(UpgradeDao.java:489)
    at org.apache.dolphinscheduler.dao.upgrade.UpgradeDao.upgradeDolphinScheduler(UpgradeDao.java:267)
    at org.apache.dolphinscheduler.dao.upgrade.DolphinSchedulerManager.upgradeDolphinScheduler(DolphinSchedulerManager.java:117)
    at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler.main(CreateDolphinScheduler.java:40)
16:55:01.904 [main] ERROR org.apache.dolphinscheduler.common.utils.ScriptRunner - Error executing: SELECT uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName()
16:55:01.904 [main] ERROR org.apache.dolphinscheduler.dao.upgrade.UpgradeDao - ERROR: column "group_name" named in key does not exist
  Where: SQL statement "ALTER TABLE t_ds_process_definition ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name)"
  PL/pgSQL function uc_dolphin_t_t_ds_alertgroup_a_add_un_groupname() line 1 at SQL statement
org.postgresql.util.PSQLException: ERROR: column "group_name" named in key does not exist
  Where: SQL statement "ALTER TABLE t_ds_process_definition ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name)"
  PL/pgSQL function uc_dolphin_t_t_ds_alertgroup_a_add_un_groupname() line 1 at SQL statement
    at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
    at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
    at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
    at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
    at org.postgresql.jdbc.PgStatement.executeWithFlags(PgStatement.java:307)
    at org.postgresql.jdbc.PgStatement.executeCachedSql(PgStatement.java:293)
    at org.postgresql.jdbc.PgStatement.executeWithFlags(PgStatement.java:270)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:266)
    at com.alibaba.druid.pool.DruidPooledStatement.execute(DruidPooledStatement.java:633)
    at org.apache.dolphinscheduler.common.utils.ScriptRunner.runScript(ScriptRunner.java:145)
    at org.apache.dolphinscheduler.common.utils.ScriptRunner.runScript(ScriptRunner.java:75)
    at org.apache.dolphinscheduler.dao.upgrade.UpgradeDao.upgradeDolphinSchedulerDDL(UpgradeDao.java:489)
    at org.apache.dolphinscheduler.dao.upgrade.UpgradeDao.upgradeDolphinScheduler(UpgradeDao.java:267)
    at org.apache.dolphinscheduler.dao.upgrade.DolphinSchedulerManager.upgradeDolphinScheduler(DolphinSchedulerManager.java:117)
    at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler.main(CreateDolphinScheduler.java:40)
16:55:01.909 [main] ERROR org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler - create DolphinScheduler failed
java.lang.RuntimeException: ERROR: column "group_name" named in key does not exist
  Where: SQL statement "ALTER TABLE t_ds_process_definition ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name)"
  PL/pgSQL function uc_dolphin_t_t_ds_alertgroup_a_add_un_groupname() line 1 at SQL statement
    at org.apache.dolphinscheduler.dao.upgrade.UpgradeDao.upgradeDolphinSchedulerDDL(UpgradeDao.java:502)
    at org.apache.dolphinscheduler.dao.upgrade.UpgradeDao.upgradeDolphinScheduler(UpgradeDao.java:267)
    at org.apache.dolphinscheduler.dao.upgrade.DolphinSchedulerManager.upgradeDolphinScheduler(DolphinSchedulerManager.java:117)
    at org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler.main(CreateDolphinScheduler.java:40)
Caused by: org.postgresql.util.PSQLException: ERROR: column "group_name" named in key does not exist
  Where: SQL statement "ALTER TABLE t_ds_process_definition ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name)"
  PL/pgSQL function uc_dolphin_t_t_ds_alertgroup_a_add_un_groupname() line 1 at SQL statement
    at org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
    at org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
    at org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
    at org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
    at org.postgresql.jdbc.PgStatement.executeWithFlags(PgStatement.java:307)
    at org.postgresql.jdbc.PgStatement.executeCachedSql(PgStatement.java:293)
    at org.postgresql.jdbc.PgStatement.executeWithFlags(PgStatement.java:270)
    at org.postgresql.jdbc.PgStatement.execute(PgStatement.java:266)
    at com.alibaba.druid.pool.DruidPooledStatement.execute(DruidPooledStatement.java:633)
    at org.apache.dolphinscheduler.common.utils.ScriptRunner.runScript(ScriptRunner.java:145)
    at org.apache.dolphinscheduler.common.utils.ScriptRunner.runScript(ScriptRunner.java:75)
    at org.apache.dolphinscheduler.dao.upgrade.UpgradeDao.upgradeDolphinSchedulerDDL(UpgradeDao.java:489)
    ... 3 common frames omitted
```

**Which version of Dolphin Scheduler:**
- [dev]

@ruanwenjun
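The trace makes the root cause visible: the guard inspects the index on `t_ds_alertgroup`, but the `ALTER TABLE` targets `t_ds_process_definition`, which has no `group_name` column. A hedged JDBC sketch of the presumable fix, pointing the constraint at the table that owns the column (connection settings are placeholders, the PostgreSQL JDBC driver is assumed on the classpath, and this is not the actual patch in the linked PR):

```
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AlertGroupConstraintFix {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/dolphinscheduler", "user", "password");
             Statement stmt = conn.createStatement()) {
            // Quote the mixed-case constraint name so it matches the
            // indexrelname the upgrade script's guard checks for; apply it
            // to the table that actually has a group_name column.
            stmt.execute("ALTER TABLE t_ds_alertgroup "
                    + "ADD CONSTRAINT \"t_ds_alertgroup_name_UN\" UNIQUE (group_name)");
        }
    }
}
```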
https://github.com/apache/dolphinscheduler/issues/5583
https://github.com/apache/dolphinscheduler/pull/5606
281b5aea6b85df86b279eb3377ff6851c560bcbd
697f37d2c2c3376c5655d137f03b3fbd1c56a6dc
2021-06-02T09:01:50Z
java
2021-06-09T03:45:08Z
sql/upgrade/1.4.0_schema/postgresql/dolphinscheduler_ddl.sql
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- uc_dolphin_T_t_ds_user_A_state
delimiter ;
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_user_A_state();
delimiter d//
CREATE FUNCTION uc_dolphin_T_t_ds_user_A_state() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_CATALOG=current_database()
                   AND TABLE_SCHEMA=current_schema()
                   AND TABLE_NAME='t_ds_user'
                   AND COLUMN_NAME ='state')
    THEN
        ALTER TABLE t_ds_user ADD COLUMN state int DEFAULT 1;
        comment on column t_ds_user.state is 'state 0:disable 1:enable';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select uc_dolphin_T_t_ds_user_A_state();
DROP FUNCTION uc_dolphin_T_t_ds_user_A_state();

-- uc_dolphin_T_t_ds_tenant_A_tenant_name
delimiter ;
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name();
delimiter d//
CREATE FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name() RETURNS void AS $$
BEGIN
    IF EXISTS (SELECT 1 FROM information_schema.COLUMNS
               WHERE TABLE_CATALOG=current_database()
               AND TABLE_SCHEMA=current_schema()
               AND TABLE_NAME='t_ds_tenant'
               AND COLUMN_NAME ='tenant_name')
    THEN
        ALTER TABLE t_ds_tenant DROP COLUMN "tenant_name";
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
select uc_dolphin_T_t_ds_tenant_A_tenant_name();
DROP FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name();

-- uc_dolphin_T_t_ds_task_instance_A_first_submit_time
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_first_submit_time() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_task_instance'
                   AND COLUMN_NAME ='first_submit_time')
    THEN
        ALTER TABLE t_ds_task_instance ADD COLUMN first_submit_time timestamp DEFAULT NULL;
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_first_submit_time();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time();

-- uc_dolphin_T_t_ds_task_instance_A_delay_time
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_delay_time() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_task_instance'
                   AND COLUMN_NAME ='delay_time')
    THEN
        ALTER TABLE t_ds_task_instance ADD COLUMN delay_time int DEFAULT '0';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_delay_time();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time();

-- uc_dolphin_T_t_ds_task_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_var_pool() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_task_instance'
                   AND COLUMN_NAME ='var_pool')
    THEN
        ALTER TABLE t_ds_task_instance ADD COLUMN var_pool text;
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool();

-- uc_dolphin_T_t_ds_process_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_instance_A_var_pool() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_process_instance'
                   AND COLUMN_NAME ='var_pool')
    THEN
        ALTER TABLE t_ds_process_instance ADD COLUMN var_pool text;
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool();

-- uc_dolphin_T_t_ds_process_definition_A_modify_by
delimiter d//
CREATE OR REPLACE FUNCTION ct_dolphin_T_t_ds_process_definition_version() RETURNS void AS $$
BEGIN
    CREATE TABLE IF NOT EXISTS t_ds_process_definition_version (
        id int NOT NULL ,
        process_definition_id int NOT NULL ,
        version int DEFAULT NULL ,
        process_definition_json text ,
        description text ,
        global_params text ,
        locations text ,
        connects text ,
        receivers text ,
        receivers_cc text ,
        create_time timestamp DEFAULT NULL ,
        timeout int DEFAULT '0' ,
        resource_ids varchar(64),
        PRIMARY KEY (id)
    ) ;
    create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version);
    DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence;
    CREATE SEQUENCE t_ds_process_definition_version_id_sequence;
    ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence');
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT ct_dolphin_T_t_ds_process_definition_version();
DROP FUNCTION IF EXISTS ct_dolphin_T_t_ds_process_definition_version();

-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
    id serial NOT NULL,
    plugin_name varchar(100) NOT NULL,
    plugin_type varchar(100) NOT NULL,
    plugin_params text NULL,
    create_time timestamp NULL,
    update_time timestamp NULL,
    CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
    CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);

-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
    id serial NOT NULL,
    plugin_define_id int4 NOT NULL,
    plugin_instance_params text NULL,
    create_time timestamp NULL,
    update_time timestamp NULL,
    instance_name varchar(200) NULL,
    CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
);

-- uc_dolphin_T_t_ds_process_definition_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_A_warning_group_id() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_process_definition'
                   AND COLUMN_NAME ='warning_group_id')
    THEN
        ALTER TABLE t_ds_process_definition ADD COLUMN warning_group_id int4 DEFAULT NULL;
        COMMENT ON COLUMN t_ds_process_definition.warning_group_id IS 'alert group id';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id();

-- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_process_definition_version'
                   AND COLUMN_NAME ='warning_group_id')
    THEN
        ALTER TABLE t_ds_process_definition_version ADD COLUMN warning_group_id int4 DEFAULT NULL;
        COMMENT ON COLUMN t_ds_process_definition_version.warning_group_id IS 'alert group id';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();

-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_alertgroup'
                   AND COLUMN_NAME ='alert_instance_ids')
    THEN
        ALTER TABLE t_ds_alertgroup ADD COLUMN alert_instance_ids varchar (255) DEFAULT NULL;
        COMMENT ON COLUMN t_ds_alertgroup.alert_instance_ids IS 'alert instance ids';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();

-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_create_user_id() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_alertgroup'
                   AND COLUMN_NAME ='create_user_id')
    THEN
        ALTER TABLE t_ds_alertgroup ADD COLUMN create_user_id int4 DEFAULT NULL;
        COMMENT ON COLUMN t_ds_alertgroup.create_user_id IS 'create user id';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id();

-- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName
-- note: the unique constraint on group_name belongs to t_ds_alertgroup; the original script altered t_ds_process_definition by mistake
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes
                   WHERE relname='t_ds_alertgroup'
                   AND indexrelname ='t_ds_alertgroup_name_UN')
    THEN
        ALTER TABLE t_ds_alertgroup ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name);
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();

-- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName
-- note: the unique constraint on (name, type) belongs to t_ds_datasource; the original script altered t_ds_process_definition by mistake
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes
                   WHERE relname='t_ds_datasource'
                   AND indexrelname ='t_ds_datasource_name_UN')
    THEN
        ALTER TABLE t_ds_datasource ADD CONSTRAINT t_ds_datasource_name_UN UNIQUE (name, type);
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();

-- uc_dolphin_T_t_ds_schedules_A_add_timezone
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_schedules_A_add_timezone() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME='t_ds_schedules'
                   AND COLUMN_NAME ='timezone_id')
    THEN
        ALTER TABLE t_ds_schedules ADD COLUMN timezone_id varchar(40) DEFAULT NULL;
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_schedules_A_add_timezone();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone();

-- ----------------------------
-- These columns will not be used in the new version; if you determine that the historical data is useless, you can delete them using the sql below
-- ----------------------------
-- ALTER TABLE t_ds_alert DROP COLUMN "show_type", DROP COLUMN "alert_type", DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_alertgroup DROP COLUMN "group_type";
-- ALTER TABLE t_ds_process_definition DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_process_definition_version DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
-- ALTER TABLE t_ds_command DROP COLUMN "dependence";
-- ALTER TABLE t_ds_error_command DROP COLUMN "dependence";
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,539
[Improvement][Master] Check status of taskInstance from cache
**Describe the question** After the master submits a task, it waits for the task execution to end by looping to query the task status from the database. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java#L123-L164 Why doesn't it query the status from the taskInstanceCacheManager? When the master receives the response from the worker, it also updates the cache. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/TaskResponseProcessor.java#L68-L87 I think that if we query the status from the cache, we can reduce the pressure on the database. The main risk is that after a worker crashes, a response must still be sent to the master during worker fault tolerance. So as a compromise, can we query the cache 9 times and then query the database once? Or we could get the task status from the cache, and have the cache query the task status from the database periodically (the refresh interval can be longer). **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5539
https://github.com/apache/dolphinscheduler/pull/5572
e2243d63bee789b96d8ceeb302261564c5a28ce7
79eb2e85d78f380bb9b8f812d874f1143b661e76
2021-05-22T07:08:34Z
java
2021-06-10T01:39:12Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import java.util.regex.Pattern; /** * Constants */ public final class Constants { private Constants() { throw new UnsupportedOperationException("Construct Constants"); } /** * quartz config */ public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId"; public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon"; public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties"; public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class"; public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount"; public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons"; public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority"; public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class"; public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix"; public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered"; public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold"; public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval"; public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock"; public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource"; public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class"; /** * quartz config default value */ public static final String QUARTZ_TABLE_PREFIX = "QRTZ_"; public static final String QUARTZ_MISFIRETHRESHOLD = "60000"; public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000"; public static final String QUARTZ_DATASOURCE = "myDs"; public static final String QUARTZ_THREADCOUNT = "25"; public static final String QUARTZ_THREADPRIORITY = "5"; public static final String QUARTZ_INSTANCENAME = "DolphinScheduler"; public static final String QUARTZ_INSTANCEID = "AUTO"; public static final String 
QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true"; /** * common properties path */ public static final String COMMON_PROPERTIES_PATH = "/common.properties"; /** * fs.defaultFS */ public static final String FS_DEFAULTFS = "fs.defaultFS"; /** * fs s3a endpoint */ public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint"; /** * fs s3a access key */ public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key"; /** * fs s3a secret key */ public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key"; /** * hadoop configuration */ public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE"; public static final String HADOOP_RM_STATE_STANDBY = "STANDBY"; public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port"; /** * yarn.resourcemanager.ha.rm.ids */ public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids"; /** * yarn.application.status.address */ public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address"; /** * yarn.job.history.status.address */ public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address"; /** * hdfs configuration * hdfs.root.user */ public static final String HDFS_ROOT_USER = "hdfs.root.user"; /** * hdfs/s3 configuration * resource.upload.path */ public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path"; /** * data basedir path */ public static final String DATA_BASEDIR_PATH = "data.basedir.path"; /** * dolphinscheduler.env.path */ public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path"; /** * environment properties default path */ public static final String ENV_PATH = "env/dolphinscheduler_env.sh"; /** * python home */ public static final String PYTHON_HOME = "PYTHON_HOME"; /** * resource.view.suffixs */ public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs"; public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js"; /** * development.state */ public static final String DEVELOPMENT_STATE = "development.state"; /** * sudo enable */ public static final String SUDO_ENABLE = "sudo.enable"; /** * string true */ public static final String STRING_TRUE = "true"; /** * string false */ public static final String STRING_FALSE = "false"; /** * resource storage type */ public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type"; /** * MasterServer directory registered in zookeeper */ public static final String REGISTRY_DOLPHINSCHEDULER_MASTERS = "/nodes/master"; /** * WorkerServer directory registered in zookeeper */ public static final String REGISTRY_DOLPHINSCHEDULER_WORKERS = "/nodes/worker"; /** * all servers directory registered in zookeeper */ public static final String REGISTRY_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers"; /** * registry node prefix */ public static final String REGISTRY_DOLPHINSCHEDULER_NODE = "/nodes"; /** * MasterServer lock directory registered in zookeeper */ public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters"; /** * MasterServer failover directory registered in zookeeper */ public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters"; /** * WorkerServer failover directory registered in zookeeper */ public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers"; /** * MasterServer startup failover runing and fault tolerance 
process */ public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters"; /** * comma , */ public static final String COMMA = ","; /** * slash / */ public static final String SLASH = "/"; /** * COLON : */ public static final String COLON = ":"; /** * SPACE " " */ public static final String SPACE = " "; /** * SINGLE_SLASH / */ public static final String SINGLE_SLASH = "/"; /** * DOUBLE_SLASH // */ public static final String DOUBLE_SLASH = "//"; /** * SINGLE_QUOTES "'" */ public static final String SINGLE_QUOTES = "'"; /** * DOUBLE_QUOTES "\"" */ public static final String DOUBLE_QUOTES = "\""; /** * SEMICOLON ; */ public static final String SEMICOLON = ";"; /** * EQUAL SIGN */ public static final String EQUAL_SIGN = "="; /** * AT SIGN */ public static final String AT_SIGN = "@"; /** * date format of yyyy-MM-dd HH:mm:ss */ public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss"; /** * date format of yyyyMMddHHmmss */ public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss"; /** * date format of yyyyMMddHHmmssSSS */ public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS"; /** * http connect time out */ public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000; /** * http connect request time out */ public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000; /** * httpclient soceket time out */ public static final int SOCKET_TIMEOUT = 60 * 1000; /** * http header */ public static final String HTTP_HEADER_UNKNOWN = "unKnown"; /** * http X-Forwarded-For */ public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For"; /** * http X-Real-IP */ public static final String HTTP_X_REAL_IP = "X-Real-IP"; /** * UTF-8 */ public static final String UTF_8 = "UTF-8"; /** * user name regex */ public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$"); /** * email regex */ public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$"); /** * default display rows */ public static final int DEFAULT_DISPLAY_ROWS = 10; /** * read permission */ public static final int READ_PERMISSION = 2 * 1; /** * write permission */ public static final int WRITE_PERMISSION = 2 * 2; /** * execute permission */ public static final int EXECUTE_PERMISSION = 1; /** * default admin permission */ public static final int DEFAULT_ADMIN_PERMISSION = 7; /** * all permissions */ public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION; /** * max task timeout */ public static final int MAX_TASK_TIMEOUT = 24 * 3600; /** * master cpu load */ public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2; /** * worker cpu load */ public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2; /** * worker host weight */ public static final int DEFAULT_WORKER_HOST_WEIGHT = 100; /** * default log cache rows num,output when reach the number */ public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16; /** * log flush interval?output when reach the interval */ public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000; /** * time unit secong to minutes */ public static final int SEC_2_MINUTES_TIME_UNIT = 60; /*** * * rpc port */ public static final int RPC_PORT = 50051; /*** * alert rpc port */ public static final int ALERT_RPC_PORT = 50052; /** * forbid running task */ public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = 
"FORBIDDEN"; /** * normal running task */ public static final String FLOWNODE_RUN_FLAG_NORMAL = "NORMAL"; /** * datasource configuration path */ public static final String DATASOURCE_PROPERTIES = "/datasource.properties"; public static final String DEFAULT = "Default"; public static final String USER = "user"; public static final String PASSWORD = "password"; public static final String XXXXXX = "******"; public static final String NULL = "NULL"; public static final String THREAD_NAME_MASTER_SERVER = "Master-Server"; public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server"; /** * command parameter keys */ public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId"; public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList"; public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId"; public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId"; public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0"; public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId"; public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId"; public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList"; public static final String CMD_PARAM_START_PARAMS = "StartParams"; public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams"; /** * complement data start date */ public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate"; /** * complement data end date */ public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate"; /** * data source config */ public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name"; public static final String SPRING_DATASOURCE_URL = "spring.datasource.url"; public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username"; public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password"; public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout"; public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize"; public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle"; public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive"; public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait"; public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis"; public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis"; public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis"; public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery"; public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle"; public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow"; public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn"; public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements"; public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit"; public static final String 
SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive"; public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize"; public static final String DEVELOPMENT = "development"; public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties"; /** * sleep time */ public static final int SLEEP_TIME_MILLIS = 1000; /** * heartbeat for zk info length */ public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10; public static final int HEARTBEAT_WITH_WEIGHT_FOR_ZOOKEEPER_INFO_LENGTH = 11; /** * jar */ public static final String JAR = "jar"; /** * hadoop */ public static final String HADOOP = "hadoop"; /** * -D <property>=<value> */ public static final String D = "-D"; /** * -D mapreduce.job.name=name */ public static final String MR_NAME = "mapreduce.job.name"; /** * -D mapreduce.job.queuename=queuename */ public static final String MR_QUEUE = "mapreduce.job.queuename"; /** * spark params constant */ public static final String MASTER = "--master"; public static final String DEPLOY_MODE = "--deploy-mode"; /** * --class CLASS_NAME */ public static final String MAIN_CLASS = "--class"; /** * --driver-cores NUM */ public static final String DRIVER_CORES = "--driver-cores"; /** * --driver-memory MEM */ public static final String DRIVER_MEMORY = "--driver-memory"; /** * --num-executors NUM */ public static final String NUM_EXECUTORS = "--num-executors"; /** * --executor-cores NUM */ public static final String EXECUTOR_CORES = "--executor-cores"; /** * --executor-memory MEM */ public static final String EXECUTOR_MEMORY = "--executor-memory"; /** * --name NAME */ public static final String SPARK_NAME = "--name"; /** * --queue QUEUE */ public static final String SPARK_QUEUE = "--queue"; /** * exit code success */ public static final int EXIT_CODE_SUCCESS = 0; /** * exit code kill */ public static final int EXIT_CODE_KILL = 137; /** * exit code failure */ public static final int EXIT_CODE_FAILURE = -1; /** * process or task definition failure */ public static final int DEFINITION_FAILURE = -1; /** * date format of yyyyMMdd */ public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd"; /** * date format of yyyyMMddHHmmss */ public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss"; /** * system date(yyyyMMddHHmmss) */ public static final String PARAMETER_DATETIME = "system.datetime"; /** * system date(yyyymmdd) today */ public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate"; /** * system date(yyyymmdd) yesterday */ public static final String PARAMETER_BUSINESS_DATE = "system.biz.date"; /** * ACCEPTED */ public static final String ACCEPTED = "ACCEPTED"; /** * SUCCEEDED */ public static final String SUCCEEDED = "SUCCEEDED"; /** * NEW */ public static final String NEW = "NEW"; /** * NEW_SAVING */ public static final String NEW_SAVING = "NEW_SAVING"; /** * SUBMITTED */ public static final String SUBMITTED = "SUBMITTED"; /** * FAILED */ public static final String FAILED = "FAILED"; /** * KILLED */ public static final String KILLED = "KILLED"; /** * RUNNING */ public static final String RUNNING = "RUNNING"; /** * underline "_" */ public static final String UNDERLINE = "_"; /** * quartz job prifix */ public static final String QUARTZ_JOB_PRIFIX = "job"; /** * quartz job group prifix */ public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup"; /** * projectId */ public static final String PROJECT_ID = "projectId"; /** * processId */ public static final String 
SCHEDULE_ID = "scheduleId"; /** * schedule */ public static final String SCHEDULE = "schedule"; /** * application regex */ public static final String APPLICATION_REGEX = "application_\\d+_\\d+"; public static final String PID = OSUtils.isWindows() ? "handle" : "pid"; /** * month_begin */ public static final String MONTH_BEGIN = "month_begin"; /** * add_months */ public static final String ADD_MONTHS = "add_months"; /** * month_end */ public static final String MONTH_END = "month_end"; /** * week_begin */ public static final String WEEK_BEGIN = "week_begin"; /** * week_end */ public static final String WEEK_END = "week_end"; /** * timestamp */ public static final String TIMESTAMP = "timestamp"; public static final char SUBTRACT_CHAR = '-'; public static final char ADD_CHAR = '+'; public static final char MULTIPLY_CHAR = '*'; public static final char DIVISION_CHAR = '/'; public static final char LEFT_BRACE_CHAR = '('; public static final char RIGHT_BRACE_CHAR = ')'; public static final String ADD_STRING = "+"; public static final String MULTIPLY_STRING = "*"; public static final String DIVISION_STRING = "/"; public static final String LEFT_BRACE_STRING = "("; public static final char P = 'P'; public static final char N = 'N'; public static final String SUBTRACT_STRING = "-"; public static final String GLOBAL_PARAMS = "globalParams"; public static final String LOCAL_PARAMS = "localParams"; public static final String LOCAL_PARAMS_LIST = "localParamsList"; public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId"; public static final String PROCESS_INSTANCE_STATE = "processInstanceState"; public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance"; public static final String CONDITION_RESULT = "conditionResult"; public static final String DEPENDENCE = "dependence"; public static final String TASK_TYPE = "taskType"; public static final String TASK_LIST = "taskList"; public static final String RWXR_XR_X = "rwxr-xr-x"; public static final String QUEUE = "queue"; public static final String QUEUE_NAME = "queueName"; public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0; public static final int LOG_QUERY_LIMIT = 4096; /** * master/worker server use for zk */ public static final String MASTER_TYPE = "master"; public static final String WORKER_TYPE = "worker"; public static final String DELETE_OP = "delete"; public static final String ADD_OP = "add"; public static final String ALIAS = "alias"; public static final String CONTENT = "content"; public static final String DEPENDENT_SPLIT = ":||"; public static final String DEPENDENT_ALL = "ALL"; /** * preview schedule execute count */ public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5; /** * kerberos */ public static final String KERBEROS = "kerberos"; /** * kerberos expire time */ public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time"; /** * java.security.krb5.conf */ public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf"; /** * java.security.krb5.conf.path */ public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state"; /** * com.amazonaws.services.s3.enableV4 */ public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4"; /** * 
loginUserFromKeytab user */ public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username"; /** * loginUserFromKeytab path */ public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path"; /** * task log info format */ public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s"; /** * hive conf */ public static final String HIVE_CONF = "hiveconf:"; /** * flink */ public static final String FLINK_YARN_CLUSTER = "yarn-cluster"; public static final String FLINK_RUN_MODE = "-m"; public static final String FLINK_YARN_SLOT = "-ys"; public static final String FLINK_APP_NAME = "-ynm"; public static final String FLINK_QUEUE = "-yqu"; public static final String FLINK_TASK_MANAGE = "-yn"; public static final String FLINK_JOB_MANAGE_MEM = "-yjm"; public static final String FLINK_TASK_MANAGE_MEM = "-ytm"; public static final String FLINK_MAIN_CLASS = "-c"; public static final String FLINK_PARALLELISM = "-p"; public static final String FLINK_SHUTDOWN_ON_ATTACHED_EXIT = "-sae"; public static final int[] NOT_TERMINATED_STATES = new int[] { ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal(), ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(), ExecutionStatus.WAITTING_THREAD.ordinal(), ExecutionStatus.WAITTING_DEPEND.ordinal() }; /** * status */ public static final String STATUS = "status"; /** * message */ public static final String MSG = "msg"; /** * data total */ public static final String COUNT = "count"; /** * page size */ public static final String PAGE_SIZE = "pageSize"; /** * current page no */ public static final String PAGE_NUMBER = "pageNo"; /** * */ public static final String DATA_LIST = "data"; public static final String TOTAL_LIST = "totalList"; public static final String CURRENT_PAGE = "currentPage"; public static final String TOTAL_PAGE = "totalPage"; public static final String TOTAL = "total"; /** * workflow */ public static final String WORKFLOW_LIST = "workFlowList"; public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList"; /** * session user */ public static final String SESSION_USER = "session.user"; public static final String SESSION_ID = "sessionId"; public static final String PASSWORD_DEFAULT = "******"; /** * locale */ public static final String LOCALE_LANGUAGE = "language"; /** * driver */ public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver"; public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver"; public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver"; public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver"; public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver"; public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver"; public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver"; public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver"; /** * database type */ public static final String MYSQL = "MYSQL"; public static final String POSTGRESQL = "POSTGRESQL"; public static final String HIVE = "HIVE"; public static final String SPARK = "SPARK"; public static final String CLICKHOUSE = "CLICKHOUSE"; public static final String ORACLE = "ORACLE"; public static final String SQLSERVER = "SQLSERVER"; public static final String DB2 = 
"DB2"; public static final String PRESTO = "PRESTO"; /** * jdbc url */ public static final String JDBC_MYSQL = "jdbc:mysql://"; public static final String JDBC_POSTGRESQL = "jdbc:postgresql://"; public static final String JDBC_HIVE_2 = "jdbc:hive2://"; public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://"; public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@"; public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//"; public static final String JDBC_SQLSERVER = "jdbc:sqlserver://"; public static final String JDBC_DB2 = "jdbc:db2://"; public static final String JDBC_PRESTO = "jdbc:presto://"; public static final String ADDRESS = "address"; public static final String DATABASE = "database"; public static final String JDBC_URL = "jdbcUrl"; public static final String PRINCIPAL = "principal"; public static final String OTHER = "other"; public static final String ORACLE_DB_CONNECT_TYPE = "connectType"; public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf"; public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername"; public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath"; /** * session timeout */ public static final int SESSION_TIME_OUT = 7200; public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024; public static final String UDF = "UDF"; public static final String CLASS = "class"; public static final String RECEIVERS = "receivers"; public static final String RECEIVERS_CC = "receiversCc"; /** * dataSource sensitive param */ public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))"; /** * default worker group */ public static final String DEFAULT_WORKER_GROUP = "default"; public static final Integer TASK_INFO_LENGTH = 5; /** * new * schedule time */ public static final String PARAMETER_SHECDULE_TIME = "schedule.time"; /** * authorize writable perm */ public static final int AUTHORIZE_WRITABLE_PERM = 7; /** * authorize readable perm */ public static final int AUTHORIZE_READABLE_PERM = 4; /** * plugin configurations */ public static final String PLUGIN_JAR_SUFFIX = ".jar"; public static final int NORMAL_NODE_STATUS = 0; public static final int ABNORMAL_NODE_STATUS = 1; public static final String START_TIME = "start time"; public static final String END_TIME = "end time"; public static final String START_END_DATE = "startDate,endDate"; /** * system line separator */ public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator"); public static final String EXCEL_SUFFIX_XLS = ".xls"; /** * datasource encryption salt */ public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*"; public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable"; public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt"; /** * network interface preferred */ public static final String DOLPHIN_SCHEDULER_NETWORK_INTERFACE_PREFERRED = "dolphin.scheduler.network.interface.preferred"; /** * network IP gets priority, default inner outer */ public static final String DOLPHIN_SCHEDULER_NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy"; /** * exec shell scripts */ public static final String SH = "sh"; /** * pstree, get pud and sub pid */ public static final String PSTREE = "pstree"; /** * snow flake, data center id, this id must be greater than 0 and less than 32 */ public static final String SNOW_FLAKE_DATA_CENTER_ID = "data.center.id"; /** * docker & kubernetes */ public static 
final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER")); public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT")); }
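For the cache-first polling proposed in the issue body above, the cache-to-database read ratio would naturally live in this Constants class next to the other tuning values. A minimal sketch, assuming a hypothetical constant name — `CACHE_QUERY_TIMES_BEFORE_DB` does not appear in the file shown and is illustrative only:

```java
/**
 * Hypothetical tuning value (not part of the file above): number of consecutive
 * cache lookups the master performs before falling back to one authoritative
 * database query, matching the "query the cache 9 times, then the database once"
 * compromise suggested in the issue.
 */
public static final int CACHE_QUERY_TIMES_BEFORE_DB = 9;
```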
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,539
[Improvement][Master] Check status of taskInstance from cache
**Describe the question** After the master submits a task, it waits for the task execution to end by looping to query the task status from the database. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java#L123-L164 Why doesn't it query the status from the taskInstanceCacheManager? When the master receives the response from the worker, it also updates the cache. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/TaskResponseProcessor.java#L68-L87 I think that if we query the status from the cache, we can reduce the pressure on the database. The main risk is that after a worker crashes, a response must still be sent to the master during worker fault tolerance. So as a compromise, can we query the cache 9 times and then query the database once? Or we could get the task status from the cache, and have the cache query the task status from the database periodically (the refresh interval can be longer). **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5539
https://github.com/apache/dolphinscheduler/pull/5572
e2243d63bee789b96d8ceeb302261564c5a28ce7
79eb2e85d78f380bb9b8f812d874f1143b661e76
2021-05-22T07:08:34Z
java
2021-06-10T01:39:12Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/cache/impl/TaskInstanceCacheManagerImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.cache.impl;

import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand;
import org.apache.dolphinscheduler.remote.command.TaskExecuteResponseCommand;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.master.cache.TaskInstanceCacheManager;
import org.apache.dolphinscheduler.service.process.ProcessService;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

/**
 * taskInstance state manager
 */
@Component
public class TaskInstanceCacheManagerImpl implements TaskInstanceCacheManager {

    /**
     * taskInstance cache
     */
    private Map<Integer, TaskInstance> taskInstanceCache = new ConcurrentHashMap<>();

    /**
     * process service
     */
    @Autowired
    private ProcessService processService;

    /**
     * get taskInstance by taskInstance id
     *
     * @param taskInstanceId taskInstanceId
     * @return taskInstance
     */
    @Override
    public TaskInstance getByTaskInstanceId(Integer taskInstanceId) {
        TaskInstance taskInstance = taskInstanceCache.get(taskInstanceId);
        if (taskInstance == null) {
            taskInstance = processService.findTaskInstanceById(taskInstanceId);
            taskInstanceCache.put(taskInstanceId, taskInstance);
        }
        return taskInstance;
    }

    /**
     * cache taskInstance
     *
     * @param taskExecutionContext taskExecutionContext
     */
    @Override
    public void cacheTaskInstance(TaskExecutionContext taskExecutionContext) {
        TaskInstance taskInstance = new TaskInstance();
        taskInstance.setId(taskExecutionContext.getTaskInstanceId());
        taskInstance.setName(taskExecutionContext.getTaskName());
        taskInstance.setStartTime(taskExecutionContext.getStartTime());
        taskInstance.setTaskType(taskExecutionContext.getTaskType());
        taskInstance.setExecutePath(taskExecutionContext.getExecutePath());
        taskInstanceCache.put(taskExecutionContext.getTaskInstanceId(), taskInstance);
    }

    /**
     * cache taskInstance
     *
     * @param taskAckCommand taskAckCommand
     */
    @Override
    public void cacheTaskInstance(TaskExecuteAckCommand taskAckCommand) {
        TaskInstance taskInstance = new TaskInstance();
        taskInstance.setState(ExecutionStatus.of(taskAckCommand.getStatus()));
        taskInstance.setStartTime(taskAckCommand.getStartTime());
        taskInstance.setHost(taskAckCommand.getHost());
        taskInstance.setExecutePath(taskAckCommand.getExecutePath());
        taskInstance.setLogPath(taskAckCommand.getLogPath());
        taskInstanceCache.put(taskAckCommand.getTaskInstanceId(), taskInstance);
    }

    /**
     * cache taskInstance
     *
     * @param taskExecuteResponseCommand taskExecuteResponseCommand
     */
    @Override
    public void cacheTaskInstance(TaskExecuteResponseCommand taskExecuteResponseCommand) {
        TaskInstance taskInstance = getByTaskInstanceId(taskExecuteResponseCommand.getTaskInstanceId());
        taskInstance.setState(ExecutionStatus.of(taskExecuteResponseCommand.getStatus()));
        taskInstance.setEndTime(taskExecuteResponseCommand.getEndTime());
    }

    /**
     * remove taskInstance by taskInstanceId
     *
     * @param taskInstanceId taskInstanceId
     */
    @Override
    public void removeByTaskInstanceId(Integer taskInstanceId) {
        taskInstanceCache.remove(taskInstanceId);
    }
}
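The issue body above also floats a second option: let the cache itself refresh task state from the database on a longer schedule. A minimal sketch of what that could look like inside this class, reusing the `taskInstanceCache` and `processService` fields shown above; the staleness bookkeeping (`lastRefreshMillis`, `REFRESH_INTERVAL_MS`) and the method name are hypothetical, not part of the merged change:

```java
// Sketch only: cache-first lookup that re-reads from the database once the
// cached entry is older than a configurable refresh interval.
private final Map<Integer, Long> lastRefreshMillis = new ConcurrentHashMap<>();
private static final long REFRESH_INTERVAL_MS = 10_000L; // assumed value, tunable

public TaskInstance getByTaskInstanceIdWithRefresh(Integer taskInstanceId) {
    long now = System.currentTimeMillis();
    Long last = lastRefreshMillis.get(taskInstanceId);
    TaskInstance cached = taskInstanceCache.get(taskInstanceId);
    // fall back to the database on a cache miss or when the entry has gone stale
    if (cached == null || last == null || now - last > REFRESH_INTERVAL_MS) {
        cached = processService.findTaskInstanceById(taskInstanceId);
        if (cached != null) {
            taskInstanceCache.put(taskInstanceId, cached);
            lastRefreshMillis.put(taskInstanceId, now);
        }
    }
    return cached;
}
```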
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,539
[Improvement][Master] Check status of taskInstance from cache
**Describe the question** After the master submits a task, it waits for the task execution to end by looping to query the task status from the database. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java#L123-L164 Why doesn't it query the status from the taskInstanceCacheManager? When the master receives the response from the worker, it also updates the cache. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/TaskResponseProcessor.java#L68-L87 I think that if we query the status from the cache, we can reduce the pressure on the database. The main risk is that after a worker crashes, a response must still be sent to the master during worker fault tolerance. So as a compromise, can we query the cache 9 times and then query the database once? Or we could get the task status from the cache, and have the cache query the task status from the database periodically (the refresh interval can be longer). **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5539
https://github.com/apache/dolphinscheduler/pull/5572
e2243d63bee789b96d8ceeb302261564c5a28ce7
79eb2e85d78f380bb9b8f812d874f1143b661e76
2021-05-22T07:08:34Z
java
2021-06-10T01:39:12Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.runner;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.command.TaskKillRequestCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.cache.TaskInstanceCacheManager;
import org.apache.dolphinscheduler.server.master.cache.impl.TaskInstanceCacheManagerImpl;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.registry.RegistryClient;

import java.util.Date;
import java.util.Set;

/**
 * master task exec thread
 */
public class MasterTaskExecThread extends MasterBaseTaskExecThread {

    /**
     * taskInstance state manager
     */
    private TaskInstanceCacheManager taskInstanceCacheManager;

    /**
     * netty executor manager
     */
    private NettyExecutorManager nettyExecutorManager;

    /**
     * zookeeper register center
     */
    private RegistryClient registryClient;

    /**
     * constructor of MasterTaskExecThread
     *
     * @param taskInstance task instance
     */
    public MasterTaskExecThread(TaskInstance taskInstance) {
        super(taskInstance);
        this.taskInstanceCacheManager = SpringApplicationContext.getBean(TaskInstanceCacheManagerImpl.class);
        this.nettyExecutorManager = SpringApplicationContext.getBean(NettyExecutorManager.class);
        this.registryClient = SpringApplicationContext.getBean(RegistryClient.class);
    }

    /**
     * get task instance
     *
     * @return TaskInstance
     */
    @Override
    public TaskInstance getTaskInstance() {
        return this.taskInstance;
    }

    /**
     * whether already Killed, default false
     */
    private boolean alreadyKilled = false;

    /**
     * submit task instance and wait complete
     *
     * @return true is task quit is true
     */
    @Override
    public Boolean submitWaitComplete() {
        Boolean result = false;
        this.taskInstance = submit();
        if (this.taskInstance == null) {
            logger.error("submit task instance to mysql and queue failed , please check and fix it");
            return result;
        }
        if (!this.taskInstance.getState().typeIsFinished()) {
            result = waitTaskQuit();
        }
        taskInstance.setEndTime(new Date());
        processService.updateTaskInstance(taskInstance);
        logger.info("task :{} id:{}, process id:{}, exec thread completed ",
                this.taskInstance.getName(), taskInstance.getId(), processInstance.getId());
        return result;
    }

    /**
     * polling db
     * <p>
     * wait task quit
     *
     * @return true if task quit success
     */
    public Boolean waitTaskQuit() {
        // query new state
        taskInstance = processService.findTaskInstanceById(taskInstance.getId());
        logger.info("wait task: process id: {}, task id:{}, task name:{} complete",
                this.taskInstance.getProcessInstanceId(), this.taskInstance.getId(), this.taskInstance.getName());
        while (Stopper.isRunning()) {
            try {
                if (this.processInstance == null) {
                    logger.error("process instance not exists , master task exec thread exit");
                    return true;
                }
                // task instance add queue , waiting worker to kill
                if (this.cancel || this.processInstance.getState() == ExecutionStatus.READY_STOP) {
                    cancelTaskInstance();
                }
                if (processInstance.getState() == ExecutionStatus.READY_PAUSE) {
                    pauseTask();
                }
                // task instance finished
                if (taskInstance.getState().typeIsFinished()) {
                    // if task is final result , then remove taskInstance from cache
                    taskInstanceCacheManager.removeByTaskInstanceId(taskInstance.getId());
                    break;
                }
                if (checkTaskTimeout()) {
                    this.checkTimeoutFlag = !alertTimeout();
                }
                // updateProcessInstance task instance
                taskInstance = processService.findTaskInstanceById(taskInstance.getId());
                processInstance = processService.findProcessInstanceById(processInstance.getId());
                Thread.sleep(Constants.SLEEP_TIME_MILLIS);
            } catch (Exception e) {
                logger.error("exception", e);
                if (processInstance != null) {
                    logger.error("wait task quit failed, instance id:{}, task id:{}",
                            processInstance.getId(), taskInstance.getId());
                }
            }
        }
        return true;
    }

    /**
     * pause task if task have not been dispatched to worker, do not dispatch anymore.
     */
    public void pauseTask() {
        taskInstance = processService.findTaskInstanceById(taskInstance.getId());
        if (taskInstance == null) {
            return;
        }
        if (StringUtils.isBlank(taskInstance.getHost())) {
            taskInstance.setState(ExecutionStatus.PAUSE);
            taskInstance.setEndTime(new Date());
            processService.updateTaskInstance(taskInstance);
        }
    }

    /**
     * task instance add queue , waiting worker to kill
     */
    private void cancelTaskInstance() throws Exception {
        if (alreadyKilled) {
            return;
        }
        alreadyKilled = true;
        taskInstance = processService.findTaskInstanceById(taskInstance.getId());
        if (StringUtils.isBlank(taskInstance.getHost())) {
            taskInstance.setState(ExecutionStatus.KILL);
            taskInstance.setEndTime(new Date());
            processService.updateTaskInstance(taskInstance);
            return;
        }
        TaskKillRequestCommand killCommand = new TaskKillRequestCommand();
        killCommand.setTaskInstanceId(taskInstance.getId());
        ExecutionContext executionContext = new ExecutionContext(killCommand.convert2Command(), ExecutorType.WORKER);
        Host host = Host.of(taskInstance.getHost());
        executionContext.setHost(host);
        nettyExecutorManager.executeDirectly(executionContext);
        logger.info("master kill taskInstance name :{} taskInstance id:{}",
                taskInstance.getName(), taskInstance.getId());
    }

    /**
     * whether exists valid worker group
     *
     * @param taskInstanceWorkerGroup taskInstanceWorkerGroup
     * @return whether exists
     */
    public Boolean existsValidWorkerGroup(String taskInstanceWorkerGroup) {
        Set<String> workerGroups = registryClient.getWorkerGroupDirectly();
        // not worker group
        if (CollectionUtils.isEmpty(workerGroups)) {
            return false;
        }
        // has worker group , but not taskInstance assigned worker group
        if (!workerGroups.contains(taskInstanceWorkerGroup)) {
            return false;
        }
        Set<String> workers = registryClient.getWorkerGroupNodesDirectly(taskInstanceWorkerGroup);
        if (CollectionUtils.isEmpty(workers)) {
            return false;
        }
        return true;
    }
}
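As a concrete reading of the 9:1 compromise from the issue body, the polling loop above could consult `taskInstanceCacheManager` on most iterations and only issue an authoritative database read every tenth pass, which also repairs a cache that missed a worker response after failover. A minimal sketch against the APIs shown above (`getByTaskInstanceId`, `findTaskInstanceById`, `removeByTaskInstanceId`); the method name, counter, and exact ratio are illustrative, not the merged implementation:

```java
// Sketch only: cache-first wait loop with a periodic authoritative DB read.
public Boolean waitTaskQuitCacheFirst() throws InterruptedException {
    int pollCount = 0;
    while (Stopper.isRunning()) {
        boolean authoritative = pollCount % 10 == 9; // 9 cache reads : 1 DB read
        taskInstance = authoritative
                ? processService.findTaskInstanceById(taskInstance.getId())
                : taskInstanceCacheManager.getByTaskInstanceId(taskInstance.getId());
        if (taskInstance.getState().typeIsFinished()) {
            // final state reached: evict the entry so the cache cannot go stale
            taskInstanceCacheManager.removeByTaskInstanceId(taskInstance.getId());
            break;
        }
        pollCount++;
        Thread.sleep(Constants.SLEEP_TIME_MILLIS);
    }
    return true;
}
```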
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,539
[Improvement][Master] Check status of taskInstance from cache
**Describe the question** After the master submits a task, it waits for the task execution to end by looping to query the task status from the database. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java#L123-L164 Why doesn't it query the status from the taskInstanceCacheManager? When the master receives the response from the worker, it also updates the cache. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/TaskResponseProcessor.java#L68-L87 I think that if we query the status from the cache, we can reduce the pressure on the database. The main risk is that after a worker crashes, a response must still be sent to the master during worker fault tolerance. So as a compromise, can we query the cache 9 times and then query the database once? Or we could get the task status from the cache, and have the cache query the task status from the database periodically (the refresh interval can be longer). **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5539
https://github.com/apache/dolphinscheduler/pull/5572
e2243d63bee789b96d8ceeb302261564c5a28ce7
79eb2e85d78f380bb9b8f812d874f1143b661e76
2021-05-22T07:08:34Z
java
2021-06-10T01:39:12Z
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/cache/impl/TaskInstanceCacheManagerImplTest.java
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,539
[Improvement][Master] Check status of taskInstance from cache
**Describe the question** After the master submits a task, it waits for the task execution to end by looping to query the task status from the database. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThread.java#L123-L164 Why doesn't it query the status from the taskInstanceCacheManager? When the master receives the response from the worker, it also updates the cache. https://github.com/apache/dolphinscheduler/blob/8a1d849701671544327a1d4e7852575af6872017/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/TaskResponseProcessor.java#L68-L87 I think that if we query the status from the cache, we can reduce the pressure on the database. The main risk is that after a worker crashes, a response must still be sent to the master during worker fault tolerance. So as a compromise, can we query the cache 9 times and then query the database once? Or we could get the task status from the cache, and have the cache query the task status from the database periodically (the refresh interval can be longer). **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5539
https://github.com/apache/dolphinscheduler/pull/5572
e2243d63bee789b96d8ceeb302261564c5a28ce7
79eb2e85d78f380bb9b8f812d874f1143b661e76
2021-05-22T07:08:34Z
java
2021-06-10T01:39:12Z
pom.xml
<?xml version="1.0" encoding="UTF-8"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler</artifactId> <version>1.3.6-SNAPSHOT</version> <packaging>pom</packaging> <name>${project.artifactId}</name> <url>http://dolphinscheduler.apache.org</url> <description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. </description> <licenses> <license> <name>Apache License 2.0</name> <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url> <distribution>repo</distribution> </license> </licenses> <scm> <connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection> <developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection> <url>https://github.com/apache/dolphinscheduler</url> <tag>HEAD</tag> </scm> <mailingLists> <mailingList> <name>DolphinScheduler Developer List</name> <post>[email protected]</post> <subscribe>[email protected]</subscribe> <unsubscribe>[email protected]</unsubscribe> </mailingList> </mailingLists> <parent> <groupId>org.apache</groupId> <artifactId>apache</artifactId> <version>21</version> </parent> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding> <curator.version>4.3.0</curator.version> <zookeeper.version>3.4.14</zookeeper.version> <spring.version>5.1.19.RELEASE</spring.version> <spring.boot.version>2.1.18.RELEASE</spring.boot.version> <java.version>1.8</java.version> <logback.version>1.2.3</logback.version> <hadoop.version>2.7.3</hadoop.version> <quartz.version>2.3.0</quartz.version> <jackson.version>2.10.5</jackson.version> <mybatis-plus.version>3.2.0</mybatis-plus.version> <mybatis.spring.version>2.0.1</mybatis.spring.version> <cron.utils.version>5.0.5</cron.utils.version> <druid.version>1.1.22</druid.version> <h2.version>1.4.200</h2.version> <commons.codec.version>1.11</commons.codec.version> <commons.logging.version>1.1.1</commons.logging.version> <httpclient.version>4.4.1</httpclient.version> <httpcore.version>4.4.1</httpcore.version> <junit.version>4.12</junit.version> <mysql.connector.version>5.1.34</mysql.connector.version> <slf4j.api.version>1.7.5</slf4j.api.version> <slf4j.log4j12.version>1.7.5</slf4j.log4j12.version> <commons.collections.version>3.2.2</commons.collections.version> 
<commons.httpclient>3.0.1</commons.httpclient> <commons.beanutils.version>1.9.4</commons.beanutils.version> <commons.configuration.version>1.10</commons.configuration.version> <commons.email.version>1.5</commons.email.version> <poi.version>4.1.2</poi.version> <javax.servlet.api.version>3.1.0</javax.servlet.api.version> <commons.collections4.version>4.1</commons.collections4.version> <guava.version>24.1-jre</guava.version> <postgresql.version>42.2.5</postgresql.version> <hive.jdbc.version>2.1.0</hive.jdbc.version> <commons.io.version>2.4</commons.io.version> <oshi.core.version>3.9.1</oshi.core.version> <clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version> <mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version> <presto.jdbc.version>0.238.1</presto.jdbc.version> <spotbugs.version>3.1.12</spotbugs.version> <checkstyle.version>3.0.0</checkstyle.version> <zookeeper.version>3.4.14</zookeeper.version> <curator.test>2.12.0</curator.test> <frontend-maven-plugin.version>1.6</frontend-maven-plugin.version> <maven-compiler-plugin.version>3.3</maven-compiler-plugin.version> <maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version> <maven-release-plugin.version>2.5.3</maven-release-plugin.version> <maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version> <maven-source-plugin.version>2.4</maven-source-plugin.version> <maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version> <maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version> <rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version> <jacoco.version>0.8.4</jacoco.version> <jcip.version>1.0</jcip.version> <maven.deploy.skip>false</maven.deploy.skip> <cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version> <mockito.version>2.21.0</mockito.version> <powermock.version>2.0.2</powermock.version> <servlet-api.version>2.5</servlet-api.version> <swagger.version>1.9.3</swagger.version> <springfox.version>2.9.2</springfox.version> <swagger-models.version>1.5.24</swagger-models.version> <guava-retry.version>2.0.0</guava-retry.version> <dep.airlift.version>0.184</dep.airlift.version> <dep.packaging.version>${dep.airlift.version}</dep.packaging.version> <protostuff.version>1.7.2</protostuff.version> <reflections.version>0.9.12</reflections.version> <byte-buddy.version>1.9.16</byte-buddy.version> </properties> <dependencyManagement> <dependencies> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus-boot-starter</artifactId> <version>${mybatis-plus.version}</version> </dependency> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus</artifactId> <version>${mybatis-plus.version}</version> </dependency> <!-- quartz--> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz-jobs</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>com.cronutils</groupId> <artifactId>cron-utils</artifactId> <version>${cron.utils.version}</version> </dependency> <dependency> <groupId>com.alibaba</groupId> <artifactId>druid</artifactId> <version>${druid.version}</version> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-parent</artifactId> <version>${spring.boot.version}</version> <type>pom</type> <scope>import</scope> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-core</artifactId> <version>${spring.version}</version> 
</dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-context</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-beans</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-tx</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-jdbc</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-test</artifactId> <version>${spring.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-common</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-dao</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-remote</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-service</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-spi</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-framework</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> <version>${zookeeper.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> <exclusion> <artifactId>netty</artifactId> <groupId>io.netty</groupId> </exclusion> <exclusion> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-annotations</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-client</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>log4j-1.2-api</groupId> <artifactId>org.apache.logging.log4j</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-recipes</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.apache.zookeeper</groupId> 
<artifactId>zookeeper</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-test</artifactId> <version>${curator.test}</version> <scope>test</scope> </dependency> <dependency> <groupId>commons-codec</groupId> <artifactId>commons-codec</artifactId> <version>${commons.codec.version}</version> </dependency> <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> <version>${commons.logging.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpclient</artifactId> <version>${httpclient.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpcore</artifactId> <version>${httpcore.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-annotations</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-databind</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-core</artifactId> <version>${jackson.version}</version> </dependency> <!--protostuff--> <!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core --> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-core</artifactId> <version>${protostuff.version}</version> </dependency> <!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime --> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-runtime</artifactId> <version>${protostuff.version}</version> </dependency> <dependency> <groupId>net.bytebuddy</groupId> <artifactId>byte-buddy</artifactId> <version>${byte-buddy.version}</version> </dependency> <dependency> <groupId>org.reflections</groupId> <artifactId>reflections</artifactId> <version>${reflections.version}</version> </dependency> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <version>${junit.version}</version> </dependency> <dependency> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId> <version>${mockito.version}</version> <type>jar</type> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-module-junit4</artifactId> <version>${powermock.version}</version> <type>jar</type> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-api-mockito2</artifactId> <version>${powermock.version}</version> <type>jar</type> <scope>test</scope> <exclusions> <exclusion> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>mysql</groupId> <artifactId>mysql-connector-java</artifactId> <version>${mysql.connector.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>com.h2database</groupId> <artifactId>h2</artifactId> <version>${h2.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> <version>${slf4j.api.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> <version>${slf4j.log4j12.version}</version> </dependency> <dependency> <groupId>commons-collections</groupId> <artifactId>commons-collections</artifactId> <version>${commons.collections.version}</version> </dependency> <dependency> 
<groupId>commons-httpclient</groupId> <artifactId>commons-httpclient</artifactId> <version>${commons.httpclient}</version> </dependency> <dependency> <groupId>commons-beanutils</groupId> <artifactId>commons-beanutils</artifactId> <version>${commons.beanutils.version}</version> </dependency> <dependency> <groupId>commons-configuration</groupId> <artifactId>commons-configuration</artifactId> <version>${commons.configuration.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-classic</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-core</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-email</artifactId> <version>${commons.email.version}</version> </dependency> <!--excel poi--> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi</artifactId> <version>${poi.version}</version> </dependency> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi-ooxml</artifactId> <version>${poi.version}</version> </dependency> <!-- hadoop --> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>${hadoop.version}</version> <exclusions> <exclusion> <artifactId>slf4j-log4j12</artifactId> <groupId>org.slf4j</groupId> </exclusion> <exclusion> <artifactId>com.sun.jersey</artifactId> <groupId>jersey-json</groupId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-client</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-yarn-common</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-aws</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-collections4</artifactId> <version>${commons.collections4.version}</version> </dependency> <dependency> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> <version>${guava.version}</version> </dependency> <dependency> <groupId>org.postgresql</groupId> <artifactId>postgresql</artifactId> <version>${postgresql.version}</version> </dependency> <dependency> <groupId>org.apache.hive</groupId> <artifactId>hive-jdbc</artifactId> <version>${hive.jdbc.version}</version> </dependency> <dependency> <groupId>commons-io</groupId> <artifactId>commons-io</artifactId> <version>${commons.io.version}</version> </dependency> <dependency> <groupId>com.github.oshi</groupId> <artifactId>oshi-core</artifactId> <version>${oshi.core.version}</version> </dependency> <dependency> <groupId>ru.yandex.clickhouse</groupId> <artifactId>clickhouse-jdbc</artifactId> <version>${clickhouse.jdbc.version}</version> </dependency> <dependency> <groupId>com.microsoft.sqlserver</groupId> <artifactId>mssql-jdbc</artifactId> <version>${mssql.jdbc.version}</version> </dependency> <dependency> <groupId>com.facebook.presto</groupId> <artifactId>presto-jdbc</artifactId> <version>${presto.jdbc.version}</version> </dependency> <dependency> <groupId>net.jcip</groupId> <artifactId>jcip-annotations</artifactId> <version>${jcip.version}</version> <optional>true</optional> </dependency> <dependency> 
<groupId>javax.servlet</groupId> <artifactId>servlet-api</artifactId> <version>${servlet-api.version}</version> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>javax.servlet-api</artifactId> <version>${javax.servlet.api.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger2</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger-ui</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.swagger</groupId> <artifactId>swagger-models</artifactId> <version>${swagger-models.version}</version> </dependency> <dependency> <groupId>com.github.xiaoymin</groupId> <artifactId>swagger-bootstrap-ui</artifactId> <version>${swagger.version}</version> </dependency> <dependency> <groupId>com.github.rholder</groupId> <artifactId>guava-retrying</artifactId> <version>${guava-retry.version}</version> </dependency> <dependency> <groupId>org.sonatype.aether</groupId> <artifactId>aether-api</artifactId> <version>1.13.1</version> </dependency> <dependency> <groupId>io.airlift.resolver</groupId> <artifactId>resolver</artifactId> <version>1.5</version> </dependency> <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm</artifactId> <version>6.2.1</version> </dependency> <dependency> <groupId>javax.activation</groupId> <artifactId>activation</artifactId> <version>1.1</version> </dependency> <dependency> <groupId>com.sun.mail</groupId> <artifactId>javax.mail</artifactId> <version>1.6.2</version> </dependency> </dependencies> </dependencyManagement> <build> <finalName>apache-dolphinscheduler-${project.version}</finalName> <pluginManagement> <plugins> <plugin> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-maven-plugin</artifactId> <version>1.0.0</version> <extensions>true</extensions> </plugin> <plugin> <groupId>ca.vanzyl.maven.plugins</groupId> <artifactId>provisio-maven-plugin</artifactId> <version>1.0.4</version> <extensions>true</extensions> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>rpm-maven-plugin</artifactId> <version>${rpm-maven-plugion.version}</version> <inherited>false</inherited> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> <source>${java.version}</source> <target>${java.version}</target> <testSource>${java.version}</testSource> <testTarget>${java.version}</testTarget> </configuration> <version>${maven-compiler-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <tagNameFormat>@{project.version}</tagNameFormat> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-assembly-plugin</artifactId> <version>${maven-assembly-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> <configuration> <source>8</source> <failOnError>false</failOnError> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-source-plugin</artifactId> <version>${maven-source-plugin.version}</version> </plugin> <plugin> 
<groupId>org.apache.maven.plugins</groupId> <artifactId>maven-dependency-plugin</artifactId> <version>${maven-dependency-plugin.version}</version> </plugin> </plugins> </pluginManagement> <plugins> <plugin> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-maven-plugin</artifactId> <extensions>true</extensions> <!--<configuration>--> <!--<allowedProvidedDependencies>--> <!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>--> <!--</allowedProvidedDependencies>--> <!--</configuration>--> </plugin> <plugin> <groupId>ca.vanzyl.maven.plugins</groupId> <artifactId>provisio-maven-plugin</artifactId> <extensions>true</extensions> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-source-plugin</artifactId> <executions> <execution> <id>attach-sources</id> <phase>verify</phase> <goals> <goal>jar-no-fork</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> <executions> <execution> <id>attach-javadocs</id> <goals> <goal>jar</goal> </goals> </execution> </executions> <configuration> <aggregate>true</aggregate> <charset>${project.build.sourceEncoding}</charset> <encoding>${project.build.sourceEncoding}</encoding> <docencoding>${project.build.sourceEncoding}</docencoding> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <autoVersionSubmodules>true</autoVersionSubmodules> <tagNameFormat>@{project.version}</tagNameFormat> <tagBase>${project.version}</tagBase> <!--<goals>-f pom.xml deploy</goals>--> </configuration> <dependencies> <dependency> <groupId>org.apache.maven.scm</groupId> <artifactId>maven-scm-provider-jgit</artifactId> <version>1.9.5</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <version>${maven-compiler-plugin.version}</version> <configuration> <source>${java.version}</source> <target>${java.version}</target> <encoding>${project.build.sourceEncoding}</encoding> <skip>false</skip><!--not skip compile test classes--> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>${maven-surefire-plugin.version}</version> <configuration> <includes> <!--registry plugin --> <include>**/plugin/registry/zookeeper/ZookeeperRegistryTest.java</include> <!-- API --> <include>**/api/controller/ProjectControllerTest.java</include> <include>**/api/controller/QueueControllerTest.java</include> <include>**/api/configuration/TrafficConfigurationTest.java</include> <include>**/api/controller/ProcessDefinitionControllerTest.java</include> <include>**/api/controller/TenantControllerTest.java</include> <include>**/api/dto/resources/filter/ResourceFilterTest.java</include> <include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include> <includeDataxTaskTest>**/api/enums/testGetEnum.java</includeDataxTaskTest> <include>**/api/enums/StatusTest.java</include> <include>**/api/exceptions/ApiExceptionHandlerTest.java</include> <include>**/api/exceptions/ServiceExceptionTest.java</include> <include>**/api/interceptor/LocaleChangeInterceptorTest.java</include> <include>**/api/interceptor/LoginHandlerInterceptorTest.java</include> 
<include>**/api/interceptor/RateLimitInterceptorTest.java</include> <include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include> <include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include> <include>**/api/security/SecurityConfigLDAPTest.java</include> <include>**/api/security/SecurityConfigPasswordTest.java</include> <include>**/api/service/AccessTokenServiceTest.java</include> <include>**/api/service/AlertGroupServiceTest.java</include> <include>**/api/service/BaseDAGServiceTest.java</include> <include>**/api/service/BaseServiceTest.java</include> <include>**/api/service/DataAnalysisServiceTest.java</include> <include>**/api/service/AlertPluginInstanceServiceTest.java</include> <include>**/api/service/DataSourceServiceTest.java</include> <include>**/api/service/ExecutorService2Test.java</include> <include>**/api/service/ExecutorServiceTest.java</include> <include>**/api/service/LoggerServiceTest.java</include> <include>**/api/service/MonitorServiceTest.java</include> <include>**/api/service/ProcessDefinitionServiceTest.java</include> <include>**/api/service/ProcessTaskRelationServiceImplTest.java</include> <include>**/api/service/TaskDefinitionServiceImplTest.java</include> <include>**/api/service/ProcessInstanceServiceTest.java</include> <include>**/api/service/ProjectServiceTest.java</include> <include>**/api/service/QueueServiceTest.java</include> <include>**/api/service/ResourcesServiceTest.java</include> <include>**/api/service/SchedulerServiceTest.java</include> <include>**/api/service/SessionServiceTest.java</include> <include>**/api/service/TaskInstanceServiceTest.java</include> <include>**/api/service/TenantServiceTest.java</include> <include>**/api/service/UdfFuncServiceTest.java</include> <include>**/api/service/UiPluginServiceTest.java</include> <include>**/api/service/UserAlertGroupServiceTest.java</include> <include>**/api/service/UsersServiceTest.java</include> <include>**/api/service/WorkerGroupServiceTest.java</include> <include>**/api/service/WorkFlowLineageServiceTest.java</include> <include>**/api/controller/ProcessDefinitionControllerTest.java</include> <include>**/api/controller/TaskInstanceControllerTest.java</include> <include>**/api/controller/WorkFlowLineageControllerTest.java</include> <include>**/api/utils/exportprocess/DataSourceParamTest.java</include> <include>**/api/utils/exportprocess/DependentParamTest.java</include> <include>**/api/utils/CheckUtilsTest.java</include> <include>**/api/utils/FileUtilsTest.java</include> <include>**/api/utils/CheckUtilsTest.java</include> <include>**/api/utils/CheckUtilsTest.java</include> <include>**/api/utils/ResultTest.java</include> <include>**/common/graph/DAGTest.java</include> <include>**/common/os/OshiTest.java</include> <include>**/common/os/OSUtilsTest.java</include> <include>**/common/shell/ShellExecutorTest.java</include> <include>**/common/task/DataxParametersTest.java</include> <include>**/common/task/EntityTestUtils.java</include> <include>**/common/task/FlinkParametersTest.java</include> <include>**/common/task/HttpParametersTest.java</include> <include>**/common/task/SparkParametersTest.java</include> <include>**/common/task/SqlParametersTest.java</include> <include>**/common/task/SqoopParameterEntityTest.java</include> <include>**/common/threadutils/ThreadPoolExecutorsTest.java</include> <include>**/common/threadutils/ThreadUtilsTest.java</include> <include>**/common/utils/CollectionUtilsTest.java</include> <include>**/common/utils/CommonUtilsTest.java</include> 
<include>**/common/utils/DateUtilsTest.java</include> <include>**/common/utils/DependentUtilsTest.java</include> <include>**/common/utils/EncryptionUtilsTest.java</include> <include>**/common/utils/FileUtilsTest.java</include> <include>**/common/utils/JSONUtilsTest.java</include> <include>**/common/utils/LoggerUtilsTest.java</include> <include>**/common/utils/NetUtilsTest.java</include> <include>**/common/utils/OSUtilsTest.java</include> <include>**/common/utils/ParameterUtilsTest.java</include> <include>**/common/utils/TimePlaceholderUtilsTest.java</include> <include>**/common/utils/PreconditionsTest.java</include> <include>**/common/utils/PropertyUtilsTest.java</include> <include>**/common/utils/SchemaUtilsTest.java</include> <include>**/common/utils/ScriptRunnerTest.java</include> <include>**/common/utils/SensitiveLogUtilsTest.java</include> <include>**/common/utils/StringTest.java</include> <include>**/common/utils/StringUtilsTest.java</include> <include>**/common/utils/TaskParametersUtilsTest.java</include> <include>**/common/utils/VarPoolUtilsTest.java</include> <include>**/common/utils/HadoopUtilsTest.java</include> <include>**/common/utils/HttpUtilsTest.java</include> <include>**/common/utils/KerberosHttpClientTest.java</include> <include>**/common/utils/HiveConfUtilsTest.java</include> <include>**/common/ConstantsTest.java</include> <include>**/common/utils/HadoopUtils.java</include> <include>**/common/utils/RetryerUtilsTest.java</include> <include>**/common/datasource/clickhouse/ClickHouseDatasourceProcessorTest.java</include> <include>**/common/datasource/db2/Db2DatasourceProcessorTest.java</include> <include>**/common/datasource/hive/HiveDatasourceProcessorTest.java</include> <include>**/common/datasource/mysql/MysqlDatasourceProcessorTest.java</include> <include>**/common/datasource/oracle/OracleDatasourceProcessorTest.java</include> <include>**/common/datasource/postgresql/PostgreSqlDatasourceProcessorTest.java</include> <include>**/common/datasource/presto/PrestoDatasourceProcessorTest.java</include> <include>**/common/datasource/spark/SparkDatasourceProcessorTest.java</include> <include>**/common/datasource/sqlserver/SqlServerDatasourceProcessorTest.java</include> <include>**/common/datasource/DatasourceUtilTest.java</include> <include>**/common/enums/ExecutionStatusTest</include> <include>**/dao/mapper/AccessTokenMapperTest.java</include> <include>**/dao/mapper/AlertGroupMapperTest.java</include> <include>**/dao/mapper/CommandMapperTest.java</include> <include>**/dao/mapper/ConnectionFactoryTest.java</include> <include>**/dao/mapper/DataSourceMapperTest.java</include> <include>**/dao/datasource/MySQLDataSourceTest.java</include> <include>**/dao/entity/TaskInstanceTest.java</include> <include>**/dao/entity/UdfFuncTest.java</include> <include>**/remote/command/alert/AlertSendRequestCommandTest.java</include> <include>**/remote/command/alert/AlertSendResponseCommandTest.java</include> <include>**/remote/command/future/ResponseFutureTest.java</include> <include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include> <include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include> <include>**/remote/command/log/GetLogBytesRequestCommandTest.java</include> <include>**/remote/command/log/GetLogBytesResponseCommandTest.java</include> <include>**/remote/command/log/ViewLogRequestCommandTest.java</include> <include>**/remote/utils/HostTest.java</include> <include>**/remote/utils/NettyUtilTest.java</include> 
<include>**/remote/NettyRemotingClientTest.java</include> <include>**/rpc/RpcTest.java</include> <include>**/server/log/LoggerServerTest.java</include> <include>**/server/entity/SQLTaskExecutionContextTest.java</include> <include>**/server/log/MasterLogFilterTest.java</include> <include>**/server/log/SensitiveDataConverterTest.java</include> <include>**/server/log/LoggerRequestProcessorTest.java</include> <!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>--> <include>**/server/log/TaskLogFilterTest.java</include> <include>**/server/log/WorkerLogFilterTest.java</include> <include>**/server/master/config/MasterConfigTest.java</include> <include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include> <include>**/server/master/runner/MasterTaskExecThreadTest.java</include> <!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>--> <include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include> <include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include> <include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include> <include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include> <include>**/server/master/registry/MasterRegistryClientTest.java</include> <include>**/server/master/registry/ServerNodeManagerTest.java</include> <include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include> <include>**/server/master/MasterCommandTest.java</include> <include>**/server/master/DependentTaskTest.java</include> <include>**/server/master/ConditionsTaskTest.java</include> <include>**/server/master/MasterExecThreadTest.java</include> <include>**/server/master/ParamsTest.java</include> <include>**/server/master/SubProcessTaskTest.java</include> <include>**/server/master/processor/TaskAckProcessorTest.java</include> <include>**/server/master/processor/TaskKillResponseProcessorTest.java</include> <include>**/server/master/processor/queue/TaskResponseServiceTest.java</include> <include>**/server/master/zk/ZKMasterClientTest.java</include> <include>**/server/registry/ZookeeperRegistryCenterTest.java</include> <include>**/server/utils/DataxUtilsTest.java</include> <include>**/server/utils/ExecutionContextTestUtils.java</include> <include>**/server/utils/FlinkArgsUtilsTest.java</include> <include>**/server/utils/LogUtilsTest.java</include> <include>**/server/utils/MapReduceArgsUtilsTest.java</include> <include>**/server/utils/ParamUtilsTest.java</include> <include>**/server/utils/ProcessUtilsTest.java</include> <include>**/server/utils/SparkArgsUtilsTest.java</include> <include>**/server/worker/processor/TaskCallbackServiceTest.java</include> <include>**/server/worker/processor/TaskExecuteProcessorTest.java</include> <include>**/server/worker/registry/WorkerRegistryTest.java</include> <include>**/server/worker/shell/ShellCommandExecutorTest.java</include> <include>**/server/worker/sql/SqlExecutorTest.java</include> <include>**/server/worker/task/spark/SparkTaskTest.java</include> <include>**/server/worker/task/spark/SparkTaskTest.java</include> <include>**/server/worker/task/datax/DataxTaskTest.java</include> <!--<include>**/server/worker/task/http/HttpTaskTest.java</include>--> <include>**/server/worker/task/sqoop/SqoopTaskTest.java</include> <include>**/server/worker/task/processdure/ProcedureTaskTest.java</include> <include>**/server/worker/task/shell/ShellTaskTest.java</include> <include>**/server/worker/task/TaskManagerTest.java</include> 
<include>**/server/worker/task/AbstractCommandExecutorTest.java</include> <include>**/server/worker/task/PythonCommandExecutorTest.java</include> <include>**/server/worker/task/ShellTaskReturnTest.java</include> <include>**/server/worker/task/sql/SqlTaskTest.java</include> <include>**/server/worker/runner/TaskExecuteThreadTest.java</include> <include>**/server/worker/runner/WorkerManagerThreadTest.java</include> <include>**/service/quartz/cron/CronUtilsTest.java</include> <include>**/service/process/ProcessServiceTest.java</include> <include>**/service/registry/RegistryClientTest.java</include> <include>**/service/registry/RegistryPluginTest.java</include> <include>**/service/queue/TaskUpdateQueueTest.java</include> <include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include> <include>**/service/log/LogClientServiceTest.java</include> <include>**/service/alert/AlertClientServiceTest.java</include> <include>**/service/alert/ProcessAlertManagerTest.java</include> <include>**/dao/mapper/DataSourceUserMapperTest.java</include> <!--<iTaskUpdateQueueConsumerThreadnclude>**/dao/mapper/ErrorCommandMapperTest.java</iTaskUpdateQueueConsumerThreadnclude>--> <include>**/dao/mapper/ProcessDefinitionMapperTest.java</include> <include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include> <include>**/dao/mapper/ProcessInstanceMapperTest.java</include> <include>**/dao/mapper/ProjectMapperTest.java</include> <include>**/dao/mapper/ProjectUserMapperTest.java</include> <include>**/dao/mapper/QueueMapperTest.java</include> <include>**/dao/mapper/ResourceUserMapperTest.java</include> <include>**/dao/mapper/ScheduleMapperTest.java</include> <include>**/dao/mapper/SessionMapperTest.java</include> <include>**/dao/mapper/TaskInstanceMapperTest.java</include> <include>**/dao/mapper/TenantMapperTest.java</include> <include>**/dao/mapper/UdfFuncMapperTest.java</include> <include>**/dao/mapper/UDFUserMapperTest.java</include> <include>**/dao/mapper/UserMapperTest.java</include> <include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include> <include>**/dao/mapper/PluginDefineTest.java</include> <include>**/dao/utils/DagHelperTest.java</include> <include>**/dao/AlertDaoTest.java</include> <include>**/dao/datasource/OracleDataSourceTest.java</include> <include>**/dao/datasource/HiveDataSourceTest.java</include> <include>**/dao/datasource/BaseDataSourceTest.java</include> <include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include> <include>**/dao/upgrade/WokrerGrouopDaoTest.java</include> <include>**/dao/upgrade/UpgradeDaoTest.java</include> <include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include> <include>**/plugin/alert/email/EmailAlertChannelTest.java</include> <include>**/plugin/alert/email/ExcelUtilsTest.java</include> <include>**/plugin/alert/email/MailUtilsTest.java</include> <include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include> <include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include> <include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include> <include>**/plugin/alert/wechat/WeChatSenderTest.java</include> <include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include> <include>**/plugin/alert/script/ProcessUtilsTest.java</include> <include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include> <include>**/plugin/alert/script/ScriptSenderTest.java</include> <include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include> 
<include>**/plugin/alert/http/HttpAlertChannelTest.java</include> <include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include> <include>**/plugin/alert/feishu/FeiShuSenderTest.java</include> <include>**/plugin/alert/http/HttpAlertPluginTest.java</include> <include>**/plugin/alert/http/HttpSenderTest.java</include> <include>**/plugin/alert/slack/SlackAlertChannelFactoryTest.java</include> <include>**/plugin/alert/slack/SlackAlertPluginTest.java</include> <include>**/plugin/alert/slack/SlackSenderTest.java</include> <include>**/spi/params/PluginParamsTransferTest.java</include> <include>**/spi/plugin/DolphinSchedulerPluginLoaderTest.java</include> <include>**/alert/plugin/EmailAlertPluginTest.java</include> <include>**/alert/plugin/AlertPluginManagerTest.java</include> <include>**/alert/plugin/DolphinPluginLoaderTest.java</include> <include>**/alert/utils/FuncUtilsTest.java</include> <include>**/alert/processor/AlertRequestProcessorTest.java</include> <include>**/alert/runner/AlertSenderTest.java</include> <include>**/alert/AlertServerTest.java</include> </includes> <!-- <skip>true</skip> --> </configuration> </plugin> <!-- jenkins plugin jacoco report--> <plugin> <groupId>org.jacoco</groupId> <artifactId>jacoco-maven-plugin</artifactId> <version>${jacoco.version}</version> <configuration> <destFile>target/jacoco.exec</destFile> <dataFile>target/jacoco.exec</dataFile> </configuration> <executions> <execution> <id>jacoco-initialize</id> <goals> <goal>prepare-agent</goal> </goals> </execution> <execution> <id>jacoco-site</id> <phase>test</phase> <goals> <goal>report</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-maven-plugin</artifactId> <version>${spotbugs.version}</version> <configuration> <xmlOutput>true</xmlOutput> <threshold>medium</threshold> <effort>default</effort> <excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile> <failOnError>true</failOnError> </configuration> <dependencies> <dependency> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs</artifactId> <version>4.0.0-beta4</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-checkstyle-plugin</artifactId> <version>${checkstyle.version}</version> <dependencies> <dependency> <groupId>com.puppycrawl.tools</groupId> <artifactId>checkstyle</artifactId> <version>8.18</version> </dependency> </dependencies> <configuration> <consoleOutput>true</consoleOutput> <encoding>UTF-8</encoding> <configLocation>style/checkstyle.xml</configLocation> <suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation> <suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression> <failOnViolation>true</failOnViolation> <violationSeverity>warning</violationSeverity> <includeTestSourceDirectory>true</includeTestSourceDirectory> <sourceDirectories> <sourceDirectory>${project.build.sourceDirectory}</sourceDirectory> </sourceDirectories> <excludes>**\/generated-sources\/</excludes> <skip>true</skip> </configuration> <executions> <execution> <phase>compile</phase> <goals> <goal>check</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>cobertura-maven-plugin</artifactId> <version>${cobertura-maven-plugin.version}</version> <configuration> <check> </check> <aggregate>true</aggregate> <outputDirectory>./target/cobertura</outputDirectory> 
<encoding>${project.build.sourceEncoding}</encoding> <quiet>true</quiet> <format>xml</format> <instrumentation> <ignoreTrivial>true</ignoreTrivial> </instrumentation> </configuration> </plugin> </plugins> </build> <modules> <module>dolphinscheduler-spi</module> <module>dolphinscheduler-alert-plugin</module> <module>dolphinscheduler-registry-plugin</module> <module>dolphinscheduler-ui</module> <module>dolphinscheduler-server</module> <module>dolphinscheduler-common</module> <module>dolphinscheduler-api</module> <module>dolphinscheduler-dao</module> <module>dolphinscheduler-alert</module> <module>dolphinscheduler-dist</module> <module>dolphinscheduler-remote</module> <module>dolphinscheduler-service</module> <module>dolphinscheduler-microbench</module> </modules> </project>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,596
[Bug][Python] Conflict between python_home and datax_home configuration in dolphinscheduler_env.sh
Environment configuration of DataX and Python conflicts in dolphinscheduler_env.sh: to run a DataX task, PYTHON_HOME must point to the Python installation root directory, while to execute a Python script, PYTHON_HOME must point to the Python executable inside that directory; a single PYTHON_HOME value cannot satisfy both. Affected versions: dev, 1.3.6.
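For illustration, a minimal dolphinscheduler_env.sh sketch of the two incompatible conventions (the install paths below are hypothetical):

export DATAX_HOME=/opt/soft/datax
export PYTHON_HOME=/opt/soft/python                # what a DataX task expects: the Python install root
export PYTHON_HOME=/opt/soft/python/bin/python     # what a Python task expects: the interpreter binary itself

Only one PYTHON_HOME export can take effect, so whichever task type disagrees with the chosen convention fails.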
https://github.com/apache/dolphinscheduler/issues/5596
https://github.com/apache/dolphinscheduler/pull/5612
b436ef0a2c7dbfcdffbeb6006430a893897f2271
8bf042ae6ef7576209a0489e784684f4960ae6e0
2021-06-07T09:27:16Z
java
2021-06-11T17:23:18Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/PythonCommandExecutor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker.task;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;
import java.util.regex.Pattern;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * python command executor
 */
public class PythonCommandExecutor extends AbstractCommandExecutor {

    /**
     * logger
     */
    private static final Logger logger = LoggerFactory.getLogger(PythonCommandExecutor.class);

    /**
     * python
     */
    public static final String PYTHON = "python";

    private static final Pattern PYTHON_PATH_PATTERN = Pattern.compile("/bin/python[\\d.]*$");

    /**
     * constructor
     *
     * @param logHandler log handler
     * @param taskExecutionContext taskExecutionContext
     * @param logger logger
     */
    public PythonCommandExecutor(Consumer<List<String>> logHandler,
                                 TaskExecutionContext taskExecutionContext,
                                 Logger logger) {
        super(logHandler, taskExecutionContext, logger);
    }

    /**
     * build command file path
     *
     * @return command file path
     */
    @Override
    protected String buildCommandFilePath() {
        return String.format("%s/py_%s.command", taskExecutionContext.getExecutePath(), taskExecutionContext.getTaskAppId());
    }

    /**
     * create command file if not exists
     *
     * @param execCommand exec command
     * @param commandFile command file
     * @throws IOException io exception
     */
    @Override
    protected void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException {
        logger.info("tenantCode :{}, task dir:{}", taskExecutionContext.getTenantCode(), taskExecutionContext.getExecutePath());

        if (!Files.exists(Paths.get(commandFile))) {
            logger.info("generate command file:{}", commandFile);

            StringBuilder sb = new StringBuilder();
            sb.append("#-*- encoding=utf8 -*-\n");
            sb.append("\n\n");
            sb.append(execCommand);
            logger.info(sb.toString());

            // write data to file
            FileUtils.writeStringToFile(new File(commandFile),
                    sb.toString(),
                    StandardCharsets.UTF_8);
        }
    }

    /**
     * get command options
     *
     * @return command options list
     */
    @Override
    protected List<String> commandOptions() {
        // unbuffered binary stdout and stderr
        return Collections.singletonList("-u");
    }

    /**
     * Gets the command path to which Python can execute
     *
     * @return python command path
     */
    @Override
    protected String commandInterpreter() {
        String pythonHome = getPythonHome(taskExecutionContext.getEnvFile());
        return getPythonCommand(pythonHome);
    }

    /**
     * get python command
     *
     * @param pythonHome python home
     * @return python command
     */
    public static String getPythonCommand(String pythonHome) {
        if (StringUtils.isEmpty(pythonHome)) {
            return PYTHON;
        }
        File file = new File(pythonHome);
        if (file.exists() && file.isFile()) {
            return pythonHome;
        }
        if (PYTHON_PATH_PATTERN.matcher(pythonHome).find()) {
            return pythonHome;
        }
        return pythonHome + "/bin/python";
    }

    /**
     * get python home
     *
     * @param envPath env path
     * @return python home
     */
    public static String getPythonHome(String envPath) {
        BufferedReader br = null;
        StringBuilder sb = new StringBuilder();
        try {
            br = new BufferedReader(new InputStreamReader(new FileInputStream(envPath)));
            String line;
            while ((line = br.readLine()) != null) {
                if (line.contains(Constants.PYTHON_HOME)) {
                    sb.append(line);
                    break;
                }
            }
            String result = sb.toString();
            if (StringUtils.isEmpty(result)) {
                return null;
            }
            String[] arrs = result.split(Constants.EQUAL_SIGN);
            if (arrs.length == 2) {
                return arrs[1];
            }
        } catch (IOException e) {
            logger.error("read file failure", e);
        } finally {
            try {
                if (br != null) {
                    br.close();
                }
            } catch (IOException e) {
                logger.error(e.getMessage(), e);
            }
        }
        return null;
    }

}
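A minimal sketch of how the resolution logic above behaves for the different PYTHON_HOME conventions; the wrapper class and the paths are hypothetical, only getPythonCommand() comes from the file, and the snippet assumes it is compiled in the same package:

package org.apache.dolphinscheduler.server.worker.task;

public class PythonCommandResolutionSketch {
    public static void main(String[] args) {
        // Empty or unset PYTHON_HOME: fall back to "python" on the PATH.
        System.out.println(PythonCommandExecutor.getPythonCommand(null));                             // python
        // PYTHON_HOME already names an interpreter (matches /bin/python[\d.]*$): used as-is.
        System.out.println(PythonCommandExecutor.getPythonCommand("/opt/soft/python/bin/python2.7")); // unchanged
        // PYTHON_HOME names the install root, as a DataX configuration expects: "/bin/python" is appended.
        System.out.println(PythonCommandExecutor.getPythonCommand("/opt/soft/python"));               // /opt/soft/python/bin/python
    }
}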
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,596
[Bug][Python] Conflict between python_home and datax_home configuration in dolphinscheduler_env.sh
Environment configuration of DataX and Python conflicts in dolphinscheduler_env.sh: to run a DataX task, PYTHON_HOME must point to the Python installation root directory, while to execute a Python script, PYTHON_HOME must point to the Python executable inside that directory; a single PYTHON_HOME value cannot satisfy both. Affected versions: dev, 1.3.6.
https://github.com/apache/dolphinscheduler/issues/5596
https://github.com/apache/dolphinscheduler/pull/5612
b436ef0a2c7dbfcdffbeb6006430a893897f2271
8bf042ae6ef7576209a0489e784684f4960ae6e0
2021-06-07T09:27:16Z
java
2021-06-11T17:23:18Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.task.datax; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.common.datasource.DatasourceUtil; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.DbType; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.datax.DataxParameters; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.CommonUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.utils.DataxUtils; import org.apache.dolphinscheduler.server.utils.ParamUtils; import org.apache.dolphinscheduler.server.worker.task.AbstractTask; import org.apache.dolphinscheduler.server.worker.task.CommandExecuteResult; import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor; import org.apache.commons.io.FileUtils; import java.io.File; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.nio.file.attribute.FileAttribute; import java.nio.file.attribute.PosixFilePermission; import java.nio.file.attribute.PosixFilePermissions; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; import org.slf4j.Logger; import com.alibaba.druid.sql.ast.SQLStatement; import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr; import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr; import com.alibaba.druid.sql.ast.statement.SQLSelect; import com.alibaba.druid.sql.ast.statement.SQLSelectItem; import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock; import com.alibaba.druid.sql.ast.statement.SQLSelectStatement; import com.alibaba.druid.sql.ast.statement.SQLUnionQuery; import com.alibaba.druid.sql.parser.SQLStatementParser; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; /** * DataX task */ public class DataxTask extends AbstractTask { /** * jvm parameters */ public static 
final String JVM_PARAM = " --jvm=\"-Xms%sG -Xmx%sG\" "; /** * python process(datax only supports version 2.7 by default) */ private static final String DATAX_PYTHON = "python2.7"; /** * datax path */ private static final String DATAX_PATH = "${DATAX_HOME}/bin/datax.py"; /** * datax channel count */ private static final int DATAX_CHANNEL_COUNT = 1; /** * datax parameters */ private DataxParameters dataXParameters; /** * shell command executor */ private ShellCommandExecutor shellCommandExecutor; /** * taskExecutionContext */ private TaskExecutionContext taskExecutionContext; /** * constructor * * @param taskExecutionContext taskExecutionContext * @param logger logger */ public DataxTask(TaskExecutionContext taskExecutionContext, Logger logger) { super(taskExecutionContext, logger); this.taskExecutionContext = taskExecutionContext; this.shellCommandExecutor = new ShellCommandExecutor(this::logHandle, taskExecutionContext, logger); } /** * init DataX config */ @Override public void init() { logger.info("datax task params {}", taskExecutionContext.getTaskParams()); dataXParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), DataxParameters.class); if (!dataXParameters.checkParameters()) { throw new RuntimeException("datax task params is not valid"); } } /** * run DataX process * * @throws Exception if error throws Exception */ @Override public void handle() throws Exception { try { // set the name of the current thread String threadLoggerInfoName = String.format("TaskLogInfo-%s", taskExecutionContext.getTaskAppId()); Thread.currentThread().setName(threadLoggerInfoName); // combining local and global parameters Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()), taskExecutionContext.getDefinedParams(), dataXParameters.getLocalParametersMap(), CommandType.of(taskExecutionContext.getCmdTypeIfComplement()), taskExecutionContext.getScheduleTime()); // run datax procesDataSourceService.s String jsonFilePath = buildDataxJsonFile(paramsMap); String shellCommandFilePath = buildShellCommandFile(jsonFilePath, paramsMap); CommandExecuteResult commandExecuteResult = shellCommandExecutor.run(shellCommandFilePath); setExitStatusCode(commandExecuteResult.getExitStatusCode()); setAppIds(commandExecuteResult.getAppIds()); setProcessId(commandExecuteResult.getProcessId()); } catch (Exception e) { setExitStatusCode(Constants.EXIT_CODE_FAILURE); throw e; } } /** * cancel DataX process * * @param cancelApplication cancelApplication * @throws Exception if error throws Exception */ @Override public void cancelApplication(boolean cancelApplication) throws Exception { // cancel process shellCommandExecutor.cancelApplication(); } /** * build datax configuration file * * @return datax json file name * @throws Exception if error throws Exception */ private String buildDataxJsonFile(Map<String, Property> paramsMap) throws Exception { // generate json String fileName = String.format("%s/%s_job.json", taskExecutionContext.getExecutePath(), taskExecutionContext.getTaskAppId()); String json; Path path = new File(fileName).toPath(); if (Files.exists(path)) { return fileName; } if (dataXParameters.getCustomConfig() == Flag.YES.ordinal()) { json = dataXParameters.getJson().replaceAll("\\r\\n", "\n"); } else { ObjectNode job = JSONUtils.createObjectNode(); job.putArray("content").addAll(buildDataxJobContentJson()); job.set("setting", buildDataxJobSettingJson()); ObjectNode root = JSONUtils.createObjectNode(); root.set("job", job); 
root.set("core", buildDataxCoreJson()); json = root.toString(); } // replace placeholder json = ParameterUtils.convertParameterPlaceholders(json, ParamUtils.convert(paramsMap)); logger.debug("datax job json : {}", json); // create datax json file FileUtils.writeStringToFile(new File(fileName), json, StandardCharsets.UTF_8); return fileName; } /** * build datax job config * * @return collection of datax job config JSONObject * @throws SQLException if error throws SQLException */ private List<ObjectNode> buildDataxJobContentJson() { DataxTaskExecutionContext dataxTaskExecutionContext = taskExecutionContext.getDataxTaskExecutionContext(); BaseConnectionParam dataSourceCfg = (BaseConnectionParam) DatasourceUtil.buildConnectionParams( DbType.of(dataxTaskExecutionContext.getSourcetype()), dataxTaskExecutionContext.getSourceConnectionParams()); BaseConnectionParam dataTargetCfg = (BaseConnectionParam) DatasourceUtil.buildConnectionParams( DbType.of(dataxTaskExecutionContext.getTargetType()), dataxTaskExecutionContext.getTargetConnectionParams()); List<ObjectNode> readerConnArr = new ArrayList<>(); ObjectNode readerConn = JSONUtils.createObjectNode(); ArrayNode sqlArr = readerConn.putArray("querySql"); for (String sql : new String[]{dataXParameters.getSql()}) { sqlArr.add(sql); } ArrayNode urlArr = readerConn.putArray("jdbcUrl"); urlArr.add(DatasourceUtil.getJdbcUrl(DbType.valueOf(dataXParameters.getDtType()), dataSourceCfg)); readerConnArr.add(readerConn); ObjectNode readerParam = JSONUtils.createObjectNode(); readerParam.put("username", dataSourceCfg.getUser()); readerParam.put("password", CommonUtils.decodePassword(dataSourceCfg.getPassword())); readerParam.putArray("connection").addAll(readerConnArr); ObjectNode reader = JSONUtils.createObjectNode(); reader.put("name", DataxUtils.getReaderPluginName(DbType.of(dataxTaskExecutionContext.getSourcetype()))); reader.set("parameter", readerParam); List<ObjectNode> writerConnArr = new ArrayList<>(); ObjectNode writerConn = JSONUtils.createObjectNode(); ArrayNode tableArr = writerConn.putArray("table"); tableArr.add(dataXParameters.getTargetTable()); writerConn.put("jdbcUrl", DatasourceUtil.getJdbcUrl(DbType.valueOf(dataXParameters.getDsType()), dataTargetCfg)); writerConnArr.add(writerConn); ObjectNode writerParam = JSONUtils.createObjectNode(); writerParam.put("username", dataTargetCfg.getUser()); writerParam.put("password", CommonUtils.decodePassword(dataTargetCfg.getPassword())); String[] columns = parsingSqlColumnNames(DbType.of(dataxTaskExecutionContext.getSourcetype()), DbType.of(dataxTaskExecutionContext.getTargetType()), dataSourceCfg, dataXParameters.getSql()); ArrayNode columnArr = writerParam.putArray("column"); for (String column : columns) { columnArr.add(column); } writerParam.putArray("connection").addAll(writerConnArr); if (CollectionUtils.isNotEmpty(dataXParameters.getPreStatements())) { ArrayNode preSqlArr = writerParam.putArray("preSql"); for (String preSql : dataXParameters.getPreStatements()) { preSqlArr.add(preSql); } } if (CollectionUtils.isNotEmpty(dataXParameters.getPostStatements())) { ArrayNode postSqlArr = writerParam.putArray("postSql"); for (String postSql : dataXParameters.getPostStatements()) { postSqlArr.add(postSql); } } ObjectNode writer = JSONUtils.createObjectNode(); writer.put("name", DataxUtils.getWriterPluginName(DbType.of(dataxTaskExecutionContext.getTargetType()))); writer.set("parameter", writerParam); List<ObjectNode> contentList = new ArrayList<>(); ObjectNode content = JSONUtils.createObjectNode(); 
content.set("reader", reader); content.set("writer", writer); contentList.add(content); return contentList; } /** * build datax setting config * * @return datax setting config JSONObject */ private ObjectNode buildDataxJobSettingJson() { ObjectNode speed = JSONUtils.createObjectNode(); speed.put("channel", DATAX_CHANNEL_COUNT); if (dataXParameters.getJobSpeedByte() > 0) { speed.put("byte", dataXParameters.getJobSpeedByte()); } if (dataXParameters.getJobSpeedRecord() > 0) { speed.put("record", dataXParameters.getJobSpeedRecord()); } ObjectNode errorLimit = JSONUtils.createObjectNode(); errorLimit.put("record", 0); errorLimit.put("percentage", 0); ObjectNode setting = JSONUtils.createObjectNode(); setting.set("speed", speed); setting.set("errorLimit", errorLimit); return setting; } private ObjectNode buildDataxCoreJson() { ObjectNode speed = JSONUtils.createObjectNode(); speed.put("channel", DATAX_CHANNEL_COUNT); if (dataXParameters.getJobSpeedByte() > 0) { speed.put("byte", dataXParameters.getJobSpeedByte()); } if (dataXParameters.getJobSpeedRecord() > 0) { speed.put("record", dataXParameters.getJobSpeedRecord()); } ObjectNode channel = JSONUtils.createObjectNode(); channel.set("speed", speed); ObjectNode transport = JSONUtils.createObjectNode(); transport.set("channel", channel); ObjectNode core = JSONUtils.createObjectNode(); core.set("transport", transport); return core; } /** * create command * * @return shell command file name * @throws Exception if error throws Exception */ private String buildShellCommandFile(String jobConfigFilePath, Map<String, Property> paramsMap) throws Exception { // generate scripts String fileName = String.format("%s/%s_node.%s", taskExecutionContext.getExecutePath(), taskExecutionContext.getTaskAppId(), OSUtils.isWindows() ? "bat" : "sh"); Path path = new File(fileName).toPath(); if (Files.exists(path)) { return fileName; } // datax python command StringBuilder sbr = new StringBuilder(); sbr.append(DATAX_PYTHON); sbr.append(" "); sbr.append(DATAX_PATH); sbr.append(" "); sbr.append(loadJvmEnv(dataXParameters)); sbr.append(jobConfigFilePath); // replace placeholder String dataxCommand = ParameterUtils.convertParameterPlaceholders(sbr.toString(), ParamUtils.convert(paramsMap)); logger.debug("raw script : {}", dataxCommand); // create shell command file Set<PosixFilePermission> perms = PosixFilePermissions.fromString(Constants.RWXR_XR_X); FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms); if (OSUtils.isWindows()) { Files.createFile(path); } else { Files.createFile(path, attr); } Files.write(path, dataxCommand.getBytes(), StandardOpenOption.APPEND); return fileName; } public String loadJvmEnv(DataxParameters dataXParameters) { int xms = dataXParameters.getXms() < 1 ? 1 : dataXParameters.getXms(); int xmx = dataXParameters.getXmx() < 1 ? 
1 : dataXParameters.getXmx(); return String.format(JVM_PARAM, xms, xmx); } /** * parsing synchronized column names in SQL statements * * @param dsType the database type of the data source * @param dtType the database type of the data target * @param dataSourceCfg the database connection parameters of the data source * @param sql sql for data synchronization * @return Keyword converted column names */ private String[] parsingSqlColumnNames(DbType dsType, DbType dtType, BaseConnectionParam dataSourceCfg, String sql) { String[] columnNames = tryGrammaticalAnalysisSqlColumnNames(dsType, sql); if (columnNames == null || columnNames.length == 0) { logger.info("try to execute sql analysis query column name"); columnNames = tryExecuteSqlResolveColumnNames(dataSourceCfg, sql); } notNull(columnNames, String.format("parsing sql columns failed : %s", sql)); return DataxUtils.convertKeywordsColumns(dtType, columnNames); } /** * try grammatical parsing column * * @param dbType database type * @param sql sql for data synchronization * @return column name array * @throws RuntimeException if error throws RuntimeException */ private String[] tryGrammaticalAnalysisSqlColumnNames(DbType dbType, String sql) { String[] columnNames; try { SQLStatementParser parser = DataxUtils.getSqlStatementParser(dbType, sql); if (parser == null) { logger.warn("database driver [{}] is not support grammatical analysis sql", dbType); return new String[0]; } SQLStatement sqlStatement = parser.parseStatement(); SQLSelectStatement sqlSelectStatement = (SQLSelectStatement) sqlStatement; SQLSelect sqlSelect = sqlSelectStatement.getSelect(); List<SQLSelectItem> selectItemList = null; if (sqlSelect.getQuery() instanceof SQLSelectQueryBlock) { SQLSelectQueryBlock block = (SQLSelectQueryBlock) sqlSelect.getQuery(); selectItemList = block.getSelectList(); } else if (sqlSelect.getQuery() instanceof SQLUnionQuery) { SQLUnionQuery unionQuery = (SQLUnionQuery) sqlSelect.getQuery(); SQLSelectQueryBlock block = (SQLSelectQueryBlock) unionQuery.getRight(); selectItemList = block.getSelectList(); } notNull(selectItemList, String.format("select query type [%s] is not support", sqlSelect.getQuery().toString())); columnNames = new String[selectItemList.size()]; for (int i = 0; i < selectItemList.size(); i++) { SQLSelectItem item = selectItemList.get(i); String columnName = null; if (item.getAlias() != null) { columnName = item.getAlias(); } else if (item.getExpr() != null) { if (item.getExpr() instanceof SQLPropertyExpr) { SQLPropertyExpr expr = (SQLPropertyExpr) item.getExpr(); columnName = expr.getName(); } else if (item.getExpr() instanceof SQLIdentifierExpr) { SQLIdentifierExpr expr = (SQLIdentifierExpr) item.getExpr(); columnName = expr.getName(); } } else { throw new RuntimeException( String.format("grammatical analysis sql column [ %s ] failed", item.toString())); } if (columnName == null) { throw new RuntimeException( String.format("grammatical analysis sql column [ %s ] failed", item.toString())); } columnNames[i] = columnName; } } catch (Exception e) { logger.warn(e.getMessage(), e); return new String[0]; } return columnNames; } /** * try to execute sql to resolve column names * * @param baseDataSource the database connection parameters * @param sql sql for data synchronization * @return column name array */ public String[] tryExecuteSqlResolveColumnNames(BaseConnectionParam baseDataSource, String sql) { String[] columnNames; sql = String.format("SELECT t.* FROM ( %s ) t WHERE 0 = 1", sql); sql = sql.replace(";", ""); try ( Connection 
connection = DatasourceUtil.getConnection(DbType.valueOf(dataXParameters.getDtType()), baseDataSource); PreparedStatement stmt = connection.prepareStatement(sql); ResultSet resultSet = stmt.executeQuery()) { ResultSetMetaData md = resultSet.getMetaData(); int num = md.getColumnCount(); columnNames = new String[num]; for (int i = 1; i <= num; i++) { columnNames[i - 1] = md.getColumnName(i); } } catch (SQLException e) { logger.warn(e.getMessage(), e); return null; } return columnNames; } @Override public AbstractParameters getParameters() { return dataXParameters; } private void notNull(Object obj, String message) { if (obj == null) { throw new RuntimeException(message); } } }
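Editor's note: `tryExecuteSqlResolveColumnNames` above relies on a classic JDBC trick: wrap the user query in an outer query whose `WHERE 0 = 1` predicate is always false, so the database returns zero rows but a full `ResultSetMetaData`. The following is a minimal standalone sketch of the same technique, not DolphinScheduler code; the JDBC URL and credentials are placeholder assumptions.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSetMetaData;

public class ColumnNameProbe {

    public static String[] probeColumns(String jdbcUrl, String user, String password, String sql) throws Exception {
        // "WHERE 0 = 1" makes the database plan the query but return no rows,
        // so only the column metadata comes back over the wire
        String probeSql = String.format("SELECT t.* FROM ( %s ) t WHERE 0 = 1", sql.replace(";", ""));
        try (Connection conn = DriverManager.getConnection(jdbcUrl, user, password);
             PreparedStatement stmt = conn.prepareStatement(probeSql)) {
            ResultSetMetaData md = stmt.executeQuery().getMetaData();
            String[] columns = new String[md.getColumnCount()];
            for (int i = 1; i <= columns.length; i++) {
                columns[i - 1] = md.getColumnName(i); // JDBC metadata is 1-indexed
            }
            return columns;
        }
    }
}
```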
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,596
[Bug][Python] Conflict between python_home and datax_home configuration in dolphinscheduler_env.sh
The environment configuration of DataX and Python conflict in dolphinscheduler_env.sh. To configure DataX's Python, PYTHON_HOME needs to point to the root directory of the Python installation; but to execute a Python script task, PYTHON_HOME needs to point to the Python executable file inside that directory. The two requirements cannot both be satisfied by one variable. - [dev] - [1.3.6]
https://github.com/apache/dolphinscheduler/issues/5596
https://github.com/apache/dolphinscheduler/pull/5612
b436ef0a2c7dbfcdffbeb6006430a893897f2271
8bf042ae6ef7576209a0489e784684f4960ae6e0
2021-06-07T09:27:16Z
java
2021-06-11T17:23:18Z
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTaskTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker.task.datax;

import static org.apache.dolphinscheduler.common.enums.CommandType.START_PROCESS;

import org.apache.dolphinscheduler.common.datasource.BaseConnectionParam;
import org.apache.dolphinscheduler.common.datasource.DatasourceUtil;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.DataxUtils;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;

import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.UUID;

import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

/**
 * DataxTask Tester.
 */
public class DataxTaskTest {

    private static final Logger logger = LoggerFactory.getLogger(DataxTaskTest.class);

    private static final String CONNECTION_PARAMS = " {\n"
            + " \"user\":\"root\",\n"
            + " \"password\":\"123456\",\n"
            + " \"address\":\"jdbc:mysql://127.0.0.1:3306\",\n"
            + " \"database\":\"test\",\n"
            + " \"jdbcUrl\":\"jdbc:mysql://127.0.0.1:3306/test\"\n"
            + "}";

    private DataxTask dataxTask;

    private ProcessService processService;

    private ShellCommandExecutor shellCommandExecutor;

    private ApplicationContext applicationContext;

    private TaskExecutionContext taskExecutionContext;

    private final TaskProps props = new TaskProps();

    @Before
    public void before() throws Exception {
        setTaskParems(0);
    }

    private void setTaskParems(Integer customConfig) {
        processService = Mockito.mock(ProcessService.class);
        shellCommandExecutor = Mockito.mock(ShellCommandExecutor.class);

        applicationContext = Mockito.mock(ApplicationContext.class);
        SpringApplicationContext springApplicationContext = new SpringApplicationContext();
        springApplicationContext.setApplicationContext(applicationContext);
        Mockito.when(applicationContext.getBean(ProcessService.class)).thenReturn(processService);

        TaskProps props = new TaskProps();
        props.setExecutePath("/tmp");
        props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
        props.setTaskInstanceId(1);
        props.setTenantCode("1");
        props.setEnvFile(".dolphinscheduler_env.sh");
        props.setTaskStartTime(new Date());
        props.setTaskTimeout(0);
        if (customConfig == 1) {
            props.setTaskParams(
                    "{\n"
                            + " \"customConfig\":1,\n"
                            + " \"localParams\":[\n"
                            + " {\n"
                            + " \"prop\":\"test\",\n"
                            + " \"value\":\"38294729\"\n"
                            + " }\n"
                            + " ],\n"
                            + " \"json\":\""
                            + "{\"job\":{\"setting\":{\"speed\":{\"byte\":1048576},\"errorLimit\":{\"record\":0,\"percentage\":0.02}},\"content\":["
                            + "{\"reader\":{\"name\":\"rdbmsreader\",\"parameter\":{\"username\":\"xxx\",\"password\":\"${test}\",\"column\":[\"id\",\"name\"],\"splitPk\":\"pk\",\""
                            + "connection\":[{\"querySql\":[\"SELECT * from dual\"],\"jdbcUrl\":[\"jdbc:dm://ip:port/database\"]}],\"fetchSize\":1024,\"where\":\"1 = 1\"}},\""
                            + "writer\":{\"name\":\"streamwriter\",\"parameter\":{\"print\":true}}}]}}\"\n"
                            + "}");
        } else {
            props.setTaskParams(
                    "{\n"
                            + " \"customConfig\":0,\n"
                            + " \"targetTable\":\"test\",\n"
                            + " \"postStatements\":[\n"
                            + " \"delete from test\"\n"
                            + " ],\n"
                            + " \"jobSpeedByte\":0,\n"
                            + " \"jobSpeedRecord\":1000,\n"
                            + " \"dtType\":\"MYSQL\",\n"
                            + " \"dataSource\":1,\n"
                            + " \"dsType\":\"MYSQL\",\n"
                            + " \"dataTarget\":2,\n"
                            + " \"sql\":\"select 1 as test from dual\",\n"
                            + " \"preStatements\":[\n"
                            + " \"delete from test\"\n"
                            + " ]\n"
                            + "}");
        }

        taskExecutionContext = Mockito.mock(TaskExecutionContext.class);
        Mockito.when(taskExecutionContext.getTaskParams()).thenReturn(props.getTaskParams());
        Mockito.when(taskExecutionContext.getExecutePath()).thenReturn("/tmp");
        Mockito.when(taskExecutionContext.getTaskAppId()).thenReturn(UUID.randomUUID().toString());
        Mockito.when(taskExecutionContext.getTenantCode()).thenReturn("root");
        Mockito.when(taskExecutionContext.getStartTime()).thenReturn(new Date());
        Mockito.when(taskExecutionContext.getTaskTimeout()).thenReturn(10000);
        Mockito.when(taskExecutionContext.getLogPath()).thenReturn("/tmp/dx");

        DataxTaskExecutionContext dataxTaskExecutionContext = new DataxTaskExecutionContext();
        dataxTaskExecutionContext.setSourcetype(0);
        dataxTaskExecutionContext.setTargetType(0);
        dataxTaskExecutionContext.setSourceConnectionParams(CONNECTION_PARAMS);
        dataxTaskExecutionContext.setTargetConnectionParams(CONNECTION_PARAMS);
        Mockito.when(taskExecutionContext.getDataxTaskExecutionContext()).thenReturn(dataxTaskExecutionContext);

        dataxTask = PowerMockito.spy(new DataxTask(taskExecutionContext, logger));
        dataxTask.init();
        props.setCmdTypeIfComplement(START_PROCESS);

        Mockito.when(processService.findDataSourceById(1)).thenReturn(getDataSource());
        Mockito.when(processService.findDataSourceById(2)).thenReturn(getDataSource());
        Mockito.when(processService.findProcessInstanceByTaskId(1)).thenReturn(getProcessInstance());

        String fileName = String.format("%s/%s_node.sh", props.getExecutePath(), props.getTaskAppId());
        try {
            Mockito.when(shellCommandExecutor.run(fileName)).thenReturn(null);
        } catch (Exception e) {
            e.printStackTrace();
        }

        dataxTask = PowerMockito.spy(new DataxTask(taskExecutionContext, logger));
        dataxTask.init();
    }

    private DataSource getDataSource() {
        DataSource dataSource = new DataSource();
        dataSource.setType(DbType.MYSQL);
        dataSource.setConnectionParams(CONNECTION_PARAMS);
        dataSource.setUserId(1);
        return dataSource;
    }

    private ProcessInstance getProcessInstance() {
        ProcessInstance processInstance = new ProcessInstance();
        processInstance.setCommandType(START_PROCESS);
        processInstance.setScheduleTime(new Date());
        return processInstance;
    }

    @After
    public void after() throws Exception {
    }

    /**
     * Method: DataxTask()
     */
    @Test
    public void testDataxTask() throws Exception {
        TaskProps props = new TaskProps();
        props.setExecutePath("/tmp");
        props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
        props.setTaskInstanceId(1);
        props.setTenantCode("1");
        Assert.assertNotNull(new DataxTask(null, logger));
    }

    /**
     * Method: init
     */
    @Test
    public void testInit() throws Exception {
        try {
            dataxTask.init();
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: handle()
     */
    @Test
    public void testHandle() throws Exception {
    }

    /**
     * Method: cancelApplication()
     */
    @Test
    public void testCancelApplication() throws Exception {
        try {
            dataxTask.cancelApplication(true);
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: parsingSqlColumnNames(DbType dsType, DbType dtType, BaseDataSource
     * dataSourceCfg, String sql)
     */
    @Test
    public void testParsingSqlColumnNames() throws Exception {
        try {
            BaseConnectionParam dataSource = (BaseConnectionParam) DatasourceUtil.buildConnectionParams(
                    getDataSource().getType(),
                    getDataSource().getConnectionParams());

            Method method = DataxTask.class.getDeclaredMethod("parsingSqlColumnNames", DbType.class, DbType.class, BaseConnectionParam.class, String.class);
            method.setAccessible(true);
            String[] columns = (String[]) method.invoke(dataxTask, DbType.MYSQL, DbType.MYSQL, dataSource, "select 1 as a, 2 as `table` from dual");

            Assert.assertNotNull(columns);
            Assert.assertTrue(columns.length == 2);
            Assert.assertEquals("[`a`, `table`]", Arrays.toString(columns));
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: tryGrammaticalParsingSqlColumnNames(DbType dbType, String sql)
     */
    @Test
    public void testTryGrammaticalAnalysisSqlColumnNames() throws Exception {
        try {
            Method method = DataxTask.class.getDeclaredMethod("tryGrammaticalAnalysisSqlColumnNames", DbType.class, String.class);
            method.setAccessible(true);
            String[] columns = (String[]) method.invoke(dataxTask, DbType.MYSQL,
                    "select t1.a, t1.b from test t1 union all select a, t2.b from (select a, b from test) t2");

            Assert.assertNotNull(columns);
            Assert.assertTrue(columns.length == 2);
            Assert.assertEquals("[a, b]", Arrays.toString(columns));
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: tryExecuteSqlResolveColumnNames(BaseDataSource baseDataSource,
     * String sql)
     */
    @Test
    public void testTryExecuteSqlResolveColumnNames() throws Exception {
        // TODO: Test goes here...
    }

    /**
     * Method: buildDataxJsonFile()
     */
    @Test
    @Ignore("method not found")
    public void testBuildDataxJsonFile() throws Exception {
        try {
            setTaskParems(1);
            Method method = DataxTask.class.getDeclaredMethod("buildDataxJsonFile");
            method.setAccessible(true);
            String filePath = (String) method.invoke(dataxTask, null);
            Assert.assertNotNull(filePath);
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: buildDataxJsonFile()
     */
    @Test
    @Ignore("method not found")
    public void testBuildDataxJsonFile0() throws Exception {
        try {
            setTaskParems(0);
            Method method = DataxTask.class.getDeclaredMethod("buildDataxJsonFile");
            method.setAccessible(true);
            String filePath = (String) method.invoke(dataxTask, null);
            Assert.assertNotNull(filePath);
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: buildDataxJobContentJson()
     */
    @Test
    public void testBuildDataxJobContentJson() throws Exception {
        try {
            Method method = DataxTask.class.getDeclaredMethod("buildDataxJobContentJson");
            method.setAccessible(true);
            List<ObjectNode> contentList = (List<ObjectNode>) method.invoke(dataxTask, null);
            Assert.assertNotNull(contentList);

            ObjectNode content = contentList.get(0);
            JsonNode reader = JSONUtils.parseObject(content.path("reader").toString());
            Assert.assertNotNull(reader);
            Assert.assertEquals("{\"name\":\"mysqlreader\",\"parameter\":{\"username\":\"root\","
                    + "\"password\":\"123456\",\"connection\":[{\"querySql\":[\"select 1 as test from dual\"],"
                    + "\"jdbcUrl\":[\"jdbc:mysql://127.0.0.1:3306/test?allowLoadLocalInfile=false"
                    + "&autoDeserialize=false&allowLocalInfile=false&allowUrlInLocalInfile=false\"]}]}}", reader.toString());

            String readerPluginName = reader.path("name").asText();
            Assert.assertEquals(DataxUtils.DATAX_READER_PLUGIN_MYSQL, readerPluginName);

            JsonNode writer = JSONUtils.parseObject(content.path("writer").toString());
            Assert.assertNotNull(writer);
            Assert.assertEquals("{\"name\":\"mysqlwriter\",\"parameter\":{\"username\":\"root\","
                    + "\"password\":\"123456\",\"column\":[\"`test`\"],\"connection\":[{\"table\":[\"test\"],"
                    + "\"jdbcUrl\":\"jdbc:mysql://127.0.0.1:3306/test?allowLoadLocalInfile=false&"
                    + "autoDeserialize=false&allowLocalInfile=false&allowUrlInLocalInfile=false\"}],"
                    + "\"preSql\":[\"delete from test\"],\"postSql\":[\"delete from test\"]}}", writer.toString());

            String writerPluginName = writer.path("name").asText();
            Assert.assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_MYSQL, writerPluginName);
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: buildDataxJobSettingJson()
     */
    @Test
    public void testBuildDataxJobSettingJson() throws Exception {
        try {
            Method method = DataxTask.class.getDeclaredMethod("buildDataxJobSettingJson");
            method.setAccessible(true);
            JsonNode setting = (JsonNode) method.invoke(dataxTask, null);
            Assert.assertNotNull(setting);
            Assert.assertEquals("{\"channel\":1,\"record\":1000}", setting.get("speed").toString());
            Assert.assertEquals("{\"record\":0,\"percentage\":0}", setting.get("errorLimit").toString());
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: buildDataxCoreJson()
     */
    @Test
    public void testBuildDataxCoreJson() throws Exception {
        try {
            Method method = DataxTask.class.getDeclaredMethod("buildDataxCoreJson");
            method.setAccessible(true);
            ObjectNode coreConfig = (ObjectNode) method.invoke(dataxTask, null);
            Assert.assertNotNull(coreConfig);
            Assert.assertNotNull(coreConfig.get("transport"));
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: buildShellCommandFile(String jobConfigFilePath)
     */
    @Test
    @Ignore("method not found")
    public void testBuildShellCommandFile() throws Exception {
        try {
            Method method = DataxTask.class.getDeclaredMethod("buildShellCommandFile", String.class);
            method.setAccessible(true);
            Assert.assertNotNull(method.invoke(dataxTask, "test.json"));
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /**
     * Method: getParameters
     */
    @Test
    public void testGetParameters() throws Exception {
        Assert.assertTrue(dataxTask.getParameters() != null);
    }

    /**
     * Method: notNull(Object obj, String message)
     */
    @Test
    public void testNotNull() throws Exception {
        try {
            Method method = DataxTask.class.getDeclaredMethod("notNull", Object.class, String.class);
            method.setAccessible(true);
            method.invoke(dataxTask, "abc", "test throw RuntimeException");
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    @Test
    public void testLoadJvmEnv() {
        DataxTask dataxTask = new DataxTask(null, null);
        DataxParameters dataxParameters = new DataxParameters();
        dataxParameters.setXms(0);
        dataxParameters.setXmx(-100);

        String actual = dataxTask.loadJvmEnv(dataxParameters);

        String except = " --jvm=\"-Xms1G -Xmx1G\" ";
        Assert.assertEquals(except, actual);

        dataxParameters.setXms(13);
        dataxParameters.setXmx(14);
        actual = dataxTask.loadJvmEnv(dataxParameters);
        except = " --jvm=\"-Xms13G -Xmx14G\" ";
        Assert.assertEquals(except, actual);
    }
}
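Editor's note: nearly every test above reaches a private method on DataxTask through reflection. The following is the bare pattern the test repeats, shown on a toy class defined here (not a DolphinScheduler class), for readers unfamiliar with it.

```java
import java.lang.reflect.Method;

public class ReflectionPatternDemo {

    static class Target {
        private String greet(String name) {
            return "hello " + name;
        }
    }

    public static void main(String[] args) throws Exception {
        // look up the private method by name and parameter types
        Method method = Target.class.getDeclaredMethod("greet", String.class);
        method.setAccessible(true); // bypass the private modifier
        String result = (String) method.invoke(new Target(), "datax");
        System.out.println(result); // hello datax
    }
}
```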
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,483
[Bug][Api] Can't view variables
**Describe the bug** When I want to view the variables defined in a process instance, it throws an exception. **To Reproduce** Steps to reproduce the behavior, for example: 1. Create a process definition 2. Add localParams 3. Execute the process definition 4. View params in the process instance **Screenshots** ![image](https://user-images.githubusercontent.com/22415594/118438653-4b46e400-b717-11eb-94d4-5e187a377d51.png) **Which version of Dolphin Scheduler:** -[dev] **Additional context** This issue is caused by deserializing the taskParams in TaskDefinitionLog. https://github.com/apache/dolphinscheduler/blob/68301db6b914ff4002bfbc531c6810864d8e47c2/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessInstanceServiceImpl.java#L664-L666 For example, the JSON attributes contain lists, so the value cannot be deserialized as a string (a minimal reproduction sketch follows this record):

```json
{
    "resourceList":[],
    "localParams":[
        {
            "prop":"BATCH_TIME",
            "direct":"IN",
            "type":"VARCHAR",
            "value":"20210517131849"
        }
    ],
    "rawScript":"echo \"${BATCH_TIME}\"",
    "conditionResult":"{\"successNode\":[\"\"],\"failedNode\":[\"\"]}",
    "dependence":"{}"
}
```

Also, multiple places use different ways to deserialize the `taskParams`. https://github.com/apache/dolphinscheduler/blob/68301db6b914ff4002bfbc531c6810864d8e47c2/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java#L1611 I think it is better to use the same way to do this transform; otherwise, once we make changes, we need to change many places. The `taskParams` is transported by the front-end and stored in the database as a JSON string. We use a Map to represent this field in the backend; I think it is better to define a specific class to express the `taskParams`, which may be helpful for deserialization and code maintenance.
https://github.com/apache/dolphinscheduler/issues/5483
https://github.com/apache/dolphinscheduler/pull/5631
8bf042ae6ef7576209a0489e784684f4960ae6e0
0d5037e7c37d7903d9172f165b348058f1ddbf88
2021-05-17T06:24:02Z
java
2021-06-13T03:43:53Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessInstanceServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import static org.apache.dolphinscheduler.common.Constants.DATA_LIST;
import static org.apache.dolphinscheduler.common.Constants.DEPENDENT_SPLIT;
import static org.apache.dolphinscheduler.common.Constants.GLOBAL_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.PROCESS_INSTANCE_STATE;
import static org.apache.dolphinscheduler.common.Constants.TASK_LIST;

import org.apache.dolphinscheduler.api.dto.gantt.GanttDto;
import org.apache.dolphinscheduler.api.dto.gantt.Task;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.LoggerService;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionService;
import org.apache.dolphinscheduler.api.service.ProcessInstanceService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.UsersService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.placeholder.BusinessTimeUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;

/**
 * process instance service impl
 */
@Service
public class ProcessInstanceServiceImpl extends BaseServiceImpl implements ProcessInstanceService {

    private static final Logger logger = LoggerFactory.getLogger(ProcessInstanceService.class);

    public static final String TASK_TYPE = "taskType";

    public static final String LOCAL_PARAMS_LIST = "localParamsList";

    @Autowired
    ProjectMapper projectMapper;

    @Autowired
    ProjectService projectService;

    @Autowired
    ProcessService processService;

    @Autowired
    ProcessInstanceMapper processInstanceMapper;

    @Autowired
    ProcessDefinitionMapper processDefineMapper;

    @Autowired
    ProcessDefinitionService processDefinitionService;

    @Autowired
    ExecutorService execService;

    @Autowired
    TaskInstanceMapper taskInstanceMapper;

    @Autowired
    LoggerService loggerService;

    @Autowired
    ProcessDefinitionLogMapper processDefinitionLogMapper;

    @Autowired
    TaskDefinitionLogMapper taskDefinitionLogMapper;

    @Autowired
    UsersService usersService;

    /**
     * return top n SUCCESS process instance order by running time which started between startTime and endTime
     */
    @Override
    public Map<String, Object> queryTopNLongestRunningProcessInstance(User loginUser, String projectName, int size, String startTime, String endTime) {
        Map<String, Object> result = new HashMap<>();

        Project project = projectMapper.queryByName(projectName);
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }

        if (0 > size) {
            putMsg(result, Status.NEGTIVE_SIZE_NUMBER_ERROR, size);
            return result;
        }
        if (Objects.isNull(startTime)) {
            putMsg(result, Status.DATA_IS_NULL, Constants.START_TIME);
            return result;
        }
        Date start = DateUtils.stringToDate(startTime);
        if (Objects.isNull(endTime)) {
            putMsg(result, Status.DATA_IS_NULL, Constants.END_TIME);
            return result;
        }
        Date end = DateUtils.stringToDate(endTime);
        if (start == null || end == null) {
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, Constants.START_END_DATE);
            return result;
        }
        if (start.getTime() > end.getTime()) {
            putMsg(result, Status.START_TIME_BIGGER_THAN_END_TIME_ERROR, startTime, endTime);
            return result;
        }

        List<ProcessInstance> processInstances = processInstanceMapper.queryTopNProcessInstance(size, start, end, ExecutionStatus.SUCCESS);
        result.put(DATA_LIST, processInstances);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process instance by id
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processId process instance id
     * @return process instance detail
     */
    @Override
    public Map<String, Object> queryProcessInstanceById(User loginUser, String projectName, Integer processId) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);

        ProcessDefinition processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion());

        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
        } else {
            processInstance.setWarningGroupId(processDefinition.getWarningGroupId());
            processInstance.setConnects(processDefinition.getConnects());
            processInstance.setLocations(processDefinition.getLocations());
            ProcessData processData = processService.genProcessData(processDefinition);
            processInstance.setProcessInstanceJson(JSONUtils.toJsonString(processData));
            result.put(DATA_LIST, processInstance);
            putMsg(result, Status.SUCCESS);
        }

        return result;
    }

    /**
     * paging query process instance list, filtering according to project, process definition, time range, keyword, process status
     *
     * @param loginUser login user
     * @param projectName project name
     * @param pageNo page number
     * @param pageSize page size
     * @param processDefineId process definition id
     * @param searchVal search value
     * @param stateType state type
     * @param host host
     * @param startDate start time
     * @param endDate end time
     * @return process instance list
     */
    @Override
    public Map<String, Object> queryProcessInstanceList(User loginUser, String projectName, Integer processDefineId,
                                                        String startDate, String endDate,
                                                        String searchVal, String executorName, ExecutionStatus stateType, String host,
                                                        Integer pageNo, Integer pageSize) {

        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }

        int[] statusArray = null;
        // filter by state
        if (stateType != null) {
            statusArray = new int[]{stateType.ordinal()};
        }

        Map<String, Object> checkAndParseDateResult = checkAndParseDateParameters(startDate, endDate);
        if (checkAndParseDateResult.get(Constants.STATUS) != Status.SUCCESS) {
            return checkAndParseDateResult;
        }
        Date start = (Date) checkAndParseDateResult.get(Constants.START_TIME);
        Date end = (Date) checkAndParseDateResult.get(Constants.END_TIME);

        Page<ProcessInstance> page = new Page<>(pageNo, pageSize);
        PageInfo<ProcessInstance> pageInfo = new PageInfo<>(pageNo, pageSize);
        int executorId = usersService.getUserIdByName(executorName);

        ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(processDefineId);

        IPage<ProcessInstance> processInstanceList = processInstanceMapper.queryProcessInstanceListPaging(page,
                project.getCode(), processDefinition == null ? 0L : processDefinition.getCode(),
                searchVal, executorId, statusArray, host, start, end);

        List<ProcessInstance> processInstances = processInstanceList.getRecords();
        List<Integer> userIds = CollectionUtils.transformToList(processInstances, ProcessInstance::getExecutorId);
        Map<Integer, User> idToUserMap = CollectionUtils.collectionToMap(usersService.queryUser(userIds), User::getId);

        for (ProcessInstance processInstance : processInstances) {
            processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()));
            User executor = idToUserMap.get(processInstance.getExecutorId());
            if (null != executor) {
                processInstance.setExecutorName(executor.getUserName());
            }
        }

        pageInfo.setTotalCount((int) processInstanceList.getTotal());
        pageInfo.setLists(processInstances);
        result.put(DATA_LIST, pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query task list by process instance id
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processId process instance id
     * @return task list for the process instance
     * @throws IOException io exception
     */
    @Override
    public Map<String, Object> queryTaskListByProcessId(User loginUser, String projectName, Integer processId) throws IOException {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);
        List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processId);
        addDependResultForTaskList(taskInstanceList);
        Map<String, Object> resultMap = new HashMap<>();
        resultMap.put(PROCESS_INSTANCE_STATE, processInstance.getState().toString());
        resultMap.put(TASK_LIST, taskInstanceList);
        result.put(DATA_LIST, resultMap);

        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * add dependent result for dependent task
     */
    private void addDependResultForTaskList(List<TaskInstance> taskInstanceList) throws IOException {
        for (TaskInstance taskInstance : taskInstanceList) {
            if (TaskType.DEPENDENT.getDesc().equalsIgnoreCase(taskInstance.getTaskType())) {
                Result<String> logResult = loggerService.queryLog(
                        taskInstance.getId(), Constants.LOG_QUERY_SKIP_LINE_NUMBER, Constants.LOG_QUERY_LIMIT);
                if (logResult.getCode() == Status.SUCCESS.ordinal()) {
                    String log = logResult.getData();
                    Map<String, DependResult> resultMap = parseLogForDependentResult(log);
                    taskInstance.setDependentResult(JSONUtils.toJsonString(resultMap));
                }
            }
        }
    }

    @Override
    public Map<String, DependResult> parseLogForDependentResult(String log) throws IOException {
        Map<String, DependResult> resultMap = new HashMap<>();
        if (StringUtils.isEmpty(log)) {
            return resultMap;
        }

        BufferedReader br = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(log.getBytes(
                StandardCharsets.UTF_8)), StandardCharsets.UTF_8));
        String line;
        while ((line = br.readLine()) != null) {
            if (line.contains(DEPENDENT_SPLIT)) {
                String[] tmpStringArray = line.split(":\\|\\|");
                if (tmpStringArray.length != 2) {
                    continue;
                }
                String dependResultString = tmpStringArray[1];
                String[] dependStringArray = dependResultString.split(",");
                if (dependStringArray.length != 2) {
                    continue;
                }
                String key = dependStringArray[0].trim();
                DependResult dependResult = DependResult.valueOf(dependStringArray[1].trim());
                resultMap.put(key, dependResult);
            }
        }
        return resultMap;
    }

    /**
     * query sub process instance detail info by task id
     *
     * @param loginUser login user
     * @param projectName project name
     * @param taskId task id
     * @return sub process instance detail
     */
    @Override
    public Map<String, Object> querySubProcessInstanceByTaskId(User loginUser, String projectName, Integer taskId) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }

        TaskInstance taskInstance = processService.findTaskInstanceById(taskId);
        if (taskInstance == null) {
            putMsg(result, Status.TASK_INSTANCE_NOT_EXISTS, taskId);
            return result;
        }
        if (!taskInstance.isSubProcess()) {
            putMsg(result, Status.TASK_INSTANCE_NOT_SUB_WORKFLOW_INSTANCE, taskInstance.getName());
            return result;
        }

        ProcessInstance subWorkflowInstance = processService.findSubProcessInstance(
                taskInstance.getProcessInstanceId(), taskInstance.getId());
        if (subWorkflowInstance == null) {
            putMsg(result, Status.SUB_PROCESS_INSTANCE_NOT_EXIST, taskId);
            return result;
        }
        Map<String, Object> dataMap = new HashMap<>();
        dataMap.put(Constants.SUBPROCESS_INSTANCE_ID, subWorkflowInstance.getId());
        result.put(DATA_LIST, dataMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * update process instance
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processInstanceJson process instance json
     * @param processInstanceId process instance id
     * @param scheduleTime schedule time
     * @param syncDefine sync define
     * @param flag flag
     * @param locations locations
     * @param connects connects
     * @return update result code
     */
    @Transactional
    @Override
    public Map<String, Object> updateProcessInstance(User loginUser, String projectName, Integer processInstanceId,
                                                     String processInstanceJson, String scheduleTime, Boolean syncDefine,
                                                     Flag flag, String locations, String connects) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        //check project permission
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }

        //check process instance exists
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
        if (processInstance == null) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
            return result;
        }

        //check process instance status
        if (!processInstance.getState().typeIsFinished()) {
            putMsg(result, Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR,
                    processInstance.getName(), processInstance.getState().toString(), "update");
            return result;
        }

        ProcessDefinition processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion());
        ProcessData processData = JSONUtils.parseObject(processInstanceJson, ProcessData.class);
        //check workflow json is valid
        result = processDefinitionService.checkProcessNodeList(processData, processInstanceJson);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }

        Tenant tenant = processService.getTenantForProcess(processData.getTenantId(),
                processDefinition.getUserId());
        setProcessInstance(processInstance, tenant, scheduleTime, processData);
        int updateDefine = 1;
        if (Boolean.TRUE.equals(syncDefine)) {
            processDefinition.setId(processDefineMapper.queryByCode(processInstance.getProcessDefinitionCode()).getId());
            updateDefine = syncDefinition(loginUser, project, locations, connects, processInstance, processDefinition, processData);
            processInstance.setProcessDefinitionVersion(processDefinitionLogMapper.
                    queryMaxVersionForDefinition(processInstance.getProcessDefinitionCode()));
        }
        int update = processService.updateProcessInstance(processInstance);
        if (update > 0 && updateDefine > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.UPDATE_PROCESS_INSTANCE_ERROR);
        }
        return result;
    }

    /**
     * sync definition according process instance
     */
    private int syncDefinition(User loginUser, Project project, String locations, String connects,
                               ProcessInstance processInstance, ProcessDefinition processDefinition,
                               ProcessData processData) {

        String originDefParams = JSONUtils.toJsonString(processData.getGlobalParams());
        processDefinition.setGlobalParams(originDefParams);
        processDefinition.setLocations(locations);
        processDefinition.setConnects(connects);
        processDefinition.setTimeout(processInstance.getTimeout());
        processDefinition.setUpdateTime(new Date());

        return processService.saveProcessDefinition(loginUser, project, processDefinition.getName(),
                processDefinition.getDescription(), locations, connects,
                processData, processDefinition, false);
    }

    /**
     * update process instance attributes
     */
    private void setProcessInstance(ProcessInstance processInstance, Tenant tenant,
                                    String scheduleTime, ProcessData processData) {

        Date schedule = processInstance.getScheduleTime();
        if (scheduleTime != null) {
            schedule = DateUtils.getScheduleDate(scheduleTime);
        }
        processInstance.setScheduleTime(schedule);
        List<Property> globalParamList = processData.getGlobalParams();
        Map<String, String> globalParamMap = Optional.ofNullable(globalParamList)
                .orElse(Collections.emptyList())
                .stream()
                .collect(Collectors.toMap(Property::getProp, Property::getValue));
        String globalParams = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList,
                processInstance.getCmdTypeIfComplement(), schedule);
        processInstance.setTimeout(processData.getTimeout());
        if (tenant != null) {
            processInstance.setTenantCode(tenant.getTenantCode());
        }
        processInstance.setGlobalParams(globalParams);
    }

    /**
     * query parent process instance detail info by sub process instance id
     *
     * @param loginUser login user
     * @param projectName project name
     * @param subId sub process id
     * @return parent instance detail
     */
    @Override
    public Map<String, Object> queryParentInstanceBySubId(User loginUser, String projectName, Integer subId) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }

        ProcessInstance subInstance = processService.findProcessInstanceDetailById(subId);
        if (subInstance == null) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, subId);
            return result;
        }
        if (subInstance.getIsSubProcess() == Flag.NO) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_SUB_PROCESS_INSTANCE, subInstance.getName());
            return result;
        }

        ProcessInstance parentWorkflowInstance = processService.findParentProcessInstance(subId);
        if (parentWorkflowInstance == null) {
            putMsg(result, Status.SUB_PROCESS_INSTANCE_NOT_EXIST);
            return result;
        }
        Map<String, Object> dataMap = new HashMap<>();
        dataMap.put(Constants.PARENT_WORKFLOW_INSTANCE, parentWorkflowInstance.getId());
        result.put(DATA_LIST, dataMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * delete process instance by id, at the same time,delete task instance and their mapping relation data
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processInstanceId process instance id
     * @return delete result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer processInstanceId) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
        if (null == processInstance) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
            return result;
        }

        processService.removeTaskLogFile(processInstanceId);
        // delete database cascade
        int delete = processService.deleteWorkProcessInstanceById(processInstanceId);

        processService.deleteAllSubWorkProcessByParentId(processInstanceId);
        processService.deleteWorkProcessMapByParentId(processInstanceId);

        if (delete > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.DELETE_PROCESS_INSTANCE_BY_ID_ERROR);
        }

        return result;
    }

    /**
     * view process instance variables
     *
     * @param processInstanceId process instance id
     * @return variables data
     */
    @Override
    public Map<String, Object> viewVariables(Integer processInstanceId) {
        Map<String, Object> result = new HashMap<>();

        ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);

        if (processInstance == null) {
            throw new RuntimeException("workflow instance is null");
        }

        Map<String, String> timeParams = BusinessTimeUtils
                .getBusinessTime(processInstance.getCmdTypeIfComplement(),
                        processInstance.getScheduleTime());
        String userDefinedParams = processInstance.getGlobalParams();
        // global params
        List<Property> globalParams = new ArrayList<>();

        // global param string
        String globalParamStr = ParameterUtils.convertParameterPlaceholders(JSONUtils.toJsonString(globalParams), timeParams);
        globalParams = JSONUtils.toList(globalParamStr, Property.class);
        for (Property property : globalParams) {
            timeParams.put(property.getProp(), property.getValue());
        }

        if (userDefinedParams != null && userDefinedParams.length() > 0) {
            globalParams = JSONUtils.toList(userDefinedParams, Property.class);
        }

        Map<String, Map<String, Object>> localUserDefParams = getLocalParams(processInstance, timeParams);

        Map<String, Object> resultMap = new HashMap<>();

        resultMap.put(GLOBAL_PARAMS, globalParams);
        resultMap.put(LOCAL_PARAMS, localUserDefParams);

        result.put(DATA_LIST, resultMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * get local params
     */
    private Map<String, Map<String, Object>> getLocalParams(ProcessInstance processInstance, Map<String, String> timeParams) {
        Map<String, Map<String, Object>> localUserDefParams = new HashMap<>();
        List<TaskInstance> taskInstanceList = taskInstanceMapper.findValidTaskListByProcessId(processInstance.getId(), Flag.YES);
        for (TaskInstance taskInstance : taskInstanceList) {
            TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(
                    taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion());
            String parameter = taskDefinitionLog.getTaskParams();
            Map<String, String> map = JSONUtils.toMap(parameter);
            String localParams = map.get(LOCAL_PARAMS);
            if (localParams != null && !localParams.isEmpty()) {
                localParams = ParameterUtils.convertParameterPlaceholders(localParams, timeParams);
                List<Property> localParamsList = JSONUtils.toList(localParams, Property.class);

                Map<String, Object> localParamsMap = new HashMap<>();
                localParamsMap.put(TASK_TYPE, taskDefinitionLog.getTaskType());
                localParamsMap.put(LOCAL_PARAMS_LIST, localParamsList);
                if (CollectionUtils.isNotEmpty(localParamsList)) {
                    localUserDefParams.put(taskDefinitionLog.getName(), localParamsMap);
                }
            }
        }
        return localUserDefParams;
    }

    /**
     * encapsulation gantt structure
     *
     * @param processInstanceId process instance id
     * @return gantt tree data
     * @throws Exception exception when json parse
     */
    @Override
    public Map<String, Object> viewGantt(Integer processInstanceId) throws Exception {
        Map<String, Object> result = new HashMap<>();
        ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);

        if (processInstance == null) {
            throw new RuntimeException("workflow instance is null");
        }

        ProcessDefinition processDefinition = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(
                processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion()
        );
        GanttDto ganttDto = new GanttDto();
        DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition);
        //topological sort
        List<String> nodeList = dag.topologicalSort();

        ganttDto.setTaskNames(nodeList);

        List<Task> taskList = new ArrayList<>();
        for (String node : nodeList) {
            TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstanceId, node);
            if (taskInstance == null) {
                continue;
            }
            Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
            Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
            Task task = new Task();
            task.setTaskName(taskInstance.getName());
            task.getStartDate().add(startTime.getTime());
            task.getEndDate().add(endTime.getTime());
            task.setIsoStart(startTime);
            task.setIsoEnd(endTime);
            task.setStatus(taskInstance.getState().toString());
            task.setExecutionDate(taskInstance.getStartTime());
            task.setDuration(DateUtils.format2Readable(endTime.getTime() - startTime.getTime()));
            taskList.add(task);
        }
        ganttDto.setTasks(taskList);

        result.put(DATA_LIST, ganttDto);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process instance by processDefinitionCode and stateArray
     *
     * @param processDefinitionCode processDefinitionCode
     * @param states states array
     * @return process instance list
     */
    @Override
    public List<ProcessInstance> queryByProcessDefineCodeAndStatus(Long processDefinitionCode, int[] states) {
        return processInstanceMapper.queryByProcessDefineCodeAndStatus(processDefinitionCode, states);
    }

    /**
     * query process instance by processDefinitionCode
     *
     * @param processDefinitionCode processDefinitionCode
     * @param size size
     * @return process instance list
     */
    @Override
    public List<ProcessInstance> queryByProcessDefineCode(Long processDefinitionCode, int size) {
        return processInstanceMapper.queryByProcessDefineCode(processDefinitionCode, size);
    }
}
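Editor's note: the minimal reproduction promised in the issue body above. `getLocalParams` binds `taskParams` to `Map<String, String>`, and Jackson cannot coerce the nested `localParams` array into a `String`; binding to `Map<String, Object>` succeeds. This is a standalone sketch using plain Jackson, not DolphinScheduler code.

```java
import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TaskParamsDemo {

    public static void main(String[] args) throws Exception {
        String taskParams = "{\"resourceList\":[],"
                + "\"localParams\":[{\"prop\":\"BATCH_TIME\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"20210517131849\"}],"
                + "\"rawScript\":\"echo hi\"}";

        ObjectMapper mapper = new ObjectMapper();

        // Fails: "localParams" is a JSON array, which cannot become a String value
        try {
            mapper.readValue(taskParams, new TypeReference<Map<String, String>>() {});
        } catch (Exception e) {
            System.out.println("Map<String, String> binding fails: " + e.getClass().getSimpleName());
        }

        // Works: nested values are bound as List/Map instances
        Map<String, Object> ok = mapper.readValue(taskParams, new TypeReference<Map<String, Object>>() {});
        System.out.println(ok.get("localParams"));
    }
}
```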
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,483
[Bug][Api] Can't view variables
**Describe the bug** When I want to view the variables defined in a process instance, it throws an exception. **To Reproduce** Steps to reproduce the behavior, for example: 1. Create a process definition 2. Add localParams 3. Execute the process definition 4. View params in the process instance **Screenshots** ![image](https://user-images.githubusercontent.com/22415594/118438653-4b46e400-b717-11eb-94d4-5e187a377d51.png) **Which version of Dolphin Scheduler:** -[dev] **Additional context** This issue is caused by deserializing the taskParams in TaskDefinitionLog. https://github.com/apache/dolphinscheduler/blob/68301db6b914ff4002bfbc531c6810864d8e47c2/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessInstanceServiceImpl.java#L664-L666 For example, the JSON attributes contain lists, so the value cannot be deserialized as a string:

```json
{
    "resourceList":[],
    "localParams":[
        {
            "prop":"BATCH_TIME",
            "direct":"IN",
            "type":"VARCHAR",
            "value":"20210517131849"
        }
    ],
    "rawScript":"echo \"${BATCH_TIME}\"",
    "conditionResult":"{\"successNode\":[\"\"],\"failedNode\":[\"\"]}",
    "dependence":"{}"
}
```

Also, multiple places use different ways to deserialize the `taskParams`. https://github.com/apache/dolphinscheduler/blob/68301db6b914ff4002bfbc531c6810864d8e47c2/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java#L1611 I think it is better to use the same way to do this transform; otherwise, once we make changes, we need to change many places. The `taskParams` is transported by the front-end and stored in the database as a JSON string. We use a Map to represent this field in the backend; I think it is better to define a specific class to express the `taskParams`, which may be helpful for deserialization and code maintenance.
https://github.com/apache/dolphinscheduler/issues/5483
https://github.com/apache/dolphinscheduler/pull/5631
8bf042ae6ef7576209a0489e784684f4960ae6e0
0d5037e7c37d7903d9172f165b348058f1ddbf88
2021-05-17T06:24:02Z
java
2021-06-13T03:43:53Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.utils;

import static java.nio.charset.StandardCharsets.UTF_8;

import static com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_EMPTY_ARRAY_AS_NULL_OBJECT;
import static com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES;
import static com.fasterxml.jackson.databind.DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL;
import static com.fasterxml.jackson.databind.MapperFeature.REQUIRE_SETTERS_FOR_GETTERS;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import com.fasterxml.jackson.databind.type.CollectionType;

/**
 * json utils
 */
public class JSONUtils {

    private static final Logger logger = LoggerFactory.getLogger(JSONUtils.class);

    /**
     * can use static singleton, inject: just make sure to reuse!
     */
    private static final ObjectMapper objectMapper = new ObjectMapper()
            .configure(FAIL_ON_UNKNOWN_PROPERTIES, false)
            .configure(ACCEPT_EMPTY_ARRAY_AS_NULL_OBJECT, true)
            .configure(READ_UNKNOWN_ENUM_VALUES_AS_NULL, true)
            .configure(REQUIRE_SETTERS_FOR_GETTERS, true)
            .setTimeZone(TimeZone.getDefault());

    private JSONUtils() {
        throw new UnsupportedOperationException("Construct JSONUtils");
    }

    public static ArrayNode createArrayNode() {
        return objectMapper.createArrayNode();
    }

    public static ObjectNode createObjectNode() {
        return objectMapper.createObjectNode();
    }

    public static JsonNode toJsonNode(Object obj) {
        return objectMapper.valueToTree(obj);
    }

    /**
     * json representation of object
     *
     * @param object object
     * @param feature feature
     * @return object to json string
     */
    public static String toJsonString(Object object, SerializationFeature feature) {
        try {
            ObjectWriter writer = objectMapper.writer(feature);
            return writer.writeValueAsString(object);
        } catch (Exception e) {
            logger.error("object to json exception!", e);
        }
        return null;
    }

    /**
     * This method deserializes the specified Json into an object of the specified class. It is not
     * suitable to use if the specified class is a generic type since it will not have the generic
     * type information because of the Type Erasure feature of Java. Therefore, this method should not
     * be used if the desired type is a generic type. Note that this method works fine if any of
     * the fields of the specified object are generics, just the object itself should not be a
     * generic type.
     *
     * @param json the string from which the object is to be deserialized
     * @param clazz the class of T
     * @param <T> T
     * @return an object of type T from the string
     */
    public static <T> T parseObject(String json, Class<T> clazz) {
        if (StringUtils.isEmpty(json)) {
            return null;
        }
        try {
            return objectMapper.readValue(json, clazz);
        } catch (Exception e) {
            logger.error("parse object exception!", e);
        }
        return null;
    }

    /**
     * deserialize
     *
     * @param src byte array
     * @param clazz class
     * @param <T> deserialize type
     * @return deserialize type
     */
    public static <T> T parseObject(byte[] src, Class<T> clazz) {
        if (src == null) {
            return null;
        }
        String json = new String(src, UTF_8);
        return parseObject(json, clazz);
    }

    /**
     * json to list
     *
     * @param json json string
     * @param clazz class
     * @param <T> T
     * @return list
     */
    public static <T> List<T> toList(String json, Class<T> clazz) {
        if (StringUtils.isEmpty(json)) {
            return Collections.emptyList();
        }
        try {
            CollectionType listType = objectMapper.getTypeFactory().constructCollectionType(ArrayList.class, clazz);
            return objectMapper.readValue(json, listType);
        } catch (Exception e) {
            logger.error("parse list exception!", e);
        }
        return Collections.emptyList();
    }

    /**
     * check json object valid
     *
     * @param json json
     * @return true if valid
     */
    public static boolean checkJsonValid(String json) {
        if (StringUtils.isEmpty(json)) {
            return false;
        }
        try {
            objectMapper.readTree(json);
            return true;
        } catch (IOException e) {
            logger.error("check json object valid exception!", e);
        }
        return false;
    }

    /**
     * Method for finding a JSON Object field with specified name in this
     * node or its child nodes, and returning value it has.
     * If no matching field is found in this node or its descendants, returns null.
     *
     * @param jsonNode json node
     * @param fieldName Name of field to look for
     * @return Value of first matching node found, if any; null if none
     */
    public static String findValue(JsonNode jsonNode, String fieldName) {
        JsonNode node = jsonNode.findValue(fieldName);
        if (node == null) {
            return null;
        }
        return node.asText();
    }

    /**
     * json to map
     * {@link #toMap(String, Class, Class)}
     *
     * @param json json
     * @return json to map
     */
    public static Map<String, String> toMap(String json) {
        return parseObject(json, new TypeReference<Map<String, String>>() {});
    }

    /**
     * json to map
     *
     * @param json json
     * @param classK classK
     * @param classV classV
     * @param <K> K
     * @param <V> V
     * @return to map
     */
    public static <K, V> Map<K, V> toMap(String json, Class<K> classK, Class<V> classV) {
        return parseObject(json, new TypeReference<Map<K, V>>() {});
    }

    /**
     * json to object
     *
     * @param json json string
     * @param type type reference
     * @param <T> T
     * @return return parse object
     */
    public static <T> T parseObject(String json, TypeReference<T> type) {
        if (StringUtils.isEmpty(json)) {
            return null;
        }
        try {
            return objectMapper.readValue(json, type);
        } catch (Exception e) {
            logger.error("json to map exception!", e);
        }
        return null;
    }

    /**
     * object to json string
     *
     * @param object object
     * @return json string
     */
    public static String toJsonString(Object object) {
        try {
            return objectMapper.writeValueAsString(object);
        } catch (Exception e) {
            throw new RuntimeException("Object json deserialization exception.", e);
        }
    }

    /**
     * serialize to json byte
     *
     * @param obj object
     * @param <T> object type
     * @return byte array
     */
    public static <T> byte[] toJsonByteArray(T obj) {
        if (obj == null) {
            return null;
        }
        String json = "";
        try {
            json = toJsonString(obj);
        } catch (Exception e) {
            logger.error("json serialize exception.", e);
        }
        return json.getBytes(UTF_8);
    }

    public static ObjectNode parseObject(String text) {
        try {
            if (text.isEmpty()) {
                return parseObject(text, ObjectNode.class);
            } else {
                return (ObjectNode) objectMapper.readTree(text);
            }
        } catch (Exception e) {
            throw new RuntimeException("String json deserialization exception.", e);
        }
    }

    public static ArrayNode parseArray(String text) {
        try {
            return (ArrayNode) objectMapper.readTree(text);
        } catch (Exception e) {
            throw new RuntimeException("Json deserialization exception.", e);
        }
    }

    /**
     * json serializer
     */
    public static class JsonDataSerializer extends JsonSerializer<String> {

        @Override
        public void serialize(String value, JsonGenerator gen, SerializerProvider provider) throws IOException {
            gen.writeRawValue(value);
        }
    }

    /**
     * json data deserializer
     */
    public static class JsonDataDeserializer extends JsonDeserializer<String> {

        @Override
        public String deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
            JsonNode node = p.getCodec().readTree(p);
            if (node instanceof TextNode) {
                return node.asText();
            } else {
                return node.toString();
            }
        }
    }
}
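For quick orientation, a minimal usage sketch of the utility above (a sketch only: it assumes the `dolphinscheduler-common` module and Jackson are on the classpath, and the demo class name and string literals are illustrative, not part of the repository):

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.dolphinscheduler.common.utils.JSONUtils;

import com.fasterxml.jackson.core.type.TypeReference;

public class JsonUtilsDemo {

    public static void main(String[] args) {
        // Round-trip a simple map through the shared ObjectMapper.
        Map<String, String> source = new HashMap<>();
        source.put("id", "1001");
        source.put("name", "Jobs");
        String json = JSONUtils.toJsonString(source);
        Map<String, String> parsed = JSONUtils.toMap(json);
        System.out.println(parsed.get("name")); // Jobs

        // For generic targets, the TypeReference overload keeps the full type
        // information that Class-based parsing loses to type erasure.
        List<Map<String, String>> rows = JSONUtils.parseObject(
                "[{\"k\":\"v\"}]", new TypeReference<List<Map<String, String>>>() {});
        System.out.println(rows.size()); // 1
    }
}
```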
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,483
[Bug][Api] Can't view variables
**Describe the bug**

When I want to view the variables defined in a process instance, it throws an exception.

**To Reproduce**

Steps to reproduce the behavior, for example:
1. Create a process definition
2. Add localParams
3. Execute the process definition
4. View params in the process instance

**Screenshots**

![image](https://user-images.githubusercontent.com/22415594/118438653-4b46e400-b717-11eb-94d4-5e187a377d51.png)

**Which version of Dolphin Scheduler:**
-[dev]

**Additional context**

This issue is caused by deserializing the `taskParams` in `TaskDefinitionLog`.

https://github.com/apache/dolphinscheduler/blob/68301db6b914ff4002bfbc531c6810864d8e47c2/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessInstanceServiceImpl.java#L664-L666

For example, the JSON below contains a list attribute, so it cannot be deserialized as a string.

```json
{
    "resourceList": [],
    "localParams": [
        {
            "prop": "BATCH_TIME",
            "direct": "IN",
            "type": "VARCHAR",
            "value": "20210517131849"
        }
    ],
    "rawScript": "echo \"${BATCH_TIME}\"",
    "conditionResult": "{\"successNode\":[\"\"],\"failedNode\":[\"\"]}",
    "dependence": "{}"
}
```

And multiple places use different ways to deserialize the `taskParams`.

https://github.com/apache/dolphinscheduler/blob/68301db6b914ff4002bfbc531c6810864d8e47c2/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java#L1611

I think it is better to use the same way to do this transform; otherwise, once we make a change, we need to change many places. The `taskParams` field is transported by the front end and stored in the database as a JSON string. We use a `Map` to represent this field in the backend; I think it is better to define a specific class to express `taskParams`, which would help with deserialization and code maintenance.
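A minimal sketch of the failure mode described above, using the `JSONUtils` helper from this codebase (the demo class name and literals are illustrative assumptions):

```java
import java.util.Map;

import org.apache.dolphinscheduler.common.utils.JSONUtils;

import com.fasterxml.jackson.core.type.TypeReference;

public class TaskParamsParseDemo {

    public static void main(String[] args) {
        // taskParams as stored for a shell task: localParams is a JSON array,
        // so not every value in the object is a plain string.
        String taskParams = "{\"resourceList\":[],"
                + "\"localParams\":[{\"prop\":\"BATCH_TIME\",\"direct\":\"IN\","
                + "\"type\":\"VARCHAR\",\"value\":\"20210517131849\"}],"
                + "\"rawScript\":\"echo \\\"${BATCH_TIME}\\\"\"}";

        // Deserializing into Map<String, String> is what breaks: Jackson cannot
        // coerce the non-empty localParams array into a String, so JSONUtils
        // catches the exception internally and returns null.
        Map<String, String> asStrings = JSONUtils.toMap(taskParams);
        System.out.println(asStrings); // null

        // A Map<String, Object> target keeps nested arrays and objects intact.
        Map<String, Object> asObjects = JSONUtils.parseObject(
                taskParams, new TypeReference<Map<String, Object>>() {});
        System.out.println(asObjects.get("localParams")); // a List of Maps
    }
}
```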
https://github.com/apache/dolphinscheduler/issues/5483
https://github.com/apache/dolphinscheduler/pull/5631
8bf042ae6ef7576209a0489e784684f4960ae6e0
0d5037e7c37d7903d9172f165b348058f1ddbf88
2021-05-17T06:24:02Z
java
2021-06-13T03:43:53Z
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/JSONUtilsTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.utils;

import org.apache.dolphinscheduler.common.enums.DataType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.junit.Assert;
import org.junit.Test;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class JSONUtilsTest {

    @Test
    public void createArrayNodeTest() {
        Property property = new Property();
        property.setProp("ds");
        property.setDirect(Direct.IN);
        property.setType(DataType.VARCHAR);
        property.setValue("sssssss");
        String str = "[{\"prop\":\"ds\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"sssssss\"},{\"prop\":\"ds\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"sssssss\"}]";
        JsonNode jsonNode = JSONUtils.toJsonNode(property);
        ArrayNode arrayNode = JSONUtils.createArrayNode();
        ArrayList<JsonNode> objects = new ArrayList<>();
        objects.add(jsonNode);
        objects.add(jsonNode);
        ArrayNode jsonNodes = arrayNode.addAll(objects);
        String s = JSONUtils.toJsonString(jsonNodes);
        Assert.assertEquals(s, str);
    }

    @Test
    public void toJsonNodeTest() {
        Property property = new Property();
        property.setProp("ds");
        property.setDirect(Direct.IN);
        property.setType(DataType.VARCHAR);
        property.setValue("sssssss");
        String str = "{\"prop\":\"ds\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"sssssss\"}";
        JsonNode jsonNodes = JSONUtils.toJsonNode(property);
        String s = JSONUtils.toJsonString(jsonNodes);
        Assert.assertEquals(s, str);
    }

    @Test
    public void createObjectNodeTest() {
        String jsonStr = "{\"a\":\"b\",\"b\":\"d\"}";
        ObjectNode objectNode = JSONUtils.createObjectNode();
        objectNode.put("a", "b");
        objectNode.put("b", "d");
        String s = JSONUtils.toJsonString(objectNode);
        Assert.assertEquals(s, jsonStr);
    }

    @Test
    public void toMap() {
        String jsonStr = "{\"id\":\"1001\",\"name\":\"Jobs\"}";
        Map<String, String> models = JSONUtils.toMap(jsonStr);
        Assert.assertEquals("1001", models.get("id"));
        Assert.assertEquals("Jobs", models.get("name"));
    }

    @Test
    public void convert2Property() {
        Property property = new Property();
        property.setProp("ds");
        property.setDirect(Direct.IN);
        property.setType(DataType.VARCHAR);
        property.setValue("sssssss");
        String str = "{\"direct\":\"IN\",\"prop\":\"ds\",\"type\":\"VARCHAR\",\"value\":\"sssssss\"}";
        Property property1 = JSONUtils.parseObject(str, Property.class);
        Direct direct = property1.getDirect();
        Assert.assertEquals(Direct.IN, direct);
    }

    @Test
    public void string2MapTest() {
        String str = list2String();
        List<LinkedHashMap> maps = JSONUtils.toList(str, LinkedHashMap.class);
        Assert.assertEquals(1, maps.size());
        Assert.assertEquals("mysql200", maps.get(0).get("mysql service name"));
        Assert.assertEquals("192.168.xx.xx", maps.get(0).get("mysql address"));
        Assert.assertEquals("3306", maps.get(0).get("port"));
        Assert.assertEquals("80", maps.get(0).get("no index of number"));
        Assert.assertEquals("190", maps.get(0).get("database client connections"));
    }

    public String list2String() {
        LinkedHashMap<String, String> map1 = new LinkedHashMap<>();
        map1.put("mysql service name", "mysql200");
        map1.put("mysql address", "192.168.xx.xx");
        map1.put("port", "3306");
        map1.put("no index of number", "80");
        map1.put("database client connections", "190");
        List<LinkedHashMap<String, String>> maps = new ArrayList<>();
        maps.add(0, map1);
        String resultJson = JSONUtils.toJsonString(maps);
        return resultJson;
    }

    @Test
    public void testParseObject() {
        Assert.assertNull(JSONUtils.parseObject(""));
        Assert.assertNull(JSONUtils.parseObject("foo", String.class));
    }

    @Test
    public void testJsonByteArray() {
        String str = "foo";
        byte[] serializeByte = JSONUtils.toJsonByteArray(str);
        String deserialize = JSONUtils.parseObject(serializeByte, String.class);
        Assert.assertEquals(str, deserialize);
        str = null;
        serializeByte = JSONUtils.toJsonByteArray(str);
        deserialize = JSONUtils.parseObject(serializeByte, String.class);
        Assert.assertNull(deserialize);
    }

    @Test
    public void testToList() {
        Assert.assertEquals(new ArrayList(), JSONUtils.toList("A1B2C3", null));
        Assert.assertEquals(new ArrayList(), JSONUtils.toList("", null));
    }

    @Test
    public void testCheckJsonValid() {
        Assert.assertTrue(JSONUtils.checkJsonValid("3"));
        Assert.assertFalse(JSONUtils.checkJsonValid(""));
    }

    @Test
    public void testFindValue() {
        Assert.assertNull(JSONUtils.findValue(
                new ArrayNode(new JsonNodeFactory(true)), null));
    }

    @Test
    public void testToMap() {
        Map<String, String> map = new HashMap<>();
        map.put("foo", "bar");
        Assert.assertTrue(map.equals(JSONUtils.toMap(
                "{\n" + "\"foo\": \"bar\"\n" + "}")));
        Assert.assertFalse(map.equals(JSONUtils.toMap(
                "{\n" + "\"bar\": \"foo\"\n" + "}")));
        Assert.assertNull(JSONUtils.toMap("3"));
        Assert.assertNull(JSONUtils.toMap(null));
        Assert.assertNull(JSONUtils.toMap("3", null, null));
        Assert.assertNull(JSONUtils.toMap(null, null, null));
        String str = "{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"#!/bin/bash\\necho \\\"shell-1\\\"\"}";
        Map<String, String> m = JSONUtils.toMap(str);
        Assert.assertNotNull(m);
    }

    @Test
    public void testToJsonString() {
        Map<String, Object> map = new HashMap<>();
        map.put("foo", "bar");
        Assert.assertEquals("{\"foo\":\"bar\"}", JSONUtils.toJsonString(map));
        Assert.assertEquals(String.valueOf((Object) null), JSONUtils.toJsonString(null));
        Assert.assertEquals("{\"foo\":\"bar\"}", JSONUtils.toJsonString(map, SerializationFeature.WRITE_NULL_MAP_VALUES));
    }

    @Test
    public void parseObject() {
        String str = "{\"color\":\"yellow\",\"type\":\"renault\"}";
        ObjectNode node = JSONUtils.parseObject(str);
        Assert.assertEquals("yellow", node.path("color").asText());
        node.put("price", 100);
        Assert.assertEquals(100, node.path("price").asInt());
        node.put("color", "red");
        Assert.assertEquals("red", node.path("color").asText());
    }

    @Test
    public void parseArray() {
        String str = "[{\"color\":\"yellow\",\"type\":\"renault\"}]";
        ArrayNode node = JSONUtils.parseArray(str);
        Assert.assertEquals("yellow", node.path(0).path("color").asText());
    }

    @Test
    public void jsonDataDeserializerTest() {
        String a = "{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\","
                + "\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,"
                + "\"id\":\"tasks-86823\",\"maxRetryTimes\":1,\"name\":\"shell test\","
                + "\"params\":\"{\\\"resourceList\\\":[],\\\"localParams\\\":[],\\\"rawScript\\\":\\\"echo "
                + "'yyc'\\\"}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\","
                + "\"taskInstancePriority\":\"HIGHEST\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},"
                + "\"timeout\":\"{}\",\"type\":\"SHELL\",\"workerGroup\":\"default\"}";
        TaskNode taskNode = JSONUtils.parseObject(a, TaskNode.class);
        Assert.assertTrue(true);
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,610
[Question] Something wrong with build docker image from source
**Describe the question**

I tried building an image from a clean checkout of the source code.

![image](https://user-images.githubusercontent.com/84900511/121340374-20386480-c952-11eb-81d8-c201586a47c0.png)

and then set it up in this way:

![image](https://user-images.githubusercontent.com/84900511/121340450-3514f800-c952-11eb-8042-4bdd56df2d7b.png)

But when I tried to create a project, it failed.

![image](https://user-images.githubusercontent.com/84900511/121341733-896ca780-c953-11eb-8f3c-d494bddea536.png)

**Which version of DolphinScheduler:**
-[1.3.6]

**Additional context**

I also found that the image I pulled does not have the same size as the image I built. Does anyone know the reason?

![image](https://user-images.githubusercontent.com/84900511/121340737-84f3bf00-c952-11eb-8dec-10f6c4d28b7f.png)
https://github.com/apache/dolphinscheduler/issues/5610
https://github.com/apache/dolphinscheduler/pull/5611
0d5037e7c37d7903d9172f165b348058f1ddbf88
c5bea3c77430e0b46a2f5a3a91a7fbbc78874196
2021-06-09T10:45:43Z
java
2021-06-15T06:45:39Z
sql/dolphinscheduler_mysql.sql
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

SET FOREIGN_KEY_CHECKS=0;

-- ----------------------------
-- Table structure for QRTZ_BLOB_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`;
CREATE TABLE `QRTZ_BLOB_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `BLOB_DATA` blob,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_BLOB_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_CALENDARS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CALENDARS`;
CREATE TABLE `QRTZ_CALENDARS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `CALENDAR_NAME` varchar(200) NOT NULL,
  `CALENDAR` blob NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_CALENDARS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_CRON_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`;
CREATE TABLE `QRTZ_CRON_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `CRON_EXPRESSION` varchar(120) NOT NULL,
  `TIME_ZONE_ID` varchar(80) DEFAULT NULL,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_CRON_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_FIRED_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`;
CREATE TABLE `QRTZ_FIRED_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `ENTRY_ID` varchar(200) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `INSTANCE_NAME` varchar(200) NOT NULL,
  `FIRED_TIME` bigint(13) NOT NULL,
  `SCHED_TIME` bigint(13) NOT NULL,
  `PRIORITY` int(11) NOT NULL,
  `STATE` varchar(16) NOT NULL,
  `JOB_NAME` varchar(200) DEFAULT NULL,
  `JOB_GROUP` varchar(200) DEFAULT NULL,
  `IS_NONCONCURRENT` varchar(1) DEFAULT NULL,
  `REQUESTS_RECOVERY` varchar(1) DEFAULT NULL,
  PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`),
  KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`),
  KEY `IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`),
  KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_FIRED_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_JOB_DETAILS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`;
CREATE TABLE `QRTZ_JOB_DETAILS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `JOB_NAME` varchar(200) NOT NULL,
  `JOB_GROUP` varchar(200) NOT NULL,
  `DESCRIPTION` varchar(250) DEFAULT NULL,
  `JOB_CLASS_NAME` varchar(250) NOT NULL,
  `IS_DURABLE` varchar(1) NOT NULL,
  `IS_NONCONCURRENT` varchar(1) NOT NULL,
  `IS_UPDATE_DATA` varchar(1) NOT NULL,
  `REQUESTS_RECOVERY` varchar(1) NOT NULL,
  `JOB_DATA` blob,
  PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`),
  KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_JOB_DETAILS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_LOCKS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_LOCKS`;
CREATE TABLE `QRTZ_LOCKS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `LOCK_NAME` varchar(40) NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_LOCKS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`;
CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_SCHEDULER_STATE
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`;
CREATE TABLE `QRTZ_SCHEDULER_STATE` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `INSTANCE_NAME` varchar(200) NOT NULL,
  `LAST_CHECKIN_TIME` bigint(13) NOT NULL,
  `CHECKIN_INTERVAL` bigint(13) NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_SCHEDULER_STATE
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `REPEAT_COUNT` bigint(7) NOT NULL,
  `REPEAT_INTERVAL` bigint(12) NOT NULL,
  `TIMES_TRIGGERED` bigint(10) NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_SIMPLE_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `STR_PROP_1` varchar(512) DEFAULT NULL,
  `STR_PROP_2` varchar(512) DEFAULT NULL,
  `STR_PROP_3` varchar(512) DEFAULT NULL,
  `INT_PROP_1` int(11) DEFAULT NULL,
  `INT_PROP_2` int(11) DEFAULT NULL,
  `LONG_PROP_1` bigint(20) DEFAULT NULL,
  `LONG_PROP_2` bigint(20) DEFAULT NULL,
  `DEC_PROP_1` decimal(13,4) DEFAULT NULL,
  `DEC_PROP_2` decimal(13,4) DEFAULT NULL,
  `BOOL_PROP_1` varchar(1) DEFAULT NULL,
  `BOOL_PROP_2` varchar(1) DEFAULT NULL,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_SIMPROP_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_TRIGGERS`;
CREATE TABLE `QRTZ_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `JOB_NAME` varchar(200) NOT NULL,
  `JOB_GROUP` varchar(200) NOT NULL,
  `DESCRIPTION` varchar(250) DEFAULT NULL,
  `NEXT_FIRE_TIME` bigint(13) DEFAULT NULL,
  `PREV_FIRE_TIME` bigint(13) DEFAULT NULL,
  `PRIORITY` int(11) DEFAULT NULL,
  `TRIGGER_STATE` varchar(16) NOT NULL,
  `TRIGGER_TYPE` varchar(8) NOT NULL,
  `START_TIME` bigint(13) NOT NULL,
  `END_TIME` bigint(13) DEFAULT NULL,
  `CALENDAR_NAME` varchar(200) DEFAULT NULL,
  `MISFIRE_INSTR` smallint(2) DEFAULT NULL,
  `JOB_DATA` blob,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`),
  KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`),
  KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`),
  KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
  KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
  KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`),
  KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`),
  KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`),
  KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`),
  KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
  CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_access_token
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_access_token`;
CREATE TABLE `t_ds_access_token` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
  `token` varchar(64) DEFAULT NULL COMMENT 'token',
  `expire_time` datetime DEFAULT NULL COMMENT 'end time of token ',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_access_token
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_alert
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert`;
CREATE TABLE `t_ds_alert` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `title` varchar(64) DEFAULT NULL COMMENT 'title',
  `content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)',
  `alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed',
  `log` text COMMENT 'log',
  `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_alert
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alertgroup`;
CREATE TABLE `t_ds_alertgroup` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `alert_instance_ids` varchar(255) DEFAULT NULL COMMENT 'alert instance ids',
  `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id',
  `group_name` varchar(255) DEFAULT NULL COMMENT 'group name',
  `description` varchar(255) DEFAULT NULL,
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_alertgroup_name_UN` (`group_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_command`;
CREATE TABLE `t_ds_command` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread',
  `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
  `command_param` text COMMENT 'json command parameters',
  `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward',
  `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue',
  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group',
  `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
  `start_time` datetime DEFAULT NULL COMMENT 'start time',
  `executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) COMMENT 'worker group',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_command
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_datasource
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_datasource`;
CREATE TABLE `t_ds_datasource` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `name` varchar(64) NOT NULL COMMENT 'data source name',
  `note` varchar(255) DEFAULT NULL COMMENT 'description',
  `type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark',
  `user_id` int(11) NOT NULL COMMENT 'the creator id',
  `connection_params` text NOT NULL COMMENT 'json connection params',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_datasource_name_UN` (`name`, `type`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_datasource
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_error_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_error_command`;
CREATE TABLE `t_ds_error_command` (
  `id` int(11) NOT NULL COMMENT 'key',
  `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
  `executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
  `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
  `command_param` text COMMENT 'json command parameters',
  `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type',
  `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy',
  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
  `schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time',
  `start_time` datetime DEFAULT NULL COMMENT 'start time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) COMMENT 'worker group',
  `message` text COMMENT 'message',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;

-- ----------------------------
-- Records of t_ds_error_command
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_process_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition`;
CREATE TABLE `t_ds_process_definition` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(255) DEFAULT NULL COMMENT 'process definition name',
  `version` int(11) DEFAULT NULL COMMENT 'process definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
  `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
  `global_params` text COMMENT 'global parameters',
  `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
  `locations` text COMMENT 'Node location information',
  `connects` text COMMENT 'Node connection information',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `timeout` int(11) DEFAULT '0' COMMENT 'time out, unit: minute',
  `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`,`code`),
  UNIQUE KEY `process_unique` (`name`,`project_code`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_process_definition
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_process_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition_log`;
CREATE TABLE `t_ds_process_definition_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'process definition name',
  `version` int(11) DEFAULT NULL COMMENT 'process definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
  `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
  `global_params` text COMMENT 'global parameters',
  `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
  `locations` text COMMENT 'Node location information',
  `connects` text COMMENT 'Node connection information',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute',
  `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_task_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition`;
CREATE TABLE `t_ds_task_definition` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
  `version` int(11) DEFAULT NULL COMMENT 'task definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
  `task_type` varchar(50) NOT NULL COMMENT 'task type',
  `task_params` longtext COMMENT 'job custom parameters',
  `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
  `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
  `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
  `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
  `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
  `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
  `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
  `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
  `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
  `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`,`code`),
  UNIQUE KEY `task_unique` (`name`,`project_code`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_task_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition_log`;
CREATE TABLE `t_ds_task_definition_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
  `version` int(11) DEFAULT NULL COMMENT 'task definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
  `task_type` varchar(50) NOT NULL COMMENT 'task type',
  `task_params` text COMMENT 'job custom parameters',
  `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
  `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
  `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
  `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
  `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
  `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
  `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
  `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
  `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
  `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_task_relation
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_task_relation`;
CREATE TABLE `t_ds_process_task_relation` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `name` varchar(200) DEFAULT NULL COMMENT 'relation name',
  `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `process_definition_code` bigint(20) NOT NULL COMMENT 'process code',
  `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code',
  `pre_task_version` int(11) NOT NULL COMMENT 'pre task version',
  `post_task_code` bigint(20) NOT NULL COMMENT 'post task code',
  `post_task_version` int(11) NOT NULL COMMENT 'post task version',
  `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay',
  `condition_params` text COMMENT 'condition params(json)',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_task_relation_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_task_relation_log`;
CREATE TABLE `t_ds_process_task_relation_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `name` varchar(200) DEFAULT NULL COMMENT 'relation name',
  `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `process_definition_code` bigint(20) NOT NULL COMMENT 'process code',
  `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code',
  `pre_task_version` int(11) NOT NULL COMMENT 'pre task version',
  `post_task_code` bigint(20) NOT NULL COMMENT 'post task code',
  `post_task_version` int(11) NOT NULL COMMENT 'post task version',
  `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay',
  `condition_params` text COMMENT 'condition params(json)',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_instance`;
CREATE TABLE `t_ds_process_instance` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `name` varchar(255) DEFAULT NULL COMMENT 'process instance name',
  `process_definition_version` int(11) DEFAULT NULL COMMENT 'process definition version',
  `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code',
  `state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
  `recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance',
  `start_time` datetime DEFAULT NULL COMMENT 'process instance start time',
  `end_time` datetime DEFAULT NULL COMMENT 'process instance end time',
  `run_times` int(11) DEFAULT NULL COMMENT 'process instance run times',
  `host` varchar(135) DEFAULT NULL COMMENT 'process instance host',
  `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
  `command_param` text COMMENT 'json command parameters',
  `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes',
  `max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times',
  `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed',
  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if success',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
  `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
  `command_start_time` datetime DEFAULT NULL COMMENT 'command start time',
  `global_params` text COMMENT 'global parameters',
  `flag` tinyint(4) DEFAULT '1' COMMENT 'flag',
  `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  `is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process',
  `executor_id` int(11) NOT NULL COMMENT 'executor id',
  `history_cmd` text COMMENT 'history commands of process instance operation',
  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id',
  `timeout` int(11) DEFAULT '0' COMMENT 'time out',
  `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
  `var_pool` longtext COMMENT 'var_pool',
  PRIMARY KEY (`id`),
  KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE,
  KEY `start_time_index` (`start_time`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_process_instance
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_project
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_project`;
CREATE TABLE `t_ds_project` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `name` varchar(100) DEFAULT NULL COMMENT 'project name',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `description` varchar(200) DEFAULT NULL,
  `user_id` int(11) DEFAULT NULL COMMENT 'creator id',
  `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_project
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_queue
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_queue`;
CREATE TABLE `t_ds_queue` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name',
  `queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_queue
-- ----------------------------
INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null);

-- ----------------------------
-- Table structure for t_ds_relation_datasource_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_datasource_user`;
CREATE TABLE `t_ds_relation_datasource_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `datasource_id` int(11) DEFAULT NULL COMMENT 'data source id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_datasource_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_process_instance`;
CREATE TABLE `t_ds_relation_process_instance` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
  `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
  `process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_process_instance
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_project_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_project_user`;
CREATE TABLE `t_ds_relation_project_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `project_id` int(11) DEFAULT NULL COMMENT 'project id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_project_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_resources_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_resources_user`;
CREATE TABLE `t_ds_relation_resources_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `resources_id` int(11) DEFAULT NULL COMMENT 'resource id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_resources_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_udfs_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_udfs_user`;
CREATE TABLE `t_ds_relation_udfs_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'userid',
  `udf_id` int(11) DEFAULT NULL COMMENT 'udf id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_resources
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_resources`;
CREATE TABLE `t_ds_resources` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `alias` varchar(64) DEFAULT NULL COMMENT 'alias',
  `file_name` varchar(64) DEFAULT NULL COMMENT 'file name',
  `description` varchar(255) DEFAULT NULL,
  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
  `type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF',
  `size` bigint(20) DEFAULT NULL COMMENT 'resource size',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `pid` int(11) DEFAULT NULL,
  `full_name` varchar(64) DEFAULT NULL,
  `is_directory` tinyint(4) DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_resources
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_schedules
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_schedules`;
CREATE TABLE `t_ds_schedules` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
  `start_time` datetime NOT NULL COMMENT 'start time',
  `end_time` datetime NOT NULL COMMENT 'end time',
  `timezone_id` varchar(40) DEFAULT NULL COMMENT 'timezoneId',
  `crontab` varchar(255) NOT NULL COMMENT 'crontab description',
  `failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ',
  `warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) DEFAULT '' COMMENT 'worker group id',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_schedules
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_session
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_session`;
CREATE TABLE `t_ds_session` (
  `id` varchar(64) NOT NULL COMMENT 'key',
  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
  `ip` varchar(45) DEFAULT NULL COMMENT 'ip',
  `last_login_time` datetime DEFAULT NULL COMMENT 'last login time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_session
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_task_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_instance`;
CREATE TABLE `t_ds_task_instance` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `name` varchar(255) DEFAULT NULL COMMENT 'task name',
  `task_type` varchar(50) NOT NULL COMMENT 'task type',
  `task_code` bigint(20) NOT NULL COMMENT 'task definition code',
  `task_definition_version` int(11) DEFAULT NULL COMMENT 'task definition version',
  `process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id',
  `state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
  `submit_time` datetime DEFAULT NULL COMMENT 'task submit time',
  `start_time` datetime DEFAULT NULL COMMENT 'task start time',
  `end_time` datetime DEFAULT NULL COMMENT 'task end time',
  `host` varchar(135) DEFAULT NULL COMMENT 'host of task running on',
  `execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host',
  `log_path` varchar(200) DEFAULT NULL COMMENT 'task log path',
  `alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert',
  `retry_times` int(4) DEFAULT '0' COMMENT 'task retry times',
  `pid` int(4) DEFAULT NULL COMMENT 'pid of task',
  `app_link` text COMMENT 'yarn app id',
  `task_params` text COMMENT 'job custom parameters',
  `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
  `retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ',
  `max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times',
  `task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id',
  `executor_id` int(11) DEFAULT NULL,
  `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time',
  `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time',
  `var_pool` longtext COMMENT 'var_pool',
  PRIMARY KEY (`id`),
  KEY `process_instance_id` (`process_instance_id`) USING BTREE,
  CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_task_instance
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_tenant
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_tenant`;
CREATE TABLE `t_ds_tenant` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code',
  `description` varchar(255) DEFAULT NULL,
  `queue_id` int(11) DEFAULT NULL COMMENT 'queue id',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_tenant
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_udfs
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_udfs`;
CREATE TABLE `t_ds_udfs` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `func_name` varchar(100) NOT NULL COMMENT 'UDF function name',
  `class_name` varchar(255) NOT NULL COMMENT 'class of udf',
  `type` tinyint(4) NOT NULL COMMENT 'Udf function type',
  `arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types',
  `database` varchar(255) DEFAULT NULL COMMENT 'data base',
  `description` varchar(255) DEFAULT NULL,
  `resource_id` int(11) NOT NULL COMMENT 'resource id',
  `resource_name` varchar(255) NOT NULL COMMENT 'resource name',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_udfs
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_user`;
CREATE TABLE `t_ds_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id',
  `user_name` varchar(64) DEFAULT NULL COMMENT 'user name',
  `user_password` varchar(64) DEFAULT NULL COMMENT 'user password',
  `user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user',
  `email` varchar(64) DEFAULT NULL COMMENT 'email',
  `phone` varchar(11) DEFAULT NULL COMMENT 'phone',
  `tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `queue` varchar(64) DEFAULT NULL COMMENT 'queue',
  `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable',
  PRIMARY KEY (`id`),
  UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_worker_group
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_worker_group`;
CREATE TABLE `t_ds_worker_group` (
  `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `name` varchar(255) NOT NULL COMMENT 'worker group name',
  `addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]',
  `create_time` datetime NULL DEFAULT NULL COMMENT 'create time',
  `update_time` datetime NULL DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `name_unique` (`name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_worker_group
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_version
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_version`;
CREATE TABLE `t_ds_version` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `version` varchar(200) NOT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `version_UNIQUE` (`version`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version';

-- ----------------------------
-- Records of t_ds_version
-- ----------------------------
INSERT INTO `t_ds_version` VALUES ('1', '1.4.0');

-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');

-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
INSERT INTO `t_ds_user` VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', '[email protected]', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1);

-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
  `id` int NOT NULL AUTO_INCREMENT,
  `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email',
  `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin',
  `plugin_params` text COMMENT 'plugin params',
  `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
  `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`;
CREATE TABLE `t_ds_alert_plugin_instance` (
  `id` int NOT NULL AUTO_INCREMENT,
  `plugin_define_id` int NOT NULL,
  `plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.',
  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
  `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
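A small JDBC smoke test against a database initialized with the script above (a sketch only: the connection URL and credentials are assumptions to adjust for your environment, and the MySQL JDBC driver must be on the classpath):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SchemaSmokeTest {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/dolphinscheduler?useSSL=false",
                "root", "root");
             Statement stmt = conn.createStatement()) {
            // The init script seeds exactly one version row: '1.4.0'.
            try (ResultSet rs = stmt.executeQuery("SELECT version FROM t_ds_version")) {
                while (rs.next()) {
                    System.out.println("schema version: " + rs.getString("version"));
                }
            }
            // The default admin user should also exist after initialization.
            try (ResultSet rs = stmt.executeQuery(
                    "SELECT user_name FROM t_ds_user WHERE user_name = 'admin'")) {
                while (rs.next()) {
                    System.out.println("seed user: " + rs.getString("user_name"));
                }
            }
        }
    }
}
```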
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,610
[Question] Something wrong with building the Docker image from source
**Describe the question** I tried building an image from clean source code. ![image](https://user-images.githubusercontent.com/84900511/121340374-20386480-c952-11eb-81d8-c201586a47c0.png) and then set it up in this way. ![image](https://user-images.githubusercontent.com/84900511/121340450-3514f800-c952-11eb-8042-4bdd56df2d7b.png) But when I tried to create a project, it failed. ![image](https://user-images.githubusercontent.com/84900511/121341733-896ca780-c953-11eb-8f3c-d494bddea536.png) **Which version of DolphinScheduler:** -[1.3.6] **Additional context** I also found that the image I pulled is not the same size as the image I built. Does anyone know the reason? ![image](https://user-images.githubusercontent.com/84900511/121340737-84f3bf00-c952-11eb-8dec-10f6c4d28b7f.png)
https://github.com/apache/dolphinscheduler/issues/5610
https://github.com/apache/dolphinscheduler/pull/5611
0d5037e7c37d7903d9172f165b348058f1ddbf88
c5bea3c77430e0b46a2f5a3a91a7fbbc78874196
2021-06-09T10:45:43Z
java
2021-06-15T06:45:39Z
sql/dolphinscheduler_postgre.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; DROP TABLE IF EXISTS QRTZ_LOCKS; DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; DROP TABLE IF EXISTS QRTZ_TRIGGERS; DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; DROP TABLE IF EXISTS QRTZ_CALENDARS; CREATE TABLE QRTZ_JOB_DETAILS( SCHED_NAME character varying(120) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, JOB_CLASS_NAME character varying(250) NOT NULL, IS_DURABLE boolean NOT NULL, IS_NONCONCURRENT boolean NOT NULL, IS_UPDATE_DATA boolean NOT NULL, REQUESTS_RECOVERY boolean NOT NULL, JOB_DATA bytea NULL); alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, NEXT_FIRE_TIME BIGINT NULL, PREV_FIRE_TIME BIGINT NULL, PRIORITY INTEGER NULL, TRIGGER_STATE character varying(16) NOT NULL, TRIGGER_TYPE character varying(8) NOT NULL, START_TIME BIGINT NOT NULL, END_TIME BIGINT NULL, CALENDAR_NAME character varying(200) NULL, MISFIRE_INSTR SMALLINT NULL, JOB_DATA bytea NULL) ; alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, REPEAT_COUNT BIGINT NOT NULL, REPEAT_INTERVAL BIGINT NOT NULL, TIMES_TRIGGERED BIGINT NOT NULL) ; alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, CRON_EXPRESSION character varying(120) NOT NULL, TIME_ZONE_ID character varying(80)) ; alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, STR_PROP_1 character varying(512) NULL, STR_PROP_2 character varying(512) NULL, STR_PROP_3 character varying(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, 
BOOL_PROP_1 boolean NULL, BOOL_PROP_2 boolean NULL) ; alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, BLOB_DATA bytea NULL) ; alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME character varying(120) NOT NULL, CALENDAR_NAME character varying(200) NOT NULL, CALENDAR bytea NOT NULL) ; alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME); CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL) ; alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, ENTRY_ID character varying(200) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, FIRED_TIME BIGINT NOT NULL, SCHED_TIME BIGINT NOT NULL, PRIORITY INTEGER NOT NULL, STATE character varying(16) NOT NULL, JOB_NAME character varying(200) NULL, JOB_GROUP character varying(200) NULL, IS_NONCONCURRENT boolean NULL, REQUESTS_RECOVERY boolean NULL) ; alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID); CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME character varying(120) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, LAST_CHECKIN_TIME BIGINT NOT NULL, CHECKIN_INTERVAL BIGINT NOT NULL) ; alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME); CREATE TABLE QRTZ_LOCKS ( SCHED_NAME character varying(120) NOT NULL, LOCK_NAME character varying(40) NOT NULL) ; alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME); CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME); CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME); CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_T_G ON 
QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); -- -- Table structure for table t_ds_access_token -- DROP TABLE IF EXISTS t_ds_access_token; CREATE TABLE t_ds_access_token ( id int NOT NULL , user_id int DEFAULT NULL , token varchar(64) DEFAULT NULL , expire_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alert -- DROP TABLE IF EXISTS t_ds_alert; CREATE TABLE t_ds_alert ( id int NOT NULL , title varchar(64) DEFAULT NULL , content text , alert_status int DEFAULT '0' , log text , alertgroup_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alertgroup -- DROP TABLE IF EXISTS t_ds_alertgroup; CREATE TABLE t_ds_alertgroup( id int NOT NULL, alert_instance_ids varchar (255) DEFAULT NULL, create_user_id int4 DEFAULT NULL, group_name varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name) ) ; -- -- Table structure for table t_ds_command -- DROP TABLE IF EXISTS t_ds_command; CREATE TABLE t_ds_command ( id int NOT NULL , command_type int DEFAULT NULL , process_definition_id int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , executor_id int DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_datasource -- DROP TABLE IF EXISTS t_ds_datasource; CREATE TABLE t_ds_datasource ( id int NOT NULL , name varchar(64) NOT NULL , note varchar(255) DEFAULT NULL , type int NOT NULL , user_id int NOT NULL , connection_params text NOT NULL , create_time timestamp NOT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id), CONSTRAINT t_ds_datasource_name_UN UNIQUE (name, type) ) ; -- -- Table structure for table t_ds_error_command -- DROP TABLE IF EXISTS t_ds_error_command; CREATE TABLE t_ds_error_command ( id int NOT NULL , command_type int DEFAULT NULL , executor_id int DEFAULT NULL , process_definition_id int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), message text , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_master_server -- -- -- Table structure for table t_ds_process_definition -- DROP TABLE IF EXISTS t_ds_process_definition; CREATE TABLE t_ds_process_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , connects text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT 
NULL , PRIMARY KEY (id) , CONSTRAINT process_definition_unique UNIQUE (name, project_code) ) ; create index process_definition_index on t_ds_process_definition (code,id); DROP TABLE IF EXISTS t_ds_process_definition_log; CREATE TABLE t_ds_process_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , connects text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_task_definition; CREATE TABLE t_ds_task_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT task_definition_unique UNIQUE (name, project_code) ) ; create index task_definition_index on t_ds_task_definition (project_code,id); DROP TABLE IF EXISTS t_ds_task_definition_log; CREATE TABLE t_ds_task_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids varchar(255) DEFAULT NULL , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_process_task_relation; CREATE TABLE t_ds_process_task_relation ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , condition_params text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_process_task_relation_log; CREATE TABLE t_ds_process_task_relation_log ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , 
condition_params text , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_process_instance -- DROP TABLE IF EXISTS t_ds_process_instance; CREATE TABLE t_ds_process_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , process_definition_code bigint DEFAULT NULL , state int DEFAULT NULL , recovery int DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , run_times int DEFAULT NULL , host varchar(135) DEFAULT NULL , command_type int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , max_try_times int DEFAULT '0' , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , command_start_time timestamp DEFAULT NULL , global_params text , process_instance_json text , flag int DEFAULT '1' , update_time timestamp NULL , is_sub_process int DEFAULT '0' , executor_id int NOT NULL , history_cmd text , dependence_schedule_times text , process_instance_priority int DEFAULT NULL , worker_group varchar(64) , timeout int DEFAULT '0' , tenant_id int NOT NULL DEFAULT '-1' , var_pool text , PRIMARY KEY (id) ) ; create index process_instance_index on t_ds_process_instance (process_definition_code,id); create index start_time_index on t_ds_process_instance (start_time); -- -- Table structure for table t_ds_project -- DROP TABLE IF EXISTS t_ds_project; CREATE TABLE t_ds_project ( id int NOT NULL , name varchar(100) DEFAULT NULL , code bigint NOT NULL, description varchar(200) DEFAULT NULL , user_id int DEFAULT NULL , flag int DEFAULT '1' , create_time timestamp DEFAULT CURRENT_TIMESTAMP , update_time timestamp DEFAULT CURRENT_TIMESTAMP , PRIMARY KEY (id) ) ; create index user_id_index on t_ds_project (user_id); -- -- Table structure for table t_ds_queue -- DROP TABLE IF EXISTS t_ds_queue; CREATE TABLE t_ds_queue ( id int NOT NULL , queue_name varchar(64) DEFAULT NULL , queue varchar(64) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_relation_datasource_user -- DROP TABLE IF EXISTS t_ds_relation_datasource_user; CREATE TABLE t_ds_relation_datasource_user ( id int NOT NULL , user_id int NOT NULL , datasource_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_relation_process_instance -- DROP TABLE IF EXISTS t_ds_relation_process_instance; CREATE TABLE t_ds_relation_process_instance ( id int NOT NULL , parent_process_instance_id int DEFAULT NULL , parent_task_instance_id int DEFAULT NULL , process_instance_id int DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_project_user -- DROP TABLE IF EXISTS t_ds_relation_project_user; CREATE TABLE t_ds_relation_project_user ( id int NOT NULL , user_id int NOT NULL , project_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index relation_project_user_id_index on t_ds_relation_project_user (user_id); -- -- Table structure for table t_ds_relation_resources_user -- DROP TABLE IF EXISTS t_ds_relation_resources_user; CREATE TABLE t_ds_relation_resources_user ( id int NOT NULL , user_id int NOT NULL 
, resources_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_udfs_user -- DROP TABLE IF EXISTS t_ds_relation_udfs_user; CREATE TABLE t_ds_relation_udfs_user ( id int NOT NULL , user_id int NOT NULL , udf_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_resources -- DROP TABLE IF EXISTS t_ds_resources; CREATE TABLE t_ds_resources ( id int NOT NULL , alias varchar(64) DEFAULT NULL , file_name varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , user_id int DEFAULT NULL , type int DEFAULT NULL , size bigint DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , pid int, full_name varchar(64), is_directory int, PRIMARY KEY (id), CONSTRAINT t_ds_resources_un UNIQUE (full_name, type) ) ; -- -- Table structure for table t_ds_schedules -- DROP TABLE IF EXISTS t_ds_schedules; CREATE TABLE t_ds_schedules ( id int NOT NULL , process_definition_id int NOT NULL , start_time timestamp NOT NULL , end_time timestamp NOT NULL , timezone_id varchar(40) default NULL , crontab varchar(255) NOT NULL , failure_strategy int NOT NULL , user_id int NOT NULL , release_state int NOT NULL , warning_type int NOT NULL , warning_group_id int DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_session -- DROP TABLE IF EXISTS t_ds_session; CREATE TABLE t_ds_session ( id varchar(64) NOT NULL , user_id int DEFAULT NULL , ip varchar(45) DEFAULT NULL , last_login_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_task_instance -- DROP TABLE IF EXISTS t_ds_task_instance; CREATE TABLE t_ds_task_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_code bigint NOT NULL, task_definition_version int DEFAULT NULL , process_instance_id int DEFAULT NULL , state int DEFAULT NULL , submit_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , host varchar(135) DEFAULT NULL , execute_path varchar(200) DEFAULT NULL , log_path varchar(200) DEFAULT NULL , alert_flag int DEFAULT NULL , retry_times int DEFAULT '0' , pid int DEFAULT NULL , app_link text , task_params text , flag int DEFAULT '1' , retry_interval int DEFAULT NULL , max_retry_times int DEFAULT NULL , task_instance_priority int DEFAULT NULL , worker_group varchar(64), executor_id int DEFAULT NULL , first_submit_time timestamp DEFAULT NULL , delay_time int DEFAULT '0' , var_pool text , PRIMARY KEY (id), CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE ) ; -- -- Table structure for table t_ds_tenant -- DROP TABLE IF EXISTS t_ds_tenant; CREATE TABLE t_ds_tenant ( id int NOT NULL , tenant_code varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , queue_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_udfs -- DROP TABLE IF EXISTS t_ds_udfs; CREATE TABLE t_ds_udfs ( id int NOT NULL , user_id int NOT NULL , func_name varchar(100) NOT NULL , class_name varchar(255) NOT NULL , type int NOT NULL , arg_types 
varchar(255) DEFAULT NULL , database varchar(255) DEFAULT NULL , description varchar(255) DEFAULT NULL , resource_id int NOT NULL , resource_name varchar(255) NOT NULL , create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_user -- DROP TABLE IF EXISTS t_ds_user; CREATE TABLE t_ds_user ( id int NOT NULL , user_name varchar(64) DEFAULT NULL , user_password varchar(64) DEFAULT NULL , user_type int DEFAULT NULL , email varchar(64) DEFAULT NULL , phone varchar(11) DEFAULT NULL , tenant_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , queue varchar(64) DEFAULT NULL , state int DEFAULT 1 , PRIMARY KEY (id) ); comment on column t_ds_user.state is 'state 0:disable 1:enable'; -- -- Table structure for table t_ds_version -- DROP TABLE IF EXISTS t_ds_version; CREATE TABLE t_ds_version ( id int NOT NULL , version varchar(200) NOT NULL, PRIMARY KEY (id) ) ; create index version_index on t_ds_version(version); -- -- Table structure for table t_ds_worker_group -- DROP TABLE IF EXISTS t_ds_worker_group; CREATE TABLE t_ds_worker_group ( id bigint NOT NULL , name varchar(255) NOT NULL , addr_list text DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT name_unique UNIQUE (name) ) ; -- -- Table structure for table t_ds_worker_server -- DROP TABLE IF EXISTS t_ds_worker_server; CREATE TABLE t_ds_worker_server ( id int NOT NULL , host varchar(45) DEFAULT NULL , port int DEFAULT NULL , zk_directory varchar(64) DEFAULT NULL , res_info varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , last_heartbeat_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence; CREATE SEQUENCE t_ds_access_token_id_sequence; ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence; CREATE SEQUENCE t_ds_alert_id_sequence; ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence; CREATE SEQUENCE t_ds_alertgroup_id_sequence; ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_command_id_sequence; CREATE SEQUENCE t_ds_command_id_sequence; ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence; CREATE SEQUENCE t_ds_datasource_id_sequence; ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence; CREATE SEQUENCE t_ds_process_definition_id_sequence; ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_log_id_sequence; CREATE SEQUENCE t_ds_process_definition_log_id_sequence; ALTER TABLE t_ds_process_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_id_sequence; CREATE SEQUENCE t_ds_task_definition_id_sequence; ALTER TABLE t_ds_task_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_log_id_sequence; CREATE SEQUENCE t_ds_task_definition_log_id_sequence; ALTER TABLE t_ds_task_definition_log ALTER COLUMN id SET 
DEFAULT NEXTVAL('t_ds_task_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_id_sequence; ALTER TABLE t_ds_process_task_relation ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_log_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_log_id_sequence; ALTER TABLE t_ds_process_task_relation_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence; CREATE SEQUENCE t_ds_process_instance_id_sequence; ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_project_id_sequence; CREATE SEQUENCE t_ds_project_id_sequence; ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence; CREATE SEQUENCE t_ds_queue_id_sequence; ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence; CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence; ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence; CREATE SEQUENCE t_ds_relation_process_instance_id_sequence; ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence; CREATE SEQUENCE t_ds_relation_project_user_id_sequence; ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence; CREATE SEQUENCE t_ds_relation_resources_user_id_sequence; ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence; CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence; ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence; CREATE SEQUENCE t_ds_resources_id_sequence; ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence; CREATE SEQUENCE t_ds_schedules_id_sequence; ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence; CREATE SEQUENCE t_ds_task_instance_id_sequence; ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence; CREATE SEQUENCE t_ds_tenant_id_sequence; ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence; CREATE SEQUENCE t_ds_udfs_id_sequence; ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_user_id_sequence; CREATE SEQUENCE t_ds_user_id_sequence; ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_version_id_sequence; CREATE SEQUENCE t_ds_version_id_sequence; ALTER TABLE 
t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence; CREATE SEQUENCE t_ds_worker_group_id_sequence; ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence; CREATE SEQUENCE t_ds_worker_server_id_sequence; ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence'); -- Records of t_ds_user?user : admin , password : dolphinscheduler123 INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', '[email protected]', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22'); -- Records of t_ds_alertgroup, default admin warning group INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- Records of t_ds_queue,default queue name : default INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time) VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33'); -- Records of t_ds_queue,default queue name : default INSERT INTO t_ds_version(version) VALUES ('1.4.0'); -- -- Table structure for table t_ds_plugin_define -- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type) ); -- -- Table structure for table t_ds_alert_plugin_instance -- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) );
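Unlike the MySQL script, the PostgreSQL script above emulates AUTO_INCREMENT by creating one sequence per table and binding it with ALTER TABLE ... SET DEFAULT NEXTVAL(...), so inserts can simply omit the id column. A minimal sketch, assuming the schema above has been applied; the tenant code and description are made-up values:

-- hypothetical example only: id comes from t_ds_tenant_id_sequence via the column default
INSERT INTO t_ds_tenant (tenant_code, description, queue_id, create_time, update_time)
VALUES ('example_tenant', 'illustrative row only', 1, now(), now());
SELECT currval('t_ds_tenant_id_sequence'); -- the id just assigned in this session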
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,610
[Question] Something wrong with building the Docker image from source
**Describe the question** I tried building an image from clean source code. ![image](https://user-images.githubusercontent.com/84900511/121340374-20386480-c952-11eb-81d8-c201586a47c0.png) and then set it up in this way. ![image](https://user-images.githubusercontent.com/84900511/121340450-3514f800-c952-11eb-8042-4bdd56df2d7b.png) But when I tried to create a project, it failed. ![image](https://user-images.githubusercontent.com/84900511/121341733-896ca780-c953-11eb-8f3c-d494bddea536.png) **Which version of DolphinScheduler:** -[1.3.6] **Additional context** I also found that the image I pulled is not the same size as the image I built. Does anyone know the reason? ![image](https://user-images.githubusercontent.com/84900511/121340737-84f3bf00-c952-11eb-8dec-10f6c4d28b7f.png)
https://github.com/apache/dolphinscheduler/issues/5610
https://github.com/apache/dolphinscheduler/pull/5611
0d5037e7c37d7903d9172f165b348058f1ddbf88
c5bea3c77430e0b46a2f5a3a91a7fbbc78874196
2021-06-09T10:45:43Z
java
2021-06-15T06:45:39Z
sql/upgrade/1.4.0_schema/mysql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); -- uc_dolphin_T_t_ds_user_A_state drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_user_A_state; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_user_A_state() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_user' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='state') THEN ALTER TABLE t_ds_user ADD `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_user_A_state; DROP PROCEDURE uc_dolphin_T_t_ds_user_A_state; -- uc_dolphin_T_t_ds_tenant_A_tenant_name drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name() BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_tenant' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='tenant_name') THEN ALTER TABLE t_ds_tenant DROP `tenant_name`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_tenant_A_tenant_name; DROP PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name; -- uc_dolphin_T_t_ds_task_instance_A_first_submit_time drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='first_submit_time') THEN ALTER TABLE t_ds_task_instance ADD `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time; -- uc_dolphin_T_t_ds_task_instance_A_delay_time drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='delay_time') THEN ALTER TABLE t_ds_task_instance ADD `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_delay_time(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time; -- uc_dolphin_T_t_ds_task_instance_A_var_pool drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_task_instance 
ADD `var_pool` longtext NULL; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_var_pool(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool; -- uc_dolphin_T_t_ds_process_instance_A_var_pool drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_process_instance ADD `var_pool` longtext NULL; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_instance_A_var_pool(); DROP PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool; -- uc_dolphin_T_t_ds_process_definition_A_modify_by drop PROCEDURE if EXISTS ct_dolphin_T_t_ds_process_definition_version; delimiter d// CREATE PROCEDURE ct_dolphin_T_t_ds_process_definition_version() BEGIN CREATE TABLE IF NOT EXISTS `t_ds_process_definition_version` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_id` int(11) NOT NULL COMMENT 'process definition id', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `process_definition_json` longtext COMMENT 'process definition json content', `description` text, `global_params` text COMMENT 'global parameters', `locations` text COMMENT 'Node location information', `connects` text COMMENT 'Node connection information', `receivers` text COMMENT 'receivers', `receivers_cc` text COMMENT 'cc', `create_time` datetime DEFAULT NULL COMMENT 'create time', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids', PRIMARY KEY (`id`), UNIQUE KEY `process_definition_id_and_version` (`process_definition_id`,`version`) USING BTREE, KEY `process_definition_index` (`id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=84 DEFAULT CHARSET=utf8; END; d// delimiter ; CALL ct_dolphin_T_t_ds_process_definition_version; DROP PROCEDURE ct_dolphin_T_t_ds_process_definition_version; -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- DROP TABLE IF EXISTS `t_ds_plugin_define`; CREATE TABLE `t_ds_plugin_define` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email', `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. 
Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- uc_dolphin_T_t_ds_process_definition_A_warning_group_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_definition_A_warning_group_id(); DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id; -- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition_version' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition_version ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id(); DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id; -- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='alert_instance_ids') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids' AFTER `id`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids; -- uc_dolphin_T_t_ds_alertgroup_A_create_user_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='create_user_id') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id' AFTER `alert_instance_ids`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_create_user_id(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id; -- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND INDEX_NAME ='t_ds_alertgroup_name_UN') THEN ALTER TABLE t_ds_alertgroup ADD UNIQUE 
KEY `t_ds_alertgroup_name_UN` (`group_name`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName; -- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_datasource' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='t_ds_datasource_name_UN') THEN ALTER TABLE t_ds_datasource ADD UNIQUE KEY `t_ds_datasource_name_UN` (`name`, `type`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName(); DROP PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName; -- uc_dolphin_T_t_ds_schedules_A_add_timezone drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_schedules' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='timezone_id') THEN ALTER TABLE t_ds_schedules ADD COLUMN `timezone_id` varchar(40) default NULL COMMENT 'schedule timezone id' AFTER `end_time`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_schedules_A_add_timezone(); DROP PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone; -- ---------------------------- -- These columns will not be used in the new version,if you determine that the historical data is useless, you can delete it using the sql below -- ---------------------------- -- ALTER TABLE t_ds_alert DROP `show_type`, DROP `alert_type`, DROP `receivers`, DROP `receivers_cc`; -- ALTER TABLE t_ds_alertgroup DROP `group_type`; -- ALTER TABLE t_ds_process_definition DROP `receivers`, DROP `receivers_cc`; -- ALTER TABLE t_ds_process_definition_version DROP `receivers`, DROP `receivers_cc`; -- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup; -- ALTER TABLE t_ds_command DROP `dependence`; -- ALTER TABLE t_ds_error_command DROP `dependence`;
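The MySQL upgrade script above repeats one idempotency pattern throughout: each schema change is wrapped in a throwaway stored procedure that consults information_schema first, so rerunning the script against an already-upgraded database is a no-op. A minimal sketch of the same pattern for a hypothetical column `foo` (not part of the real schema):

-- minimal sketch of the guard pattern used above; `foo` is a hypothetical column
drop PROCEDURE if EXISTS uc_example_A_foo;
delimiter d//
CREATE PROCEDURE uc_example_A_foo()
BEGIN
  IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
          WHERE TABLE_NAME='t_ds_user'
          AND TABLE_SCHEMA=(SELECT DATABASE())
          AND COLUMN_NAME='foo') THEN
    ALTER TABLE t_ds_user ADD `foo` int(11) DEFAULT NULL COMMENT 'hypothetical column';
  END IF;
END;
d//
delimiter ;
CALL uc_example_A_foo;
DROP PROCEDURE uc_example_A_foo;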
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,610
[Question] Something wrong with building the Docker image from source
**Describe the question** I tried building an image from clean source code. ![image](https://user-images.githubusercontent.com/84900511/121340374-20386480-c952-11eb-81d8-c201586a47c0.png) and then set it up in this way. ![image](https://user-images.githubusercontent.com/84900511/121340450-3514f800-c952-11eb-8042-4bdd56df2d7b.png) But when I tried to create a project, it failed. ![image](https://user-images.githubusercontent.com/84900511/121341733-896ca780-c953-11eb-8f3c-d494bddea536.png) **Which version of DolphinScheduler:** -[1.3.6] **Additional context** I also found that the image I pulled is not the same size as the image I built. Does anyone know the reason? ![image](https://user-images.githubusercontent.com/84900511/121340737-84f3bf00-c952-11eb-8dec-10f6c4d28b7f.png)
https://github.com/apache/dolphinscheduler/issues/5610
https://github.com/apache/dolphinscheduler/pull/5611
0d5037e7c37d7903d9172f165b348058f1ddbf88
c5bea3c77430e0b46a2f5a3a91a7fbbc78874196
2021-06-09T10:45:43Z
java
2021-06-15T06:45:39Z
sql/upgrade/1.4.0_schema/postgresql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ -- uc_dolphin_T_t_ds_user_A_state delimiter ; DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_user_A_state(); delimiter d// CREATE FUNCTION uc_dolphin_T_t_ds_user_A_state() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_CATALOG=current_database() AND TABLE_SCHEMA=current_schema() AND TABLE_NAME='t_ds_user' AND COLUMN_NAME ='state') THEN ALTER TABLE t_ds_user ADD COLUMN state int DEFAULT 1; comment on column t_ds_user.state is 'state 0:disable 1:enable'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; select uc_dolphin_T_t_ds_user_A_state(); DROP FUNCTION uc_dolphin_T_t_ds_user_A_state(); -- uc_dolphin_T_t_ds_tenant_A_tenant_name delimiter ; DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name(); delimiter d// CREATE FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name() RETURNS void AS $$ BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_CATALOG=current_database() AND TABLE_SCHEMA=current_schema() AND TABLE_NAME='t_ds_tenant' AND COLUMN_NAME ='tenant_name') THEN ALTER TABLE t_ds_tenant DROP COLUMN "tenant_name"; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; select uc_dolphin_T_t_ds_tenant_A_tenant_name(); DROP FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name(); -- uc_dolphin_T_t_ds_task_instance_A_first_submit_time delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_first_submit_time() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='first_submit_time') THEN ALTER TABLE t_ds_task_instance ADD COLUMN first_submit_time timestamp DEFAULT NULL; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); -- uc_dolphin_T_t_ds_task_instance_A_delay_time delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_delay_time() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='delay_time') THEN ALTER TABLE t_ds_task_instance ADD COLUMN delay_time int DEFAULT '0'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_task_instance_A_delay_time(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time(); -- uc_dolphin_T_t_ds_task_instance_A_var_pool delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_var_pool() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_task_instance ADD COLUMN var_pool text; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT 
uc_dolphin_T_t_ds_task_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool();

-- uc_dolphin_T_t_ds_process_instance_A_var_pool
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_instance_A_var_pool() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
        WHERE TABLE_NAME='t_ds_process_instance'
        AND COLUMN_NAME ='var_pool')
    THEN
        ALTER TABLE t_ds_process_instance ADD COLUMN var_pool text;
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_instance_A_var_pool();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool();

-- uc_dolphin_T_t_ds_process_definition_A_modify_by
delimiter d//
CREATE OR REPLACE FUNCTION ct_dolphin_T_t_ds_process_definition_version() RETURNS void AS $$
BEGIN
    CREATE TABLE IF NOT EXISTS t_ds_process_definition_version (
        id int NOT NULL ,
        process_definition_id int NOT NULL ,
        version int DEFAULT NULL ,
        process_definition_json text ,
        description text ,
        global_params text ,
        locations text ,
        connects text ,
        receivers text ,
        receivers_cc text ,
        create_time timestamp DEFAULT NULL ,
        timeout int DEFAULT '0' ,
        resource_ids varchar(64),
        PRIMARY KEY (id)
    ) ;
    create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version);
    DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence;
    CREATE SEQUENCE t_ds_process_definition_version_id_sequence;
    ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence');
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT ct_dolphin_T_t_ds_process_definition_version();
DROP FUNCTION IF EXISTS ct_dolphin_T_t_ds_process_definition_version();

-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
DROP TABLE IF EXISTS t_ds_plugin_define;
CREATE TABLE t_ds_plugin_define (
    id serial NOT NULL,
    plugin_name varchar(100) NOT NULL,
    plugin_type varchar(100) NOT NULL,
    plugin_params text NULL,
    create_time timestamp NULL,
    update_time timestamp NULL,
    CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id),
    CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type)
);

-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS t_ds_alert_plugin_instance;
CREATE TABLE t_ds_alert_plugin_instance (
    id serial NOT NULL,
    plugin_define_id int4 NOT NULL,
    plugin_instance_params text NULL,
    create_time timestamp NULL,
    update_time timestamp NULL,
    instance_name varchar(200) NULL,
    CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id)
);

-- uc_dolphin_T_t_ds_process_definition_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_A_warning_group_id() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
        WHERE TABLE_NAME='t_ds_process_definition'
        AND COLUMN_NAME ='warning_group_id')
    THEN
        ALTER TABLE t_ds_process_definition ADD COLUMN warning_group_id int4 DEFAULT NULL;
        COMMENT ON COLUMN t_ds_process_definition.warning_group_id IS 'alert group id';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id();

-- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
        WHERE TABLE_NAME='t_ds_process_definition_version'
        AND COLUMN_NAME ='warning_group_id')
    THEN
        ALTER TABLE t_ds_process_definition_version ADD COLUMN warning_group_id int4 DEFAULT NULL;
        COMMENT ON COLUMN t_ds_process_definition_version.warning_group_id IS 'alert group id';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id();

-- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
        WHERE TABLE_NAME='t_ds_alertgroup'
        AND COLUMN_NAME ='alert_instance_ids')
    THEN
        ALTER TABLE t_ds_alertgroup ADD COLUMN alert_instance_ids varchar (255) DEFAULT NULL;
        COMMENT ON COLUMN t_ds_alertgroup.alert_instance_ids IS 'alert instance ids';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids();

-- uc_dolphin_T_t_ds_alertgroup_A_create_user_id
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_create_user_id() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
        WHERE TABLE_NAME='t_ds_alertgroup'
        AND COLUMN_NAME ='create_user_id')
    THEN
        ALTER TABLE t_ds_alertgroup ADD COLUMN create_user_id int4 DEFAULT NULL;
        COMMENT ON COLUMN t_ds_alertgroup.create_user_id IS 'create user id';
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_create_user_id();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id();

-- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes
        WHERE relname='t_ds_alertgroup'
        AND indexrelname ='t_ds_alertgroup_name_UN')
    THEN
        ALTER TABLE t_ds_alertgroup ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name);
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName();

-- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes
        WHERE relname='t_ds_datasource'
        AND indexrelname ='t_ds_datasource_name_UN')
    THEN
        ALTER TABLE t_ds_datasource ADD CONSTRAINT t_ds_datasource_name_UN UNIQUE (name, type);
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName();

-- uc_dolphin_T_t_ds_schedules_A_add_timezone
delimiter d//
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_schedules_A_add_timezone() RETURNS void AS $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
        WHERE TABLE_NAME='t_ds_schedules'
        AND COLUMN_NAME ='timezone_id')
    THEN
        ALTER TABLE t_ds_schedules ADD COLUMN timezone_id varchar(40) DEFAULT NULL;
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_dolphin_T_t_ds_schedules_A_add_timezone();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone();

-- ----------------------------
-- These columns will not be used in the new version; if you determine that the historical data is useless, you can delete it using the sql below
-- ----------------------------
-- ALTER TABLE t_ds_alert DROP COLUMN "show_type", DROP COLUMN "alert_type", DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_alertgroup DROP COLUMN "group_type";
-- ALTER TABLE t_ds_process_definition DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- ALTER TABLE t_ds_process_definition_version DROP COLUMN "receivers", DROP COLUMN "receivers_cc";
-- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup;
-- ALTER TABLE t_ds_command DROP COLUMN "dependence";
-- ALTER TABLE t_ds_error_command DROP COLUMN "dependence";
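The upgrade statements above all share one idempotent idiom: wrap each schema change in a throwaway PL/pgSQL function that first checks information_schema (or pg_stat_all_indexes for constraints), invoke it once, then drop it, so the script can be re-run safely. A minimal sketch of that idiom follows; the table t_example and column new_col are hypothetical, purely for illustration, and the delimiter lines mirror this script's own convention rather than standard psql syntax.

delimiter d//
CREATE OR REPLACE FUNCTION uc_example_add_new_col() RETURNS void AS $$
BEGIN
    -- guard: only add the column when it is missing, so re-runs are no-ops
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
        WHERE TABLE_NAME='t_example'
        AND COLUMN_NAME='new_col')
    THEN
        ALTER TABLE t_example ADD COLUMN new_col text;
    END IF;
END;
$$ LANGUAGE plpgsql;
d//
delimiter ;
SELECT uc_example_add_new_col();
DROP FUNCTION IF EXISTS uc_example_add_new_col();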
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,540
[Bug][JSON Split] Project created failure and unknown column p.code in field list
**To Reproduce**
Refer to https://dolphinscheduler.apache.org/zh-cn/development/development-environment-setup.html
And run `org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler`

**Expected behavior**
Bug fixed

**Screenshots**
If applicable, add screenshots to help explain your problem.
![image](https://user-images.githubusercontent.com/4902714/119256897-76d53d00-bbf5-11eb-9a1e-aa82c3948d2e.png)
![image](https://user-images.githubusercontent.com/4902714/119256898-79d02d80-bbf5-11eb-99ae-08cf47a95976.png)

**Which version of Dolphin Scheduler:**
-[dev]

@JinyLeeChina
https://github.com/apache/dolphinscheduler/issues/5540
https://github.com/apache/dolphinscheduler/pull/5611
0d5037e7c37d7903d9172f165b348058f1ddbf88
c5bea3c77430e0b46a2f5a3a91a7fbbc78874196
2021-05-23T10:38:54Z
java
2021-06-15T06:45:39Z
sql/dolphinscheduler_mysql.sql
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

SET FOREIGN_KEY_CHECKS=0;

-- ----------------------------
-- Table structure for QRTZ_BLOB_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`;
CREATE TABLE `QRTZ_BLOB_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `BLOB_DATA` blob,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_BLOB_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_CALENDARS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CALENDARS`;
CREATE TABLE `QRTZ_CALENDARS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `CALENDAR_NAME` varchar(200) NOT NULL,
  `CALENDAR` blob NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_CALENDARS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_CRON_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`;
CREATE TABLE `QRTZ_CRON_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `CRON_EXPRESSION` varchar(120) NOT NULL,
  `TIME_ZONE_ID` varchar(80) DEFAULT NULL,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_CRON_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_FIRED_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`;
CREATE TABLE `QRTZ_FIRED_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `ENTRY_ID` varchar(200) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `INSTANCE_NAME` varchar(200) NOT NULL,
  `FIRED_TIME` bigint(13) NOT NULL,
  `SCHED_TIME` bigint(13) NOT NULL,
  `PRIORITY` int(11) NOT NULL,
  `STATE` varchar(16) NOT NULL,
  `JOB_NAME` varchar(200) DEFAULT NULL,
  `JOB_GROUP` varchar(200) DEFAULT NULL,
  `IS_NONCONCURRENT` varchar(1) DEFAULT NULL,
  `REQUESTS_RECOVERY` varchar(1) DEFAULT NULL,
  PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`),
  KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`),
  KEY `IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`),
  KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_FIRED_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_JOB_DETAILS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`;
CREATE TABLE `QRTZ_JOB_DETAILS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `JOB_NAME` varchar(200) NOT NULL,
  `JOB_GROUP` varchar(200) NOT NULL,
  `DESCRIPTION` varchar(250) DEFAULT NULL,
  `JOB_CLASS_NAME` varchar(250) NOT NULL,
  `IS_DURABLE` varchar(1) NOT NULL,
  `IS_NONCONCURRENT` varchar(1) NOT NULL,
  `IS_UPDATE_DATA` varchar(1) NOT NULL,
  `REQUESTS_RECOVERY` varchar(1) NOT NULL,
  `JOB_DATA` blob,
  PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`),
  KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_JOB_DETAILS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_LOCKS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_LOCKS`;
CREATE TABLE `QRTZ_LOCKS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `LOCK_NAME` varchar(40) NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_LOCKS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`;
CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_SCHEDULER_STATE
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`;
CREATE TABLE `QRTZ_SCHEDULER_STATE` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `INSTANCE_NAME` varchar(200) NOT NULL,
  `LAST_CHECKIN_TIME` bigint(13) NOT NULL,
  `CHECKIN_INTERVAL` bigint(13) NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_SCHEDULER_STATE
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `REPEAT_COUNT` bigint(7) NOT NULL,
  `REPEAT_INTERVAL` bigint(12) NOT NULL,
  `TIMES_TRIGGERED` bigint(10) NOT NULL,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_SIMPLE_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `STR_PROP_1` varchar(512) DEFAULT NULL,
  `STR_PROP_2` varchar(512) DEFAULT NULL,
  `STR_PROP_3` varchar(512) DEFAULT NULL,
  `INT_PROP_1` int(11) DEFAULT NULL,
  `INT_PROP_2` int(11) DEFAULT NULL,
  `LONG_PROP_1` bigint(20) DEFAULT NULL,
  `LONG_PROP_2` bigint(20) DEFAULT NULL,
  `DEC_PROP_1` decimal(13,4) DEFAULT NULL,
  `DEC_PROP_2` decimal(13,4) DEFAULT NULL,
  `BOOL_PROP_1` varchar(1) DEFAULT NULL,
  `BOOL_PROP_2` varchar(1) DEFAULT NULL,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_SIMPROP_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for QRTZ_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_TRIGGERS`;
CREATE TABLE `QRTZ_TRIGGERS` (
  `SCHED_NAME` varchar(120) NOT NULL,
  `TRIGGER_NAME` varchar(200) NOT NULL,
  `TRIGGER_GROUP` varchar(200) NOT NULL,
  `JOB_NAME` varchar(200) NOT NULL,
  `JOB_GROUP` varchar(200) NOT NULL,
  `DESCRIPTION` varchar(250) DEFAULT NULL,
  `NEXT_FIRE_TIME` bigint(13) DEFAULT NULL,
  `PREV_FIRE_TIME` bigint(13) DEFAULT NULL,
  `PRIORITY` int(11) DEFAULT NULL,
  `TRIGGER_STATE` varchar(16) NOT NULL,
  `TRIGGER_TYPE` varchar(8) NOT NULL,
  `START_TIME` bigint(13) NOT NULL,
  `END_TIME` bigint(13) DEFAULT NULL,
  `CALENDAR_NAME` varchar(200) DEFAULT NULL,
  `MISFIRE_INSTR` smallint(2) DEFAULT NULL,
  `JOB_DATA` blob,
  PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
  KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`),
  KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`),
  KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`),
  KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`),
  KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
  KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
  KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`),
  KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`),
  KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`),
  KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`),
  KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
  CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of QRTZ_TRIGGERS
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_access_token
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_access_token`;
CREATE TABLE `t_ds_access_token` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
  `token` varchar(64) DEFAULT NULL COMMENT 'token',
  `expire_time` datetime DEFAULT NULL COMMENT 'end time of token ',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_access_token
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_alert
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert`;
CREATE TABLE `t_ds_alert` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `title` varchar(64) DEFAULT NULL COMMENT 'title',
  `content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)',
  `alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed',
  `log` text COMMENT 'log',
  `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_alert
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alertgroup`;
CREATE TABLE `t_ds_alertgroup`(
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids',
  `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id',
  `group_name` varchar(255) DEFAULT NULL COMMENT 'group name',
  `description` varchar(255) DEFAULT NULL,
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_alertgroup_name_UN` (`group_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_command`;
CREATE TABLE `t_ds_command` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread',
  `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
  `command_param` text COMMENT 'json command parameters',
  `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward',
  `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue',
  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group',
  `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
  `start_time` datetime DEFAULT NULL COMMENT 'start time',
  `executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) COMMENT 'worker group',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_command
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_datasource
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_datasource`;
CREATE TABLE `t_ds_datasource` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `name` varchar(64) NOT NULL COMMENT 'data source name',
  `note` varchar(255) DEFAULT NULL COMMENT 'description',
  `type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark',
  `user_id` int(11) NOT NULL COMMENT 'the creator id',
  `connection_params` text NOT NULL COMMENT 'json connection params',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_datasource_name_UN` (`name`, `type`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_datasource
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_error_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_error_command`;
CREATE TABLE `t_ds_error_command` (
  `id` int(11) NOT NULL COMMENT 'key',
  `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
  `executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
  `process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
  `command_param` text COMMENT 'json command parameters',
  `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type',
  `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy',
  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
  `schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time',
  `start_time` datetime DEFAULT NULL COMMENT 'start time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) COMMENT 'worker group',
  `message` text COMMENT 'message',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;

-- ----------------------------
-- Records of t_ds_error_command
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_process_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition`;
CREATE TABLE `t_ds_process_definition` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(255) DEFAULT NULL COMMENT 'process definition name',
  `version` int(11) DEFAULT NULL COMMENT 'process definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
  `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
  `global_params` text COMMENT 'global parameters',
  `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
  `locations` text COMMENT 'Node location information',
  `connects` text COMMENT 'Node connection information',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `timeout` int(11) DEFAULT '0' COMMENT 'time out, unit: minute',
  `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`,`code`),
  UNIQUE KEY `process_unique` (`name`,`project_code`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_process_definition
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_process_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition_log`;
CREATE TABLE `t_ds_process_definition_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'process definition name',
  `version` int(11) DEFAULT NULL COMMENT 'process definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
  `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
  `global_params` text COMMENT 'global parameters',
  `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
  `locations` text COMMENT 'Node location information',
  `connects` text COMMENT 'Node connection information',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute',
  `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_task_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition`;
CREATE TABLE `t_ds_task_definition` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
  `version` int(11) DEFAULT NULL COMMENT 'task definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
  `task_type` varchar(50) NOT NULL COMMENT 'task type',
  `task_params` longtext COMMENT 'job custom parameters',
  `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
  `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
  `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
  `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
  `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
  `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
  `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
  `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
  `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
  `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`,`code`),
  UNIQUE KEY `task_unique` (`name`,`project_code`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_task_definition_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_definition_log`;
CREATE TABLE `t_ds_task_definition_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `name` varchar(200) DEFAULT NULL COMMENT 'task definition name',
  `version` int(11) DEFAULT NULL COMMENT 'task definition version',
  `description` text COMMENT 'description',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id',
  `task_type` varchar(50) NOT NULL COMMENT 'task type',
  `task_params` text COMMENT 'job custom parameters',
  `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available',
  `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority',
  `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping',
  `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries',
  `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval',
  `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open',
  `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail',
  `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute',
  `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute',
  `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_task_relation
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_task_relation`;
CREATE TABLE `t_ds_process_task_relation` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `name` varchar(200) DEFAULT NULL COMMENT 'relation name',
  `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `process_definition_code` bigint(20) NOT NULL COMMENT 'process code',
  `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code',
  `pre_task_version` int(11) NOT NULL COMMENT 'pre task version',
  `post_task_code` bigint(20) NOT NULL COMMENT 'post task code',
  `post_task_version` int(11) NOT NULL COMMENT 'post task version',
  `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay',
  `condition_params` text COMMENT 'condition params(json)',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_task_relation_log
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_task_relation_log`;
CREATE TABLE `t_ds_process_task_relation_log` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id',
  `name` varchar(200) DEFAULT NULL COMMENT 'relation name',
  `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version',
  `project_code` bigint(20) NOT NULL COMMENT 'project code',
  `process_definition_code` bigint(20) NOT NULL COMMENT 'process code',
  `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code',
  `pre_task_version` int(11) NOT NULL COMMENT 'pre task version',
  `post_task_code` bigint(20) NOT NULL COMMENT 'post task code',
  `post_task_version` int(11) NOT NULL COMMENT 'post task version',
  `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay',
  `condition_params` text COMMENT 'condition params(json)',
  `operator` int(11) DEFAULT NULL COMMENT 'operator user id',
  `operate_time` datetime DEFAULT NULL COMMENT 'operate time',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_instance`;
CREATE TABLE `t_ds_process_instance` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `name` varchar(255) DEFAULT NULL COMMENT 'process instance name',
  `process_definition_version` int(11) DEFAULT NULL COMMENT 'process definition version',
  `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code',
  `state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
  `recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance',
  `start_time` datetime DEFAULT NULL COMMENT 'process instance start time',
  `end_time` datetime DEFAULT NULL COMMENT 'process instance end time',
  `run_times` int(11) DEFAULT NULL COMMENT 'process instance run times',
  `host` varchar(135) DEFAULT NULL COMMENT 'process instance host',
  `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
  `command_param` text COMMENT 'json command parameters',
  `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes',
  `max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times',
  `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed',
  `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if success',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
  `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
  `command_start_time` datetime DEFAULT NULL COMMENT 'command start time',
  `global_params` text COMMENT 'global parameters',
  `flag` tinyint(4) DEFAULT '1' COMMENT 'flag',
  `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  `is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process',
  `executor_id` int(11) NOT NULL COMMENT 'executor id',
  `history_cmd` text COMMENT 'history commands of process instance operation',
  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id',
  `timeout` int(11) DEFAULT '0' COMMENT 'time out',
  `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
  `var_pool` longtext COMMENT 'var_pool',
  PRIMARY KEY (`id`),
  KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE,
  KEY `start_time_index` (`start_time`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_process_instance
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_project
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_project`;
CREATE TABLE `t_ds_project` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `name` varchar(100) DEFAULT NULL COMMENT 'project name',
  `code` bigint(20) NOT NULL COMMENT 'encoding',
  `description` varchar(200) DEFAULT NULL,
  `user_id` int(11) DEFAULT NULL COMMENT 'creator id',
  `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_project
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_queue
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_queue`;
CREATE TABLE `t_ds_queue` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name',
  `queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_queue
-- ----------------------------
INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null);

-- ----------------------------
-- Table structure for t_ds_relation_datasource_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_datasource_user`;
CREATE TABLE `t_ds_relation_datasource_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `datasource_id` int(11) DEFAULT NULL COMMENT 'data source id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_datasource_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_process_instance`;
CREATE TABLE `t_ds_relation_process_instance` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
  `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
  `process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_process_instance
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_project_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_project_user`;
CREATE TABLE `t_ds_relation_project_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `project_id` int(11) DEFAULT NULL COMMENT 'project id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_project_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_resources_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_resources_user`;
CREATE TABLE `t_ds_relation_resources_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `resources_id` int(11) DEFAULT NULL COMMENT 'resource id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_relation_resources_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_relation_udfs_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_udfs_user`;
CREATE TABLE `t_ds_relation_udfs_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'userid',
  `udf_id` int(11) DEFAULT NULL COMMENT 'udf id',
  `perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_resources
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_resources`;
CREATE TABLE `t_ds_resources` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `alias` varchar(64) DEFAULT NULL COMMENT 'alias',
  `file_name` varchar(64) DEFAULT NULL COMMENT 'file name',
  `description` varchar(255) DEFAULT NULL,
  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
  `type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF',
  `size` bigint(20) DEFAULT NULL COMMENT 'resource size',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `pid` int(11) DEFAULT NULL,
  `full_name` varchar(64) DEFAULT NULL,
  `is_directory` tinyint(4) DEFAULT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_resources
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_schedules
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_schedules`;
CREATE TABLE `t_ds_schedules` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
  `start_time` datetime NOT NULL COMMENT 'start time',
  `end_time` datetime NOT NULL COMMENT 'end time',
  `timezone_id` varchar(40) DEFAULT NULL COMMENT 'timezoneId',
  `crontab` varchar(255) NOT NULL COMMENT 'crontab description',
  `failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ',
  `warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
  `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
  `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) DEFAULT '' COMMENT 'worker group id',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_schedules
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_session
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_session`;
CREATE TABLE `t_ds_session` (
  `id` varchar(64) NOT NULL COMMENT 'key',
  `user_id` int(11) DEFAULT NULL COMMENT 'user id',
  `ip` varchar(45) DEFAULT NULL COMMENT 'ip',
  `last_login_time` datetime DEFAULT NULL COMMENT 'last login time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_session
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_task_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_instance`;
CREATE TABLE `t_ds_task_instance` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `name` varchar(255) DEFAULT NULL COMMENT 'task name',
  `task_type` varchar(50) NOT NULL COMMENT 'task type',
  `task_code` bigint(20) NOT NULL COMMENT 'task definition code',
  `task_definition_version` int(11) DEFAULT NULL COMMENT 'task definition version',
  `process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id',
  `state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
  `submit_time` datetime DEFAULT NULL COMMENT 'task submit time',
  `start_time` datetime DEFAULT NULL COMMENT 'task start time',
  `end_time` datetime DEFAULT NULL COMMENT 'task end time',
  `host` varchar(135) DEFAULT NULL COMMENT 'host of task running on',
  `execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host',
  `log_path` varchar(200) DEFAULT NULL COMMENT 'task log path',
  `alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert',
  `retry_times` int(4) DEFAULT '0' COMMENT 'task retry times',
  `pid` int(4) DEFAULT NULL COMMENT 'pid of task',
  `app_link` text COMMENT 'yarn app id',
  `task_params` text COMMENT 'job custom parameters',
  `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
  `retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ',
  `max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times',
  `task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
  `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id',
  `executor_id` int(11) DEFAULT NULL,
  `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time',
  `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time',
  `var_pool` longtext COMMENT 'var_pool',
  PRIMARY KEY (`id`),
  KEY `process_instance_id` (`process_instance_id`) USING BTREE,
  CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_task_instance
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_tenant
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_tenant`;
CREATE TABLE `t_ds_tenant` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code',
  `description` varchar(255) DEFAULT NULL,
  `queue_id` int(11) DEFAULT NULL COMMENT 'queue id',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_tenant
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_udfs
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_udfs`;
CREATE TABLE `t_ds_udfs` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `user_id` int(11) NOT NULL COMMENT 'user id',
  `func_name` varchar(100) NOT NULL COMMENT 'UDF function name',
  `class_name` varchar(255) NOT NULL COMMENT 'class of udf',
  `type` tinyint(4) NOT NULL COMMENT 'Udf function type',
  `arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types',
  `database` varchar(255) DEFAULT NULL COMMENT 'data base',
  `description` varchar(255) DEFAULT NULL,
  `resource_id` int(11) NOT NULL COMMENT 'resource id',
  `resource_name` varchar(255) NOT NULL COMMENT 'resource name',
  `create_time` datetime NOT NULL COMMENT 'create time',
  `update_time` datetime NOT NULL COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_udfs
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_user`;
CREATE TABLE `t_ds_user` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id',
  `user_name` varchar(64) DEFAULT NULL COMMENT 'user name',
  `user_password` varchar(64) DEFAULT NULL COMMENT 'user password',
  `user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user',
  `email` varchar(64) DEFAULT NULL COMMENT 'email',
  `phone` varchar(11) DEFAULT NULL COMMENT 'phone',
  `tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id',
  `create_time` datetime DEFAULT NULL COMMENT 'create time',
  `update_time` datetime DEFAULT NULL COMMENT 'update time',
  `queue` varchar(64) DEFAULT NULL COMMENT 'queue',
  `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable',
  PRIMARY KEY (`id`),
  UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_user
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_worker_group
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_worker_group`;
CREATE TABLE `t_ds_worker_group` (
  `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
  `name` varchar(255) NOT NULL COMMENT 'worker group name',
  `addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]',
  `create_time` datetime NULL DEFAULT NULL COMMENT 'create time',
  `update_time` datetime NULL DEFAULT NULL COMMENT 'update time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `name_unique` (`name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Records of t_ds_worker_group
-- ----------------------------

-- ----------------------------
-- Table structure for t_ds_version
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_version`;
CREATE TABLE `t_ds_version` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `version` varchar(200) NOT NULL,
  PRIMARY KEY (`id`),
  UNIQUE KEY `version_UNIQUE` (`version`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version';

-- ----------------------------
-- Records of t_ds_version
-- ----------------------------
INSERT INTO `t_ds_version` VALUES ('1', '1.4.0');

-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time)
VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');

-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
INSERT INTO `t_ds_user` VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', '[email protected]', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1);

-- ----------------------------
-- Table structure for t_ds_plugin_define
-- ----------------------------
SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY',''));
DROP TABLE IF EXISTS `t_ds_plugin_define`;
CREATE TABLE `t_ds_plugin_define` (
  `id` int NOT NULL AUTO_INCREMENT,
  `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email',
  `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin',
  `plugin_params` text COMMENT 'plugin params',
  `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
  `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8;

-- ----------------------------
-- Table structure for t_ds_alert_plugin_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`;
CREATE TABLE `t_ds_alert_plugin_instance` (
  `id` int NOT NULL AUTO_INCREMENT,
  `plugin_define_id` int NOT NULL,
  `plugin_instance_params` text COMMENT 'plugin instance params. Also contain the params value which user input in web ui.',
  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
  `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
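In this schema the JSON split links projects and process definitions through the new bigint code columns (t_ds_project.code, t_ds_process_definition.project_code) rather than through integer ids, which is precisely where the reported "unknown column p.code" error comes from when t_ds_project has not been migrated. A hedged sketch of the kind of join the DAO layer needs against these tables; the aliases p and pd and the literal code value are illustrative, not taken from the project source:

-- list process definitions of one project via the code-based relation
SELECT pd.id, pd.name, pd.version, p.name AS project_name
FROM t_ds_process_definition pd
JOIN t_ds_project p ON pd.project_code = p.code
WHERE p.code = 1234567890123;  -- hypothetical project code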
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,540
[Bug][JSON Split] Project created failure and unknown column p.code in field list
**To Reproduce**
Refer to https://dolphinscheduler.apache.org/zh-cn/development/development-environment-setup.html
And run `org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler`

**Expected behavior**
Bug fixed

**Screenshots**
If applicable, add screenshots to help explain your problem.
![image](https://user-images.githubusercontent.com/4902714/119256897-76d53d00-bbf5-11eb-9a1e-aa82c3948d2e.png)
![image](https://user-images.githubusercontent.com/4902714/119256898-79d02d80-bbf5-11eb-99ae-08cf47a95976.png)

**Which version of Dolphin Scheduler:**
-[dev]

@JinyLeeChina
https://github.com/apache/dolphinscheduler/issues/5540
https://github.com/apache/dolphinscheduler/pull/5611
0d5037e7c37d7903d9172f165b348058f1ddbf88
c5bea3c77430e0b46a2f5a3a91a7fbbc78874196
2021-05-23T10:38:54Z
java
2021-06-15T06:45:39Z
sql/dolphinscheduler_postgre.sql
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS;
DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE;
DROP TABLE IF EXISTS QRTZ_LOCKS;
DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_TRIGGERS;
DROP TABLE IF EXISTS QRTZ_JOB_DETAILS;
DROP TABLE IF EXISTS QRTZ_CALENDARS;

CREATE TABLE QRTZ_JOB_DETAILS(
  SCHED_NAME character varying(120) NOT NULL,
  JOB_NAME character varying(200) NOT NULL,
  JOB_GROUP character varying(200) NOT NULL,
  DESCRIPTION character varying(250) NULL,
  JOB_CLASS_NAME character varying(250) NOT NULL,
  IS_DURABLE boolean NOT NULL,
  IS_NONCONCURRENT boolean NOT NULL,
  IS_UPDATE_DATA boolean NOT NULL,
  REQUESTS_RECOVERY boolean NOT NULL,
  JOB_DATA bytea NULL);
alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP);

CREATE TABLE QRTZ_TRIGGERS (
  SCHED_NAME character varying(120) NOT NULL,
  TRIGGER_NAME character varying(200) NOT NULL,
  TRIGGER_GROUP character varying(200) NOT NULL,
  JOB_NAME character varying(200) NOT NULL,
  JOB_GROUP character varying(200) NOT NULL,
  DESCRIPTION character varying(250) NULL,
  NEXT_FIRE_TIME BIGINT NULL,
  PREV_FIRE_TIME BIGINT NULL,
  PRIORITY INTEGER NULL,
  TRIGGER_STATE character varying(16) NOT NULL,
  TRIGGER_TYPE character varying(8) NOT NULL,
  START_TIME BIGINT NOT NULL,
  END_TIME BIGINT NULL,
  CALENDAR_NAME character varying(200) NULL,
  MISFIRE_INSTR SMALLINT NULL,
  JOB_DATA bytea NULL) ;
alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);

CREATE TABLE QRTZ_SIMPLE_TRIGGERS (
  SCHED_NAME character varying(120) NOT NULL,
  TRIGGER_NAME character varying(200) NOT NULL,
  TRIGGER_GROUP character varying(200) NOT NULL,
  REPEAT_COUNT BIGINT NOT NULL,
  REPEAT_INTERVAL BIGINT NOT NULL,
  TIMES_TRIGGERED BIGINT NOT NULL) ;
alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);

CREATE TABLE QRTZ_CRON_TRIGGERS (
  SCHED_NAME character varying(120) NOT NULL,
  TRIGGER_NAME character varying(200) NOT NULL,
  TRIGGER_GROUP character varying(200) NOT NULL,
  CRON_EXPRESSION character varying(120) NOT NULL,
  TIME_ZONE_ID character varying(80)) ;
alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);

CREATE TABLE QRTZ_SIMPROP_TRIGGERS (
  SCHED_NAME character varying(120) NOT NULL,
  TRIGGER_NAME character varying(200) NOT NULL,
  TRIGGER_GROUP character varying(200) NOT NULL,
  STR_PROP_1 character varying(512) NULL,
  STR_PROP_2 character varying(512) NULL,
  STR_PROP_3 character varying(512) NULL,
  INT_PROP_1 INT NULL,
  INT_PROP_2 INT NULL,
  LONG_PROP_1 BIGINT NULL,
  LONG_PROP_2 BIGINT NULL,
  DEC_PROP_1 NUMERIC(13,4) NULL,
  DEC_PROP_2 NUMERIC(13,4) NULL,
  BOOL_PROP_1 boolean NULL,
  BOOL_PROP_2 boolean NULL) ;
alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);

CREATE TABLE QRTZ_BLOB_TRIGGERS (
  SCHED_NAME character varying(120) NOT NULL,
  TRIGGER_NAME character varying(200) NOT NULL,
  TRIGGER_GROUP character varying(200) NOT NULL,
  BLOB_DATA bytea NULL) ;
alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);

CREATE TABLE QRTZ_CALENDARS (
  SCHED_NAME character varying(120) NOT NULL,
  CALENDAR_NAME character varying(200) NOT NULL,
  CALENDAR bytea NOT NULL) ;
alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME);

CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS (
  SCHED_NAME character varying(120) NOT NULL,
  TRIGGER_GROUP character varying(200) NOT NULL) ;
alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP);

CREATE TABLE QRTZ_FIRED_TRIGGERS (
  SCHED_NAME character varying(120) NOT NULL,
  ENTRY_ID character varying(200) NOT NULL,
  TRIGGER_NAME character varying(200) NOT NULL,
  TRIGGER_GROUP character varying(200) NOT NULL,
  INSTANCE_NAME character varying(200) NOT NULL,
  FIRED_TIME BIGINT NOT NULL,
  SCHED_TIME BIGINT NOT NULL,
  PRIORITY INTEGER NOT NULL,
  STATE character varying(16) NOT NULL,
  JOB_NAME character varying(200) NULL,
  JOB_GROUP character varying(200) NULL,
  IS_NONCONCURRENT boolean NULL,
  REQUESTS_RECOVERY boolean NULL) ;
alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID);

CREATE TABLE QRTZ_SCHEDULER_STATE (
  SCHED_NAME character varying(120) NOT NULL,
  INSTANCE_NAME character varying(200) NOT NULL,
  LAST_CHECKIN_TIME BIGINT NOT NULL,
  CHECKIN_INTERVAL BIGINT NOT NULL) ;
alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME);

CREATE TABLE QRTZ_LOCKS (
  SCHED_NAME character varying(120) NOT NULL,
  LOCK_NAME character varying(40) NOT NULL) ;
alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME);

CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME);
CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE);
CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME);
CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY);
CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP);
CREATE INDEX IDX_QRTZ_FT_T_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP);
CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP);

--
-- Table structure for table t_ds_access_token
--
DROP TABLE IF EXISTS t_ds_access_token;
CREATE TABLE t_ds_access_token (
  id int NOT NULL ,
  user_id int DEFAULT NULL ,
  token varchar(64) DEFAULT NULL ,
  expire_time timestamp DEFAULT NULL ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id)
) ;

--
-- Table structure for table t_ds_alert
--
DROP TABLE IF EXISTS t_ds_alert;
CREATE TABLE t_ds_alert (
  id int NOT NULL ,
  title varchar(64) DEFAULT NULL ,
  content text ,
  alert_status int DEFAULT '0' ,
  log text ,
  alertgroup_id int DEFAULT NULL ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id)
) ;

--
-- Table structure for table t_ds_alertgroup
--
DROP TABLE IF EXISTS t_ds_alertgroup;
CREATE TABLE t_ds_alertgroup(
  id int NOT NULL,
  alert_instance_ids varchar (255) DEFAULT NULL,
  create_user_id int4 DEFAULT NULL,
  group_name varchar(255) DEFAULT NULL,
  description varchar(255) DEFAULT NULL,
  create_time timestamp DEFAULT NULL,
  update_time timestamp DEFAULT NULL,
  PRIMARY KEY (id),
  CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name)
) ;

--
-- Table structure for table t_ds_command
--
DROP TABLE IF EXISTS t_ds_command;
CREATE TABLE t_ds_command (
  id int NOT NULL ,
  command_type int DEFAULT NULL ,
  process_definition_id int DEFAULT NULL ,
  command_param text ,
  task_depend_type int DEFAULT NULL ,
  failure_strategy int DEFAULT '0' ,
  warning_type int DEFAULT '0' ,
  warning_group_id int DEFAULT NULL ,
  schedule_time timestamp DEFAULT NULL ,
  start_time timestamp DEFAULT NULL ,
  executor_id int DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  process_instance_priority int DEFAULT NULL ,
  worker_group varchar(64),
  PRIMARY KEY (id)
) ;

--
-- Table structure for table t_ds_datasource
--
DROP TABLE IF EXISTS t_ds_datasource;
CREATE TABLE t_ds_datasource (
  id int NOT NULL ,
  name varchar(64) NOT NULL ,
  note varchar(255) DEFAULT NULL ,
  type int NOT NULL ,
  user_id int NOT NULL ,
  connection_params text NOT NULL ,
  create_time timestamp NOT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id),
  CONSTRAINT t_ds_datasource_name_UN UNIQUE (name, type)
) ;

--
-- Table structure for table t_ds_error_command
--
DROP TABLE IF EXISTS t_ds_error_command;
CREATE TABLE t_ds_error_command (
  id int NOT NULL ,
  command_type int DEFAULT NULL ,
  executor_id int DEFAULT NULL ,
  process_definition_id int DEFAULT NULL ,
  command_param text ,
  task_depend_type int DEFAULT NULL ,
  failure_strategy int DEFAULT '0' ,
  warning_type int DEFAULT '0' ,
  warning_group_id int DEFAULT NULL ,
  schedule_time timestamp DEFAULT NULL ,
  start_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  process_instance_priority int DEFAULT NULL ,
  worker_group varchar(64),
  message text ,
  PRIMARY KEY (id)
);

--
-- Table structure for table t_ds_master_server
--

--
-- Table structure for table t_ds_process_definition
--
DROP TABLE IF EXISTS t_ds_process_definition;
CREATE TABLE t_ds_process_definition (
  id int NOT NULL ,
  code bigint NOT NULL,
  name varchar(255) DEFAULT NULL ,
  version int DEFAULT NULL ,
  description text ,
  project_code bigint DEFAULT NULL ,
  release_state int DEFAULT NULL ,
  user_id int DEFAULT NULL ,
  global_params text ,
  locations text ,
  connects text ,
  warning_group_id int DEFAULT NULL ,
  flag int DEFAULT NULL ,
  timeout int DEFAULT '0' ,
  tenant_id int DEFAULT '-1' ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id) ,
  CONSTRAINT process_definition_unique UNIQUE (name, project_code)
) ;
create index process_definition_index on t_ds_process_definition (code,id);

DROP TABLE IF EXISTS t_ds_process_definition_log;
CREATE TABLE t_ds_process_definition_log (
  id int NOT NULL ,
  code bigint NOT NULL,
  name varchar(255) DEFAULT NULL ,
  version int DEFAULT NULL ,
  description text ,
  project_code bigint DEFAULT NULL ,
  release_state int DEFAULT NULL ,
  user_id int DEFAULT NULL ,
  global_params text ,
  locations text ,
  connects text ,
  warning_group_id int DEFAULT NULL ,
  flag int DEFAULT NULL ,
  timeout int DEFAULT '0' ,
  tenant_id int DEFAULT '-1' ,
  operator int DEFAULT NULL ,
  operate_time timestamp DEFAULT NULL ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id)
) ;

DROP TABLE IF EXISTS t_ds_task_definition;
CREATE TABLE t_ds_task_definition (
  id int NOT NULL ,
  code bigint NOT NULL,
  name varchar(255) DEFAULT NULL ,
  version int DEFAULT NULL ,
  description text ,
  project_code bigint DEFAULT NULL ,
  user_id int DEFAULT NULL ,
  task_type varchar(50) DEFAULT NULL ,
  task_params text ,
  flag int DEFAULT NULL ,
  task_priority int DEFAULT NULL ,
  worker_group varchar(255) DEFAULT NULL ,
  fail_retry_times int DEFAULT NULL ,
  fail_retry_interval int DEFAULT NULL ,
  timeout_flag int DEFAULT NULL ,
  timeout_notify_strategy int DEFAULT NULL ,
  timeout int DEFAULT '0' ,
  delay_time int DEFAULT '0' ,
  resource_ids varchar(255) DEFAULT NULL ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id) ,
  CONSTRAINT task_definition_unique UNIQUE (name, project_code)
) ;
create index task_definition_index on t_ds_task_definition (project_code,id);

DROP TABLE IF EXISTS t_ds_task_definition_log;
CREATE TABLE t_ds_task_definition_log (
  id int NOT NULL ,
  code bigint NOT NULL,
  name varchar(255) DEFAULT NULL ,
  version int DEFAULT NULL ,
  description text ,
  project_code bigint DEFAULT NULL ,
  user_id int DEFAULT NULL ,
  task_type varchar(50) DEFAULT NULL ,
  task_params text ,
  flag int DEFAULT NULL ,
  task_priority int DEFAULT NULL ,
  worker_group varchar(255) DEFAULT NULL ,
  fail_retry_times int DEFAULT NULL ,
  fail_retry_interval int DEFAULT NULL ,
  timeout_flag int DEFAULT NULL ,
  timeout_notify_strategy int DEFAULT NULL ,
  timeout int DEFAULT '0' ,
  delay_time int DEFAULT '0' ,
  resource_ids varchar(255) DEFAULT NULL ,
  operator int DEFAULT NULL ,
  operate_time timestamp DEFAULT NULL ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id)
) ;

DROP TABLE IF EXISTS t_ds_process_task_relation;
CREATE TABLE t_ds_process_task_relation (
  id int NOT NULL ,
  name varchar(255) DEFAULT NULL ,
  process_definition_version int DEFAULT NULL ,
  project_code bigint DEFAULT NULL ,
  process_definition_code bigint DEFAULT NULL ,
  pre_task_code bigint DEFAULT NULL ,
  pre_task_version int DEFAULT '0' ,
  post_task_code bigint DEFAULT NULL ,
  post_task_version int DEFAULT '0' ,
  condition_type int DEFAULT NULL ,
  condition_params text ,
  create_time timestamp DEFAULT NULL ,
  update_time timestamp DEFAULT NULL ,
  PRIMARY KEY (id)
) ;

DROP TABLE IF EXISTS t_ds_process_task_relation_log;
CREATE TABLE t_ds_process_task_relation_log (
  id int NOT NULL ,
  name varchar(255) DEFAULT NULL ,
  process_definition_version int DEFAULT NULL ,
  project_code bigint DEFAULT NULL ,
  process_definition_code bigint DEFAULT NULL ,
  pre_task_code bigint DEFAULT NULL ,
  pre_task_version int DEFAULT '0' ,
  post_task_code bigint DEFAULT NULL ,
  post_task_version int DEFAULT '0' ,
  condition_type int DEFAULT NULL ,
condition_params text , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_process_instance -- DROP TABLE IF EXISTS t_ds_process_instance; CREATE TABLE t_ds_process_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , process_definition_code bigint DEFAULT NULL , state int DEFAULT NULL , recovery int DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , run_times int DEFAULT NULL , host varchar(135) DEFAULT NULL , command_type int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , max_try_times int DEFAULT '0' , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , command_start_time timestamp DEFAULT NULL , global_params text , process_instance_json text , flag int DEFAULT '1' , update_time timestamp NULL , is_sub_process int DEFAULT '0' , executor_id int NOT NULL , history_cmd text , dependence_schedule_times text , process_instance_priority int DEFAULT NULL , worker_group varchar(64) , timeout int DEFAULT '0' , tenant_id int NOT NULL DEFAULT '-1' , var_pool text , PRIMARY KEY (id) ) ; create index process_instance_index on t_ds_process_instance (process_definition_code,id); create index start_time_index on t_ds_process_instance (start_time); -- -- Table structure for table t_ds_project -- DROP TABLE IF EXISTS t_ds_project; CREATE TABLE t_ds_project ( id int NOT NULL , name varchar(100) DEFAULT NULL , code bigint NOT NULL, description varchar(200) DEFAULT NULL , user_id int DEFAULT NULL , flag int DEFAULT '1' , create_time timestamp DEFAULT CURRENT_TIMESTAMP , update_time timestamp DEFAULT CURRENT_TIMESTAMP , PRIMARY KEY (id) ) ; create index user_id_index on t_ds_project (user_id); -- -- Table structure for table t_ds_queue -- DROP TABLE IF EXISTS t_ds_queue; CREATE TABLE t_ds_queue ( id int NOT NULL , queue_name varchar(64) DEFAULT NULL , queue varchar(64) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_relation_datasource_user -- DROP TABLE IF EXISTS t_ds_relation_datasource_user; CREATE TABLE t_ds_relation_datasource_user ( id int NOT NULL , user_id int NOT NULL , datasource_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_relation_process_instance -- DROP TABLE IF EXISTS t_ds_relation_process_instance; CREATE TABLE t_ds_relation_process_instance ( id int NOT NULL , parent_process_instance_id int DEFAULT NULL , parent_task_instance_id int DEFAULT NULL , process_instance_id int DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_project_user -- DROP TABLE IF EXISTS t_ds_relation_project_user; CREATE TABLE t_ds_relation_project_user ( id int NOT NULL , user_id int NOT NULL , project_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index relation_project_user_id_index on t_ds_relation_project_user (user_id); -- -- Table structure for table t_ds_relation_resources_user -- DROP TABLE IF EXISTS t_ds_relation_resources_user; CREATE TABLE t_ds_relation_resources_user ( id int NOT NULL , user_id int NOT NULL 
, resources_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_udfs_user -- DROP TABLE IF EXISTS t_ds_relation_udfs_user; CREATE TABLE t_ds_relation_udfs_user ( id int NOT NULL , user_id int NOT NULL , udf_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_resources -- DROP TABLE IF EXISTS t_ds_resources; CREATE TABLE t_ds_resources ( id int NOT NULL , alias varchar(64) DEFAULT NULL , file_name varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , user_id int DEFAULT NULL , type int DEFAULT NULL , size bigint DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , pid int, full_name varchar(64), is_directory int, PRIMARY KEY (id), CONSTRAINT t_ds_resources_un UNIQUE (full_name, type) ) ; -- -- Table structure for table t_ds_schedules -- DROP TABLE IF EXISTS t_ds_schedules; CREATE TABLE t_ds_schedules ( id int NOT NULL , process_definition_id int NOT NULL , start_time timestamp NOT NULL , end_time timestamp NOT NULL , timezone_id varchar(40) default NULL , crontab varchar(255) NOT NULL , failure_strategy int NOT NULL , user_id int NOT NULL , release_state int NOT NULL , warning_type int NOT NULL , warning_group_id int DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_session -- DROP TABLE IF EXISTS t_ds_session; CREATE TABLE t_ds_session ( id varchar(64) NOT NULL , user_id int DEFAULT NULL , ip varchar(45) DEFAULT NULL , last_login_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_task_instance -- DROP TABLE IF EXISTS t_ds_task_instance; CREATE TABLE t_ds_task_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_code bigint NOT NULL, task_definition_version int DEFAULT NULL , process_instance_id int DEFAULT NULL , state int DEFAULT NULL , submit_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , host varchar(135) DEFAULT NULL , execute_path varchar(200) DEFAULT NULL , log_path varchar(200) DEFAULT NULL , alert_flag int DEFAULT NULL , retry_times int DEFAULT '0' , pid int DEFAULT NULL , app_link text , task_params text , flag int DEFAULT '1' , retry_interval int DEFAULT NULL , max_retry_times int DEFAULT NULL , task_instance_priority int DEFAULT NULL , worker_group varchar(64), executor_id int DEFAULT NULL , first_submit_time timestamp DEFAULT NULL , delay_time int DEFAULT '0' , var_pool text , PRIMARY KEY (id), CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE ) ; -- -- Table structure for table t_ds_tenant -- DROP TABLE IF EXISTS t_ds_tenant; CREATE TABLE t_ds_tenant ( id int NOT NULL , tenant_code varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , queue_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_udfs -- DROP TABLE IF EXISTS t_ds_udfs; CREATE TABLE t_ds_udfs ( id int NOT NULL , user_id int NOT NULL , func_name varchar(100) NOT NULL , class_name varchar(255) NOT NULL , type int NOT NULL , arg_types 
varchar(255) DEFAULT NULL , database varchar(255) DEFAULT NULL , description varchar(255) DEFAULT NULL , resource_id int NOT NULL , resource_name varchar(255) NOT NULL , create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_user -- DROP TABLE IF EXISTS t_ds_user; CREATE TABLE t_ds_user ( id int NOT NULL , user_name varchar(64) DEFAULT NULL , user_password varchar(64) DEFAULT NULL , user_type int DEFAULT NULL , email varchar(64) DEFAULT NULL , phone varchar(11) DEFAULT NULL , tenant_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , queue varchar(64) DEFAULT NULL , state int DEFAULT 1 , PRIMARY KEY (id) ); comment on column t_ds_user.state is 'state 0:disable 1:enable'; -- -- Table structure for table t_ds_version -- DROP TABLE IF EXISTS t_ds_version; CREATE TABLE t_ds_version ( id int NOT NULL , version varchar(200) NOT NULL, PRIMARY KEY (id) ) ; create index version_index on t_ds_version(version); -- -- Table structure for table t_ds_worker_group -- DROP TABLE IF EXISTS t_ds_worker_group; CREATE TABLE t_ds_worker_group ( id bigint NOT NULL , name varchar(255) NOT NULL , addr_list text DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT name_unique UNIQUE (name) ) ; -- -- Table structure for table t_ds_worker_server -- DROP TABLE IF EXISTS t_ds_worker_server; CREATE TABLE t_ds_worker_server ( id int NOT NULL , host varchar(45) DEFAULT NULL , port int DEFAULT NULL , zk_directory varchar(64) DEFAULT NULL , res_info varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , last_heartbeat_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence; CREATE SEQUENCE t_ds_access_token_id_sequence; ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence; CREATE SEQUENCE t_ds_alert_id_sequence; ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence; CREATE SEQUENCE t_ds_alertgroup_id_sequence; ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_command_id_sequence; CREATE SEQUENCE t_ds_command_id_sequence; ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence; CREATE SEQUENCE t_ds_datasource_id_sequence; ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence; CREATE SEQUENCE t_ds_process_definition_id_sequence; ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_log_id_sequence; CREATE SEQUENCE t_ds_process_definition_log_id_sequence; ALTER TABLE t_ds_process_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_id_sequence; CREATE SEQUENCE t_ds_task_definition_id_sequence; ALTER TABLE t_ds_task_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_log_id_sequence; CREATE SEQUENCE t_ds_task_definition_log_id_sequence; ALTER TABLE t_ds_task_definition_log ALTER COLUMN id SET 
DEFAULT NEXTVAL('t_ds_task_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_id_sequence; ALTER TABLE t_ds_process_task_relation ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_log_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_log_id_sequence; ALTER TABLE t_ds_process_task_relation_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence; CREATE SEQUENCE t_ds_process_instance_id_sequence; ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_project_id_sequence; CREATE SEQUENCE t_ds_project_id_sequence; ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence; CREATE SEQUENCE t_ds_queue_id_sequence; ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence; CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence; ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence; CREATE SEQUENCE t_ds_relation_process_instance_id_sequence; ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence; CREATE SEQUENCE t_ds_relation_project_user_id_sequence; ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence; CREATE SEQUENCE t_ds_relation_resources_user_id_sequence; ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence; CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence; ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence; CREATE SEQUENCE t_ds_resources_id_sequence; ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence; CREATE SEQUENCE t_ds_schedules_id_sequence; ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence; CREATE SEQUENCE t_ds_task_instance_id_sequence; ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence; CREATE SEQUENCE t_ds_tenant_id_sequence; ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence; CREATE SEQUENCE t_ds_udfs_id_sequence; ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_udfs_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_user_id_sequence; CREATE SEQUENCE t_ds_user_id_sequence; ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_version_id_sequence; CREATE SEQUENCE t_ds_version_id_sequence; ALTER TABLE 
t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence; CREATE SEQUENCE t_ds_worker_group_id_sequence; ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence; CREATE SEQUENCE t_ds_worker_server_id_sequence; ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence'); -- Records of t_ds_user, user : admin , password : dolphinscheduler123 INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', '[email protected]', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22'); -- Records of t_ds_alertgroup, default admin warning group INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- Records of t_ds_queue, default queue name : default INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time) VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33'); -- Records of t_ds_version, default version : 1.4.0 INSERT INTO t_ds_version(version) VALUES ('1.4.0'); -- -- Table structure for table t_ds_plugin_define -- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type) ); -- -- Table structure for table t_ds_alert_plugin_instance -- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) );
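To confirm the initialization script above applied cleanly, a quick JDBC probe can check the seeded `t_ds_version` row and exercise one of the sequence-backed `id` defaults. This is a minimal sketch, not part of DolphinScheduler itself: the JDBC URL and credentials are placeholders, and it assumes the PostgreSQL driver is on the classpath.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

/**
 * Sanity-checks that the initialization script was applied: the seeded
 * version row exists and the id sequences back new inserts.
 */
public class SchemaInitCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder connection settings; adjust to your environment.
        String url = "jdbc:postgresql://localhost:5432/dolphinscheduler";
        try (Connection conn = DriverManager.getConnection(url, "postgres", "postgres");
             Statement stmt = conn.createStatement()) {
            // The script seeds exactly one row into t_ds_version.
            try (ResultSet rs = stmt.executeQuery("SELECT version FROM t_ds_version")) {
                while (rs.next()) {
                    System.out.println("schema version: " + rs.getString(1));
                }
            }
            // An insert without an explicit id exercises the sequence default.
            stmt.executeUpdate("INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time) "
                    + "VALUES ('check', 'check', now(), now())");
            try (ResultSet rs = stmt.executeQuery("SELECT id FROM t_ds_queue WHERE queue_name = 'check'")) {
                if (rs.next()) {
                    System.out.println("sequence-assigned id: " + rs.getInt(1));
                }
            }
            // Clean up the probe row.
            stmt.executeUpdate("DELETE FROM t_ds_queue WHERE queue_name = 'check'");
        }
    }
}
```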
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,540
[Bug][JSON Split] Project creation fails with unknown column p.code in field list
**To Reproduce** Refer to https://dolphinscheduler.apache.org/zh-cn/development/development-environment-setup.html and run `org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler` **Expected behavior** The project is created successfully and the `p.code` column resolves. **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/119256897-76d53d00-bbf5-11eb-9a1e-aa82c3948d2e.png) ![image](https://user-images.githubusercontent.com/4902714/119256898-79d02d80-bbf5-11eb-99ae-08cf47a95976.png) **Which version of Dolphin Scheduler:** -[dev] @JinyLeeChina
https://github.com/apache/dolphinscheduler/issues/5540
https://github.com/apache/dolphinscheduler/pull/5611
0d5037e7c37d7903d9172f165b348058f1ddbf88
c5bea3c77430e0b46a2f5a3a91a7fbbc78874196
2021-05-23T10:38:54Z
java
2021-06-15T06:45:39Z
sql/upgrade/1.4.0_schema/mysql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); -- uc_dolphin_T_t_ds_user_A_state drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_user_A_state; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_user_A_state() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_user' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='state') THEN ALTER TABLE t_ds_user ADD `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_user_A_state; DROP PROCEDURE uc_dolphin_T_t_ds_user_A_state; -- uc_dolphin_T_t_ds_tenant_A_tenant_name drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name() BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_tenant' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='tenant_name') THEN ALTER TABLE t_ds_tenant DROP `tenant_name`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_tenant_A_tenant_name; DROP PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name; -- uc_dolphin_T_t_ds_task_instance_A_first_submit_time drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='first_submit_time') THEN ALTER TABLE t_ds_task_instance ADD `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time; -- uc_dolphin_T_t_ds_task_instance_A_delay_time drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='delay_time') THEN ALTER TABLE t_ds_task_instance ADD `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_delay_time(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time; -- uc_dolphin_T_t_ds_task_instance_A_var_pool drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_task_instance 
ADD `var_pool` longtext NULL; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_var_pool(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool; -- uc_dolphin_T_t_ds_process_instance_A_var_pool drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_process_instance ADD `var_pool` longtext NULL; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_instance_A_var_pool(); DROP PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool; -- uc_dolphin_T_t_ds_process_definition_A_modify_by drop PROCEDURE if EXISTS ct_dolphin_T_t_ds_process_definition_version; delimiter d// CREATE PROCEDURE ct_dolphin_T_t_ds_process_definition_version() BEGIN CREATE TABLE IF NOT EXISTS `t_ds_process_definition_version` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_id` int(11) NOT NULL COMMENT 'process definition id', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `process_definition_json` longtext COMMENT 'process definition json content', `description` text, `global_params` text COMMENT 'global parameters', `locations` text COMMENT 'Node location information', `connects` text COMMENT 'Node connection information', `receivers` text COMMENT 'receivers', `receivers_cc` text COMMENT 'cc', `create_time` datetime DEFAULT NULL COMMENT 'create time', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids', PRIMARY KEY (`id`), UNIQUE KEY `process_definition_id_and_version` (`process_definition_id`,`version`) USING BTREE, KEY `process_definition_index` (`id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=84 DEFAULT CHARSET=utf8; END; d// delimiter ; CALL ct_dolphin_T_t_ds_process_definition_version; DROP PROCEDURE ct_dolphin_T_t_ds_process_definition_version; -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- DROP TABLE IF EXISTS `t_ds_plugin_define`; CREATE TABLE `t_ds_plugin_define` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email', `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. 
Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- uc_dolphin_T_t_ds_process_definition_A_warning_group_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_definition_A_warning_group_id(); DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id; -- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition_version' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition_version ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id(); DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id; -- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='alert_instance_ids') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids' AFTER `id`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids; -- uc_dolphin_T_t_ds_alertgroup_A_create_user_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='create_user_id') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id' AFTER `alert_instance_ids`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_create_user_id(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id; -- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND INDEX_NAME ='t_ds_alertgroup_name_UN') THEN ALTER TABLE t_ds_alertgroup ADD UNIQUE 
KEY `t_ds_alertgroup_name_UN` (`group_name`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName; -- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS WHERE TABLE_NAME='t_ds_datasource' AND TABLE_SCHEMA=(SELECT DATABASE()) AND INDEX_NAME ='t_ds_datasource_name_UN') THEN ALTER TABLE t_ds_datasource ADD UNIQUE KEY `t_ds_datasource_name_UN` (`name`, `type`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName(); DROP PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName; -- uc_dolphin_T_t_ds_schedules_A_add_timezone drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_schedules' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='timezone_id') THEN ALTER TABLE t_ds_schedules ADD COLUMN `timezone_id` varchar(40) default NULL COMMENT 'schedule timezone id' AFTER `end_time`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_schedules_A_add_timezone(); DROP PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone; -- ---------------------------- -- These columns will not be used in the new version; if you determine that the historical data is useless, you can delete it using the sql below -- ---------------------------- -- ALTER TABLE t_ds_alert DROP `show_type`, DROP `alert_type`, DROP `receivers`, DROP `receivers_cc`; -- ALTER TABLE t_ds_alertgroup DROP `group_type`; -- ALTER TABLE t_ds_process_definition DROP `receivers`, DROP `receivers_cc`; -- ALTER TABLE t_ds_process_definition_version DROP `receivers`, DROP `receivers_cc`; -- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup; -- ALTER TABLE t_ds_command DROP `dependence`; -- ALTER TABLE t_ds_error_command DROP `dependence`;
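The guard pattern used throughout this upgrade script — probe `information_schema` first, then ALTER only when the object is missing — keeps the migration idempotent, and the same idea can be expressed from plain JDBC. The sketch below is illustrative, not DolphinScheduler code; the helper name is made up.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;

/**
 * Mirrors the stored-procedure guard used in the script above: only issue
 * the ALTER when information_schema shows the column is missing, so the
 * migration stays a no-op when re-run.
 */
public final class IdempotentAlter {
    public static void addColumnIfAbsent(Connection conn, String table,
                                         String column, String ddl) throws Exception {
        String probe = "SELECT 1 FROM information_schema.COLUMNS "
                + "WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = ? AND COLUMN_NAME = ?";
        try (PreparedStatement ps = conn.prepareStatement(probe)) {
            ps.setString(1, table);
            ps.setString(2, column);
            try (ResultSet rs = ps.executeQuery()) {
                if (rs.next()) {
                    return; // column already present; nothing to do
                }
            }
        }
        try (Statement stmt = conn.createStatement()) {
            stmt.executeUpdate(ddl);
        }
    }
}
```

For example, `addColumnIfAbsent(conn, "t_ds_task_instance", "delay_time", "ALTER TABLE t_ds_task_instance ADD delay_time int DEFAULT 0")` does nothing on a database that has already run this script.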
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,540
[Bug][JSON Split] Project creation fails with unknown column p.code in field list
**To Reproduce** Refer to https://dolphinscheduler.apache.org/zh-cn/development/development-environment-setup.html and run `org.apache.dolphinscheduler.dao.upgrade.shell.CreateDolphinScheduler` **Expected behavior** The project is created successfully and the `p.code` column resolves. **Screenshots** ![image](https://user-images.githubusercontent.com/4902714/119256897-76d53d00-bbf5-11eb-9a1e-aa82c3948d2e.png) ![image](https://user-images.githubusercontent.com/4902714/119256898-79d02d80-bbf5-11eb-99ae-08cf47a95976.png) **Which version of Dolphin Scheduler:** -[dev] @JinyLeeChina
https://github.com/apache/dolphinscheduler/issues/5540
https://github.com/apache/dolphinscheduler/pull/5611
0d5037e7c37d7903d9172f165b348058f1ddbf88
c5bea3c77430e0b46a2f5a3a91a7fbbc78874196
2021-05-23T10:38:54Z
java
2021-06-15T06:45:39Z
sql/upgrade/1.4.0_schema/postgresql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ -- uc_dolphin_T_t_ds_user_A_state delimiter ; DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_user_A_state(); delimiter d// CREATE FUNCTION uc_dolphin_T_t_ds_user_A_state() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_CATALOG=current_database() AND TABLE_SCHEMA=current_schema() AND TABLE_NAME='t_ds_user' AND COLUMN_NAME ='state') THEN ALTER TABLE t_ds_user ADD COLUMN state int DEFAULT 1; comment on column t_ds_user.state is 'state 0:disable 1:enable'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; select uc_dolphin_T_t_ds_user_A_state(); DROP FUNCTION uc_dolphin_T_t_ds_user_A_state(); -- uc_dolphin_T_t_ds_tenant_A_tenant_name delimiter ; DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name(); delimiter d// CREATE FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name() RETURNS void AS $$ BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_CATALOG=current_database() AND TABLE_SCHEMA=current_schema() AND TABLE_NAME='t_ds_tenant' AND COLUMN_NAME ='tenant_name') THEN ALTER TABLE t_ds_tenant DROP COLUMN "tenant_name"; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; select uc_dolphin_T_t_ds_tenant_A_tenant_name(); DROP FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name(); -- uc_dolphin_T_t_ds_task_instance_A_first_submit_time delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_first_submit_time() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='first_submit_time') THEN ALTER TABLE t_ds_task_instance ADD COLUMN first_submit_time timestamp DEFAULT NULL; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); -- uc_dolphin_T_t_ds_task_instance_A_delay_time delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_delay_time() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='delay_time') THEN ALTER TABLE t_ds_task_instance ADD COLUMN delay_time int DEFAULT '0'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_task_instance_A_delay_time(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time(); -- uc_dolphin_T_t_ds_task_instance_A_var_pool delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_var_pool() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_task_instance ADD COLUMN var_pool text; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT 
uc_dolphin_T_t_ds_task_instance_A_var_pool(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool(); -- uc_dolphin_T_t_ds_process_instance_A_var_pool delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_instance_A_var_pool() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_instance' AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_process_instance ADD COLUMN var_pool text; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_process_instance_A_var_pool(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool(); -- uc_dolphin_T_t_ds_process_definition_A_modify_by delimiter d// CREATE OR REPLACE FUNCTION ct_dolphin_T_t_ds_process_definition_version() RETURNS void AS $$ BEGIN CREATE TABLE IF NOT EXISTS t_ds_process_definition_version ( id int NOT NULL , process_definition_id int NOT NULL , version int DEFAULT NULL , process_definition_json text , description text , global_params text , locations text , connects text , receivers text , receivers_cc text , create_time timestamp DEFAULT NULL , timeout int DEFAULT '0' , resource_ids varchar(64), PRIMARY KEY (id) ) ; create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version); DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence; CREATE SEQUENCE t_ds_process_definition_version_id_sequence; ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence'); END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT ct_dolphin_T_t_ds_process_definition_version(); DROP FUNCTION IF EXISTS ct_dolphin_T_t_ds_process_definition_version(); -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type) ); -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) ); -- uc_dolphin_T_t_ds_process_definition_A_warning_group_id delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_A_warning_group_id() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition' AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition ADD COLUMN warning_group_id int4 DEFAULT NULL; COMMENT ON COLUMN t_ds_process_definition.warning_group_id IS 'alert group id'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_process_definition_A_warning_group_id(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id(); -- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() RETURNS void AS $$ BEGIN IF 
NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition_version' AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition_version ADD COLUMN warning_group_id int4 DEFAULT NULL; COMMENT ON COLUMN t_ds_process_definition_version.warning_group_id IS 'alert group id'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id(); -- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND COLUMN_NAME ='alert_instance_ids') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN alert_instance_ids varchar (255) DEFAULT NULL; COMMENT ON COLUMN t_ds_alertgroup.alert_instance_ids IS 'alert instance ids'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids(); -- uc_dolphin_T_t_ds_alertgroup_A_create_user_id delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_create_user_id() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND COLUMN_NAME ='create_user_id') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN create_user_id int4 DEFAULT NULL; COMMENT ON COLUMN t_ds_alertgroup.create_user_id IS 'create user id'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_alertgroup_A_create_user_id(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id(); -- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes WHERE relname='t_ds_alertgroup' AND indexrelname ='t_ds_alertgroup_name_UN') THEN ALTER TABLE t_ds_alertgroup ADD CONSTRAINT t_ds_alertgroup_name_UN UNIQUE (group_name); END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName(); -- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes WHERE relname='t_ds_datasource' AND indexrelname ='t_ds_datasource_name_UN') THEN ALTER TABLE t_ds_datasource ADD CONSTRAINT t_ds_datasource_name_UN UNIQUE (name, type); END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName(); -- uc_dolphin_T_t_ds_schedules_A_add_timezone delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_schedules_A_add_timezone() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_schedules' AND COLUMN_NAME ='timezone_id') THEN ALTER TABLE t_ds_schedules ADD COLUMN timezone_id varchar(40) DEFAULT NULL; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_schedules_A_add_timezone(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone(); -- ---------------------------- -- These columns 
will not be used in the new version; if you determine that the historical data is useless, you can delete it using the sql below -- ---------------------------- -- ALTER TABLE t_ds_alert DROP COLUMN "show_type", DROP COLUMN "alert_type", DROP COLUMN "receivers", DROP COLUMN "receivers_cc"; -- ALTER TABLE t_ds_alertgroup DROP COLUMN "group_type"; -- ALTER TABLE t_ds_process_definition DROP COLUMN "receivers", DROP COLUMN "receivers_cc"; -- ALTER TABLE t_ds_process_definition_version DROP COLUMN "receivers", DROP COLUMN "receivers_cc"; -- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup; -- ALTER TABLE t_ds_command DROP COLUMN "dependence"; -- ALTER TABLE t_ds_error_command DROP COLUMN "dependence";
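The `delimiter d//` lines in this script are not PostgreSQL syntax; they are directives for the upgrade tooling's script runner, which switches the statement terminator so that plpgsql bodies containing `;` travel to the server as one statement. The sketch below shows the general splitting idea only; it is a simplified stand-in, not DolphinScheduler's actual ScriptRunner.

```java
import java.util.ArrayList;
import java.util.List;

/**
 * Simplified splitter for scripts like the one above: a line starting with
 * "delimiter" switches the statement terminator, so function bodies that
 * contain ';' are shipped to the server as a single statement.
 */
public final class DelimiterAwareSplitter {
    public static List<String> split(String script) {
        List<String> statements = new ArrayList<>();
        String delimiter = ";";
        StringBuilder current = new StringBuilder();
        for (String line : script.split("\n")) {
            String trimmed = line.trim();
            if (trimmed.toLowerCase().startsWith("delimiter ")) {
                delimiter = trimmed.substring("delimiter ".length()).trim();
                continue; // directive line, never sent to the server
            }
            current.append(line).append('\n');
            if (trimmed.endsWith(delimiter)) {
                String stmt = current.toString().trim();
                // strip the trailing delimiter before executing
                stmt = stmt.substring(0, stmt.length() - delimiter.length()).trim();
                if (!stmt.isEmpty()) {
                    statements.add(stmt);
                }
                current.setLength(0);
            }
        }
        return statements;
    }
}
```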
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,634
[Improvement][registry-plugin] Optimize registry plugin loading and initial installation
**Describe the question** Optimize registry plugin loading and initial installation. **What are the current deficiencies and the benefits of improvement** - Registry plugin install dir attribute assignment. - Add registry plugin initialization and installation related properties. **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5634
https://github.com/apache/dolphinscheduler/pull/5635
9d70c7e534aa10e729d564f14a1126f623cd1035
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
2021-06-15T10:45:19Z
java
2021-06-15T12:51:52Z
.gitignore
.git .svn .hg .zip .gz .DS_Store .target .idea/ target/ dist/ all-dependencies.txt self-modules.txt third-party-dependencies.txt .settings .nbproject .classpath .project *.iml *.ipr *.iws *.tgz .*.swp .vim .tmp node_modules npm-debug.log .vscode logs/* .mvn/ .www t.* .factorypath Chart.lock yarn.lock package-lock.json config.gypi test/coverage /docs/zh_CN/介绍 /docs/zh_CN/贡献代码.md dolphinscheduler-common/src/main/resources/zookeeper.properties dolphinscheduler-dao/src/main/resources/dao/data_source.properties dolphinscheduler-alert/logs/ dolphinscheduler-alert/src/main/resources/alert.properties_bak dolphinscheduler-alert/src/main/resources/logback.xml dolphinscheduler-server/src/main/resources/logback.xml dolphinscheduler-ui/dist dolphinscheduler-ui/node docker/build/apache-dolphinscheduler*
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,634
[Improvement][registry-plugin] Optimize registry plugin loading and initial installation
**Describe the question** Optimize registry plugin loading and initial installation. **What are the current deficiencies and the benefits of improvement** - Registry plugin install dir attribute assignment. - Add registry plugin initialization and installation related properties. **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5634
https://github.com/apache/dolphinscheduler/pull/5635
9d70c7e534aa10e729d564f14a1126f623cd1035
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
2021-06-15T10:45:19Z
java
2021-06-15T12:51:52Z
dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/main/resources/logback.xml
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,634
[Improvement][registry-plugin] Optimize registry plugin loading and initial installation
**Describe the question** Optimize registry plugin loading and initial installation **What are the current deficiencies and the benefits of improvement** - Registry Plugin install dir attribute assignment. - Add registry Plugin to initialize and install related properties **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5634
https://github.com/apache/dolphinscheduler/pull/5635
9d70c7e534aa10e729d564f14a1126f623cd1035
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
2021-06-15T10:45:19Z
java
2021-06-15T12:51:52Z
dolphinscheduler-server/src/main/resources/config/install_config.conf
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # NOTICE: If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, please escape them, for example, `[` escapes to `\[` # postgresql or mysql dbtype="mysql" # db config # db address and port dbhost="192.168.xx.xx:3306" # db username username="xx" # db password # NOTICE: if there are special characters, please use the \ to escape, for example, `[` escapes to `\[` password="xx" # database name dbname="dolphinscheduler" # zk cluster zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181" # zk root directory zkRoot="/dolphinscheduler" # Note: the target installation path for dolphinscheduler; please do not configure it to be the same as the current path (pwd) installPath="/data1_1T/dolphinscheduler" # deployment user # Note: the deployment user needs to have sudo privileges and permission to operate hdfs. If hdfs is enabled, the root directory needs to be created manually deployUser="dolphinscheduler" # alert config # alert plugin dir # Note: the Alert Plugin Jar is found and loaded from this dir. alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert" # user data local directory path, please make sure the directory exists and has read/write permissions dataBasedirPath="/tmp/dolphinscheduler" # resource storage type: HDFS, S3, NONE resourceStorageType="NONE" # resource store on HDFS/S3 path; resource files will be stored under this hadoop hdfs path. Configure it yourself and make sure the directory exists on hdfs and has read/write permissions. "/dolphinscheduler" is recommended resourceUploadPath="/dolphinscheduler" # if resourceStorageType is HDFS, set defaultFS to the namenode address; for HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
# if S3, set it to the S3 address, for example: s3a://dolphinscheduler # Note: for s3, be sure to create the root directory /dolphinscheduler defaultFS="hdfs://mycluster:8020" # if resourceStorageType is S3, the following three configurations are required, otherwise please ignore them s3Endpoint="http://192.168.xx.xx:9010" s3AccessKey="xxxxxxxxxx" s3SecretKey="xxxxxxxxxx" # resourcemanager port, the default value is 8088 if not specified resourceManagerHttpAddressPort="8088" # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty yarnHaIps="192.168.xx.xx,192.168.xx.xx" # if resourcemanager HA is enabled or resourcemanager is not used, please keep the default value; if resourcemanager is single, you only need to replace it with the actual resourcemanager hostname singleYarnIp="yarnIp1" # user who has permission to create directories under the HDFS/S3 root path # Note: if kerberos is enabled, please config hdfsRootUser= hdfsRootUser="hdfs" # kerberos config # whether kerberos is enabled; if so, the following four items need to be configured, otherwise please ignore them kerberosStartUp="false" # kdc krb5 config file path krb5ConfPath="$installPath/conf/krb5.conf" # keytab username keytabUserName="[email protected]" # username keytab path keytabPath="$installPath/conf/hdfs.headless.keytab" # kerberos expire time, the unit is hour kerberosExpireTime="2" # use sudo or not sudoEnable="true" # worker tenant auto create workerTenantAutoCreate="false" # api server port apiServerPort="12345" # install hosts # Note: hostname list of the machines to install on. If it is pseudo-distributed, just write a single hostname ips="ds1,ds2,ds3,ds4,ds5" # ssh port, default 22 # Note: if the ssh port is not the default, modify it here sshPort="22" # run master machine # Note: list of hostnames for deploying master masters="ds1,ds2" # run worker machine # note: the worker group name of each worker needs to be written; the default value is "default" workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default" # run alert machine # note: list of machine hostnames for deploying alert server alertServer="ds3" # run api machine # note: list of machine hostnames for deploying api server apiServers="ds1"
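This file is sourced directly by the deployment shell scripts. If the same key="value" pairs ever need to be read from Java-based tooling, a minimal parser could look like the following sketch; the class name is made up and the quoting rules are deliberately simplified.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;

/**
 * Minimal reader for shell-style key="value" config such as
 * install_config.conf. Comment and blank lines are skipped; surrounding
 * double quotes are stripped from the value.
 */
public final class InstallConfigReader {
    public static Map<String, String> read(String path) throws IOException {
        Map<String, String> config = new HashMap<>();
        for (String line : Files.readAllLines(Paths.get(path))) {
            String trimmed = line.trim();
            if (trimmed.isEmpty() || trimmed.startsWith("#") || !trimmed.contains("=")) {
                continue; // comment, blank, or non-assignment line
            }
            int eq = trimmed.indexOf('=');
            String key = trimmed.substring(0, eq).trim();
            String value = trimmed.substring(eq + 1).trim();
            // strip surrounding double quotes, e.g. dbtype="mysql" -> mysql
            if (value.length() >= 2 && value.startsWith("\"") && value.endsWith("\"")) {
                value = value.substring(1, value.length() - 1);
            }
            config.put(key, value);
        }
        return config;
    }
}
```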
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,634
[Improvement][registry-plugin] Optimize registry plugin loading and initial installation
**Describe the question** Optimize registry plugin loading and initial installation. **What are the current deficiencies and the benefits of improvement** - Registry plugin install dir attribute assignment. - Add registry plugin initialization and installation related properties. **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5634
https://github.com/apache/dolphinscheduler/pull/5635
9d70c7e534aa10e729d564f14a1126f623cd1035
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
2021-06-15T10:45:19Z
java
2021-06-15T12:51:52Z
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/registry/RegistryCenter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.service.registry;

import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_DEAD_SERVERS;

import org.apache.dolphinscheduler.common.IStoppable;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.spi.plugin.DolphinPluginLoader;
import org.apache.dolphinscheduler.spi.plugin.DolphinPluginManagerConfig;
import org.apache.dolphinscheduler.spi.register.Registry;
import org.apache.dolphinscheduler.spi.register.RegistryConnectListener;
import org.apache.dolphinscheduler.spi.register.RegistryException;
import org.apache.dolphinscheduler.spi.register.RegistryPluginManager;
import org.apache.dolphinscheduler.spi.register.SubscribeListener;

import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.ImmutableList;

/**
 * All business parties use this class to access the registry
 */
public class RegistryCenter {

    private static final Logger logger = LoggerFactory.getLogger(RegistryCenter.class);

    private final AtomicBoolean isStarted = new AtomicBoolean(false);

    private Registry registry;

    private IStoppable stoppable;

    /**
     * nodes namespace
     */
    protected static String NODES;

    /**
     * master path
     */
    protected static String MASTER_PATH = "/nodes/master";

    private RegistryPluginManager registryPluginManager;

    /**
     * worker path
     */
    protected static String WORKER_PATH = "/nodes/worker";

    protected static final String EMPTY = "";

    private static final String REGISTRY_PREFIX = "registry";

    private static final String REGISTRY_PLUGIN_BINDING = "registry.plugin.binding";

    private static final String REGISTRY_PLUGIN_DIR = "registry.plugin.dir";

    private static final String MAVEN_LOCAL_REPOSITORY = "maven.local.repository";

    private static final String REGISTRY_PLUGIN_NAME = "plugin.name";

    /**
     * default registry plugin dir
     */
    private static final String REGISTRY_PLUGIN_PATH = "lib/plugin/registry";

    private static final String REGISTRY_CONFIG_FILE_PATH = "/registry.properties";

    /**
     * init node persist
     */
    public void init() {
        if (isStarted.compareAndSet(false, true)) {
            PropertyUtils.loadPropertyFile(REGISTRY_CONFIG_FILE_PATH);
            Map<String, String> registryConfig = PropertyUtils.getPropertiesByPrefix(REGISTRY_PREFIX);

            if (null == registryConfig || registryConfig.isEmpty()) {
                throw new RegistryException("registry config param is null");
            }
            if (null == registryPluginManager) {
                installRegistryPlugin(registryConfig.get(REGISTRY_PLUGIN_NAME));
                registry = registryPluginManager.getRegistry();
            }
            registry.init(registryConfig);
            initNodes();
        }
    }

    /**
     * init nodes
     */
    private void initNodes() {
        persist(MASTER_PATH, EMPTY);
        persist(WORKER_PATH, EMPTY);
    }

    /**
     * install registry plugin
     */
    private void installRegistryPlugin(String registryPluginName) {
        DolphinPluginManagerConfig registryPluginManagerConfig = new DolphinPluginManagerConfig();
        registryPluginManagerConfig.setPlugins(PropertyUtils.getString(REGISTRY_PLUGIN_BINDING));
        if (StringUtils.isNotBlank(PropertyUtils.getString(REGISTRY_PLUGIN_DIR))) {
            registryPluginManagerConfig.setPlugins(PropertyUtils.getString(REGISTRY_PLUGIN_DIR, REGISTRY_PLUGIN_PATH).trim());
        }

        if (StringUtils.isNotBlank(PropertyUtils.getString(MAVEN_LOCAL_REPOSITORY))) {
            registryPluginManagerConfig.setMavenLocalRepository(PropertyUtils.getString(MAVEN_LOCAL_REPOSITORY).trim());
        }

        registryPluginManager = new RegistryPluginManager(registryPluginName);

        DolphinPluginLoader registryPluginLoader = new DolphinPluginLoader(registryPluginManagerConfig, ImmutableList.of(registryPluginManager));
        try {
            registryPluginLoader.loadPlugins();
        } catch (Exception e) {
            throw new RuntimeException("Load registry Plugin Failed !", e);
        }
    }

    /**
     * close
     */
    public void close() {
        if (isStarted.compareAndSet(true, false) && registry != null) {
            registry.close();
        }
    }

    public void persist(String key, String value) {
        registry.persist(key, value);
    }

    public void persistEphemeral(String key, String value) {
        registry.persistEphemeral(key, value);
    }

    public void remove(String key) {
        registry.remove(key);
    }

    public void update(String key, String value) {
        registry.update(key, value);
    }

    public String get(String key) {
        return registry.get(key);
    }

    public void subscribe(String path, SubscribeListener subscribeListener) {
        registry.subscribe(path, subscribeListener);
    }

    public void addConnectionStateListener(RegistryConnectListener registryConnectListener) {
        registry.addConnectionStateListener(registryConnectListener);
    }

    public boolean isExisted(String key) {
        return registry.isExisted(key);
    }

    public boolean getLock(String key) {
        return registry.acquireLock(key);
    }

    public boolean releaseLock(String key) {
        return registry.releaseLock(key);
    }

    /**
     * @return get dead server node parent path
     */
    public String getDeadZNodeParentPath() {
        return REGISTRY_DOLPHINSCHEDULER_DEAD_SERVERS;
    }

    public void setStoppable(IStoppable stoppable) {
        this.stoppable = stoppable;
    }

    public IStoppable getStoppable() {
        return stoppable;
    }

    /**
     * get master path
     *
     * @return master path
     */
    public String getMasterPath() {
        return MASTER_PATH;
    }

    /**
     * whether master path
     *
     * @param path path
     * @return result
     */
    public boolean isMasterPath(String path) {
        return path != null && path.contains(MASTER_PATH);
    }

    /**
     * get worker path
     *
     * @return worker path
     */
    public String getWorkerPath() {
        return WORKER_PATH;
    }

    /**
     * get worker group path
     *
     * @param workerGroup workerGroup
     * @return worker group path
     */
    public String getWorkerGroupPath(String workerGroup) {
        return WORKER_PATH + "/" + workerGroup;
    }

    /**
     * whether worker path
     *
     * @param path path
     * @return result
     */
    public boolean isWorkerPath(String path) {
        return path != null && path.contains(WORKER_PATH);
    }

    /**
     * get children nodes
     *
     * @param key key
     * @return children nodes
     */
    public List<String> getChildrenKeys(final String key) {
        return registry.getChildren(key);
    }
}
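The "Registry Plugin install dir attribute assignment" deficiency tracked in issue #5634 is visible in installRegistryPlugin above: when registry.plugin.dir is set, its value is written through setPlugins, overwriting the plugin binding instead of assigning the directory attribute. A minimal sketch of the intended assignment, assuming DolphinPluginManagerConfig exposes an installed-plugins-dir setter (an assumption for illustration, not necessarily the merged patch):

    String registryPluginDir = PropertyUtils.getString(REGISTRY_PLUGIN_DIR, REGISTRY_PLUGIN_PATH);
    if (StringUtils.isNotBlank(registryPluginDir)) {
        // assign the plugin directory attribute instead of overwriting the binding
        registryPluginManagerConfig.setInstalledPluginsDir(registryPluginDir.trim());
    }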
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,634
[Improvement][registry-plugin] Optimize registry plugin loading and initial installation
**Describe the question** Optimize registry plugin loading and initial installation **What are the current deficiencies and the benefits of improvement** - Registry Plugin install dir attribute assignment. - Add registry Plugin to initialize and install related properties **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5634
https://github.com/apache/dolphinscheduler/pull/5635
9d70c7e534aa10e729d564f14a1126f623cd1035
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
2021-06-15T10:45:19Z
java
2021-06-15T12:51:52Z
dolphinscheduler-service/src/main/resources/registry.properties
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

#registry.plugin.dir config the Registry Plugin dir. The server will find and load the Registry Plugin jar from this dir when it is deployed and started.
#registry.plugin.dir=/Users/kris/workspace/incubator-dolphinscheduler/dolphinscheduler-dist/target/dolphinscheduler-dist-1.3.6-SNAPSHOT/lib/plugin/registry/zookeeper

#registry.plugin.name=zookeeper
#registry.plugin.binding=registry
#registry.servers=127.0.0.1:2181
#maven.local.repository=/Users/gaojun/Documents/jianguoyun/localRepository

#registry.plugin.binding config the Registry Plugin that needs to be loaded when developing and running in an IDE
#registry.plugin.binding=\
#  ./dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/pom.xml
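For reference, a filled-in configuration for the bundled ZooKeeper registry plugin might look like the following; the values are illustrative examples, not shipped defaults:

    registry.plugin.dir=lib/plugin/registry/zookeeper
    registry.plugin.name=zookeeper
    registry.servers=127.0.0.1:2181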
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,634
[Improvement][registry-plugin] Optimize registry plugin loading and initial installation
**Describe the question** Optimize registry plugin loading and initial installation **What are the current deficiencies and the benefits of improvement** - Registry Plugin install dir attribute assignment. - Add registry Plugin to initialize and install related properties **Which version of DolphinScheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/5634
https://github.com/apache/dolphinscheduler/pull/5635
9d70c7e534aa10e729d564f14a1126f623cd1035
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
2021-06-15T10:45:19Z
java
2021-06-15T12:51:52Z
install.sh
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

workDir=`dirname $0`
workDir=`cd ${workDir};pwd`

source ${workDir}/conf/config/install_config.conf

# 1.replace file
echo "1.replace file"

txt=""
if [[ "$OSTYPE" == "darwin"* ]]; then
    # Mac OSX
    txt="''"
fi

datasourceDriverClassname="com.mysql.jdbc.Driver"
if [ $dbtype == "postgresql" ];then
  datasourceDriverClassname="org.postgresql.Driver"
fi

sed -i ${txt} "s@^spring.datasource.driver-class-name=.*@spring.datasource.driver-class-name=${datasourceDriverClassname}@g" conf/datasource.properties
sed -i ${txt} "s@^spring.datasource.url=.*@spring.datasource.url=jdbc:${dbtype}://${dbhost}/${dbname}?characterEncoding=UTF-8\&allowMultiQueries=true@g" conf/datasource.properties
sed -i ${txt} "s@^spring.datasource.username=.*@spring.datasource.username=${username}@g" conf/datasource.properties
sed -i ${txt} "s@^spring.datasource.password=.*@spring.datasource.password=${password}@g" conf/datasource.properties

sed -i ${txt} "s@^#\?zookeeper.quorum=.*@zookeeper.quorum=${zkQuorum}@g" conf/zookeeper.properties
sed -i ${txt} "s@^#\?zookeeper.dolphinscheduler.root=.*@zookeeper.dolphinscheduler.root=${zkRoot}@g" conf/zookeeper.properties

sed -i ${txt} "s@^data.basedir.path=.*@data.basedir.path=${dataBasedirPath}@g" conf/common.properties
sed -i ${txt} "s@^resource.storage.type=.*@resource.storage.type=${resourceStorageType}@g" conf/common.properties
sed -i ${txt} "s@^resource.upload.path=.*@resource.upload.path=${resourceUploadPath}@g" conf/common.properties
sed -i ${txt} "s@^hadoop.security.authentication.startup.state=.*@hadoop.security.authentication.startup.state=${kerberosStartUp}@g" conf/common.properties
sed -i ${txt} "s@^java.security.krb5.conf.path=.*@java.security.krb5.conf.path=${krb5ConfPath}@g" conf/common.properties
sed -i ${txt} "s@^login.user.keytab.username=.*@login.user.keytab.username=${keytabUserName}@g" conf/common.properties
sed -i ${txt} "s@^login.user.keytab.path=.*@login.user.keytab.path=${keytabPath}@g" conf/common.properties
sed -i ${txt} "s@^kerberos.expire.time=.*@kerberos.expire.time=${kerberosExpireTime}@g" conf/common.properties
sed -i ${txt} "s@^hdfs.root.user=.*@hdfs.root.user=${hdfsRootUser}@g" conf/common.properties
sed -i ${txt} "s@^fs.defaultFS=.*@fs.defaultFS=${defaultFS}@g" conf/common.properties
sed -i ${txt} "s@^fs.s3a.endpoint=.*@fs.s3a.endpoint=${s3Endpoint}@g" conf/common.properties
sed -i ${txt} "s@^fs.s3a.access.key=.*@fs.s3a.access.key=${s3AccessKey}@g" conf/common.properties
sed -i ${txt} "s@^fs.s3a.secret.key=.*@fs.s3a.secret.key=${s3SecretKey}@g" conf/common.properties
sed -i ${txt} "s@^resource.manager.httpaddress.port=.*@resource.manager.httpaddress.port=${resourceManagerHttpAddressPort}@g" conf/common.properties
sed -i ${txt} "s@^yarn.resourcemanager.ha.rm.ids=.*@yarn.resourcemanager.ha.rm.ids=${yarnHaIps}@g" conf/common.properties
sed -i ${txt} "s@^yarn.application.status.address=.*@yarn.application.status.address=http://${singleYarnIp}:%s/ws/v1/cluster/apps/%s@g" conf/common.properties
sed -i ${txt} "s@^yarn.job.history.status.address=.*@yarn.job.history.status.address=http://${singleYarnIp}:19888/ws/v1/history/mapreduce/jobs/%s@g" conf/common.properties
sed -i ${txt} "s@^sudo.enable=.*@sudo.enable=${sudoEnable}@g" conf/common.properties

# the following configurations may be commented, so add #\? to ensure successful sed
sed -i ${txt} "s@^#\?worker.tenant.auto.create=.*@worker.tenant.auto.create=${workerTenantAutoCreate}@g" conf/worker.properties
sed -i ${txt} "s@^#\?alert.listen.host=.*@alert.listen.host=${alertServer}@g" conf/worker.properties
sed -i ${txt} "s@^#\?alert.plugin.dir=.*@alert.plugin.dir=${alertPluginDir}@g" conf/alert.properties
sed -i ${txt} "s@^#\?server.port=.*@server.port=${apiServerPort}@g" conf/application-api.properties

# 2.create directory
echo "2.create directory"

if [ ! -d $installPath ];then
  sudo mkdir -p $installPath
  sudo chown -R $deployUser:$deployUser $installPath
fi

# 3.scp resources
echo "3.scp resources"
sh ${workDir}/script/scp-hosts.sh
if [ $? -eq 0 ]
then
  echo 'scp copy completed'
else
  echo 'scp copy failed, exiting'
  exit 1
fi

# 4.stop server
echo "4.stop server"
sh ${workDir}/script/stop-all.sh

# 5.delete zk node
echo "5.delete zk node"
sh ${workDir}/script/remove-zk-node.sh $zkRoot

# 6.startup
echo "6.startup"
sh ${workDir}/script/start-all.sh
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,580
[Improvement][SQL] Query return number should be configurable
**Describe the question** The SQL query result can only return 10,000 records, which are currently hard-coded **Which version of DolphinScheduler:** -[1.3.6-Release] **Describe alternatives you've considered** I think it should be configurable.
https://github.com/apache/dolphinscheduler/issues/5580
https://github.com/apache/dolphinscheduler/pull/5632
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
67711442d5add82164a916452020a68a84693000
2021-06-02T04:11:42Z
java
2021-06-16T01:40:21Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.task.sql;

import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.StringUtils;

import java.util.ArrayList;
import java.util.List;

/**
 * Sql/Hql parameter
 */
public class SqlParameters extends AbstractParameters {

    /**
     * data source type, eg MYSQL, POSTGRES, HIVE ...
     */
    private String type;

    /**
     * datasource id
     */
    private int datasource;

    /**
     * sql
     */
    private String sql;

    /**
     * sql type
     * 0 query
     * 1 NON_QUERY
     */
    private int sqlType;

    /**
     * send email
     */
    private Boolean sendEmail;

    /**
     * display rows
     */
    private int displayRows;

    /**
     * udf list
     */
    private String udfs;

    /**
     * show type
     * 0 TABLE
     * 1 TEXT
     * 2 attachment
     * 3 TABLE+attachment
     */
    private String showType;

    /**
     * SQL connection parameters
     */
    private String connParams;

    /**
     * Pre Statements
     */
    private List<String> preStatements;

    /**
     * Post Statements
     */
    private List<String> postStatements;

    /**
     * groupId
     */
    private int groupId;

    /**
     * title
     */
    private String title;

    public String getType() {
        return type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public int getDatasource() {
        return datasource;
    }

    public void setDatasource(int datasource) {
        this.datasource = datasource;
    }

    public String getSql() {
        return sql;
    }

    public void setSql(String sql) {
        this.sql = sql;
    }

    public String getUdfs() {
        return udfs;
    }

    public void setUdfs(String udfs) {
        this.udfs = udfs;
    }

    public int getSqlType() {
        return sqlType;
    }

    public void setSqlType(int sqlType) {
        this.sqlType = sqlType;
    }

    public Boolean getSendEmail() {
        return sendEmail;
    }

    public void setSendEmail(Boolean sendEmail) {
        this.sendEmail = sendEmail;
    }

    public int getDisplayRows() {
        return displayRows;
    }

    public void setDisplayRows(int displayRows) {
        this.displayRows = displayRows;
    }

    public String getShowType() {
        return showType;
    }

    public void setShowType(String showType) {
        this.showType = showType;
    }

    public String getConnParams() {
        return connParams;
    }

    public void setConnParams(String connParams) {
        this.connParams = connParams;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public List<String> getPreStatements() {
        return preStatements;
    }

    public void setPreStatements(List<String> preStatements) {
        this.preStatements = preStatements;
    }

    public List<String> getPostStatements() {
        return postStatements;
    }

    public void setPostStatements(List<String> postStatements) {
        this.postStatements = postStatements;
    }

    public int getGroupId() {
        return groupId;
    }

    public void setGroupId(int groupId) {
        this.groupId = groupId;
    }

    @Override
    public boolean checkParameters() {
        return datasource != 0 && StringUtils.isNotEmpty(type) && StringUtils.isNotEmpty(sql);
    }

    @Override
    public List<ResourceInfo> getResourceFilesList() {
        return new ArrayList<>();
    }

    @Override
    public String toString() {
        return "SqlParameters{"
                + "type='" + type + '\''
                + ", datasource=" + datasource
                + ", sql='" + sql + '\''
                + ", sqlType=" + sqlType
                + ", sendEmail=" + sendEmail
                + ", displayRows=" + displayRows
                + ", udfs='" + udfs + '\''
                + ", showType='" + showType + '\''
                + ", connParams='" + connParams + '\''
                + ", groupId='" + groupId + '\''
                + ", title='" + title + '\''
                + ", preStatements=" + preStatements
                + ", postStatements=" + postStatements
                + '}';
    }
}
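Issue #5580 asks for the hard-coded 10,000-row query cap to be configurable, and this parameter class is the natural place for such a knob. A minimal sketch of the addition, assuming a simple int field (the name "limit" and its accessors are illustrative assumptions, not necessarily the merged API):

    /**
     * query result limit; values <= 0 mean "use the server-side default"
     */
    private int limit; // hypothetical new task parameter

    public int getLimit() {
        return limit;
    }

    public void setLimit(int limit) {
        this.limit = limit;
    }

A field added here is picked up automatically when SqlTask deserializes the task params JSON via JSONUtils.parseObject, so only the consuming side (see the SqlTask record below) needs to change.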
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,580
[Improvement][SQL] Query return number should be configurable
**Describe the question** The SQL query result can only return 10,000 records, which are currently hard-coded **Which version of DolphinScheduler:** -[1.3.6-Release] **Describe alternatives you've considered** I think it should be configurable.
https://github.com/apache/dolphinscheduler/issues/5580
https://github.com/apache/dolphinscheduler/pull/5632
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
67711442d5add82164a916452020a68a84693000
2021-06-02T04:11:42Z
java
2021-06-16T01:40:21Z
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/SqlParametersTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.task;

import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;

import org.junit.Assert;
import org.junit.Test;

public class SqlParametersTest {

    private final String type = "MYSQL";
    private final String sql = "select * from t_ds_user";
    private final String udfs = "test-udfs-1.0.0-SNAPSHOT.jar";
    private final int datasource = 1;
    private final int sqlType = 0;
    private final Boolean sendEmail = true;
    private final int displayRows = 10;
    private final String showType = "TABLE";
    private final String title = "sql test";
    private final int groupId = 0;

    @Test
    public void testSqlParameters() {
        SqlParameters sqlParameters = new SqlParameters();
        Assert.assertTrue(CollectionUtils.isEmpty(sqlParameters.getResourceFilesList()));

        sqlParameters.setType(type);
        sqlParameters.setSql(sql);
        sqlParameters.setUdfs(udfs);
        sqlParameters.setDatasource(datasource);
        sqlParameters.setSqlType(sqlType);
        sqlParameters.setSendEmail(sendEmail);
        sqlParameters.setDisplayRows(displayRows);
        sqlParameters.setShowType(showType);
        sqlParameters.setTitle(title);
        sqlParameters.setGroupId(groupId);

        Assert.assertEquals(type, sqlParameters.getType());
        Assert.assertEquals(sql, sqlParameters.getSql());
        Assert.assertEquals(udfs, sqlParameters.getUdfs());
        Assert.assertEquals(datasource, sqlParameters.getDatasource());
        Assert.assertEquals(sqlType, sqlParameters.getSqlType());
        Assert.assertEquals(sendEmail, sqlParameters.getSendEmail());
        Assert.assertEquals(displayRows, sqlParameters.getDisplayRows());
        Assert.assertEquals(showType, sqlParameters.getShowType());
        Assert.assertEquals(title, sqlParameters.getTitle());
        Assert.assertEquals(groupId, sqlParameters.getGroupId());

        Assert.assertTrue(sqlParameters.checkParameters());
    }
}
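If the configurable query limit proposed in issue #5580 lands as a field on SqlParameters, this test would naturally grow a matching round-trip assertion, e.g. (setLimit/getLimit are assumed names, mirroring the sketch under the SqlParameters record above):

    sqlParameters.setLimit(5000);
    Assert.assertEquals(5000, sqlParameters.getLimit());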
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,580
[Improvement][SQL] Query return number should be configurable
**Describe the question** The SQL query result can only return 10,000 records, which are currently hard-coded **Which version of DolphinScheduler:** -[1.3.6-Release] **Describe alternatives you've considered** I think it should be configurable.
https://github.com/apache/dolphinscheduler/issues/5580
https://github.com/apache/dolphinscheduler/pull/5632
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
67711442d5add82164a916452020a68a84693000
2021-06-02T04:11:42Z
java
2021-06-16T01:40:21Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker.task.sql;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.datasource.BaseConnectionParam;
import org.apache.dolphinscheduler.common.datasource.DatasourceUtil;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlBinds;
import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.remote.command.alert.AlertSendResponseCommand;
import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.UDFUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import org.slf4j.Logger;

import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

/**
 * sql task
 */
public class SqlTask extends AbstractTask {

    /**
     * sql parameters
     */
    private SqlParameters sqlParameters;

    /**
     * alert dao
     */
    private AlertDao alertDao;

    /**
     * base datasource
     */
    private BaseConnectionParam baseConnectionParam;

    /**
     * taskExecutionContext
     */
    private TaskExecutionContext taskExecutionContext;

    /**
     * default query sql limit
     */
    private static final int LIMIT = 10000;

    private AlertClientService alertClientService;

    public SqlTask(TaskExecutionContext taskExecutionContext, Logger logger, AlertClientService alertClientService) {
        super(taskExecutionContext, logger);
        this.taskExecutionContext = taskExecutionContext;

        logger.info("sql task params {}", taskExecutionContext.getTaskParams());
        this.sqlParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SqlParameters.class);

        if (!sqlParameters.checkParameters()) {
            throw new RuntimeException("sql task params is not valid");
        }
        this.alertClientService = alertClientService;
        this.alertDao = SpringApplicationContext.getBean(AlertDao.class);
    }

    @Override
    public void handle() throws Exception {
        // set the name of the current thread
        String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, taskExecutionContext.getTaskAppId());
        Thread.currentThread().setName(threadLoggerInfoName);

        logger.info("Full sql parameters: {}", sqlParameters);
        logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {}",
                sqlParameters.getType(),
                sqlParameters.getDatasource(),
                sqlParameters.getSql(),
                sqlParameters.getLocalParams(),
                sqlParameters.getUdfs(),
                sqlParameters.getShowType(),
                sqlParameters.getConnParams());
        try {
            SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext();

            // get datasource
            baseConnectionParam = (BaseConnectionParam) DatasourceUtil.buildConnectionParams(
                    DbType.valueOf(sqlParameters.getType()),
                    sqlTaskExecutionContext.getConnectionParams());

            // ready to execute SQL and parameter entity Map
            SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql());
            List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements())
                    .orElse(new ArrayList<>())
                    .stream()
                    .map(this::getSqlAndSqlParamsMap)
                    .collect(Collectors.toList());
            List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements())
                    .orElse(new ArrayList<>())
                    .stream()
                    .map(this::getSqlAndSqlParamsMap)
                    .collect(Collectors.toList());
            List<String> createFuncs = UDFUtils.createFuncs(sqlTaskExecutionContext.getUdfFuncTenantCodeMap(), logger);

            // execute sql task
            executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs);
            setExitStatusCode(Constants.EXIT_CODE_SUCCESS);
        } catch (Exception e) {
            setExitStatusCode(Constants.EXIT_CODE_FAILURE);
            logger.error("sql task error: {}", e.toString());
            throw e;
        }
    }

    /**
     * ready to execute SQL and parameter entity Map
     *
     * @return SqlBinds
     */
    private SqlBinds getSqlAndSqlParamsMap(String sql) {
        Map<Integer, Property> sqlParamsMap = new HashMap<>();
        StringBuilder sqlBuilder = new StringBuilder();

        // find process instance by task id
        Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
                taskExecutionContext.getDefinedParams(),
                sqlParameters.getLocalParametersMap(),
                CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
                taskExecutionContext.getScheduleTime());

        // spell SQL according to the final user-defined variable
        if (paramsMap == null) {
            sqlBuilder.append(sql);
            return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
        }

        if (StringUtils.isNotEmpty(sqlParameters.getTitle())) {
            String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(),
                    ParamUtils.convert(paramsMap));
            logger.info("SQL title : {}", title);
            sqlParameters.setTitle(title);
        }

        // replace variable TIME with $[YYYYmmddd...] in sql when history run job and batch complement job
        sql = ParameterUtils.replaceScheduleTime(sql, taskExecutionContext.getScheduleTime());

        // special characters need to be escaped, ${} needs to be escaped
        String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*";
        setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap);

        // replace the original value in sql !{...}; it does not participate in precompilation
        String rgexo = "['\"]*\\!\\{(.*?)\\}['\"]*";
        sql = replaceOriginalValue(sql, rgexo, paramsMap);

        // replace the ${} of the SQL statement with the Placeholder
        String formatSql = sql.replaceAll(rgex, "?");
        sqlBuilder.append(formatSql);

        // print replaced sql
        printReplacedSql(sql, formatSql, rgex, sqlParamsMap);
        return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
    }

    public String replaceOriginalValue(String content, String rgex, Map<String, Property> sqlParamsMap) {
        Pattern pattern = Pattern.compile(rgex);
        while (true) {
            Matcher m = pattern.matcher(content);
            if (!m.find()) {
                break;
            }
            String paramName = m.group(1);
            String paramValue = sqlParamsMap.get(paramName).getValue();
            content = m.replaceFirst(paramValue);
        }
        return content;
    }

    @Override
    public AbstractParameters getParameters() {
        return this.sqlParameters;
    }

    /**
     * execute function and sql
     *
     * @param mainSqlBinds main sql binds
     * @param preStatementsBinds pre statements binds
     * @param postStatementsBinds post statements binds
     * @param createFuncs create functions
     */
    public void executeFuncAndSql(SqlBinds mainSqlBinds,
                                  List<SqlBinds> preStatementsBinds,
                                  List<SqlBinds> postStatementsBinds,
                                  List<String> createFuncs) throws Exception {
        Connection connection = null;
        PreparedStatement stmt = null;
        ResultSet resultSet = null;
        try {
            // create connection
            connection = DatasourceUtil.getConnection(DbType.valueOf(sqlParameters.getType()), baseConnectionParam);
            // create temp function
            if (CollectionUtils.isNotEmpty(createFuncs)) {
                createTempFunction(connection, createFuncs);
            }

            // pre sql
            preSql(connection, preStatementsBinds);
            stmt = prepareStatementAndBind(connection, mainSqlBinds);

            String result = null;
            // decide whether to executeQuery or executeUpdate based on sqlType
            if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) {
                // query statements need to be converted to a JsonArray and handed to Alert to send
                resultSet = stmt.executeQuery();
                result = resultProcess(resultSet);
            } else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) {
                // non query statement
                String updateResult = String.valueOf(stmt.executeUpdate());
                result = setNonQuerySqlReturn(updateResult, sqlParameters.getLocalParams());
            }

            postSql(connection, postStatementsBinds);
            this.setResultString(result);
        } catch (Exception e) {
            logger.error("execute sql error: {}", e.getMessage());
            throw e;
        } finally {
            close(resultSet, stmt, connection);
        }
    }

    public String setNonQuerySqlReturn(String updateResult, List<Property> properties) {
        String result = null;
        for (Property info : properties) {
            if (Direct.OUT == info.getDirect()) {
                List<Map<String, String>> updateRL = new ArrayList<>();
                Map<String, String> updateRM = new HashMap<>();
                updateRM.put(info.getProp(), updateResult);
                updateRL.add(updateRM);
                result = JSONUtils.toJsonString(updateRL);
                break;
            }
        }
        return result;
    }

    /**
     * result process
     *
     * @param resultSet resultSet
     * @throws Exception Exception
     */
    private String resultProcess(ResultSet resultSet) throws Exception {
        ArrayNode resultJSONArray = JSONUtils.createArrayNode();
        if (resultSet != null) {
            ResultSetMetaData md = resultSet.getMetaData();
            int num = md.getColumnCount();

            int rowCount = 0;

            while (rowCount < LIMIT && resultSet.next()) {
                ObjectNode mapOfColValues = JSONUtils.createObjectNode();
                for (int i = 1; i <= num; i++) {
                    mapOfColValues.set(md.getColumnLabel(i), JSONUtils.toJsonNode(resultSet.getObject(i)));
                }
                resultJSONArray.add(mapOfColValues);
                rowCount++;
            }

            int displayRows = sqlParameters.getDisplayRows() > 0 ? sqlParameters.getDisplayRows() : Constants.DEFAULT_DISPLAY_ROWS;
            displayRows = Math.min(displayRows, resultJSONArray.size());
            logger.info("display sql result {} rows as follows:", displayRows);
            for (int i = 0; i < displayRows; i++) {
                String row = JSONUtils.toJsonString(resultJSONArray.get(i));
                logger.info("row {} : {}", i + 1, row);
            }
        }
        String result = JSONUtils.toJsonString(resultJSONArray);
        if (sqlParameters.getSendEmail() == null || sqlParameters.getSendEmail()) {
            sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle())
                    ? sqlParameters.getTitle()
                    : taskExecutionContext.getTaskName() + " query result sets", result);
        }
        logger.debug("execute sql result : {}", result);
        return result;
    }

    /**
     * pre sql
     *
     * @param connection connection
     * @param preStatementsBinds preStatementsBinds
     */
    private void preSql(Connection connection, List<SqlBinds> preStatementsBinds) throws Exception {
        for (SqlBinds sqlBind : preStatementsBinds) {
            try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) {
                int result = pstmt.executeUpdate();
                logger.info("pre statement execute result: {}, for sql: {}", result, sqlBind.getSql());
            }
        }
    }

    /**
     * post sql
     *
     * @param connection connection
     * @param postStatementsBinds postStatementsBinds
     */
    private void postSql(Connection connection, List<SqlBinds> postStatementsBinds) throws Exception {
        for (SqlBinds sqlBind : postStatementsBinds) {
            try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) {
                int result = pstmt.executeUpdate();
                logger.info("post statement execute result: {},for sql: {}", result, sqlBind.getSql());
            }
        }
    }

    /**
     * create temp function
     *
     * @param connection connection
     * @param createFuncs createFuncs
     */
    private void createTempFunction(Connection connection, List<String> createFuncs) throws Exception {
        try (Statement funcStmt = connection.createStatement()) {
            for (String createFunc : createFuncs) {
                logger.info("hive create function sql: {}", createFunc);
                funcStmt.execute(createFunc);
            }
        }
    }

    /**
     * close jdbc resource
     *
     * @param resultSet resultSet
     * @param pstmt pstmt
     * @param connection connection
     */
    private void close(ResultSet resultSet, PreparedStatement pstmt, Connection connection) {
        if (resultSet != null) {
            try {
                resultSet.close();
            } catch (SQLException e) {
                logger.error("close result set error : {}", e.getMessage(), e);
            }
        }
        if (pstmt != null) {
            try {
                pstmt.close();
            } catch (SQLException e) {
                logger.error("close prepared statement error : {}", e.getMessage(), e);
            }
        }
        if (connection != null) {
            try {
                connection.close();
            } catch (SQLException e) {
                logger.error("close connection error : {}", e.getMessage(), e);
            }
        }
    }

    /**
     * preparedStatement bind
     *
     * @param connection connection
     * @param sqlBinds sqlBinds
     * @return PreparedStatement
     * @throws Exception Exception
     */
    private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) throws Exception {
        // is the timeout set
        boolean timeoutFlag = taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.FAILED
                || taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.WARNFAILED;
        PreparedStatement stmt = connection.prepareStatement(sqlBinds.getSql());
        if (timeoutFlag) {
            stmt.setQueryTimeout(taskExecutionContext.getTaskTimeout());
        }
        Map<Integer, Property> params = sqlBinds.getParamsMap();
        if (params != null) {
            for (Map.Entry<Integer, Property> entry : params.entrySet()) {
                Property prop = entry.getValue();
                ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue());
            }
        }
        logger.info("prepare statement replace sql : {} ", stmt);
        return stmt;
    }

    /**
     * send mail as an attachment
     *
     * @param title title
     * @param content content
     */
    public void sendAttachment(int groupId, String title, String content) {
        AlertSendResponseCommand alertSendResponseCommand = alertClientService.sendAlert(groupId, title, content);
        if (!alertSendResponseCommand.getResStatus()) {
            throw new RuntimeException("send mail failed!");
        }
    }

    /**
     * regular expressions match the contents between two specified strings
     *
     * @param content content
     * @param rgex rgex
     * @param sqlParamsMap sql params map
     * @param paramsPropsMap params props map
     */
    public void setSqlParamsMap(String content, String rgex, Map<Integer, Property> sqlParamsMap, Map<String, Property> paramsPropsMap) {
        Pattern pattern = Pattern.compile(rgex);
        Matcher m = pattern.matcher(content);
        int index = 1;
        while (m.find()) {
            String paramName = m.group(1);
            Property prop = paramsPropsMap.get(paramName);
            sqlParamsMap.put(index, prop);
            index++;
        }
    }

    /**
     * print replaced sql
     *
     * @param content content
     * @param formatSql format sql
     * @param rgex rgex
     * @param sqlParamsMap sql params map
     */
    public void printReplacedSql(String content, String formatSql, String rgex, Map<Integer, Property> sqlParamsMap) {
        // parameter print style
        logger.info("after replace sql , preparing : {}", formatSql);
        StringBuilder logPrint = new StringBuilder("replaced sql , parameters:");
        for (int i = 1; i <= sqlParamsMap.size(); i++) {
            logPrint.append(sqlParamsMap.get(i).getValue() + "(" + sqlParamsMap.get(i).getType() + ")");
        }
        logger.info("Sql Params are {}", logPrint);
    }
}
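The hard-coded LIMIT constant guarding the read loop in resultProcess above is exactly what issue #5580 targets. A hedged sketch of the consuming side, keeping LIMIT as the fallback (getLimit() is the hypothetical accessor sketched under the SqlParameters record above, not necessarily the merged API):

    // resolve the row cap from the task parameters, falling back to the default
    private int resolveQueryLimit() {
        int configured = sqlParameters.getLimit(); // hypothetical task-level setting
        return configured > 0 ? configured : LIMIT; // keep 10000 as the safety net
    }

The loop condition would then become while (rowCount < resolveQueryLimit() && resultSet.next()); hoisting the call into a local variable before the loop avoids re-evaluating the fallback logic on every row.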
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,580
[Improvement][SQL] Query return number should be configurable
**Describe the question** The SQL query result can only return 10,000 records, which are currently hard-coded **Which version of DolphinScheduler:** -[1.3.6-Release] **Describe alternatives you've considered** I think it should be configurable.
https://github.com/apache/dolphinscheduler/issues/5580
https://github.com/apache/dolphinscheduler/pull/5632
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
67711442d5add82164a916452020a68a84693000
2021-06-02T04:11:42Z
java
2021-06-16T01:40:21Z
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTaskTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.worker.task.sql;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.datasource.DatasourceUtil;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.remote.command.alert.AlertSendResponseCommand;
import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.alert.AlertClientService;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.util.Date;

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.powermock.reflect.Whitebox;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * sql task test
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest(value = {SqlTask.class, DatasourceUtil.class, SpringApplicationContext.class, ParameterUtils.class, AlertSendResponseCommand.class})
public class SqlTaskTest {

    private static final Logger logger = LoggerFactory.getLogger(SqlTaskTest.class);

    private static final String CONNECTION_PARAMS = "{\"user\":\"root\",\"password\":\"123456\",\"address\":\"jdbc:mysql://127.0.0.1:3306\","
            + "\"database\":\"test\",\"jdbcUrl\":\"jdbc:mysql://127.0.0.1:3306/test\"}";

    private SqlTask sqlTask;

    private TaskExecutionContext taskExecutionContext;

    private AlertClientService alertClientService;

    @Before
    public void before() throws Exception {
        taskExecutionContext = new TaskExecutionContext();

        TaskProps props = new TaskProps();
        props.setExecutePath("/tmp");
        props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
        props.setTaskInstanceId(1);
        props.setTenantCode("1");
        props.setEnvFile(".dolphinscheduler_env.sh");
        props.setTaskStartTime(new Date());
        props.setTaskTimeout(0);
        props.setTaskParams(
                "{\"localParams\":[{\"prop\":\"ret\", \"direct\":\"OUT\", \"type\":\"VARCHAR\", \"value\":\"\"}],"
                        + "\"type\":\"POSTGRESQL\",\"datasource\":1,\"sql\":\"insert into tb_1 values('1','2')\","
                        + "\"sqlType\":1}");

        taskExecutionContext = PowerMockito.mock(TaskExecutionContext.class);
        PowerMockito.when(taskExecutionContext.getTaskParams()).thenReturn(props.getTaskParams());
        PowerMockito.when(taskExecutionContext.getExecutePath()).thenReturn("/tmp");
        PowerMockito.when(taskExecutionContext.getTaskAppId()).thenReturn("1");
        PowerMockito.when(taskExecutionContext.getTenantCode()).thenReturn("root");
        PowerMockito.when(taskExecutionContext.getStartTime()).thenReturn(new Date());
        PowerMockito.when(taskExecutionContext.getTaskTimeout()).thenReturn(10000);
        PowerMockito.when(taskExecutionContext.getLogPath()).thenReturn("/tmp/dx");

        SQLTaskExecutionContext sqlTaskExecutionContext = new SQLTaskExecutionContext();
        sqlTaskExecutionContext.setConnectionParams(CONNECTION_PARAMS);
        PowerMockito.when(taskExecutionContext.getSqlTaskExecutionContext()).thenReturn(sqlTaskExecutionContext);

        PowerMockito.mockStatic(SpringApplicationContext.class);
        PowerMockito.when(SpringApplicationContext.getBean(Mockito.any())).thenReturn(new AlertDao());
        alertClientService = PowerMockito.mock(AlertClientService.class);
        sqlTask = new SqlTask(taskExecutionContext, logger, alertClientService);
        sqlTask.init();
    }

    @Test
    public void testGetParameters() {
        Assert.assertNotNull(sqlTask.getParameters());
    }

    @Test
    public void testHandle() throws Exception {
        Connection connection = PowerMockito.mock(Connection.class);
        PreparedStatement preparedStatement = PowerMockito.mock(PreparedStatement.class);
        PowerMockito.when(connection.prepareStatement(Mockito.any())).thenReturn(preparedStatement);
        PowerMockito.mockStatic(ParameterUtils.class);
        PowerMockito.when(ParameterUtils.replaceScheduleTime(Mockito.any(), Mockito.any())).thenReturn("insert into tb_1 values('1','2')");
        PowerMockito.mockStatic(DatasourceUtil.class);
        PowerMockito.when(DatasourceUtil.getConnection(Mockito.any(), Mockito.any())).thenReturn(connection);

        sqlTask.handle();
        Assert.assertEquals(Constants.EXIT_CODE_SUCCESS, sqlTask.getExitStatusCode());
    }

    @Test
    public void testResultProcess() throws Exception {
        // test that a null input will not throw an exception
        AlertSendResponseCommand mockResponseCommand = PowerMockito.mock(AlertSendResponseCommand.class);
        PowerMockito.when(mockResponseCommand.getResStatus()).thenReturn(true);
        PowerMockito.when(alertClientService.sendAlert(0, "null query result sets", "[]")).thenReturn(mockResponseCommand);
        String result = Whitebox.invokeMethod(sqlTask, "resultProcess", null);
        Assert.assertNotNull(result);
    }

    @Test
    public void testResultProcess02() throws Exception {
        // test a non-null input
        ResultSet resultSet = PowerMockito.mock(ResultSet.class);
        ResultSetMetaData mockResultMetaData = PowerMockito.mock(ResultSetMetaData.class);
        PowerMockito.when(resultSet.getMetaData()).thenReturn(mockResultMetaData);
        PowerMockito.when(mockResultMetaData.getColumnCount()).thenReturn(2);
        PowerMockito.when(resultSet.next()).thenReturn(true);
        PowerMockito.when(resultSet.getObject(Mockito.anyInt())).thenReturn(1);
        PowerMockito.when(mockResultMetaData.getColumnLabel(Mockito.anyInt())).thenReturn("a");

        AlertSendResponseCommand mockResponseCommand = PowerMockito.mock(AlertSendResponseCommand.class);
        PowerMockito.when(mockResponseCommand.getResStatus()).thenReturn(true);
        PowerMockito.when(alertClientService.sendAlert(Mockito.anyInt(), Mockito.anyString(), Mockito.anyString())).thenReturn(mockResponseCommand);

        String result = Whitebox.invokeMethod(sqlTask, "resultProcess", resultSet);
        Assert.assertNotNull(result);
    }
}
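One detail worth calling out in testResultProcess02 above: resultSet.next() is stubbed to always return true, so the read loop inside resultProcess terminates only because of the row-limit guard, and the test's runtime silently depends on that cap. A bounded stub would decouple the test from the limit (a sketch using Mockito's consecutive-return stubbing, which PowerMockito delegates to):

    // return two rows, then signal end-of-results
    PowerMockito.when(resultSet.next()).thenReturn(true, true, false);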
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,580
[Improvement][SQL] Query return number should be configurable
**Describe the question** The SQL query result can only return 10,000 records, which are currently hard-coded **Which version of DolphinScheduler:** -[1.3.6-Release] **Describe alternatives you've considered** I think it should be configurable.
https://github.com/apache/dolphinscheduler/issues/5580
https://github.com/apache/dolphinscheduler/pull/5632
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
67711442d5add82164a916452020a68a84693000
2021-06-02T04:11:42Z
java
2021-06-16T01:40:21Z
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/sql.vue
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
<template>
  <div class="sql-model">
    <m-list-box>
      <div slot="text">{{$t('Datasource')}}</div>
      <div slot="content">
        <m-datasource
          ref="refDs"
          @on-dsData="_onDsData"
          :data="{ type:type,datasource:datasource }">
        </m-datasource>
      </div>
    </m-list-box>
    <m-list-box>
      <div slot="text">{{$t('SQL Type')}}</div>
      <div slot="content">
        <div style="display: inline-block;">
          <m-sql-type @on-sqlType="_onSqlType" :sql-type="sqlType"></m-sql-type>
        </div>
        <div style="display: inline-block;" v-if="sqlType === '0'">
          <span class="text-b">{{$t('Send Email')}}</span>
          <el-switch size="small" v-model="sendEmail"></el-switch>
        </div>
        <div style="display: inline-block;" v-if="sqlType === '0'">
          <span class="text-b">{{$t('Log display')}}</span>
          <m-select-input v-model="displayRows" :list="[1,10,25,50,100]" style="width: 70px;"></m-select-input>
          <span>{{$t('rows of result')}}</span>
        </div>
      </div>
    </m-list-box>
    <template v-if="sqlType === '0' && sendEmail">
      <m-list-box>
        <div slot="text"><strong class='requiredIcon'>*</strong>{{$t('Title')}}</div>
        <div slot="content">
          <el-input
            type="input"
            size="small"
            v-model="title"
            :disabled="isDetails"
            :placeholder="$t('Please enter the title of email')">
          </el-input>
        </div>
      </m-list-box>
      <m-list-box>
        <div slot="text"><strong class='requiredIcon'>*</strong>{{$t('Alarm group')}}</div>
        <div slot="content">
          <m-warning-groups v-model="groupId"></m-warning-groups>
        </div>
      </m-list-box>
    </template>
    <m-list-box v-show="type === 'HIVE'">
      <div slot="text">{{$t('SQL Parameter')}}</div>
      <div slot="content">
        <el-input
          :disabled="isDetails"
          type="input"
          size="small"
          v-model="connParams"
          :placeholder="$t('Please enter format') + ' key1=value1;key2=value2...'">
        </el-input>
      </div>
    </m-list-box>
    <m-list-box>
      <div slot="text">{{$t('SQL Statement')}}</div>
      <div slot="content">
        <div class="form-mirror">
          <textarea id="code-sql-mirror" name="code-sql-mirror" style="opacity: 0;">
          </textarea>
          <a class="ans-modal-box-max">
            <em class="el-icon-full-screen" @click="setEditorVal"></em>
          </a>
        </div>
      </div>
    </m-list-box>
    <m-list-box v-if="type === 'HIVE'">
      <div slot="text">{{$t('UDF Function')}}</div>
      <div slot="content">
        <m-udfs
          ref="refUdfs"
          @on-udfsData="_onUdfsData"
          :udfs="udfs"
          :type="type">
        </m-udfs>
      </div>
    </m-list-box>
    <m-list-box>
      <div slot="text">{{$t('Custom Parameters')}}</div>
      <div slot="content">
        <m-local-params
          ref="refLocalParams"
          @on-udpData="_onUdpData"
          :udp-list="localParams">
        </m-local-params>
      </div>
    </m-list-box>
    <m-list-box>
      <div slot="text">{{$t('Pre Statement')}}</div>
      <div slot="content">
        <m-statement-list
          ref="refPreStatements"
          @on-statement-list="_onPreStatements"
          :statement-list="preStatements">
        </m-statement-list>
      </div>
    </m-list-box>
    <m-list-box>
      <div slot="text">{{$t('Post Statement')}}</div>
      <div slot="content">
        <m-statement-list
          ref="refPostStatements"
          @on-statement-list="_onPostStatements"
          :statement-list="postStatements">
        </m-statement-list>
      </div>
    </m-list-box>
    <el-dialog
      :visible.sync="scriptBoxDialog"
      append-to-body="true"
      width="80%">
      <m-script-box :item="item" @getSriptBoxValue="getSriptBoxValue" @closeAble="closeAble"></m-script-box>
    </el-dialog>
  </div>
</template>
<script>
  import _ from 'lodash'
  import i18n from '@/module/i18n'
  import mUdfs from './_source/udfs'
  import mListBox from './_source/listBox'
  import mScriptBox from './_source/scriptBox'
  import mSqlType from './_source/sqlType'
  import mDatasource from './_source/datasource'
  import mLocalParams from './_source/localParams'
  import mStatementList from './_source/statementList'
  import mWarningGroups from './_source/warningGroups'
  import mSelectInput from '../_source/selectInput'
  import disabledState from '@/module/mixin/disabledState'
  import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'

  let editor

  export default {
    name: 'sql',
    data () {
      return {
        // Data source type
        type: '',
        // data source
        datasource: '',
        // Return to the selected data source
        rtDatasource: '',
        // Sql statement
        sql: '',
        // Custom parameter
        localParams: [],
        // UDF function
        udfs: '',
        // Sql type
        sqlType: '0',
        // Send email
        sendEmail: false,
        // Display rows
        displayRows: 10,
        // Email title
        title: '',
        // Sql parameter
        connParams: '',
        // Pre statements
        preStatements: [],
        // Post statements
        postStatements: [],
        item: '',
        scriptBoxDialog: false,
        groupId: null
      }
    },
    mixins: [disabledState],
    props: {
      backfillItem: Object,
      createNodeId: Number
    },
    methods: {
      setEditorVal () {
        this.item = editor.getValue()
        this.scriptBoxDialog = true
      },
      getSriptBoxValue (val) {
        editor.setValue(val)
      },
      /**
       * return sqlType
       */
      _onSqlType (a) {
        this.sqlType = a
      },
      /**
       * return udfs
       */
      _onUdfsData (a) {
        this.udfs = a
      },
      /**
       * return Custom parameter
       */
      _onUdpData (a) {
        this.localParams = a
      },
      /**
       * return data source
       */
      _onDsData (o) {
        this.type = o.type
        this.rtDatasource = o.datasource
      },
      /**
       * return pre statements
       */
      _onPreStatements (a) {
        this.preStatements = a
      },
      /**
       * return post statements
       */
      _onPostStatements (a) {
        this.postStatements = a
      },
      /**
       * verification
       */
      _verification () {
        if (!editor.getValue()) {
          this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`)
          return false
        }
        // datasource Subcomponent verification
        if (!this.$refs.refDs._verifDatasource()) {
          return false
        }
        if (this.sqlType === '0' && this.sendEmail && !this.title) {
          this.$message.warning(`${i18n.$t('Mail subject required')}`)
          return false
        }
        if (this.sqlType === '0' && this.sendEmail && (this.groupId === '' || this.groupId === null)) {
          this.$message.warning(`${i18n.$t('Alarm group required')}`)
          return false
        }
        // udfs Subcomponent verification Verification only if the data type is HIVE
        if (this.type === 'HIVE') {
          if (!this.$refs.refUdfs._verifUdfs()) {
            return false
          }
        }
        // localParams Subcomponent verification
        if (!this.$refs.refLocalParams._verifProp()) {
          return false
        }
        // preStatements Subcomponent verification
        if (!this.$refs.refPreStatements._verifProp()) {
          return false
        }
        // postStatements Subcomponent verification
        if (!this.$refs.refPostStatements._verifProp()) {
          return false
        }
        // storage
        this.$emit('on-params', {
          type: this.type,
          datasource: this.rtDatasource,
          sql: editor.getValue(),
          udfs: this.udfs,
          sqlType: this.sqlType,
          sendEmail: this.sendEmail,
          displayRows: this.displayRows,
          title: this.title,
          groupId: this.groupId,
          localParams: this.localParams,
          connParams: this.connParams,
          preStatements: this.preStatements,
          postStatements: this.postStatements
        })
        return true
      },
      /**
       * Processing code highlighting
       */
      _handlerEditor () {
        this._destroyEditor()
        // editor
        editor = codemirror('code-sql-mirror', {
          mode: 'sql',
          readOnly: this.isDetails
        })
        this.keypress = () => {
          if (!editor.getOption('readOnly')) {
            editor.showHint({
              completeSingle: false
            })
          }
        }
        this.changes = () => {
          this._cacheParams()
        }
        // Monitor keyboard
        editor.on('keypress', this.keypress)
        editor.on('changes', this.changes)
        editor.setValue(this.sql)
        return editor
      },
      _cacheParams () {
        this.$emit('on-cache-params', {
          type: this.type,
          datasource: this.rtDatasource,
          sql: editor ? editor.getValue() : '',
          udfs: this.udfs,
          sqlType: this.sqlType,
          sendEmail: this.sendEmail,
          displayRows: this.displayRows,
          title: this.title,
          groupId: this.groupId,
          localParams: this.localParams,
          connParams: this.connParams,
          preStatements: this.preStatements,
          postStatements: this.postStatements
        })
      },
      _destroyEditor () {
        if (editor) {
          editor.toTextArea() // Uninstall
          editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
          editor.off($('.code-sql-mirror'), 'changes', this.changes)
        }
      }
    },
    watch: {
      // Listening to sqlType
      sqlType (val) {
        if (val !== '0') {
          this.title = ''
          this.groupId = null
        }
      },
      // Listening data source
      type (val) {
        if (val !== 'HIVE') {
          this.connParams = ''
        }
      },
      // Watch the cacheParams
      cacheParams (val) {
        this._cacheParams()
      }
    },
    created () {
      let o = this.backfillItem

      // Non-null objects represent backfill
      if (!_.isEmpty(o)) {
        // backfill
        this.type = o.params.type || ''
        this.datasource = o.params.datasource || ''
        this.sql = o.params.sql || ''
        this.udfs = o.params.udfs || ''
        this.sqlType = o.params.sqlType
        this.sendEmail = o.params.sendEmail || false
        this.displayRows = o.params.displayRows || 10
        this.connParams = o.params.connParams || ''
        this.localParams = o.params.localParams || []
        this.preStatements = o.params.preStatements || []
        this.postStatements = o.params.postStatements || []
        this.title = o.params.title || ''
        this.groupId = o.params.groupId
      }
    },
    mounted () {
      setTimeout(() => {
        this._handlerEditor()
      }, 200)
    },
    destroyed () {
      /**
       * Destroy the editor instance
       */
      if (editor) {
        editor.toTextArea() // Uninstall
        editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
        editor.off($('.code-sql-mirror'), 'changes', this.changes)
      }
    },
    computed: {
      cacheParams () {
        return {
          type: this.type,
          datasource: this.rtDatasource,
          udfs: this.udfs,
          sqlType: this.sqlType,
          sendEmail: this.sendEmail,
          displayRows: this.displayRows,
          title: this.title,
          groupId: this.groupId,
          localParams: this.localParams,
          connParams: this.connParams,
          preStatements: this.preStatements,
          postStatements: this.postStatements
        }
      }
    },
    components: { mListBox, mDatasource, mLocalParams, mUdfs, mSqlType, mStatementList, mScriptBox, mWarningGroups, mSelectInput }
  }
</script>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,580
[Improvement][SQL] Query return number should be configurable
**Describe the question** The SQL query result can only return 10,000 records, which are currently hard-coded **Which version of DolphinScheduler:** -[1.3.6-Release] **Describe alternatives you've considered** I think it should be configurable.
https://github.com/apache/dolphinscheduler/issues/5580
https://github.com/apache/dolphinscheduler/pull/5632
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
67711442d5add82164a916452020a68a84693000
2021-06-02T04:11:42Z
java
2021-06-16T01:40:21Z
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': 'User Name', 'Please enter user name': 'Please enter user name', Password: 'Password', 'Please enter your password': 'Please enter your password', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22', Login: 'Login', Home: 'Home', 'Failed to create node to save': 'Failed to create node to save', 'Global parameters': 'Global parameters', 'Local parameters': 'Local parameters', 'Copy success': 'Copy success', 'The browser does not support automatic copying': 'The browser does not support automatic copying', 'Whether to save the DAG graph': 'Whether to save the DAG graph', 'Current node settings': 'Current node settings', 'View history': 'View history', 'View log': 'View log', 'Force success': 'Force success', 'Enter this child node': 'Enter this child node', 'Node name': 'Node name', 'Please enter name (required)': 'Please enter name (required)', 'Run flag': 'Run flag', Normal: 'Normal', 'Prohibition execution': 'Prohibition execution', 'Please enter description': 'Please enter description', 'Number of failed retries': 'Number of failed retries', Times: 'Times', 'Failed retry interval': 'Failed retry interval', Minute: 'Minute', 'Delay execution time': 'Delay execution time', 'Delay execution': 'Delay execution', 'Forced success': 'Forced success', Cancel: 'Cancel', 'Confirm add': 'Confirm add', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process', 'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process', 'Name already exists': 'Name already exists', 'Download Log': 'Download Log', 'Refresh Log': 'Refresh Log', 'Enter full screen': 'Enter full screen', 'Cancel full screen': 'Cancel full screen', Close: 'Close', 'Update log success': 'Update log success', 'No more logs': 'No more logs', 'No log': 'No log', 'Loading Log...': 'Loading Log...', 'Set the DAG diagram name': 'Set the DAG diagram name', 'Please enter description(optional)': 'Please enter description(optional)', 'Set global': 'Set global', 'Whether to go online the process definition': 'Whether to go online the process definition', 'Whether to update the process definition': 'Whether to update the process definition', Add: 'Add', 'DAG graph name cannot be empty': 'DAG graph name cannot be empty', 'Create Datasource': 'Create Datasource', 'Project Home': 'Project Home', 'Project Manage': 'Project', 'Create Project': 'Create Project', 'Cron Manage': 'Cron 
Manage', 'Copy Workflow': 'Copy Workflow', 'Tenant Manage': 'Tenant Manage', 'Create Tenant': 'Create Tenant', 'User Manage': 'User Manage', 'Create User': 'Create User', 'User Information': 'User Information', 'Edit Password': 'Edit Password', Success: 'Success', Failed: 'Failed', Delete: 'Delete', 'Please choose': 'Please choose', 'Please enter a positive integer': 'Please enter a positive integer', 'Program Type': 'Program Type', 'Main Class': 'Main Class', 'Main Jar Package': 'Main Jar Package', 'Please enter main jar package': 'Please enter main jar package', 'Please enter main class': 'Please enter main class', 'Main Arguments': 'Main Arguments', 'Please enter main arguments': 'Please enter main arguments', 'Option Parameters': 'Option Parameters', 'Please enter option parameters': 'Please enter option parameters', Resources: 'Resources', 'Custom Parameters': 'Custom Parameters', 'Custom template': 'Custom template', Datasource: 'Datasource', methods: 'methods', 'Please enter the procedure method': 'Please enter the procedure script \n\ncall procedure:{call <procedure-name>[(<arg1>,<arg2>, ...)]}\n\ncall function:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': 'example:{call <procedure-name>[(?,?, ...)]} or {?= call <procedure-name>[(?,?, ...)]}', Script: 'Script', 'Please enter script(required)': 'Please enter script(required)', 'Deploy Mode': 'Deploy Mode', 'Driver Cores': 'Driver Cores', 'Please enter Driver cores': 'Please enter Driver cores', 'Driver Memory': 'Driver Memory', 'Please enter Driver memory': 'Please enter Driver memory', 'Executor Number': 'Executor Number', 'Please enter Executor number': 'Please enter Executor number', 'The Executor number should be a positive integer': 'The Executor number should be a positive integer', 'Executor Memory': 'Executor Memory', 'Please enter Executor memory': 'Please enter Executor memory', 'Executor Cores': 'Executor Cores', 'Please enter Executor cores': 'Please enter Executor cores', 'Memory should be a positive integer': 'Memory should be a positive integer', 'Core number should be positive integer': 'Core number should be positive integer', 'Flink Version': 'Flink Version', 'JobManager Memory': 'JobManager Memory', 'Please enter JobManager memory': 'Please enter JobManager memory', 'TaskManager Memory': 'TaskManager Memory', 'Please enter TaskManager memory': 'Please enter TaskManager memory', 'Slot Number': 'Slot Number', 'Please enter Slot number': 'Please enter Slot number', Parallelism: 'Parallelism', 'Please enter Parallelism': 'Please enter Parallelism', 'TaskManager Number': 'TaskManager Number', 'Please enter TaskManager number': 'Please enter TaskManager number', 'App Name': 'App Name', 'Please enter app name(optional)': 'Please enter app name(optional)', 'SQL Type': 'SQL Type', 'Send Email': 'Send Email', 'Log display': 'Log display', 'rows of result': 'rows of result', Title: 'Title', 'Please enter the title of email': 'Please enter the title of email', Table: 'Table', TableMode: 'Table', Attachment: 'Attachment', 'SQL Parameter': 'SQL Parameter', 'SQL Statement': 'SQL Statement', 'UDF Function': 'UDF Function', 'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)', 'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)', 'One form or attachment must be selected': 'One form or attachment must be selected', 'Mail subject required': 'Mail subject required', 'Child Node': 'Child Node', 'Please select a sub-Process': 
'Please select a sub-Process', Edit: 'Edit', 'Switch To This Version': 'Switch To This Version', 'Datasource Name': 'Datasource Name', 'Please enter datasource name': 'Please enter datasource name', IP: 'IP', 'Please enter IP': 'Please enter IP', Port: 'Port', 'Please enter port': 'Please enter port', 'Database Name': 'Database Name', 'Please enter database name': 'Please enter database name', 'Oracle Connect Type': 'ServiceName or SID', 'Oracle Service Name': 'ServiceName', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc connect parameters', 'Test Connect': 'Test Connect', 'Please enter resource name': 'Please enter resource name', 'Please enter resource folder name': 'Please enter resource folder name', 'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement', 'Please enter IP/hostname': 'Please enter IP/hostname', 'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format', '#': '#', 'Datasource Type': 'Datasource Type', 'Datasource Parameter': 'Datasource Parameter', 'Create Time': 'Create Time', 'Update Time': 'Update Time', Operation: 'Operation', 'Current Version': 'Current Version', 'Click to view': 'Click to view', 'Delete?': 'Delete?', 'Switch Version Successfully': 'Switch Version Successfully', 'Confirm Switch To This Version?': 'Confirm Switch To This Version?', Confirm: 'Confirm', 'Task status statistics': 'Task Status Statistics', Number: 'Number', State: 'State', 'Process Status Statistics': 'Process Status Statistics', 'Process Definition Statistics': 'Process Definition Statistics', 'Project Name': 'Project Name', 'Please enter name': 'Please enter name', 'Owned Users': 'Owned Users', 'Process Pid': 'Process Pid', 'Zk registration directory': 'Zk registration directory', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': 'Last heartbeat time', 'Edit Tenant': 'Edit Tenant', 'OS Tenant Code': 'OS Tenant Code', 'Tenant Name': 'Tenant Name', Queue: 'Yarn Queue', 'Please select a queue': 'default is tenant association queue', 'Please enter the os tenant code in English': 'Please enter the os tenant code in English', 'Please enter os tenant code in English': 'Please enter os tenant code in English', 'Please enter os tenant code': 'Please enter os tenant code', 'Please enter tenant Name': 'Please enter tenant Name', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': 'The os tenant code. 
Only letters or a combination of letters and numbers are allowed', 'Edit User': 'Edit User', Tenant: 'Tenant', Email: 'Email', Phone: 'Phone', 'User Type': 'User Type', 'Please enter phone number': 'Please enter phone number', 'Please enter email': 'Please enter email', 'Please enter the correct email format': 'Please enter the correct email format', 'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format', Project: 'Project', Authorize: 'Authorize', 'File resources': 'File resources', 'UDF resources': 'UDF resources', 'UDF resources directory': 'UDF resources directory', 'Please select UDF resources directory': 'Please select UDF resources directory', 'Alarm group': 'Alarm group', 'Alarm group required': 'Alarm group required', 'Edit alarm group': 'Edit alarm group', 'Create alarm group': 'Create alarm group', 'Create Alarm Instance': 'Create Alarm Instance', 'Edit Alarm Instance': 'Edit Alarm Instance', 'Group Name': 'Group Name', 'Alarm instance name': 'Alarm instance name', 'Alarm plugin name': 'Alarm plugin name', 'Select plugin': 'Select plugin', 'Please enter group name': 'Please enter group name', 'Instance parameter exception': 'Instance parameter exception', 'Group Type': 'Group Type', 'Alarm plugin instance': 'Alarm plugin instance', Remarks: 'Remarks', SMS: 'SMS', 'Managing Users': 'Managing Users', Permission: 'Permission', Administrator: 'Administrator', 'Confirm Password': 'Confirm Password', 'Please enter confirm password': 'Please enter confirm password', 'Password cannot be in Chinese': 'Password cannot be in Chinese', 'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password', 'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese', 'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password', 'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password', 'Please select the datasource': 'Please select the datasource', 'Please select resources': 'Please select resources', Query: 'Query', 'Non Query': 'Non Query', 'prop(required)': 'prop(required)', 'value(optional)': 'value(optional)', 'value(required)': 'value(required)', 'prop is empty': 'prop is empty', 'value is empty': 'value is empty', 'prop is repeat': 'prop is repeat', 'Start Time': 'Start Time', 'End Time': 'End Time', crontab: 'crontab', 'Failure Strategy': 'Failure Strategy', online: 'online', offline: 'offline', 'Task Status': 'Task Status', 'Process Instance': 'Process Instance', 'Task Instance': 'Task Instance', 'Select date range': 'Select date range', startDate: 'startDate', endDate: 'endDate', Date: 'Date', Waiting: 'Waiting', Execution: 'Execution', Finish: 'Finish', 'Create File': 'Create File', 'Create folder': 'Create folder', 'File Name': 'File Name', 'Folder Name': 'Folder Name', 'File Format': 'File Format', 'Folder Format': 'Folder Format', 'File Content': 'File Content', 'Upload File Size': 'Upload File size cannot exceed 1g', Create: 'Create', 'Please enter the resource content': 'Please enter the resource content', 'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines', 'File Details': 'File Details', 'Download Details': 'Download Details', Return: 'Return', Save: 'Save', 'File Manage': 'File Manage', 'Upload Files': 'Upload Files', 'Create UDF Function': 'Create UDF Function', 'Upload UDF Resources': 'Upload UDF 
Resources', 'Service-Master': 'Service-Master', 'Service-Worker': 'Service-Worker', 'Process Name': 'Process Name', Executor: 'Executor', 'Run Type': 'Run Type', 'Scheduling Time': 'Scheduling Time', 'Run Times': 'Run Times', host: 'host', 'fault-tolerant sign': 'fault-tolerant sign', Rerun: 'Rerun', 'Recovery Failed': 'Recovery Failed', Stop: 'Stop', Pause: 'Pause', 'Recovery Suspend': 'Recovery Suspend', Gantt: 'Gantt', 'Node Type': 'Node Type', 'Submit Time': 'Submit Time', Duration: 'Duration', 'Retry Count': 'Retry Count', 'Task Name': 'Task Name', 'Task Date': 'Task Date', 'Source Table': 'Source Table', 'Record Number': 'Record Number', 'Target Table': 'Target Table', 'Online viewing type is not supported': 'Online viewing type is not supported', Size: 'Size', Rename: 'Rename', Download: 'Download', Export: 'Export', 'Version Info': 'Version Info', Submit: 'Submit', 'Edit UDF Function': 'Edit UDF Function', type: 'type', 'UDF Function Name': 'UDF Function Name', FILE: 'FILE', UDF: 'UDF', 'File Subdirectory': 'File Subdirectory', 'Please enter a function name': 'Please enter a function name', 'Package Name': 'Package Name', 'Please enter a Package name': 'Please enter a Package name', Parameter: 'Parameter', 'Please enter a parameter': 'Please enter a parameter', 'UDF Resources': 'UDF Resources', 'Upload Resources': 'Upload Resources', Instructions: 'Instructions', 'Please enter a instructions': 'Please enter a instructions', 'Please enter a UDF function name': 'Please enter a UDF function name', 'Select UDF Resources': 'Select UDF Resources', 'Class Name': 'Class Name', 'Jar Package': 'Jar Package', 'Library Name': 'Library Name', 'UDF Resource Name': 'UDF Resource Name', 'File Size': 'File Size', Description: 'Description', 'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items', 'Select Line Connection': 'Select Line Connection', 'Delete selected lines or nodes': 'Delete selected lines or nodes', 'Full Screen': 'Full Screen', Unpublished: 'Unpublished', 'Start Process': 'Start Process', 'Execute from the current node': 'Execute from the current node', 'Recover tolerance fault process': 'Recover tolerance fault process', 'Resume the suspension process': 'Resume the suspension process', 'Execute from the failed nodes': 'Execute from the failed nodes', 'Complement Data': 'Complement Data', 'Scheduling execution': 'Scheduling execution', 'Recovery waiting thread': 'Recovery waiting thread', 'Submitted successfully': 'Submitted successfully', Executing: 'Executing', 'Ready to pause': 'Ready to pause', 'Ready to stop': 'Ready to stop', 'Need fault tolerance': 'Need fault tolerance', Kill: 'Kill', 'Waiting for thread': 'Waiting for thread', 'Waiting for dependence': 'Waiting for dependence', Start: 'Start', Copy: 'Copy', 'Copy name': 'Copy name', 'Copy path': 'Copy path', 'Please enter keyword': 'Please enter keyword', 'File Upload': 'File Upload', 'Drag the file into the current upload window': 'Drag the file into the current upload window', 'Drag area upload': 'Drag area upload', Upload: 'Upload', 'ReUpload File': 'ReUpload File', 'Please enter file name': 'Please enter file name', 'Please select the file to upload': 'Please select the file to upload', 'Resources manage': 'Resources', Security: 'Security', Logout: 'Logout', 'No data': 'No data', 'Uploading...': 'Uploading...', 'Loading...': 'Loading...', List: 'List', 'Unable to download without proper url': 'Unable to download without proper url', Process: 'Process', 'Process definition': 'Process definition', 'Task record': 
'Task record', 'Warning group manage': 'Warning group manage', 'Warning instance manage': 'Warning instance manage', 'Servers manage': 'Servers manage', 'UDF manage': 'UDF manage', 'Resource manage': 'Resource manage', 'Function manage': 'Function manage', 'Edit password': 'Edit password', 'Ordinary users': 'Ordinary users', 'Create process': 'Create process', 'Import process': 'Import process', 'Timing state': 'Timing state', Timing: 'Timing', Timezone: 'Timezone', TreeView: 'TreeView', 'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat', 'Mailbox input is illegal': 'Mailbox input is illegal', 'Please set the parameters before starting': 'Please set the parameters before starting', Continue: 'Continue', End: 'End', 'Node execution': 'Node execution', 'Backward execution': 'Backward execution', 'Forward execution': 'Forward execution', 'Execute only the current node': 'Execute only the current node', 'Notification strategy': 'Notification strategy', 'Notification group': 'Notification group', 'Please select a notification group': 'Please select a notification group', receivers: 'receivers', receiverCcs: 'receiverCcs', 'Whether it is a complement process?': 'Whether it is a complement process?', 'Schedule date': 'Schedule date', 'Mode of execution': 'Mode of execution', 'Serial execution': 'Serial execution', 'Parallel execution': 'Parallel execution', 'Set parameters before timing': 'Set parameters before timing', 'Start and stop time': 'Start and stop time', 'Please select time': 'Please select time', 'Please enter crontab': 'Please enter crontab', none_1: 'none', success_1: 'success', failure_1: 'failure', All_1: 'All', Toolbar: 'Toolbar', 'View variables': 'View variables', 'Format DAG': 'Format DAG', 'Refresh DAG status': 'Refresh DAG status', Return_1: 'Return', 'Please enter format': 'Please enter format', 'connection parameter': 'connection parameter', 'Process definition details': 'Process definition details', 'Create process definition': 'Create process definition', 'Scheduled task list': 'Scheduled task list', 'Process instance details': 'Process instance details', 'Create Resource': 'Create Resource', 'User Center': 'User Center', AllStatus: 'All', None: 'None', Name: 'Name', 'Process priority': 'Process priority', 'Task priority': 'Task priority', 'Task timeout alarm': 'Task timeout alarm', 'Timeout strategy': 'Timeout strategy', 'Timeout alarm': 'Timeout alarm', 'Timeout failure': 'Timeout failure', 'Timeout period': 'Timeout period', 'Waiting Dependent complete': 'Waiting Dependent complete', 'Waiting Dependent start': 'Waiting Dependent start', 'Check interval': 'Check interval', 'Timeout must be longer than check interval': 'Timeout must be longer than check interval', 'Timeout strategy must be selected': 'Timeout strategy must be selected', 'Timeout must be a positive integer': 'Timeout must be a positive integer', 'Add dependency': 'Add dependency', and: 'and', or: 'or', month: 'month', week: 'week', day: 'day', hour: 'hour', Running: 'Running', 'Waiting for dependency to complete': 'Waiting for dependency to complete', Selected: 'Selected', CurrentHour: 'CurrentHour', Last1Hour: 'Last1Hour', Last2Hours: 'Last2Hours', Last3Hours: 'Last3Hours', Last24Hours: 'Last24Hours', today: 'today', Last1Days: 'Last1Days', Last2Days: 'Last2Days', Last3Days: 'Last3Days', Last7Days: 'Last7Days', ThisWeek: 'ThisWeek', LastWeek: 'LastWeek', LastMonday: 'LastMonday', LastTuesday: 'LastTuesday', LastWednesday: 
'LastWednesday', LastThursday: 'LastThursday', LastFriday: 'LastFriday', LastSaturday: 'LastSaturday', LastSunday: 'LastSunday', ThisMonth: 'ThisMonth', LastMonth: 'LastMonth', LastMonthBegin: 'LastMonthBegin', LastMonthEnd: 'LastMonthEnd', 'Refresh status succeeded': 'Refresh status succeeded', 'Queue manage': 'Yarn Queue manage', 'Create queue': 'Create queue', 'Edit queue': 'Edit queue', 'Datasource manage': 'Datasource', 'History task record': 'History task record', 'Please go online': 'Please go online', 'Queue value': 'Queue value', 'Please enter queue value': 'Please enter queue value', 'Worker group manage': 'Worker group manage', 'Create worker group': 'Create worker group', 'Edit worker group': 'Edit worker group', 'Token manage': 'Token manage', 'Create token': 'Create token', 'Edit token': 'Edit token', Addresses: 'Addresses', 'Worker Addresses': 'Worker Addresses', 'Please select the worker addresses': 'Please select the worker addresses', 'Failure time': 'Failure time', 'Expiration time': 'Expiration time', User: 'User', 'Please enter token': 'Please enter token', 'Generate token': 'Generate token', Monitor: 'Monitor', Group: 'Group', 'Queue statistics': 'Queue statistics', 'Command status statistics': 'Command status statistics', 'Task kill': 'Task Kill', 'Task queue': 'Task queue', 'Error command count': 'Error command count', 'Normal command count': 'Normal command count', Manage: ' Manage', 'Number of connections': 'Number of connections', Sent: 'Sent', Received: 'Received', 'Min latency': 'Min latency', 'Avg latency': 'Avg latency', 'Max latency': 'Max latency', 'Node count': 'Node count', 'Query time': 'Query time', 'Node self-test status': 'Node self-test status', 'Health status': 'Health status', 'Max connections': 'Max connections', 'Threads connections': 'Threads connections', 'Max used connections': 'Max used connections', 'Threads running connections': 'Threads running connections', 'Worker group': 'Worker group', 'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0', 'Pre Statement': 'Pre Statement', 'Post Statement': 'Post Statement', 'Statement cannot be empty': 'Statement cannot be empty', 'Process Define Count': 'Work flow Define Count', 'Process Instance Running Count': 'Process Instance Running Count', 'command number of waiting for running': 'command number of waiting for running', 'failure command number': 'failure command number', 'tasks number of waiting running': 'tasks number of waiting running', 'task number of ready to kill': 'task number of ready to kill', 'Statistics manage': 'Statistics Manage', statistics: 'Statistics', 'select tenant': 'select tenant', 'Please enter Principal': 'Please enter Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': 'Please enter the kerberos authentication parameter java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': 'Please enter the kerberos authentication parameter login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': 'Please enter the kerberos authentication parameter login.user.keytab.path', 'The start time must not be the same as the end': 'The start time must not be the same as the end', 'Startup parameter': 'Startup parameter', 'Startup type': 'Startup type', 'warning of timeout': 'warning of timeout', 'Next five execution times': 'Next five execution times', 'Execute time': 'Execute time', 'Complement range': 'Complement 
range', 'Http Url': 'Http Url', 'Http Method': 'Http Method', 'Http Parameters': 'Http Parameters', 'Http Parameters Key': 'Http Parameters Key', 'Http Parameters Position': 'Http Parameters Position', 'Http Parameters Value': 'Http Parameters Value', 'Http Check Condition': 'Http Check Condition', 'Http Condition': 'Http Condition', 'Please Enter Http Url': 'Please Enter Http Url(required)', 'Please Enter Http Condition': 'Please Enter Http Condition', 'There is no data for this period of time': 'There is no data for this period of time', 'Worker addresses cannot be empty': 'Worker addresses cannot be empty', 'Please generate token': 'Please generate token', 'Spark Version': 'Spark Version', TargetDataBase: 'target database', TargetTable: 'target table', 'Please enter the table of target': 'Please enter the table of target', 'Please enter a Target Table(required)': 'Please enter a Target Table(required)', SpeedByte: 'speed(byte count)', SpeedRecord: 'speed(record count)', '0 means unlimited by byte': '0 means unlimited', '0 means unlimited by count': '0 means unlimited', 'Modify User': 'Modify User', 'Whether directory': 'Whether directory', Yes: 'Yes', No: 'No', 'Hadoop Custom Params': 'Hadoop Params', 'Sqoop Advanced Parameters': 'Sqoop Params', 'Sqoop Job Name': 'Job Name', 'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)', 'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)', 'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)', 'Please enter Target Dir(required)': 'Please enter Target Dir(required)', 'Please enter Export Dir(required)': 'Please enter Export Dir(required)', 'Please enter Hive Database(required)': 'Please enter Hive Databasec(required)', 'Please enter Hive Table(required)': 'Please enter Hive Table(required)', 'Please enter Hive Partition Keys': 'Please enter Hive Partition Key', 'Please enter Hive Partition Values': 'Please enter Partition Value', 'Please enter Replace Delimiter': 'Please enter Replace Delimiter', 'Please enter Fields Terminated': 'Please enter Fields Terminated', 'Please enter Lines Terminated': 'Please enter Lines Terminated', 'Please enter Concurrency': 'Please enter Concurrency', 'Please enter Update Key': 'Please enter Update Key', 'Please enter Job Name(required)': 'Please enter Job Name(required)', 'Please enter Custom Shell(required)': 'Please enter Custom Shell(required)', Direct: 'Direct', Type: 'Type', ModelType: 'ModelType', ColumnType: 'ColumnType', Database: 'Database', Column: 'Column', 'Map Column Hive': 'Map Column Hive', 'Map Column Java': 'Map Column Java', 'Export Dir': 'Export Dir', 'Hive partition Keys': 'Hive partition Keys', 'Hive partition Values': 'Hive partition Values', FieldsTerminated: 'FieldsTerminated', LinesTerminated: 'LinesTerminated', IsUpdate: 'IsUpdate', UpdateKey: 'UpdateKey', UpdateMode: 'UpdateMode', 'Target Dir': 'Target Dir', DeleteTargetDir: 'DeleteTargetDir', FileType: 'FileType', CompressionCodec: 'CompressionCodec', CreateHiveTable: 'CreateHiveTable', DropDelimiter: 'DropDelimiter', OverWriteSrc: 'OverWriteSrc', ReplaceDelimiter: 'ReplaceDelimiter', Concurrency: 'Concurrency', Form: 'Form', OnlyUpdate: 'OnlyUpdate', AllowInsert: 'AllowInsert', 'Data Source': 'Data Source', 'Data Target': 'Data Target', 'All Columns': 'All Columns', 'Some Columns': 'Some Columns', 'Branch flow': 'Branch flow', 'Custom Job': 'Custom Job', 'Custom Script': 'Custom Script', 'Cannot select the same node for successful branch flow and failed 
branch flow': 'Cannot select the same node for successful branch flow and failed branch flow', 'Successful branch flow and failed branch flow are required': 'conditions node Successful and failed branch flow are required', 'No resources exist': 'No resources exist', 'Please delete all non-existing resources': 'Please delete all non-existing resources', 'Unauthorized or deleted resources': 'Unauthorized or deleted resources', 'Please delete all non-existent resources': 'Please delete all non-existent resources', Kinship: 'Workflow relationship', Reset: 'Reset', KinshipStateActive: 'Active', KinshipState1: 'Online', KinshipState0: 'Workflow is not online', KinshipState10: 'Scheduling is not online', 'Dag label display control': 'Dag label display control', Enable: 'Enable', Disable: 'Disable', 'The Worker group no longer exists, please select the correct Worker group!': 'The Worker group no longer exists, please select the correct Worker group!', 'Please confirm whether the workflow has been saved before downloading': 'Please confirm whether the workflow has been saved before downloading', 'User name length is between 3 and 39': 'User name length is between 3 and 39', 'Timeout Settings': 'Timeout Settings', 'Connect Timeout': 'Connect Timeout', 'Socket Timeout': 'Socket Timeout', 'Connect timeout be a positive integer': 'Connect timeout be a positive integer', 'Socket Timeout be a positive integer': 'Socket Timeout be a positive integer', ms: 'ms', 'Please Enter Url': 'Please Enter Url eg. 127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': 'Please select the waterdrop resources', zkDirectory: 'zkDirectory', 'Directory detail': 'Directory detail', 'Connection name': 'Connection name', 'Current connection settings': 'Current connection settings', 'Please save the DAG before formatting': 'Please save the DAG before formatting', 'Batch copy': 'Batch copy', 'Related items': 'Related items', 'Project name is required': 'Project name is required', 'Batch move': 'Batch move', Version: 'Version', 'Pre tasks': 'Pre tasks', 'Running Memory': 'Running Memory', 'Max Memory': 'Max Memory', 'Min Memory': 'Min Memory', 'The workflow canvas is abnormal and cannot be saved, please recreate': 'The workflow canvas is abnormal and cannot be saved, please recreate', Info: 'Info', 'Datasource userName': 'owner', 'Resource userName': 'owner' }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,580
[Improvement][SQL] Query result row limit should be configurable
**Describe the question**
The SQL query result can return at most 10,000 records; this limit is currently hard-coded.

**Which version of DolphinScheduler:**
-[1.3.6-Release]

**Describe alternatives you've considered**
I think it should be configurable. A sketch of the idea follows.
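A minimal sketch of making such a limit configurable, not the actual fix in the linked PR. It reads the limit from `common.properties` (a real DolphinScheduler config file) and falls back to the old hard-coded value; the property key `task.sql.result.limit` is hypothetical, chosen only for illustration.

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

/**
 * Illustrative sketch only: replace a hard-coded SQL result limit
 * with a value read from a properties file. The property key
 * "task.sql.result.limit" is an assumption, not the real config key.
 */
public class SqlResultLimit {

    private static final int DEFAULT_LIMIT = 10000; // the previously hard-coded value

    public static int resultLimit() {
        Properties props = new Properties();
        // Load from the classpath; keep the default when the file or key is absent.
        try (InputStream in = SqlResultLimit.class.getResourceAsStream("/common.properties")) {
            if (in != null) {
                props.load(in);
            }
        } catch (IOException e) {
            // fall through and use the default on any read failure
        }
        return Integer.parseInt(
                props.getProperty("task.sql.result.limit", String.valueOf(DEFAULT_LIMIT)));
    }

    public static void main(String[] args) {
        System.out.println("effective SQL result limit: " + resultLimit());
    }
}
```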
https://github.com/apache/dolphinscheduler/issues/5580
https://github.com/apache/dolphinscheduler/pull/5632
e2d6265e26b27abdf0a212289cca9c0cdad1e0a6
67711442d5add82164a916452020a68a84693000
2021-06-02T04:11:42Z
java
2021-06-16T01:40:21Z
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': '用户名', 'Please enter user name': '请输入用户名', Password: '密码', 'Please enter your password': '请输入密码', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间', Login: '登录', Home: '首页', 'Failed to create node to save': '未创建节点保存失败', 'Global parameters': '全局参数', 'Local parameters': '局部参数', 'Copy success': '复制成功', 'The browser does not support automatic copying': '该浏览器不支持自动复制', 'Whether to save the DAG graph': '是否保存DAG图', 'Current node settings': '当前节点设置', 'View history': '查看历史', 'View log': '查看日志', 'Force success': '强制成功', 'Enter this child node': '进入该子节点', 'Node name': '节点名称', 'Please enter name (required)': '请输入名称(必填)', 'Run flag': '运行标志', Normal: '正常', 'Prohibition execution': '禁止执行', 'Please enter description': '请输入描述', 'Number of failed retries': '失败重试次数', Times: '次', 'Failed retry interval': '失败重试间隔', Minute: '分', 'Delay execution time': '延时执行时间', 'Delay execution': '延时执行', 'Forced success': '强制成功', Cancel: '取消', 'Confirm add': '确认添加', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流', 'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流', 'Name already exists': '名称已存在请重新输入', 'Download Log': '下载日志', 'Refresh Log': '刷新日志', 'Enter full screen': '进入全屏', 'Cancel full screen': '取消全屏', Close: '关闭', 'Update log success': '更新日志成功', 'No more logs': '暂无更多日志', 'No log': '暂无日志', 'Loading Log...': '正在努力请求日志中...', 'Set the DAG diagram name': '设置DAG图名称', 'Please enter description(optional)': '请输入描述(选填)', 'Set global': '设置全局', 'Whether to go online the process definition': '是否上线流程定义', 'Whether to update the process definition': '是否更新流程定义', Add: '添加', 'DAG graph name cannot be empty': 'DAG图名称不能为空', 'Create Datasource': '创建数据源', 'Project Home': '项目首页', 'Project Manage': '项目管理', 'Create Project': '创建项目', 'Cron Manage': '定时管理', 'Copy Workflow': '复制工作流', 'Tenant Manage': '租户管理', 'Create Tenant': '创建租户', 'User Manage': '用户管理', 'Create User': '创建用户', 'User Information': '用户信息', 'Edit Password': '密码修改', Success: '成功', Failed: '失败', Delete: '删除', 'Please choose': '请选择', 'Please enter a positive integer': '请输入正整数', 'Program Type': '程序类型', 'Main Class': '主函数的Class', 'Main Jar Package': '主Jar包', 'Please enter main jar package': '请选择主Jar包', 'Please enter main class': '请填写主函数的Class', 'Main Arguments': '主程序参数', 'Please enter main arguments': '请输入主程序参数', 'Option Parameters': '选项参数', 'Please enter option parameters': '请输入选项参数', Resources: '资源', 'Custom Parameters': '自定义参数', 'Custom template': '自定义模版', Datasource: '数据源', methods: '方法', 'Please enter the procedure method': '请输入存储脚本 \n\n调用存储过程:{call <procedure-name>[(<arg1>,<arg2>, 
...)]}\n\n调用存储函数:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': '示例:{call <procedure-name>[(?,?, ...)]} 或 {?= call <procedure-name>[(?,?, ...)]}', Script: '脚本', 'Please enter script(required)': '请输入脚本(必填)', 'Deploy Mode': '部署方式', 'Driver Cores': 'Driver核心数', 'Please enter Driver cores': '请输入Driver核心数', 'Driver Memory': 'Driver内存数', 'Please enter Driver memory': '请输入Driver内存数', 'Executor Number': 'Executor数量', 'Please enter Executor number': '请输入Executor数量', 'The Executor number should be a positive integer': 'Executor数量为正整数', 'Executor Memory': 'Executor内存数', 'Please enter Executor memory': '请输入Executor内存数', 'Executor Cores': 'Executor核心数', 'Please enter Executor cores': '请输入Executor核心数', 'Memory should be a positive integer': '内存数为数字', 'Core number should be positive integer': '核心数为正整数', 'Flink Version': 'Flink版本', 'JobManager Memory': 'JobManager内存数', 'Please enter JobManager memory': '请输入JobManager内存数', 'TaskManager Memory': 'TaskManager内存数', 'Please enter TaskManager memory': '请输入TaskManager内存数', 'Slot Number': 'Slot数量', 'Please enter Slot number': '请输入Slot数量', Parallelism: '并行度', 'Please enter Parallelism': '请输入并行度', 'TaskManager Number': 'TaskManager数量', 'Please enter TaskManager number': '请输入TaskManager数量', 'App Name': '任务名称', 'Please enter app name(optional)': '请输入任务名称(选填)', 'SQL Type': 'sql类型', 'Send Email': '发送邮件', 'Log display': '日志显示', 'rows of result': '行查询结果', Title: '主题', 'Please enter the title of email': '请输入邮件主题', Table: '表名', TableMode: '表格', Attachment: '附件', 'SQL Parameter': 'sql参数', 'SQL Statement': 'sql语句', 'UDF Function': 'UDF函数', 'Please enter a SQL Statement(required)': '请输入sql语句(必填)', 'Please enter a JSON Statement(required)': '请输入json语句(必填)', 'One form or attachment must be selected': '表格、附件必须勾选一个', 'Mail subject required': '邮件主题必填', 'Child Node': '子节点', 'Please select a sub-Process': '请选择子工作流', Edit: '编辑', 'Switch To This Version': '切换到该版本', 'Datasource Name': '数据源名称', 'Please enter datasource name': '请输入数据源名称', IP: 'IP主机名', 'Please enter IP': '请输入IP主机名', Port: '端口', 'Please enter port': '请输入端口', 'Database Name': '数据库名', 'Please enter database name': '请输入数据库名', 'Oracle Connect Type': '服务名或SID', 'Oracle Service Name': '服务名', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc连接参数', 'Test Connect': '测试连接', 'Please enter resource name': '请输入数据源名称', 'Please enter resource folder name': '请输入资源文件夹名称', 'Please enter a non-query SQL statement': '请输入非查询sql语句', 'Please enter IP/hostname': '请输入IP/主机名', 'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式', '#': '编号', 'Datasource Type': '数据源类型', 'Datasource Parameter': '数据源参数', 'Create Time': '创建时间', 'Update Time': '更新时间', Operation: '操作', 'Current Version': '当前版本', 'Click to view': '点击查看', 'Delete?': '确定删除吗?', 'Switch Version Successfully': '切换版本成功', 'Confirm Switch To This Version?': '确定切换到该版本吗?', Confirm: '确定', 'Task status statistics': '任务状态统计', Number: '数量', State: '状态', 'Process Status Statistics': '流程状态统计', 'Process Definition Statistics': '流程定义统计', 'Project Name': '项目名称', 'Please enter name': '请输入名称', 'Owned Users': '所属用户', 'Process Pid': '进程Pid', 'Zk registration directory': 'zk注册目录', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': '最后心跳时间', 'Edit Tenant': '编辑租户', 'OS Tenant Code': '操作系统租户', 'Tenant Name': '租户名称', Queue: '队列', 'Please select a queue': '默认为租户关联队列', 'Please enter the os tenant code in English': '请输入操作系统租户只允许英文', 'Please enter os tenant code in English': '请输入英文操作系统租户', 'Please enter os tenant code': 
'请输入操作系统租户', 'Please enter tenant Name': '请输入租户名称', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': '操作系统租户只允许字母或字母与数字组合', 'Edit User': '编辑用户', Tenant: '租户', Email: '邮件', Phone: '手机', 'User Type': '用户类型', 'Please enter phone number': '请输入手机', 'Please enter email': '请输入邮箱', 'Please enter the correct email format': '请输入正确的邮箱格式', 'Please enter the correct mobile phone format': '请输入正确的手机格式', Project: '项目', Authorize: '授权', 'File resources': '文件资源', 'UDF resources': 'UDF资源', 'UDF resources directory': 'UDF资源目录', 'Please select UDF resources directory': '请选择UDF资源目录', 'Alarm group': '告警组', 'Alarm group required': '告警组必填', 'Edit alarm group': '编辑告警组', 'Create alarm group': '创建告警组', 'Create Alarm Instance': '创建告警实例', 'Edit Alarm Instance': '编辑告警实例', 'Group Name': '组名称', 'Alarm instance name': '告警实例名称', 'Alarm plugin name': '告警插件名称', 'Select plugin': '选择插件', 'Please enter group name': '请输入组名称', 'Instance parameter exception': '实例参数异常', 'Group Type': '组类型', 'Alarm plugin instance': '告警插件实例', Remarks: '备注', SMS: '短信', 'Managing Users': '管理用户', Permission: '权限', Administrator: '管理员', 'Confirm Password': '确认密码', 'Please enter confirm password': '请输入确认密码', 'Password cannot be in Chinese': '密码不能为中文', 'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码', 'Confirmation password cannot be in Chinese': '确认密码不能为中文', 'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码', 'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认', 'Please select the datasource': '请选择数据源', 'Please select resources': '请选择资源', Query: '查询', 'Non Query': '非查询', 'prop(required)': 'prop(必填)', 'value(optional)': 'value(选填)', 'value(required)': 'value(必填)', 'prop is empty': 'prop不能为空', 'value is empty': 'value不能为空', 'prop is repeat': 'prop中有重复', 'Start Time': '开始时间', 'End Time': '结束时间', crontab: 'crontab', 'Failure Strategy': '失败策略', online: '上线', offline: '下线', 'Task Status': '任务状态', 'Process Instance': '工作流实例', 'Task Instance': '任务实例', 'Select date range': '选择日期区间', startDate: '开始日期', endDate: '结束日期', Date: '日期', Waiting: '等待', Execution: '执行中', Finish: '完成', 'Create File': '创建文件', 'Create folder': '创建文件夹', 'File Name': '文件名称', 'Folder Name': '文件夹名称', 'File Format': '文件格式', 'Folder Format': '文件夹格式', 'File Content': '文件内容', 'Upload File Size': '文件大小不能超过1G', Create: '创建', 'Please enter the resource content': '请输入资源内容', 'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行', 'File Details': '文件详情', 'Download Details': '下载详情', Return: '返回', Save: '保存', 'File Manage': '文件管理', 'Upload Files': '上传文件', 'Create UDF Function': '创建UDF函数', 'Upload UDF Resources': '上传UDF资源', 'Service-Master': '服务管理-Master', 'Service-Worker': '服务管理-Worker', 'Process Name': '工作流名称', Executor: '执行用户', 'Run Type': '运行类型', 'Scheduling Time': '调度时间', 'Run Times': '运行次数', host: 'host', 'fault-tolerant sign': '容错标识', Rerun: '重跑', 'Recovery Failed': '恢复失败', Stop: '停止', Pause: '暂停', 'Recovery Suspend': '恢复运行', Gantt: '甘特图', 'Node Type': '节点类型', 'Submit Time': '提交时间', Duration: '运行时长', 'Retry Count': '重试次数', 'Task Name': '任务名称', 'Task Date': '任务日期', 'Source Table': '源表', 'Record Number': '记录数', 'Target Table': '目标表', 'Online viewing type is not supported': '不支持在线查看类型', Size: '大小', Rename: '重命名', Download: '下载', Export: '导出', 'Version Info': '版本信息', Submit: '提交', 'Edit UDF Function': '编辑UDF函数', type: '类型', 'UDF Function Name': 'UDF函数名称', FILE: '文件', UDF: 'UDF', 'File Subdirectory': '文件子目录', 'Please enter a function name': '请输入函数名', 'Package Name': '包名类名', 
'Please enter a Package name': '请输入包名类名', Parameter: '参数', 'Please enter a parameter': '请输入参数', 'UDF Resources': 'UDF资源', 'Upload Resources': '上传资源', Instructions: '使用说明', 'Please enter a instructions': '请输入使用说明', 'Please enter a UDF function name': '请输入UDF函数名称', 'Select UDF Resources': '请选择UDF资源', 'Class Name': '类名', 'Jar Package': 'jar包', 'Library Name': '库名', 'UDF Resource Name': 'UDF资源名称', 'File Size': '文件大小', Description: '描述', 'Drag Nodes and Selected Items': '拖动节点和选中项', 'Select Line Connection': '选择线条连接', 'Delete selected lines or nodes': '删除选中的线或节点', 'Full Screen': '全屏', Unpublished: '未发布', 'Start Process': '启动工作流', 'Execute from the current node': '从当前节点开始执行', 'Recover tolerance fault process': '恢复被容错的工作流', 'Resume the suspension process': '恢复运行流程', 'Execute from the failed nodes': '从失败节点开始执行', 'Complement Data': '补数', 'Scheduling execution': '调度执行', 'Recovery waiting thread': '恢复等待线程', 'Submitted successfully': '提交成功', Executing: '正在执行', 'Ready to pause': '准备暂停', 'Ready to stop': '准备停止', 'Need fault tolerance': '需要容错', Kill: 'Kill', 'Waiting for thread': '等待线程', 'Waiting for dependence': '等待依赖', Start: '运行', Copy: '复制节点', 'Copy name': '复制名称', 'Copy path': '复制路径', 'Please enter keyword': '请输入关键词', 'File Upload': '文件上传', 'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!', 'Drag area upload': '拖动区域上传', Upload: '上传', 'ReUpload File': '重新上传文件', 'Please enter file name': '请输入文件名', 'Please select the file to upload': '请选择要上传的文件', 'Resources manage': '资源中心', Security: '安全中心', Logout: '退出', 'No data': '查询无数据', 'Uploading...': '文件上传中', 'Loading...': '正在努力加载中...', List: '列表', 'Unable to download without proper url': '无下载url无法下载', Process: '工作流', 'Process definition': '工作流定义', 'Task record': '任务记录', 'Warning group manage': '告警组管理', 'Warning instance manage': '告警实例管理', 'Servers manage': '服务管理', 'UDF manage': 'UDF管理', 'Resource manage': '资源管理', 'Function manage': '函数管理', 'Edit password': '修改密码', 'Ordinary users': '普通用户', 'Create process': '创建工作流', 'Import process': '导入工作流', 'Timing state': '定时状态', Timing: '定时', Timezone: '时区', TreeView: '树形图', 'Mailbox already exists! 
Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复', 'Mailbox input is illegal': '邮箱输入不合法', 'Please set the parameters before starting': '启动前请先设置参数', Continue: '继续', End: '结束', 'Node execution': '节点执行', 'Backward execution': '向后执行', 'Forward execution': '向前执行', 'Execute only the current node': '仅执行当前节点', 'Notification strategy': '通知策略', 'Notification group': '通知组', 'Please select a notification group': '请选择通知组', receivers: '收件人', receiverCcs: '抄送人', 'Whether it is a complement process?': '是否补数', 'Schedule date': '调度日期', 'Mode of execution': '执行方式', 'Serial execution': '串行执行', 'Parallel execution': '并行执行', 'Set parameters before timing': '定时前请先设置参数', 'Start and stop time': '起止时间', 'Please select time': '请选择时间', 'Please enter crontab': '请输入crontab', none_1: '都不发', success_1: '成功发', failure_1: '失败发', All_1: '成功或失败都发', Toolbar: '工具栏', 'View variables': '查看变量', 'Format DAG': '格式化DAG', 'Refresh DAG status': '刷新DAG状态', Return_1: '返回上一节点', 'Please enter format': '请输入格式为', 'connection parameter': '连接参数', 'Process definition details': '流程定义详情', 'Create process definition': '创建流程定义', 'Scheduled task list': '定时任务列表', 'Process instance details': '流程实例详情', 'Create Resource': '创建资源', 'User Center': '用户中心', AllStatus: '全部状态', None: '无', Name: '名称', 'Process priority': '流程优先级', 'Task priority': '任务优先级', 'Task timeout alarm': '任务超时告警', 'Timeout strategy': '超时策略', 'Timeout alarm': '超时告警', 'Timeout failure': '超时失败', 'Timeout period': '超时时长', 'Waiting Dependent complete': '等待依赖完成', 'Waiting Dependent start': '等待依赖启动', 'Check interval': '检查间隔', 'Timeout must be longer than check interval': '超时时间必须比检查间隔长', 'Timeout strategy must be selected': '超时策略必须选一个', 'Timeout must be a positive integer': '超时时长必须为正整数', 'Add dependency': '添加依赖', and: '且', or: '或', month: '月', week: '周', day: '日', hour: '时', Running: '正在运行', 'Waiting for dependency to complete': '等待依赖完成', Selected: '已选', CurrentHour: '当前小时', Last1Hour: '前1小时', Last2Hours: '前2小时', Last3Hours: '前3小时', Last24Hours: '前24小时', today: '今天', Last1Days: '昨天', Last2Days: '前两天', Last3Days: '前三天', Last7Days: '前七天', ThisWeek: '本周', LastWeek: '上周', LastMonday: '上周一', LastTuesday: '上周二', LastWednesday: '上周三', LastThursday: '上周四', LastFriday: '上周五', LastSaturday: '上周六', LastSunday: '上周日', ThisMonth: '本月', LastMonth: '上月', LastMonthBegin: '上月初', LastMonthEnd: '上月末', 'Refresh status succeeded': '刷新状态成功', 'Queue manage': 'Yarn 队列管理', 'Create queue': '创建队列', 'Edit queue': '编辑队列', 'Datasource manage': '数据源中心', 'History task record': '历史任务记录', 'Please go online': '不要忘记上线', 'Queue value': '队列值', 'Please enter queue value': '请输入队列值', 'Worker group manage': 'Worker分组管理', 'Create worker group': '创建Worker分组', 'Edit worker group': '编辑Worker分组', 'Token manage': '令牌管理', 'Create token': '创建令牌', 'Edit token': '编辑令牌', Addresses: '地址', 'Worker Addresses': 'Worker地址', 'Please select the worker addresses': '请选择Worker地址', 'Failure time': '失效时间', 'Expiration time': '失效时间', User: '用户', 'Please enter token': '请输入令牌', 'Generate token': '生成令牌', Monitor: '监控中心', Group: '分组', 'Queue statistics': '队列统计', 'Command status statistics': '命令状态统计', 'Task kill': '等待kill任务', 'Task queue': '等待执行任务', 'Error command count': '错误指令数', 'Normal command count': '正确指令数', Manage: '管理', 'Number of connections': '连接数', Sent: '发送量', Received: '接收量', 'Min latency': '最低延时', 'Avg latency': '平均延时', 'Max latency': '最大延时', 'Node count': '节点数', 'Query time': '当前查询时间', 'Node self-test status': '节点自检状态', 'Health status': '健康状态', 'Max connections': '最大连接数', 'Threads connections': '当前连接数', 'Max used connections': '同时使用连接最大数', 
'Threads running connections': '数据库当前活跃连接数', 'Worker group': 'Worker分组', 'Please enter a positive integer greater than 0': '请输入大于 0 的正整数', 'Pre Statement': '前置sql', 'Post Statement': '后置sql', 'Statement cannot be empty': '语句不能为空', 'Process Define Count': '工作流定义数', 'Process Instance Running Count': '正在运行的流程数', 'command number of waiting for running': '待执行的命令数', 'failure command number': '执行失败的命令数', 'tasks number of waiting running': '待运行任务数', 'task number of ready to kill': '待杀死任务数', 'Statistics manage': '统计管理', statistics: '统计', 'select tenant': '选择租户', 'Please enter Principal': '请输入Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': '请输入kerberos认证参数 java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': '请输入kerberos认证参数 login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': '请输入kerberos认证参数 login.user.keytab.path', 'The start time must not be the same as the end': '开始时间和结束时间不能相同', 'Startup parameter': '启动参数', 'Startup type': '启动类型', 'warning of timeout': '超时告警', 'Next five execution times': '接下来五次执行时间', 'Execute time': '执行时间', 'Complement range': '补数范围', 'Http Url': '请求地址', 'Http Method': '请求类型', 'Http Parameters': '请求参数', 'Http Parameters Key': '参数名', 'Http Parameters Position': '参数位置', 'Http Parameters Value': '参数值', 'Http Check Condition': '校验条件', 'Http Condition': '校验内容', 'Please Enter Http Url': '请填写请求地址(必填)', 'Please Enter Http Condition': '请填写校验内容', 'There is no data for this period of time': '该时间段无数据', 'Worker addresses cannot be empty': 'Worker地址不能为空', 'Please generate token': '请生成Token', 'Spark Version': 'Spark版本', TargetDataBase: '目标库', TargetTable: '目标表', 'Please enter the table of target': '请输入目标表名', 'Please enter a Target Table(required)': '请输入目标表(必填)', SpeedByte: '限流(字节数)', SpeedRecord: '限流(记录数)', '0 means unlimited by byte': 'KB,0代表不限制', '0 means unlimited by count': '0代表不限制', 'Modify User': '修改用户', 'Whether directory': '是否文件夹', Yes: '是', No: '否', 'Hadoop Custom Params': 'Hadoop参数', 'Sqoop Advanced Parameters': 'Sqoop参数', 'Sqoop Job Name': '任务名称', 'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)', 'Please enter Mysql Table(required)': '请输入Mysql表名(必填)', 'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开', 'Please enter Target Dir(required)': '请输入目标路径(必填)', 'Please enter Export Dir(required)': '请输入数据源路径(必填)', 'Please enter Hive Database(required)': '请输入Hive数据库(必填)', 'Please enter Hive Table(required)': '请输入Hive表名(必填)', 'Please enter Hive Partition Keys': '请输入分区键', 'Please enter Hive Partition Values': '请输入分区值', 'Please enter Replace Delimiter': '请输入替换分隔符', 'Please enter Fields Terminated': '请输入列分隔符', 'Please enter Lines Terminated': '请输入行分隔符', 'Please enter Concurrency': '请输入并发度', 'Please enter Update Key': '请输入更新列', 'Please enter Job Name(required)': '请输入任务名称(必填)', 'Please enter Custom Shell(required)': '请输入自定义脚本', Direct: '流向', Type: '类型', ModelType: '模式', ColumnType: '列类型', Database: '数据库', Column: '列', 'Map Column Hive': 'Hive类型映射', 'Map Column Java': 'Java类型映射', 'Export Dir': '数据源路径', 'Hive partition Keys': 'Hive 分区键', 'Hive partition Values': 'Hive 分区值', FieldsTerminated: '列分隔符', LinesTerminated: '行分隔符', IsUpdate: '是否更新', UpdateKey: '更新列', UpdateMode: '更新类型', 'Target Dir': '目标路径', DeleteTargetDir: '是否删除目录', FileType: '保存格式', CompressionCodec: '压缩类型', CreateHiveTable: '是否创建新表', DropDelimiter: '是否删除分隔符', OverWriteSrc: '是否覆盖数据源', ReplaceDelimiter: '替换分隔符', Concurrency: '并发度', Form: '表单', OnlyUpdate: '只更新', AllowInsert: 
'无更新便插入', 'Data Source': '数据来源', 'Data Target': '数据目的', 'All Columns': '全表导入', 'Some Columns': '选择列', 'Branch flow': '分支流转', 'Custom Job': '自定义任务', 'Custom Script': '自定义脚本', 'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点', 'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填', 'No resources exist': '不存在资源', 'Please delete all non-existing resources': '请删除所有不存在资源', 'Unauthorized or deleted resources': '未授权或已删除资源', 'Please delete all non-existent resources': '请删除所有未授权或已删除资源', Kinship: '工作流关系', Reset: '重置', KinshipStateActive: '当前选择', KinshipState1: '已上线', KinshipState0: '工作流未上线', KinshipState10: '调度未上线', 'Dag label display control': 'Dag节点名称显隐', Enable: '启用', Disable: '停用', 'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!', 'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存', 'User name length is between 3 and 39': '用户名长度在3~39之间', 'Timeout Settings': '超时设置', 'Connect Timeout': '连接超时', 'Socket Timeout': 'Socket超时', 'Connect timeout be a positive integer': '连接超时必须为数字', 'Socket Timeout be a positive integer': 'Socket超时必须为数字', ms: '毫秒', 'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': '请选择waterdrop配置文件', zkDirectory: 'zk注册目录', 'Directory detail': '查看目录详情', 'Connection name': '连线名', 'Current connection settings': '当前连线设置', 'Please save the DAG before formatting': '格式化前请先保存DAG', 'Batch copy': '批量复制', 'Related items': '关联项目', 'Project name is required': '项目名称必填', 'Batch move': '批量移动', Version: '版本', 'Pre tasks': '前置任务', 'Running Memory': '运行内存', 'Max Memory': '最大内存', 'Min Memory': '最小内存', 'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建', Info: '提示', 'Datasource userName': '所属用户', 'Resource userName': '所属用户' }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,452
[Improvement][Task] DS Flink task should support submitting a PyFlink job via the CLI
**Describe the question**
The DS Flink task should support submitting a PyFlink job via the CLI, as shown in the sketch below:
```
$ ./bin/flink run --python examples/python/table/batch/word_count.py
```
**Which version of DolphinScheduler:**
-[dev]
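A minimal sketch of how a Flink task could assemble the CLI arguments for a PyFlink job, under the assumption that the task distinguishes program types; the `ProgramType` enum and `buildRunArgs` helper here are illustrative names, not the actual DolphinScheduler implementation in the linked PR.

```java
import java.util.ArrayList;
import java.util.List;

/**
 * Illustrative sketch only: assemble `flink run` arguments, adding
 * --python for PyFlink jobs instead of a main jar/class.
 */
public class PyFlinkArgsSketch {

    enum ProgramType { JAVA, SCALA, PYTHON } // assumed program-type switch

    static List<String> buildRunArgs(ProgramType type, String mainFile) {
        List<String> args = new ArrayList<>();
        args.add("flink");
        args.add("run");
        if (type == ProgramType.PYTHON) {
            // PyFlink jobs are submitted with --python <script>
            args.add("--python");
        }
        args.add(mainFile);
        return args;
    }

    public static void main(String[] args) {
        System.out.println(String.join(" ",
                buildRunArgs(ProgramType.PYTHON, "examples/python/table/batch/word_count.py")));
        // prints: flink run --python examples/python/table/batch/word_count.py
    }
}
```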
https://github.com/apache/dolphinscheduler/issues/5452
https://github.com/apache/dolphinscheduler/pull/5453
b05957db419bcf05e17b0a6f309d23382b0a95ec
3026f04d8528a63f26d9b62da00a495c8e9f47ab
2021-05-12T07:43:56Z
java
2021-06-17T07:19:25Z
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import java.util.regex.Pattern; /** * Constants */ public final class Constants { private Constants() { throw new UnsupportedOperationException("Construct Constants"); } /** * quartz config */ public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId"; public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon"; public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties"; public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class"; public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount"; public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons"; public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority"; public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class"; public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix"; public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered"; public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold"; public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval"; public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock"; public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource"; public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class"; /** * quartz config default value */ public static final String QUARTZ_TABLE_PREFIX = "QRTZ_"; public static final String QUARTZ_MISFIRETHRESHOLD = "60000"; public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000"; public static final String QUARTZ_DATASOURCE = "myDs"; public static final String QUARTZ_THREADCOUNT = "25"; public static final String QUARTZ_THREADPRIORITY = "5"; public static final String QUARTZ_INSTANCENAME = "DolphinScheduler"; public static final String QUARTZ_INSTANCEID = "AUTO"; public static final String 
QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true";

    /** common properties path */
    public static final String COMMON_PROPERTIES_PATH = "/common.properties";

    /** fs.defaultFS */
    public static final String FS_DEFAULTFS = "fs.defaultFS";

    /** fs s3a configuration */
    public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
    public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
    public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";

    /** hadoop configuration */
    public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE";
    public static final String HADOOP_RM_STATE_STANDBY = "STANDBY";
    public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port";

    /** yarn.resourcemanager.ha.rm.ids */
    public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";

    /** yarn.application.status.address */
    public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";

    /** yarn.job.history.status.address */
    public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address";

    /** hdfs configuration: hdfs.root.user */
    public static final String HDFS_ROOT_USER = "hdfs.root.user";

    /** hdfs/s3 configuration: resource.upload.path */
    public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path";

    /** data basedir path */
    public static final String DATA_BASEDIR_PATH = "data.basedir.path";

    /** dolphinscheduler.env.path */
    public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";

    /** environment properties default path */
    public static final String ENV_PATH = "env/dolphinscheduler_env.sh";

    /** python home */
    public static final String PYTHON_HOME = "PYTHON_HOME";

    /** resource.view.suffixs */
    public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
    public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE =
            "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js";

    /** development.state */
    public static final String DEVELOPMENT_STATE = "development.state";

    /** sudo enable */
    public static final String SUDO_ENABLE = "sudo.enable";

    /** string true / false */
    public static final String STRING_TRUE = "true";
    public static final String STRING_FALSE = "false";

    /** resource storage type */
    public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type";

    /** MasterServer directory registered in zookeeper */
    public static final String REGISTRY_DOLPHINSCHEDULER_MASTERS = "/nodes/master";

    /** WorkerServer directory registered in zookeeper */
    public static final String REGISTRY_DOLPHINSCHEDULER_WORKERS = "/nodes/worker";

    /** all servers directory registered in zookeeper */
    public static final String REGISTRY_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers";

    /** registry node prefix */
    public static final String REGISTRY_DOLPHINSCHEDULER_NODE = "/nodes";

    /** MasterServer lock directory registered in zookeeper */
    public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters";

    /** MasterServer failover directory registered in zookeeper */
    public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters";

    /** WorkerServer failover directory registered in zookeeper */
    public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers";

    /** MasterServer startup failover running and fault tolerance process */
    public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters";

    /** separators and signs */
    public static final String COMMA = ",";
    public static final String SLASH = "/";
    public static final String COLON = ":";
    public static final String SPACE = " ";
    public static final String SINGLE_SLASH = "/";
    public static final String DOUBLE_SLASH = "//";
    public static final String SINGLE_QUOTES = "'";
    public static final String DOUBLE_QUOTES = "\"";
    public static final String SEMICOLON = ";";
    public static final String EQUAL_SIGN = "=";
    public static final String AT_SIGN = "@";

    /** date formats */
    public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
    public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss";
    public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS";

    /** http connect time out */
    public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000;

    /** http connect request time out */
    public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000;

    /** httpclient socket time out */
    public static final int SOCKET_TIMEOUT = 60 * 1000;

    /** http headers */
    public static final String HTTP_HEADER_UNKNOWN = "unKnown";
    public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For";
    public static final String HTTP_X_REAL_IP = "X-Real-IP";

    /** UTF-8 */
    public static final String UTF_8 = "UTF-8";

    /** user name regex */
    public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$");

    /** email regex */
    public static final Pattern REGEX_MAIL_NAME = Pattern.compile(
            "^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");

    /** default display rows */
    public static final int DEFAULT_DISPLAY_ROWS = 10;

    /** permission bits: read / write / execute */
    public static final int READ_PERMISSION = 2 * 1;
    public static final int WRITE_PERMISSION = 2 * 2;
    public static final int EXECUTE_PERMISSION = 1;

    /** default admin permission */
    public static final int DEFAULT_ADMIN_PERMISSION = 7;

    /** all permissions */
    public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;

    /** max task timeout */
    public static final int MAX_TASK_TIMEOUT = 24 * 3600;

    /** master cpu load */
    public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;

    /** worker cpu load */
    public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;

    /** worker host weight */
    public static final int DEFAULT_WORKER_HOST_WEIGHT = 100;

    /** default log cache rows num, output when the number is reached */
    public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16;

    /** log flush interval, output when the interval is reached */
    public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000;

    /** time unit, seconds to minutes */
    public static final int SEC_2_MINUTES_TIME_UNIT = 60;

    /** rpc port */
    public static final int RPC_PORT = 50051;

    /** alert rpc port */
    public static final int ALERT_RPC_PORT = 50052;

    /** forbid running task */
    public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";

    /** normal running task */
    public static final String FLOWNODE_RUN_FLAG_NORMAL = "NORMAL";

    /** datasource configuration path */
    public static final String DATASOURCE_PROPERTIES = "/datasource.properties";

    public static final String DEFAULT = "Default";
    public static final String USER = "user";
    public static final String PASSWORD = "password";
    public static final String XXXXXX = "******";
    public static final String NULL = "NULL";
    public static final String THREAD_NAME_MASTER_SERVER = "Master-Server";
    public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server";

    /** command parameter keys */
    public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId";
    public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList";
    public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId";
    public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId";
    public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0";
    public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId";
    public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId";
    public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList";
    public static final String CMD_PARAM_START_PARAMS = "StartParams";
    public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams";

    /** complement data start date */
    public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate";

    /** complement data end date */
    public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate";

    /** data source config */
    public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";
    public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";
    public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username";
    public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password";
    public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout";
    public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize";
    public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle";
    public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive";
    public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait";
    public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis";
    public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis";
    public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis";
    public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery";
    public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle";
    public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow";
    public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn";
    public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements";
    public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit";
    public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive";
    public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize";

    public static final String DEVELOPMENT = "development";

    public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties";

    /** sleep time */
    public static final int SLEEP_TIME_MILLIS = 1000;

    /** master task instance cache-database refresh interval */
    public static final int CACHE_REFRESH_TIME_MILLIS = 20 * 1000;

    /** heartbeat for zk info length */
    public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10;
    public static final int HEARTBEAT_WITH_WEIGHT_FOR_ZOOKEEPER_INFO_LENGTH = 11;

    /** jar */
    public static final String JAR = "jar";

    /** hadoop */
    public static final String HADOOP = "hadoop";

    /** -D <property>=<value> */
    public static final String D = "-D";

    /** -D mapreduce.job.name=name */
    public static final String MR_NAME = "mapreduce.job.name";

    /** -D mapreduce.job.queuename=queuename */
    public static final String MR_QUEUE = "mapreduce.job.queuename";

    /** spark params constant */
    public static final String MASTER = "--master";
    public static final String DEPLOY_MODE = "--deploy-mode";
    /** --class CLASS_NAME */
    public static final String MAIN_CLASS = "--class";
    /** --driver-cores NUM */
    public static final String DRIVER_CORES = "--driver-cores";
    /** --driver-memory MEM */
    public static final String DRIVER_MEMORY = "--driver-memory";
    /** --num-executors NUM */
    public static final String NUM_EXECUTORS = "--num-executors";
    /** --executor-cores NUM */
    public static final String EXECUTOR_CORES = "--executor-cores";
    /** --executor-memory MEM */
    public static final String EXECUTOR_MEMORY = "--executor-memory";
    /** --name NAME */
    public static final String SPARK_NAME = "--name";
    /** --queue QUEUE */
    public static final String SPARK_QUEUE = "--queue";

    /** exit code success */
    public static final int EXIT_CODE_SUCCESS = 0;

    /** exit code kill */
    public static final int EXIT_CODE_KILL = 137;

    /** exit code failure */
    public static final int EXIT_CODE_FAILURE = -1;

    /** process or task definition failure */
    public static final int DEFINITION_FAILURE = -1;

    /** date format of yyyyMMdd */
    public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";

    /** date format of yyyyMMddHHmmss */
    public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";

    /** system date(yyyyMMddHHmmss) */
    public static final String PARAMETER_DATETIME = "system.datetime";

    /** system date(yyyymmdd) today */
    public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";

    /** system date(yyyymmdd) yesterday */
    public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";

    /** yarn application states */
    public static final String ACCEPTED = "ACCEPTED";
    public static final String SUCCEEDED = "SUCCEEDED";
    public static final String NEW = "NEW";
    public static final String NEW_SAVING = "NEW_SAVING";
    public static final String SUBMITTED = "SUBMITTED";
    public static final String FAILED = "FAILED";
    public static final String KILLED = "KILLED";
    public static final String RUNNING = "RUNNING";

    /** underline "_" */
    public static final String UNDERLINE = "_";

    /** quartz job prefix */
    public static final String QUARTZ_JOB_PRIFIX = "job";

    /** quartz job group prefix */
    public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";

    /** projectId */
    public static final String PROJECT_ID = "projectId";

    /** scheduleId */
    public static final String SCHEDULE_ID = "scheduleId";

    /** schedule */
    public static final String SCHEDULE = "schedule";

    /** application regex */
    public static final String APPLICATION_REGEX = "application_\\d+_\\d+";

    public static final String PID = OSUtils.isWindows() ? "handle" : "pid";

    /** date functions */
    public static final String MONTH_BEGIN = "month_begin";
    public static final String ADD_MONTHS = "add_months";
    public static final String MONTH_END = "month_end";
    public static final String WEEK_BEGIN = "week_begin";
    public static final String WEEK_END = "week_end";

    /** timestamp */
    public static final String TIMESTAMP = "timestamp";

    public static final char SUBTRACT_CHAR = '-';
    public static final char ADD_CHAR = '+';
    public static final char MULTIPLY_CHAR = '*';
    public static final char DIVISION_CHAR = '/';
    public static final char LEFT_BRACE_CHAR = '(';
    public static final char RIGHT_BRACE_CHAR = ')';
    public static final String ADD_STRING = "+";
    public static final String MULTIPLY_STRING = "*";
    public static final String DIVISION_STRING = "/";
    public static final String LEFT_BRACE_STRING = "(";
    public static final char P = 'P';
    public static final char N = 'N';
    public static final String SUBTRACT_STRING = "-";

    public static final String GLOBAL_PARAMS = "globalParams";
    public static final String LOCAL_PARAMS = "localParams";
    public static final String LOCAL_PARAMS_LIST = "localParamsList";
    public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId";
    public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
    public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance";
    public static final String CONDITION_RESULT = "conditionResult";
    public static final String DEPENDENCE = "dependence";
    public static final String TASK_TYPE = "taskType";
    public static final String TASK_LIST = "taskList";
    public static final String RWXR_XR_X = "rwxr-xr-x";
    public static final String QUEUE = "queue";
    public static final String QUEUE_NAME = "queueName";
    public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0;
    public static final int LOG_QUERY_LIMIT = 4096;

    /** master/worker server use for zk */
    public static final String MASTER_TYPE = "master";
    public static final String WORKER_TYPE = "worker";
    public static final String DELETE_OP = "delete";
    public static final String ADD_OP = "add";
    public static final String ALIAS = "alias";
    public static final String CONTENT = "content";
    public static final String DEPENDENT_SPLIT = ":||";
    public static final String DEPENDENT_ALL = "ALL";

    /** preview schedule execute count */
    public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;

    /** kerberos */
    public static final String KERBEROS = "kerberos";

    /** kerberos expire time */
    public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time";

    /** java.security.krb5.conf */
    public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";

    /** java.security.krb5.conf.path */
    public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";

    /** hadoop.security.authentication */
    public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";

    /** hadoop.security.authentication.startup.state */
    public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";

    /** com.amazonaws.services.s3.enableV4 */
    public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";

    /** loginUserFromKeytab user */
    public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";

    /** loginUserFromKeytab path */
    public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";

    /** task log info format */
    public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s";

    /** hive conf */
    public static final String HIVE_CONF = "hiveconf:";

    /** flink */
    public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
    public static final String FLINK_RUN_MODE = "-m";
    public static final String FLINK_YARN_SLOT = "-ys";
    public static final String FLINK_APP_NAME = "-ynm";
    public static final String FLINK_QUEUE = "-yqu";
    public static final String FLINK_TASK_MANAGE = "-yn";
    public static final String FLINK_JOB_MANAGE_MEM = "-yjm";
    public static final String FLINK_TASK_MANAGE_MEM = "-ytm";
    public static final String FLINK_MAIN_CLASS = "-c";
    public static final String FLINK_PARALLELISM = "-p";
    public static final String FLINK_SHUTDOWN_ON_ATTACHED_EXIT = "-sae";

    public static final int[] NOT_TERMINATED_STATES = new int[] {
        ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
        ExecutionStatus.RUNNING_EXECUTION.ordinal(),
        ExecutionStatus.DELAY_EXECUTION.ordinal(),
        ExecutionStatus.READY_PAUSE.ordinal(),
        ExecutionStatus.READY_STOP.ordinal(),
        ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
        ExecutionStatus.WAITTING_THREAD.ordinal(),
        ExecutionStatus.WAITTING_DEPEND.ordinal()
    };

    /** status */
    public static final String STATUS = "status";

    /** message */
    public static final String MSG = "msg";

    /** data total */
    public static final String COUNT = "count";

    /** page size */
    public static final String PAGE_SIZE = "pageSize";

    /** current page no */
    public static final String PAGE_NUMBER = "pageNo";

    public static final String DATA_LIST = "data";
    public static final String TOTAL_LIST = "totalList";
    public static final String CURRENT_PAGE = "currentPage";
    public static final String TOTAL_PAGE = "totalPage";
    public static final String TOTAL = "total";

    /** workflow */
    public static final String WORKFLOW_LIST = "workFlowList";
    public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList";

    /** session user */
    public static final String SESSION_USER = "session.user";
    public static final String SESSION_ID = "sessionId";
    public static final String PASSWORD_DEFAULT = "******";

    /** locale */
    public static final String LOCALE_LANGUAGE = "language";

    /** driver */
    public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver";
    public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver";
    public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
    public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver";
    public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver";
    public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
    public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver";
    public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver";

    /** database type */
    public static final String MYSQL = "MYSQL";
    public static final String POSTGRESQL = "POSTGRESQL";
    public static final String HIVE = "HIVE";
    public static final String SPARK = "SPARK";
    public static final String CLICKHOUSE = "CLICKHOUSE";
    public static final String ORACLE = "ORACLE";
    public static final String SQLSERVER = "SQLSERVER";
    public static final String DB2 = "DB2";
    public static final String PRESTO = "PRESTO";

    /** jdbc url */
    public static final String JDBC_MYSQL = "jdbc:mysql://";
    public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
    public static final String JDBC_HIVE_2 = "jdbc:hive2://";
    public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
    public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
    public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
    public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
    public static final String JDBC_DB2 = "jdbc:db2://";
    public static final String JDBC_PRESTO = "jdbc:presto://";

    public static final String ADDRESS = "address";
    public static final String DATABASE = "database";
    public static final String JDBC_URL = "jdbcUrl";
    public static final String PRINCIPAL = "principal";
    public static final String OTHER = "other";
    public static final String ORACLE_DB_CONNECT_TYPE = "connectType";
    public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf";
    public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername";
    public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath";

    /** session timeout */
    public static final int SESSION_TIME_OUT = 7200;
    public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024;
    public static final String UDF = "UDF";
    public static final String CLASS = "class";
    public static final String RECEIVERS = "receivers";
    public static final String RECEIVERS_CC = "receiversCc";

    /** dataSource sensitive param */
    public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";

    /** default worker group */
    public static final String DEFAULT_WORKER_GROUP = "default";

    public static final Integer TASK_INFO_LENGTH = 5;

    /** new schedule time */
    public static final String PARAMETER_SHECDULE_TIME = "schedule.time";

    /** authorize writable perm */
    public static final int AUTHORIZE_WRITABLE_PERM = 7;

    /** authorize readable perm */
    public static final int AUTHORIZE_READABLE_PERM = 4;

    /** plugin configurations */
    public static final String PLUGIN_JAR_SUFFIX = ".jar";

    public static final int NORMAL_NODE_STATUS = 0;
    public static final int ABNORMAL_NODE_STATUS = 1;

    public static final String START_TIME = "start time";
    public static final String END_TIME = "end time";
    public static final String START_END_DATE = "startDate,endDate";

    /** system line separator */
    public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator");

    public static final String EXCEL_SUFFIX_XLS = ".xls";

    /** datasource encryption salt */
    public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*";
    public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable";
    public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt";

    /** network interface preferred */
    public static final String DOLPHIN_SCHEDULER_NETWORK_INTERFACE_PREFERRED = "dolphin.scheduler.network.interface.preferred";

    /** network IP gets priority, default inner outer */
    public static final String DOLPHIN_SCHEDULER_NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy";

    /** exec shell scripts */
    public static final String SH = "sh";

    /** pstree, get pid and sub pid */
    public static final String PSTREE = "pstree";

    /** snow flake, data center id, this id must be greater than 0 and less than 32 */
    public static final String SNOW_FLAKE_DATA_CENTER_ID = "data.center.id";

    /** docker & kubernetes */
    public static final boolean DOCKER_MODE = StringUtils.isNotEmpty(System.getenv("DOCKER"));
    public static final boolean KUBERNETES_MODE = StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && StringUtils.isNotEmpty(System.getenv("KUBERNETES_SERVICE_PORT"));
}
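Editor's note: the permission constants above are plain bit flags (EXECUTE=1, READ=2, WRITE=4), so ALL_PERMISSIONS works out to the same 7 as DEFAULT_ADMIN_PERMISSION. A minimal self-contained sketch of how such flags compose; the canWrite helper is illustrative only and not part of DolphinScheduler:

```java
public class PermissionSketch {
    // mirrors the constants above: EXECUTE=1, READ=2, WRITE=4
    static final int EXECUTE_PERMISSION = 1;
    static final int READ_PERMISSION = 2;
    static final int WRITE_PERMISSION = 4;
    static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;

    // hypothetical helper: test a single flag with a bitwise AND
    static boolean canWrite(int perm) {
        return (perm & WRITE_PERMISSION) != 0;
    }

    public static void main(String[] args) {
        System.out.println(ALL_PERMISSIONS);                                 // 7, same as DEFAULT_ADMIN_PERMISSION
        System.out.println(canWrite(ALL_PERMISSIONS));                       // true
        System.out.println(canWrite(READ_PERMISSION | EXECUTE_PERMISSION));  // false
    }
}
```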
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,452
[Improvement][Task] DS Flink task should support submitting a PyFlink job via the CLI
**Describe the question**
The DS Flink task should support submitting a PyFlink job via the CLI:

```
$ ./bin/flink run --python examples/python/table/batch/word_count.py
```

**Which version of DolphinScheduler:**
-[dev]
https://github.com/apache/dolphinscheduler/issues/5452
https://github.com/apache/dolphinscheduler/pull/5453
b05957db419bcf05e17b0a6f309d23382b0a95ec
3026f04d8528a63f26d9b62da00a495c8e9f47ab
2021-05-12T07:43:56Z
java
2021-06-17T07:19:25Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.utils;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.common.utils.StringUtils;

import java.util.ArrayList;
import java.util.List;

/**
 * flink args utils
 */
public class FlinkArgsUtils {

    private static final String LOCAL_DEPLOY_MODE = "local";
    private static final String FLINK_VERSION_BEFORE_1_10 = "<1.10";

    private FlinkArgsUtils() {
        throw new IllegalStateException("Utility class");
    }

    /**
     * build args
     *
     * @param param flink parameters
     * @return argument list
     */
    public static List<String> buildArgs(FlinkParameters param) {
        List<String> args = new ArrayList<>();

        String deployMode = "cluster";
        String tmpDeployMode = param.getDeployMode();
        if (StringUtils.isNotEmpty(tmpDeployMode)) {
            deployMode = tmpDeployMode;
        }
        String others = param.getOthers();
        if (!LOCAL_DEPLOY_MODE.equals(deployMode)) {
            args.add(Constants.FLINK_RUN_MODE);     //-m
            args.add(Constants.FLINK_YARN_CLUSTER); //yarn-cluster

            int slot = param.getSlot();
            if (slot > 0) {
                args.add(Constants.FLINK_YARN_SLOT);
                args.add(String.format("%d", slot)); //-ys
            }

            String appName = param.getAppName();
            if (StringUtils.isNotEmpty(appName)) { //-ynm
                args.add(Constants.FLINK_APP_NAME);
                args.add(ArgsUtils.escape(appName));
            }

            // judge flink version, the parameter -yn has been removed since flink 1.10
            String flinkVersion = param.getFlinkVersion();
            if (flinkVersion == null || FLINK_VERSION_BEFORE_1_10.equals(flinkVersion)) {
                int taskManager = param.getTaskManager();
                if (taskManager > 0) { //-yn
                    args.add(Constants.FLINK_TASK_MANAGE);
                    args.add(String.format("%d", taskManager));
                }
            }

            String jobManagerMemory = param.getJobManagerMemory();
            if (StringUtils.isNotEmpty(jobManagerMemory)) {
                args.add(Constants.FLINK_JOB_MANAGE_MEM);
                args.add(jobManagerMemory); //-yjm
            }

            String taskManagerMemory = param.getTaskManagerMemory();
            if (StringUtils.isNotEmpty(taskManagerMemory)) { // -ytm
                args.add(Constants.FLINK_TASK_MANAGE_MEM);
                args.add(taskManagerMemory);
            }

            if (StringUtils.isEmpty(others) || !others.contains(Constants.FLINK_QUEUE)) {
                String queue = param.getQueue();
                if (StringUtils.isNotEmpty(queue)) { // -yqu
                    args.add(Constants.FLINK_QUEUE);
                    args.add(queue);
                }
            }
        }

        int parallelism = param.getParallelism();
        if (parallelism > 0) {
            args.add(Constants.FLINK_PARALLELISM);
            args.add(String.format("%d", parallelism)); // -p
        }

        // If the job is submitted in attached mode, perform a best-effort cluster shutdown when the CLI is terminated abruptly
        // The task status will be synchronized with the cluster job status
        args.add(Constants.FLINK_SHUTDOWN_ON_ATTACHED_EXIT); // -sae

        // -s -yqu -yat -yD -D
        if (StringUtils.isNotEmpty(others)) {
            args.add(others);
        }

        ProgramType programType = param.getProgramType();
        String mainClass = param.getMainClass();
        if (programType != null && programType != ProgramType.PYTHON && StringUtils.isNotEmpty(mainClass)) {
            args.add(Constants.FLINK_MAIN_CLASS); //-c
            args.add(param.getMainClass());       //main class
        }

        ResourceInfo mainJar = param.getMainJar();
        if (mainJar != null) {
            args.add(mainJar.getRes());
        }

        String mainArgs = param.getMainArgs();
        if (StringUtils.isNotEmpty(mainArgs)) {
            args.add(mainArgs);
        }

        return args;
    }
}
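Editor's note: a minimal sketch of what buildArgs produces for a Python program type, matching the PYTHON branch above. The bean-style setters on FlinkParameters and ResourceInfo are assumed from the getters used in the class and are not confirmed by this record; prefixing the command with `flink run --python` is presumably handled by the Flink task itself, outside this utility.

```java
import java.util.List;

import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.server.utils.FlinkArgsUtils;

public class PyFlinkArgsSketch {
    public static void main(String[] args) {
        FlinkParameters param = new FlinkParameters();
        // "local" deploy mode skips all yarn-cluster flags (-m, -ys, -ynm, ...)
        param.setDeployMode("local");
        // for PYTHON the -c/main-class flags are skipped by buildArgs
        param.setProgramType(ProgramType.PYTHON);

        ResourceInfo script = new ResourceInfo();
        script.setRes("examples/python/table/batch/word_count.py");
        // the "main jar" slot carries the .py script for PyFlink jobs
        param.setMainJar(script);

        List<String> built = FlinkArgsUtils.buildArgs(param);
        // expected shape: -sae examples/python/table/batch/word_count.py
        System.out.println(String.join(" ", built));
    }
}
```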
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,655
[Bug][Master] Task always fails.
Currently, if we have two tasks A and B in one DAG with A -> B, the execution result of A will always be judged as `Failure` by line 919
![image](https://user-images.githubusercontent.com/13628428/122369780-15548400-cf91-11eb-9888-15a297042a63.png)
since the task id is always 0. This mistake is related to this [PR](https://github.com/apache/dolphinscheduler/pull/5572/files?file-filters%5B%5D=.java#diff-8e3989af7cb0208a57e34d7797a5fd4b84b96eb4a1f5064ec0cb0482fcf322ccL80),
![image](https://user-images.githubusercontent.com/13628428/122370375-9744ad00-cf91-11eb-9c70-743b569554ea.png)
where we didn't set the `id` of the cached TaskInstance.

**Which version of Dolphin Scheduler:**
-[dev]
https://github.com/apache/dolphinscheduler/issues/5655
https://github.com/apache/dolphinscheduler/pull/5656
3026f04d8528a63f26d9b62da00a495c8e9f47ab
0d7c32a1e829202d8359f112383b1b56eec6653d
2021-06-17T09:30:01Z
java
2021-06-17T10:45:34Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/cache/impl/TaskInstanceCacheManagerImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.cache.impl;

import static org.apache.dolphinscheduler.common.Constants.CACHE_REFRESH_TIME_MILLIS;

import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand;
import org.apache.dolphinscheduler.remote.command.TaskExecuteResponseCommand;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.master.cache.TaskInstanceCacheManager;
import org.apache.dolphinscheduler.service.process.ProcessService;

import java.util.Map;
import java.util.Map.Entry;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

/**
 * taskInstance state manager
 */
@Component
public class TaskInstanceCacheManagerImpl implements TaskInstanceCacheManager {

    /** taskInstance cache */
    private Map<Integer, TaskInstance> taskInstanceCache = new ConcurrentHashMap<>();

    /** process service */
    @Autowired
    private ProcessService processService;

    /** taskInstance cache refresh timer */
    private Timer refreshTaskInstanceTimer = null;

    @PostConstruct
    public void init() {
        //issue#5539 add thread to fetch task state from database in a fixed rate
        this.refreshTaskInstanceTimer = new Timer(true);
        refreshTaskInstanceTimer.scheduleAtFixedRate(
                new RefreshTaskInstanceTimerTask(), CACHE_REFRESH_TIME_MILLIS, CACHE_REFRESH_TIME_MILLIS
        );
    }

    @PreDestroy
    public void close() {
        this.refreshTaskInstanceTimer.cancel();
    }

    /**
     * get taskInstance by taskInstance id
     *
     * @param taskInstanceId taskInstanceId
     * @return taskInstance
     */
    @Override
    public TaskInstance getByTaskInstanceId(Integer taskInstanceId) {
        return taskInstanceCache.computeIfAbsent(taskInstanceId, k -> processService.findTaskInstanceById(taskInstanceId));
    }

    /**
     * cache taskInstance
     *
     * @param taskExecutionContext taskExecutionContext
     */
    @Override
    public void cacheTaskInstance(TaskExecutionContext taskExecutionContext) {
        TaskInstance taskInstance = new TaskInstance();
        taskInstance.setId(taskExecutionContext.getTaskInstanceId());
        taskInstance.setName(taskExecutionContext.getTaskName());
        taskInstance.setStartTime(taskExecutionContext.getStartTime());
        taskInstance.setTaskType(taskExecutionContext.getTaskType());
        taskInstance.setExecutePath(taskExecutionContext.getExecutePath());
        taskInstanceCache.put(taskExecutionContext.getTaskInstanceId(), taskInstance);
    }

    /**
     * cache taskInstance
     *
     * @param taskAckCommand taskAckCommand
     */
    @Override
    public void cacheTaskInstance(TaskExecuteAckCommand taskAckCommand) {
        TaskInstance taskInstance = new TaskInstance();
        taskInstance.setState(ExecutionStatus.of(taskAckCommand.getStatus()));
        taskInstance.setStartTime(taskAckCommand.getStartTime());
        taskInstance.setHost(taskAckCommand.getHost());
        taskInstance.setExecutePath(taskAckCommand.getExecutePath());
        taskInstance.setLogPath(taskAckCommand.getLogPath());
        taskInstanceCache.put(taskAckCommand.getTaskInstanceId(), taskInstance);
    }

    /**
     * cache taskInstance
     *
     * @param taskExecuteResponseCommand taskExecuteResponseCommand
     */
    @Override
    public void cacheTaskInstance(TaskExecuteResponseCommand taskExecuteResponseCommand) {
        TaskInstance taskInstance = getByTaskInstanceId(taskExecuteResponseCommand.getTaskInstanceId());
        taskInstance.setState(ExecutionStatus.of(taskExecuteResponseCommand.getStatus()));
        taskInstance.setEndTime(taskExecuteResponseCommand.getEndTime());
        taskInstanceCache.put(taskExecuteResponseCommand.getTaskInstanceId(), taskInstance);
    }

    /**
     * remove taskInstance by taskInstanceId
     *
     * @param taskInstanceId taskInstanceId
     */
    @Override
    public void removeByTaskInstanceId(Integer taskInstanceId) {
        taskInstanceCache.remove(taskInstanceId);
    }

    class RefreshTaskInstanceTimerTask extends TimerTask {
        @Override
        public void run() {
            for (Entry<Integer, TaskInstance> taskInstanceEntry : taskInstanceCache.entrySet()) {
                TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceEntry.getKey());
                if (null != taskInstance && taskInstance.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE) {
                    taskInstanceCache.computeIfPresent(taskInstanceEntry.getKey(), (k, v) -> taskInstance);
                }
            }
        }
    }
}
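Editor's note: a minimal sketch of the regression check the report implies, instantiating the cache manager outside Spring (safe for this path because a cached key never falls through to the autowired ProcessService). The no-arg TaskExecutionContext constructor and its setters are assumptions inferred from the getters used above.

```java
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.master.cache.impl.TaskInstanceCacheManagerImpl;

public class TaskInstanceCacheIdCheck {
    public static void main(String[] args) {
        TaskInstanceCacheManagerImpl cache = new TaskInstanceCacheManagerImpl();

        TaskExecutionContext ctx = new TaskExecutionContext();
        ctx.setTaskInstanceId(42);
        ctx.setTaskName("A");
        cache.cacheTaskInstance(ctx);

        TaskInstance cached = cache.getByTaskInstanceId(42);
        // before the fix this printed 0 and task A was judged FAILURE downstream
        System.out.println(cached.getId()); // expected: 42
    }
}
```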
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,653
[Bug][API] Caused by: org.postgresql.util.PSQLException: ERROR: operator does not exist: ` character varying
**Describe the bug**
Caused by: org.postgresql.util.PSQLException: ERROR: operator does not exist: ` character varying
![image](https://user-images.githubusercontent.com/37063904/122350133-855a0e80-cf7f-11eb-86b3-45a227a11ced.png)

**To Reproduce**
Create a workflow definition

**Which version of Dolphin Scheduler:**
-[dev]
https://github.com/apache/dolphinscheduler/issues/5653
https://github.com/apache/dolphinscheduler/pull/5654
0d7c32a1e829202d8359f112383b1b56eec6653d
b21e821845ba7e6a8609b1a79502ea0067085293
2021-06-17T08:16:25Z
java
2021-06-17T13:23:39Z
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessTaskRelationLogMapper.xml
<?xml version="1.0" encoding="UTF-8" ?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements.  See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License.  You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper">
    <sql id="baseSql">
        id, `name`, process_definition_version, project_code, process_definition_code, pre_task_code, pre_task_version,
        post_task_code, post_task_version, condition_type, condition_params, operator, operate_time, create_time, update_time
    </sql>
    <select id="queryByProcessCodeAndVersion" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog">
        select
        <include refid="baseSql"/>
        from t_ds_process_task_relation_log
        WHERE process_definition_code = #{processCode}
        and process_definition_version = #{processVersion}
    </select>
    <select id="queryByTaskRelationList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog">
        select
        <include refid="baseSql"/>
        from t_ds_process_task_relation_log
        WHERE process_definition_code = #{processCode}
        and process_definition_version = #{processVersion}
        and post_task_code = #{taskCode}
        and post_task_version = #{taskVersion}
    </select>
</mapper>
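Editor's note: the PSQLException in this record comes from MySQL-style backtick quoting reaching PostgreSQL, which treats the stray ` as an operator; the standard-conforming quote for identifiers is the double quote. A minimal JDBC sketch of the failure mode; the connection settings are placeholders, not taken from this record, and the PR's actual resolution is not shown beyond the mapper contents above.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class QuotingSketch {
    public static void main(String[] args) throws SQLException {
        // placeholder connection settings, assumed for illustration only
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/dolphinscheduler", "ds", "ds");
             Statement stmt = conn.createStatement()) {
            // MySQL-only quoting: PostgreSQL rejects it with
            // "ERROR: operator does not exist: ` character varying"
            // stmt.executeQuery("select `name` from t_ds_process_task_relation_log");

            // ANSI quoting works on PostgreSQL (and on MySQL with ANSI_QUOTES enabled)
            stmt.executeQuery("select \"name\" from t_ds_process_task_relation_log");
        }
    }
}
```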
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,653
[Bug][API] Caused by: org.postgresql.util.PSQLException: ERROR: operator does not exist: ` character varying
**Describe the bug**
Caused by: org.postgresql.util.PSQLException: ERROR: operator does not exist: ` character varying
![image](https://user-images.githubusercontent.com/37063904/122350133-855a0e80-cf7f-11eb-86b3-45a227a11ced.png)

**To Reproduce**
Create a workflow definition

**Which version of Dolphin Scheduler:**
-[dev]
https://github.com/apache/dolphinscheduler/issues/5653
https://github.com/apache/dolphinscheduler/pull/5654
0d7c32a1e829202d8359f112383b1b56eec6653d
b21e821845ba7e6a8609b1a79502ea0067085293
2021-06-17T08:16:25Z
java
2021-06-17T13:23:39Z
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessTaskRelationMapper.xml
<?xml version="1.0" encoding="UTF-8" ?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements.  See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License.  You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper">
    <sql id="baseSql">
        id, `name`, process_definition_version, project_code, process_definition_code, pre_task_code, pre_task_version,
        post_task_code, post_task_version, condition_type, condition_params, create_time, update_time
    </sql>
    <select id="queryByProcessCode" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation">
        select
        <include refid="baseSql"/>
        from t_ds_process_task_relation
        WHERE project_code = #{projectCode}
        and process_definition_code = #{processCode}
    </select>
    <select id="queryByTaskCode" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation">
        select
        <include refid="baseSql"/>
        from t_ds_process_task_relation
        WHERE pre_task_code = #{taskCode}
        <if test="taskCode != 0">
            or post_task_code = #{taskCode}
        </if>
    </select>
    <select id="queryByTaskCodes" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation">
        select
        <include refid="baseSql"/>
        from t_ds_process_task_relation
        WHERE 1 = 1
        <if test="taskCodes != null and taskCodes.length != 0">
            and pre_task_code in
            <foreach collection="taskCodes" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
            or post_task_code in
            <foreach collection="taskCodes" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
        </if>
    </select>
    <delete id="deleteByCode">
        delete from t_ds_process_task_relation
        WHERE project_code = #{projectCode}
        and process_definition_code = #{processCode}
    </delete>
</mapper>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,653
[Bug][API] Caused by: org.postgresql.util.PSQLException: ERROR: operator does not exist: ` character varying
**Describe the bug**
Caused by: org.postgresql.util.PSQLException: ERROR: operator does not exist: ` character varying
![image](https://user-images.githubusercontent.com/37063904/122350133-855a0e80-cf7f-11eb-86b3-45a227a11ced.png)

**To Reproduce**
Create a workflow definition

**Which version of Dolphin Scheduler:**
-[dev]
https://github.com/apache/dolphinscheduler/issues/5653
https://github.com/apache/dolphinscheduler/pull/5654
0d7c32a1e829202d8359f112383b1b56eec6653d
b21e821845ba7e6a8609b1a79502ea0067085293
2021-06-17T08:16:25Z
java
2021-06-17T13:23:39Z
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/TaskDefinitionLogMapper.xml
<?xml version="1.0" encoding="UTF-8" ?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements.  See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License.  You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper">
    <sql id="baseSql">
        id, code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority,
        worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time,
        resource_ids, operator, operate_time, create_time, update_time
    </sql>
    <select id="queryByDefinitionName" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog">
        select td.id, td.code, td.name, td.version, td.description, td.project_code, td.user_id, td.task_type, td.task_params,
        td.flag, td.task_priority, td.worker_group, td.fail_retry_times, td.fail_retry_interval, td.timeout_flag,
        td.timeout_notify_strategy, td.timeout, td.delay_time, td.resource_ids, td.operator,td.operate_time, td.create_time,
        td.update_time, u.user_name,p.name as project_name
        from t_ds_task_definition_log td
        JOIN t_ds_user u ON td.user_id = u.id
        JOIN t_ds_project p ON td.project_code = p.code
        WHERE p.code = #{projectCode}
        and td.name = #{taskDefinitionName}
    </select>
    <select id="queryMaxVersionForDefinition" resultType="java.lang.Integer">
        select max(version) from t_ds_task_definition_log
        WHERE code = #{taskDefinitionCode}
    </select>
    <select id="queryByDefinitionCodeAndVersion" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog">
        select
        <include refid="baseSql"/>
        from t_ds_task_definition_log
        WHERE code = #{taskDefinitionCode}
        and version = #{version}
    </select>
    <select id="queryByTaskDefinitions" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog">
        select
        <include refid="baseSql"/>
        from t_ds_task_definition_log
        WHERE 1 = 1
        <if test="taskDefinitions != null and taskDefinitions.size != 0">
            and
            <foreach collection="taskDefinitions" index="index" item="item" open="(" separator=" or " close=")">
                (code = #{item.code} and version = #{item.version})
            </foreach>
        </if>
    </select>
</mapper>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,653
[Bug][API] Caused by: org.postgresql.util.PSQLException: ERROR: operator does not exist: ` character varying
**Describe the bug**
Caused by: org.postgresql.util.PSQLException: ERROR: operator does not exist: ` character varying
![image](https://user-images.githubusercontent.com/37063904/122350133-855a0e80-cf7f-11eb-86b3-45a227a11ced.png)

**To Reproduce**
Create a workflow definition

**Which version of Dolphin Scheduler:**
-[dev]
https://github.com/apache/dolphinscheduler/issues/5653
https://github.com/apache/dolphinscheduler/pull/5654
0d7c32a1e829202d8359f112383b1b56eec6653d
b21e821845ba7e6a8609b1a79502ea0067085293
2021-06-17T08:16:25Z
java
2021-06-17T13:23:39Z
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/TaskDefinitionMapper.xml
<?xml version="1.0" encoding="UTF-8" ?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements.  See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License.  You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper">
    <sql id="baseSql">
        id, code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority,
        worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time,
        resource_ids, create_time, update_time
    </sql>
    <select id="queryByDefinitionName" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition">
        select
        <include refid="baseSql"/>
        from t_ds_task_definition
        WHERE project_code = #{projectCode}
        and `name` = #{taskDefinitionName}
    </select>
    <select id="queryAllDefinitionList" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition">
        select
        <include refid="baseSql"/>
        from t_ds_task_definition
        where project_code = #{projectCode}
        order by create_time desc
    </select>
    <select id="queryDefinitionListByIdList" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition">
        select
        <include refid="baseSql"/>
        from t_ds_task_definition
        where id in
        <foreach collection="ids" index="index" item="i" open="(" separator="," close=")">
            #{i}
        </foreach>
    </select>
    <select id="countDefinitionGroupByUser" resultType="org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser">
        SELECT td.user_id as user_id, tu.user_name as user_name, count(0) as count
        FROM t_ds_task_definition td
        JOIN t_ds_user tu on tu.id=td.user_id
        where 1 = 1
        <if test="projectCodes != null and projectCodes.length != 0">
            and td.project_code in
            <foreach collection="projectCodes" index="index" item="i" open="(" separator="," close=")">
                #{i}
            </foreach>
        </if>
        group by td.user_id,tu.user_name
    </select>
    <select id="queryByDefinitionId" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition">
        select td.id, td.code, td.name, td.version, td.description, td.project_code, td.user_id, td.task_type, td.task_params,
        td.flag, td.task_priority, td.worker_group, td.fail_retry_times, td.fail_retry_interval, td.timeout_flag,
        td.timeout_notify_strategy, td.timeout, td.delay_time, td.resource_ids, td.create_time, td.update_time,
        u.user_name,p.name as project_name
        from t_ds_task_definition td
        JOIN t_ds_user u ON td.user_id = u.id
        JOIN t_ds_project p ON td.project_code = p.code
        WHERE td.id = #{taskDefinitionId}
    </select>
    <select id="queryByDefinitionCode" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition">
        select
        <include refid="baseSql"/>
        from t_ds_task_definition
        where code = #{taskDefinitionCode}
    </select>
    <select id="listResources" resultType="java.util.HashMap">
        SELECT id,resource_ids
        FROM t_ds_task_definition
        WHERE resource_ids is not null and resource_ids != ''
    </select>
    <select id="listResourcesByUser" resultType="java.util.HashMap">
        SELECT id,resource_ids
        FROM t_ds_task_definition
        WHERE user_id = #{userId} and resource_ids is not null and resource_ids != ''
    </select>
    <delete id="deleteByCode">
        delete from t_ds_task_definition
        where code = #{code}
    </delete>
</mapper>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,469
[Improvement][Task] Improved shell task execution result log information
**Describe the question**
Improve the shell task execution result log information by adding process.waitFor() and process.exitValue() information to the original log:

```
logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{}", taskExecutionContext.getExecutePath(), processId, result.getExitStatusCode());
```

```
logger.error("process has failure , exitStatusCode : {} , ready to kill ...", result.getExitStatusCode());
```

**Which version of DolphinScheduler:**
-[dev]

An easy improvement; see #4124.
https://github.com/apache/dolphinscheduler/issues/5469
https://github.com/apache/dolphinscheduler/pull/5691
bae047e4a38e4d0e985fad2fcc0d184cf9ca8b53
3b80760c42e4d3208349969a9ec9d5ee1ed86222
2021-05-14T05:09:44Z
java
2021-06-25T01:13:26Z
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractCommandExecutor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.task; import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_FAILURE; import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_KILL; import static org.apache.dolphinscheduler.common.Constants.EXIT_CODE_SUCCESS; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.CommonUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.LoggerUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.utils.ProcessUtils; import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.lang.reflect.Field; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.slf4j.Logger; /** * abstract command executor */ public abstract class AbstractCommandExecutor { /** * rules for extracting application ID */ protected static final Pattern APPLICATION_REGEX = Pattern.compile(Constants.APPLICATION_REGEX); protected StringBuilder varPool = new StringBuilder(); /** * process */ private Process process; /** * log handler */ protected Consumer<List<String>> logHandler; /** * logger */ protected Logger logger; /** * log list */ protected final List<String> logBuffer; protected boolean logOutputIsScuccess = false; /** * SHELL result string */ protected String taskResultString; /** * taskExecutionContext */ protected TaskExecutionContext taskExecutionContext; /** * taskExecutionContextCacheManager */ private TaskExecutionContextCacheManager taskExecutionContextCacheManager; public AbstractCommandExecutor(Consumer<List<String>> logHandler, TaskExecutionContext taskExecutionContext, Logger logger) { this.logHandler = logHandler; this.taskExecutionContext = taskExecutionContext; this.logger 
= logger; this.logBuffer = Collections.synchronizedList(new ArrayList<>()); this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class); } protected AbstractCommandExecutor(List<String> logBuffer) { this.logBuffer = logBuffer; } /** * build process * * @param commandFile command file * @throws IOException IO Exception */ private void buildProcess(String commandFile) throws IOException { // setting up user to run commands List<String> command = new LinkedList<>(); //init process builder ProcessBuilder processBuilder = new ProcessBuilder(); // setting up a working directory processBuilder.directory(new File(taskExecutionContext.getExecutePath())); // merge error information to standard output stream processBuilder.redirectErrorStream(true); // setting up user to run commands if (!OSUtils.isWindows() && CommonUtils.isSudoEnable()) { command.add("sudo"); command.add("-u"); command.add(taskExecutionContext.getTenantCode()); } command.add(commandInterpreter()); command.addAll(commandOptions()); command.add(commandFile); // setting commands processBuilder.command(command); process = processBuilder.start(); // print command printCommand(command); } /** * task specific execution logic * * @param execCommand execCommand * @return CommandExecuteResult * @throws Exception if error throws Exception */ public CommandExecuteResult run(String execCommand) throws Exception { CommandExecuteResult result = new CommandExecuteResult(); int taskInstanceId = taskExecutionContext.getTaskInstanceId(); // If the task has been killed, then the task in the cache is null if (null == taskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId)) { result.setExitStatusCode(EXIT_CODE_KILL); return result; } if (StringUtils.isEmpty(execCommand)) { taskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId); return result; } String commandFilePath = buildCommandFilePath(); // create command file if not exists createCommandFileIfNotExists(execCommand, commandFilePath); //build process buildProcess(commandFilePath); // parse process output parseProcessOutput(process); Integer processId = getProcessId(process); result.setProcessId(processId); // cache processId taskExecutionContext.setProcessId(processId); boolean updateTaskExecutionContextStatus = taskExecutionContextCacheManager.updateTaskExecutionContext(taskExecutionContext); if (Boolean.FALSE.equals(updateTaskExecutionContextStatus)) { ProcessUtils.kill(taskExecutionContext); result.setExitStatusCode(EXIT_CODE_KILL); return result; } // print process id logger.info("process start, process id is: {}", processId); // if timeout occurs, exit directly long remainTime = getRemaintime(); // waiting for the run to finish boolean status = process.waitFor(remainTime, TimeUnit.SECONDS); logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{}", taskExecutionContext.getExecutePath(), processId, result.getExitStatusCode()); // if SHELL task exit if (status) { // set appIds List<String> appIds = getAppIds(taskExecutionContext.getLogPath()); result.setAppIds(String.join(Constants.COMMA, appIds)); // SHELL task state result.setExitStatusCode(process.exitValue()); // if yarn task , yarn state is final state if (process.exitValue() == 0) { result.setExitStatusCode(isSuccessOfYarnState(appIds) ? 
EXIT_CODE_SUCCESS : EXIT_CODE_FAILURE); } } else { logger.error("process has failure , exitStatusCode : {} , ready to kill ...", result.getExitStatusCode()); ProcessUtils.kill(taskExecutionContext); result.setExitStatusCode(EXIT_CODE_FAILURE); } return result; } public String getVarPool() { return varPool.toString(); } /** * cancel application * * @throws Exception exception */ public void cancelApplication() throws Exception { if (process == null) { return; } // clear log clear(); int processId = getProcessId(process); logger.info("cancel process: {}", processId); // kill , waiting for completion boolean killed = softKill(processId); if (!killed) { // hard kill hardKill(processId); // destory process.destroy(); process = null; } } /** * soft kill * * @param processId process id * @return process is alive * @throws InterruptedException interrupted exception */ private boolean softKill(int processId) { if (processId != 0 && process.isAlive()) { try { // sudo -u user command to run command String cmd = String.format("kill %d", processId); cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd); logger.info("soft kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd); Runtime.getRuntime().exec(cmd); } catch (IOException e) { logger.info("kill attempt failed", e); } } return !process.isAlive(); } /** * hard kill * * @param processId process id */ private void hardKill(int processId) { if (processId != 0 && process.isAlive()) { try { String cmd = String.format("kill -9 %d", processId); cmd = OSUtils.getSudoCmd(taskExecutionContext.getTenantCode(), cmd); logger.info("hard kill task:{}, process id:{}, cmd:{}", taskExecutionContext.getTaskAppId(), processId, cmd); Runtime.getRuntime().exec(cmd); } catch (IOException e) { logger.error("kill attempt failed ", e); } } } /** * print command * * @param commands process builder */ private void printCommand(List<String> commands) { String cmdStr; try { cmdStr = ProcessUtils.buildCommandStr(commands); logger.info("task run command:\n{}", cmdStr); } catch (Exception e) { logger.error(e.getMessage(), e); } } /** * clear */ private void clear() { List<String> markerList = new ArrayList<>(); markerList.add(ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER.toString()); if (!logBuffer.isEmpty()) { // log handle logHandler.accept(logBuffer); logBuffer.clear(); } logHandler.accept(markerList); } /** * get the standard output of the process * * @param process process */ private void parseProcessOutput(Process process) { String threadLoggerInfoName = String.format(LoggerUtils.TASK_LOGGER_THREAD_NAME + "-%s", taskExecutionContext.getTaskAppId()); ExecutorService getOutputLogService = ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName + "-" + "getOutputLogService"); getOutputLogService.submit(() -> { BufferedReader inReader = null; try { inReader = new BufferedReader(new InputStreamReader(process.getInputStream())); String line; logBuffer.add("welcome to use bigdata scheduling system..."); while ((line = inReader.readLine()) != null) { if (line.startsWith("${setValue(")) { varPool.append(line.substring("${setValue(".length(), line.length() - 2)); varPool.append("$VarPool$"); } else { logBuffer.add(line); taskResultString = line; } } } catch (Exception e) { logger.error(e.getMessage(), e); } finally { logOutputIsScuccess = true; close(inReader); } }); getOutputLogService.shutdown(); ExecutorService parseProcessOutputExecutorService = 
ThreadUtils.newDaemonSingleThreadExecutor(threadLoggerInfoName); parseProcessOutputExecutorService.submit(() -> { try { long lastFlushTime = System.currentTimeMillis(); while (logBuffer.size() > 0 || !logOutputIsScuccess) { if (logBuffer.size() > 0) { lastFlushTime = flush(lastFlushTime); } else { Thread.sleep(Constants.DEFAULT_LOG_FLUSH_INTERVAL); } } } catch (Exception e) { logger.error(e.getMessage(), e); } finally { clear(); } }); parseProcessOutputExecutorService.shutdown(); } /** * check yarn state * * @param appIds application id list * @return is success of yarn task state */ public boolean isSuccessOfYarnState(List<String> appIds) { boolean result = true; try { for (String appId : appIds) { logger.info("check yarn application status, appId:{}", appId); while (Stopper.isRunning()) { ExecutionStatus applicationStatus = HadoopUtils.getInstance().getApplicationStatus(appId); if (logger.isDebugEnabled()) { logger.debug("check yarn application status, appId:{}, final state:{}", appId, applicationStatus.name()); } if (applicationStatus.equals(ExecutionStatus.FAILURE) || applicationStatus.equals(ExecutionStatus.KILL)) { return false; } if (applicationStatus.equals(ExecutionStatus.SUCCESS)) { break; } ThreadUtils.sleep(Constants.SLEEP_TIME_MILLIS); } } } catch (Exception e) { logger.error("yarn applications: {} , query status failed, exception:{}", StringUtils.join(appIds, ","), e); result = false; } return result; } public int getProcessId() { return getProcessId(process); } /** * get app links * * @param logPath log path * @return app id list */ private List<String> getAppIds(String logPath) { List<String> logs = convertFile2List(logPath); List<String> appIds = new ArrayList<>(); /** * analysis log?get submited yarn application id */ for (String log : logs) { String appId = findAppId(log); if (StringUtils.isNotEmpty(appId) && !appIds.contains(appId)) { logger.info("find app id: {}", appId); appIds.add(appId); } } return appIds; } /** * convert file to list * * @param filename file name * @return line list */ private List<String> convertFile2List(String filename) { List lineList = new ArrayList<String>(100); File file = new File(filename); if (!file.exists()) { return lineList; } BufferedReader br = null; try { br = new BufferedReader(new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8)); String line = null; while ((line = br.readLine()) != null) { lineList.add(line); } } catch (Exception e) { logger.error(String.format("read file: %s failed : ", filename), e); } finally { if (br != null) { try { br.close(); } catch (IOException e) { logger.error(e.getMessage(), e); } } } return lineList; } /** * find app id * * @param line line * @return appid */ private String findAppId(String line) { Matcher matcher = APPLICATION_REGEX.matcher(line); if (matcher.find()) { return matcher.group(); } return null; } /** * get remain time(s) * * @return remain time */ private long getRemaintime() { long usedTime = (System.currentTimeMillis() - taskExecutionContext.getStartTime().getTime()) / 1000; long remainTime = taskExecutionContext.getTaskTimeout() - usedTime; if (remainTime < 0) { throw new RuntimeException("task execution time out"); } return remainTime; } /** * get process id * * @param process process * @return process id */ private int getProcessId(Process process) { int processId = 0; try { Field f = process.getClass().getDeclaredField(Constants.PID); f.setAccessible(true); processId = f.getInt(process); } catch (Throwable e) { logger.error(e.getMessage(), e); } return 
processId; } /** * when log buffer siz or flush time reach condition , then flush * * @param lastFlushTime last flush time * @return last flush time */ private long flush(long lastFlushTime) { long now = System.currentTimeMillis(); /** * when log buffer siz or flush time reach condition , then flush */ if (logBuffer.size() >= Constants.DEFAULT_LOG_ROWS_NUM || now - lastFlushTime > Constants.DEFAULT_LOG_FLUSH_INTERVAL) { lastFlushTime = now; /** log handle */ logHandler.accept(logBuffer); logBuffer.clear(); } return lastFlushTime; } /** * close buffer reader * * @param inReader in reader */ private void close(BufferedReader inReader) { if (inReader != null) { try { inReader.close(); } catch (IOException e) { logger.error(e.getMessage(), e); } } } protected List<String> commandOptions() { return Collections.emptyList(); } protected abstract String buildCommandFilePath(); protected abstract String commandInterpreter(); protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException; public String getTaskResultString() { return taskResultString; } public void setTaskResultString(String taskResultString) { this.taskResultString = taskResultString; } }
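The tail of this executor shows how task stdout is scanned for `${setValue(...)}` markers and folded into varPool with a `$VarPool$` separator. As a reading aid, here is a minimal, self-contained sketch of just that parsing step; the class name and the sample output lines are invented for illustration, while the substring arithmetic mirrors the loop in parseProcessOutput above.

```java
// Minimal standalone sketch of the "${setValue(...)}" parsing performed in
// parseProcessOutput(). Sample lines and class name are illustrative only.
public class VarPoolParseDemo {

    private static final String SET_VALUE_PREFIX = "${setValue(";

    public static void main(String[] args) {
        StringBuilder varPool = new StringBuilder();
        String[] taskOutput = {
            "ordinary log line",
            "${setValue(biz_date=2021-06-28)}",
            "${setValue(retries=3)}"
        };
        for (String line : taskOutput) {
            if (line.startsWith(SET_VALUE_PREFIX)) {
                // strip the "${setValue(" prefix and the trailing ")}",
                // keeping only the "key=value" payload
                varPool.append(line.substring(SET_VALUE_PREFIX.length(), line.length() - 2));
                // "$VarPool$" is the separator used by the executor above
                varPool.append("$VarPool$");
            }
        }
        // prints: biz_date=2021-06-28$VarPool$retries=3$VarPool$
        System.out.println(varPool);
    }
}
```

Note that, like the original loop, this sketch assumes a marker line ends exactly with ")}"; any trailing text after the marker would be clipped by the length() - 2 arithmetic.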
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,702
[Improvement] [APIServer] When updating an existing alarm plug-in instance, the API should use POST instead of GET
When updating an existing alarm plug-in instance, the front-end and back-end interaction should use the POST method instead of GET (a sketch of the corrected controller mapping follows this record). ![image](https://user-images.githubusercontent.com/52202080/123516818-9bc73f00-d6d0-11eb-9186-4f3b2f8dad00.png)
https://github.com/apache/dolphinscheduler/issues/5702
https://github.com/apache/dolphinscheduler/pull/5703
b31ba7e18b89885c37c2129bf9b4816001f09e51
2d71930837092c52dc63a76941b19051ae38cc2e
2021-06-26T14:53:59Z
java
2021-06-27T10:58:22Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/AlertPluginInstanceController.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.controller; import static org.apache.dolphinscheduler.api.enums.Status.CREATE_ALERT_PLUGIN_INSTANCE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.DELETE_ALERT_PLUGIN_INSTANCE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.GET_ALERT_PLUGIN_INSTANCE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.LIST_PAGING_ALERT_PLUGIN_INSTANCE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_ALL_ALERT_PLUGIN_INSTANCE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_ALERT_PLUGIN_INSTANCE_ERROR; import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ApiException; import org.apache.dolphinscheduler.api.service.AlertPluginInstanceService; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.dao.entity.User; import java.util.Map; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.RequestAttribute; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; import io.swagger.annotations.Api; import io.swagger.annotations.ApiImplicitParam; import io.swagger.annotations.ApiImplicitParams; import io.swagger.annotations.ApiOperation; import springfox.documentation.annotations.ApiIgnore; /** * alert plugin instance controller */ @Api(tags = "ALERT_PLUGIN_INSTANCE_TAG") @RestController @RequestMapping("alert-plugin-instance") public class AlertPluginInstanceController extends BaseController { private static final Logger logger = LoggerFactory.getLogger(AlertPluginInstanceController.class); @Autowired private AlertPluginInstanceService alertPluginInstanceService; /** * create alert plugin instance * * @param loginUser login user * @param pluginDefineId alert plugin define id * @param instanceName instance name * @param pluginInstanceParams instance params * @return result */ @ApiOperation(value = "createAlertPluginInstance", notes = "CREATE_ALERT_PLUGIN_INSTANCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "pluginDefineId", value = "ALERT_PLUGIN_DEFINE_ID", required = true, dataType = "Int", example = "100"), @ApiImplicitParam(name = 
"instanceName", value = "ALERT_PLUGIN_INSTANCE_NAME", required = true, dataType = "String", example = "DING TALK"), @ApiImplicitParam(name = "pluginInstanceParams", value = "ALERT_PLUGIN_INSTANCE_PARAMS", required = true, dataType = "String", example = "ALERT_PLUGIN_INSTANCE_PARAMS") }) @PostMapping(value = "/create") @ResponseStatus(HttpStatus.CREATED) @ApiException(CREATE_ALERT_PLUGIN_INSTANCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result createAlertPluginInstance(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "pluginDefineId") int pluginDefineId, @RequestParam(value = "instanceName") String instanceName, @RequestParam(value = "pluginInstanceParams") String pluginInstanceParams) { Map<String, Object> result = alertPluginInstanceService.create(loginUser, pluginDefineId, instanceName, pluginInstanceParams); return returnDataList(result); } /** * updateAlertPluginInstance * * @param loginUser login user * @param alertPluginInstanceId alert plugin instance id * @param instanceName instance name * @param pluginInstanceParams instance params * @return result */ @ApiOperation(value = "update", notes = "UPDATE_ALERT_PLUGIN_INSTANCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "alertPluginInstanceId", value = "ALERT_PLUGIN_INSTANCE_ID", required = true, dataType = "Int", example = "100"), @ApiImplicitParam(name = "instanceName", value = "ALERT_PLUGIN_INSTANCE_NAME", required = true, dataType = "String", example = "DING TALK"), @ApiImplicitParam(name = "pluginInstanceParams", value = "ALERT_PLUGIN_INSTANCE_PARAMS", required = true, dataType = "String", example = "ALERT_PLUGIN_INSTANCE_PARAMS") }) @GetMapping(value = "/update") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_ALERT_PLUGIN_INSTANCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result updateAlertPluginInstance(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "alertPluginInstanceId") int alertPluginInstanceId, @RequestParam(value = "instanceName") String instanceName, @RequestParam(value = "pluginInstanceParams") String pluginInstanceParams) { Map<String, Object> result = alertPluginInstanceService.update(loginUser, alertPluginInstanceId, instanceName, pluginInstanceParams); return returnDataList(result); } /** * deleteAlertPluginInstance * * @param loginUser login user * @param id id * @return result */ @ApiOperation(value = "delete", notes = "DELETE_ALERT_PLUGIN_INSTANCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "id", value = "ALERT_PLUGIN_ID", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/delete") @ResponseStatus(HttpStatus.OK) @ApiException(DELETE_ALERT_PLUGIN_INSTANCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result deleteAlertPluginInstance(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "id") int id) { Map<String, Object> result = alertPluginInstanceService.delete(loginUser, id); return returnDataList(result); } /** * getAlertPluginInstance * * @param loginUser login user * @param id alert plugin instance id * @return result */ @ApiOperation(value = "get", notes = "GET_ALERT_PLUGIN_INSTANCE_NOTES") @PostMapping(value = "/get") @ResponseStatus(HttpStatus.OK) @ApiException(GET_ALERT_PLUGIN_INSTANCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result getAlertPluginInstance(@ApiIgnore @RequestAttribute(value = 
Constants.SESSION_USER) User loginUser, @RequestParam(value = "id") int id) { Map<String, Object> result = alertPluginInstanceService.get(loginUser, id); return returnDataList(result); } /** * getAlertPluginInstance * * @param loginUser login user * @return result */ @ApiOperation(value = "/queryAll", notes = "QUERY_ALL_ALERT_PLUGIN_INSTANCE_NOTES") @PostMapping(value = "/queryAll") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_ALL_ALERT_PLUGIN_INSTANCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result getAlertPluginInstance(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) { Map<String, Object> result = alertPluginInstanceService.queryAll(); return returnDataList(result); } /** * check alert group exist * * @param loginUser login user * @param alertInstanceName alert instance name * @return check result code */ @ApiOperation(value = "verifyAlertInstanceName", notes = "VERIFY_ALERT_INSTANCE_NAME_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "groupName", value = "GROUP_NAME", required = true, dataType = "String"), }) @GetMapping(value = "/verify-alert-instance-name") @ResponseStatus(HttpStatus.OK) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result verifyGroupName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "alertInstanceName") String alertInstanceName) { boolean exist = alertPluginInstanceService.checkExistPluginInstanceName(alertInstanceName); Result result = new Result(); if (exist) { logger.error("alert plugin instance {} has exist, can't create again.", alertInstanceName); result.setCode(Status.PLUGIN_INSTANCE_ALREADY_EXIT.getCode()); result.setMsg(Status.PLUGIN_INSTANCE_ALREADY_EXIT.getMsg()); } else { result.setCode(Status.SUCCESS.getCode()); result.setMsg(Status.SUCCESS.getMsg()); } return result; } /** * paging query alert plugin instance group list * * @param loginUser login user * @param pageNo page number * @param pageSize page size * @return alert plugin instance list page */ @ApiOperation(value = "queryAlertPluginInstanceListPaging", notes = "QUERY_ALERT_PLUGIN_INSTANCE_LIST_PAGING_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"), @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20") }) @GetMapping(value = "/list-paging") @ResponseStatus(HttpStatus.OK) @ApiException(LIST_PAGING_ALERT_PLUGIN_INSTANCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result listPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("pageNo") Integer pageNo, @RequestParam("pageSize") Integer pageSize) { Map<String, Object> result = checkPageParams(pageNo, pageSize); if (result.get(Constants.STATUS) != Status.SUCCESS) { return returnDataListPaging(result); } result = alertPluginInstanceService.queryPluginPage(pageNo, pageSize); return returnDataListPaging(result); } }
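This record pairs the issue with the controller above, where the update endpoint is still declared with @GetMapping. Below is a sketch of the change the issue asks for; it is a fragment of the controller class rather than a standalone file, the method body is carried over verbatim, and whether the merged PR also converted other GET endpoints (such as /delete) is not shown here.

```java
// Sketch: the only change is @GetMapping -> @PostMapping on the update endpoint.
@PostMapping(value = "/update")
@ResponseStatus(HttpStatus.OK)
@ApiException(UPDATE_ALERT_PLUGIN_INSTANCE_ERROR)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result updateAlertPluginInstance(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                        @RequestParam(value = "alertPluginInstanceId") int alertPluginInstanceId,
                                        @RequestParam(value = "instanceName") String instanceName,
                                        @RequestParam(value = "pluginInstanceParams") String pluginInstanceParams) {
    Map<String, Object> result = alertPluginInstanceService.update(loginUser, alertPluginInstanceId, instanceName, pluginInstanceParams);
    return returnDataList(result);
}
```

With @PostMapping in place, @RequestParam still binds the same fields (it accepts form-encoded body parameters as well as query strings), so the front end only needs to switch the HTTP method, assuming it posts form-encoded data.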
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,702
[Improvement] [APIServer] When updating an existing alarm plug-in instance, the API should use POST instead of GET
When updating an existing alarm plug-in instance, the front-end and back-end interaction should use the POST method instead of GET (the matching front-end change is to call io.post instead of io.get in the actions below). ![image](https://user-images.githubusercontent.com/52202080/123516818-9bc73f00-d6d0-11eb-9186-4f3b2f8dad00.png)
https://github.com/apache/dolphinscheduler/issues/5702
https://github.com/apache/dolphinscheduler/pull/5703
b31ba7e18b89885c37c2129bf9b4816001f09e51
2d71930837092c52dc63a76941b19051ae38cc2e
2021-06-26T14:53:59Z
java
2021-06-27T10:58:22Z
dolphinscheduler-ui/src/js/conf/home/store/security/actions.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import io from '@/module/io' export default { /** * verify Name * @param alertgroup/verifyGroupName * @param users/verifyUserName * @param tenant/verifyTenantCode */ verifyName ({ state }, payload) { const o = { user: { param: { userName: payload.userName }, api: 'users/verify-user-name' }, tenant: { param: { tenantCode: payload.tenantCode }, api: 'tenant/verify-tenant-code' }, alertgroup: { param: { groupName: payload.groupName }, api: 'alert-group/verify-group-name' }, alarmInstance: { param: { alertInstanceName: payload.instanceName }, api: 'alert-plugin-instance/verify-alert-instance-name' } } return new Promise((resolve, reject) => { io.get(o[payload.type].api, o[payload.type].param, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Create user * @param "userName":string * @param "userPassword": string * @param "tenantId":int * @param "email":string * @param "phone":string */ createUser ({ state }, payload) { return new Promise((resolve, reject) => { io.post('users/create', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Verify that the username exists * @param userName */ verifyUserName ({ state }, payload) { return new Promise((resolve, reject) => { io.post('users/verify-user-name', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Paging query user list * @param "pageNo":int, * @param "searchVal":string, * @param "pageSize":int */ getUsersListP ({ state }, payload) { return new Promise((resolve, reject) => { io.get('users/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * user list expect admin */ getUsersList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('users/list', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * user all list */ getUsersAll ({ state }, payload) { return new Promise((resolve, reject) => { io.get('users/list-all', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Update user * @param "id":int, * @param "userName":string, * @param "userPassword": string, * @param "tenantId":int, * @param "email":string, * @param "phone":string */ updateUser ({ state }, payload) { return new Promise((resolve, reject) => { io.post('users/update', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * delete users * @param "id":int */ deleteUser ({ state }, payload) { return new Promise((resolve, reject) => { io.post('users/delete', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Obtain authorized and unauthorized items */ getAuthList ({ state }, payload) { const o = { type: payload.type, category: payload.category } const param = {} // Manage user if (o.type === 'user') { param.alertgroupId = 
payload.id } else { param.userId = payload.id } // Authorized project const p1 = new Promise((resolve, reject) => { io.get(`${o.category}/unauth-${o.type}`, param, res => { resolve(res.data) }).catch(e => { reject(e) }) }) // Unauthorized project const p2 = new Promise((resolve, reject) => { io.get(`${o.category}/authed-${o.type}`, param, res => { resolve(res.data) }).catch(e => { reject(e) }) }) return new Promise((resolve, reject) => { Promise.all([p1, p2]).then(a => { resolve(a) }).catch(e => { reject(e) }) }) }, getResourceList ({ state }, payload) { const o = { type: payload.type, category: payload.category } const param = {} // Manage user if (o.type === 'user') { param.alertgroupId = payload.id } else { param.userId = payload.id } // Authorized project const p1 = new Promise((resolve, reject) => { io.get(`${o.category}/authorize-resource-tree`, param, res => { resolve(res.data) }).catch(e => { reject(e) }) }) // Unauthorized project const p2 = new Promise((resolve, reject) => { io.get(`${o.category}/authed-${o.type}`, param, res => { resolve(res.data) }).catch(e => { reject(e) }) }) return new Promise((resolve, reject) => { Promise.all([p1, p2]).then(a => { resolve(a) }).catch(e => { reject(e) }) }) }, /** * Authorization [project, resource, data source] * @param Project,Resources,Datasource */ grantAuthorization ({ state }, payload) { return new Promise((resolve, reject) => { io.post(payload.api, payload.param, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query user details * @param "userId":int */ getUsersDetails ({ state }, payload) { return new Promise((resolve, reject) => { io.post('users/select-by-id', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Tenant list - pagination */ getTenantListP ({ state }, payload) { return new Promise((resolve, reject) => { io.get('tenant/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Tenant list - no paging */ getTenantList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('tenant/list', payload, res => { const list = res.data list.unshift({ id: -1, tenantCode: 'default' }) state.tenantAllList = list resolve(list) }).catch(e => { reject(e) }) }) }, /** * Queue interface */ getQueueList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('queue/list', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Create Queue */ createQueue ({ state }, payload) { return new Promise((resolve, reject) => { io.post('tenant/create', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * update Queue */ updateQueue ({ state }, payload) { return new Promise((resolve, reject) => { io.post('tenant/update', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * delete Queue */ deleteQueue ({ state }, payload) { return new Promise((resolve, reject) => { io.post('tenant/delete', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * queryAlertGroupListPaging */ queryAlertGroupListPaging ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-group/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * queryAlertPluginInstanceListPaging */ queryAlertPluginInstanceListPaging ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-plugin-instance/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * queryUiPlugins */ getPlugins ({ state }, payload) 
{ return new Promise((resolve, reject) => { io.post('ui-plugins/queryUiPluginsByType', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * queryUiPluginById */ getUiPluginById ({ state }, payload) { return new Promise((resolve, reject) => { io.post('ui-plugins/queryUiPluginDetailById', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * queryAll alert-plugin-instance */ queryAllAlertPluginInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.post('alert-plugin-instance/queryAll', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Alarm group list */ getAlertgroup ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-group/list', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Create an alarm group. */ createAlertgrou ({ state }, payload) { return new Promise((resolve, reject) => { io.post('alert-group/create', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * create alert plugin instance operation */ createAlertPluginInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.post('alert-plugin-instance/create', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * update alert plugin instance operation */ updateAlertPluginInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-plugin-instance/update', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * update an alarm group. */ updateAlertgrou ({ state }, payload) { return new Promise((resolve, reject) => { io.post('alert-group/update', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * delete alarm group. */ deleteAlertgrou ({ state }, payload) { return new Promise((resolve, reject) => { io.post('alert-group/delete', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * delete alert plugin instance operation */ deletAelertPluginInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-plugin-instance/delete', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Master list */ getProcessMasterList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('process/master/list', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Worker list */ getProcessWorkerList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('process/worker/list', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * get queue list pages */ getQueueListP ({ state }, payload) { return new Promise((resolve, reject) => { io.get('queue/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * create queue */ createQueueQ ({ state }, payload) { return new Promise((resolve, reject) => { io.post('queue/create', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * update queue */ updateQueueQ ({ state }, payload) { return new Promise((resolve, reject) => { io.post('queue/update', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * update queue */ verifyQueueQ ({ state }, payload) { return new Promise((resolve, reject) => { io.post('queue/verify-queue', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * get worker groups */ getWorkerGroups ({ state }, payload) { return new Promise((resolve, reject) => { io.get('worker-group/list-paging', payload, res => { 
resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * get worker groups all */ getWorkerGroupsAll ({ state }, payload) { return new Promise((resolve, reject) => { io.get('worker-group/all-groups', payload, res => { let list = res.data if (list.length > 0) { list = list.map(item => { return { id: item, name: item } }) } else { list.unshift({ id: 'default', name: 'default' }) } state.workerGroupsListAll = list resolve(list) }).catch(e => { reject(e) }) }) }, /** * get alarm groups all */ getAlarmGroupsAll ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-group/list', payload, res => { state.alarmGroupsListAll = res.data resolve(res) }).catch(e => { reject(e) }) }) }, saveWorkerGroups ({ state }, payload) { return new Promise((resolve, reject) => { io.post('worker-group/save', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, deleteWorkerGroups ({ state }, payload) { return new Promise((resolve, reject) => { io.post('worker-group/delete-by-id', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, getWorkerAddresses ({ state }, payload) { return new Promise((resolve, reject) => { io.get('worker-group/worker-address-list', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,710
[Improvement][ALERT-LOG] The usage of Preconditions.checkState in Guava is wrong
The usage of Preconditions.checkState in Guava is wrong: the placeholder should use the %s form instead of {}, as in [DolphinPluginLoader.java#L107](https://github.com/apache/dolphinscheduler/blob/3d8d1ebe67cc3331ff5fda649675b5c61a04df5f/dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/plugin/DolphinPluginLoader.java#L107) ```java private void loadPlugin(URLClassLoader pluginClassLoader) { ServiceLoader<DolphinSchedulerPlugin> serviceLoader = ServiceLoader.load(DolphinSchedulerPlugin.class, pluginClassLoader); List<DolphinSchedulerPlugin> plugins = ImmutableList.copyOf(serviceLoader); Preconditions.checkState(!plugins.isEmpty(), "No service providers the plugin {}",DolphinSchedulerPlugin.class.getName()); for (DolphinSchedulerPlugin plugin : plugins) { logger.info("Installing {}", plugin.getClass().getName()); for (AbstractDolphinPluginManager dolphinPluginManager : dolphinPluginManagerList) { dolphinPluginManager.installPlugin(plugin); } } } ``` We can reproduce this with a simple sample: ```java public class Test { public static void main(String[] args) { String word = "letters"; checkState(word.isEmpty(), "Alert Plugin {} is not null", word); } } ``` The result is: **Alert Plugin {} is not null [letters]**, but we actually want the message to read: **Alert Plugin letters is not null** (a corrected version of this sample follows this record)
https://github.com/apache/dolphinscheduler/issues/5710
https://github.com/apache/dolphinscheduler/pull/5711
3d8d1ebe67cc3331ff5fda649675b5c61a04df5f
239cfe5027b558abe1f17b1e7dd38504f10ab3e0
2021-06-28T16:34:24Z
java
2021-06-28T17:03:52Z
dolphinscheduler-alert/src/main/java/org/apache/dolphinscheduler/alert/plugin/AlertPluginManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.alert.plugin; import static java.lang.String.format; import static java.util.Objects.requireNonNull; import static com.google.common.base.Preconditions.checkState; import org.apache.dolphinscheduler.common.enums.PluginType; import org.apache.dolphinscheduler.spi.plugin.AbstractDolphinPluginManager; import org.apache.dolphinscheduler.dao.DaoFactory; import org.apache.dolphinscheduler.dao.PluginDao; import org.apache.dolphinscheduler.dao.entity.PluginDefine; import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin; import org.apache.dolphinscheduler.spi.alert.AlertChannel; import org.apache.dolphinscheduler.spi.alert.AlertChannelFactory; import org.apache.dolphinscheduler.spi.classloader.ThreadContextClassLoader; import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer; import org.apache.dolphinscheduler.spi.params.base.PluginParams; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * load the configured alert plugin and manager them */ public class AlertPluginManager extends AbstractDolphinPluginManager { private static final Logger logger = LoggerFactory.getLogger(AlertPluginManager.class); private final Map<String, AlertChannelFactory> alertChannelFactoryMap = new ConcurrentHashMap<>(); private final Map<String, AlertChannel> alertChannelMap = new ConcurrentHashMap<>(); /** * k->pluginDefineId v->pluginDefineName */ private final Map<Integer, String> pluginDefineMap = new HashMap<>(); private PluginDao pluginDao = DaoFactory.getDaoInstance(PluginDao.class); private void addAlertChannelFactory(AlertChannelFactory alertChannelFactory) { requireNonNull(alertChannelFactory, "alertChannelFactory is null"); if (alertChannelFactoryMap.putIfAbsent(alertChannelFactory.getName(), alertChannelFactory) != null) { throw new IllegalArgumentException(format("Alert Plugin '%s' is already registered", alertChannelFactory.getName())); } try { loadAlertChannel(alertChannelFactory.getName()); } catch (Exception e) { throw new IllegalArgumentException(format("Alert Plugin '%s' is can not load .", alertChannelFactory.getName())); } } private void loadAlertChannel(String name) { requireNonNull(name, "name is null"); AlertChannelFactory alertChannelFactory = alertChannelFactoryMap.get(name); checkState(alertChannelFactory != null, "Alert Plugin {} is not registered", name); try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(alertChannelFactory.getClass().getClassLoader())) { AlertChannel alertChannel = alertChannelFactory.create(); this.alertChannelMap.put(name, alertChannel); } logger.info("-- Loaded Alert Plugin {} --", name); } Map<String, 
AlertChannelFactory> getAlertChannelFactoryMap() { return alertChannelFactoryMap; } public Map<String, AlertChannel> getAlertChannelMap() { return alertChannelMap; } public String getPluginNameById(int id) { return pluginDefineMap.get(id); } @Override public void installPlugin(DolphinSchedulerPlugin dolphinSchedulerPlugin) { for (AlertChannelFactory alertChannelFactory : dolphinSchedulerPlugin.getAlertChannelFactorys()) { logger.info("Registering Alert Plugin '{}'", alertChannelFactory.getName()); this.addAlertChannelFactory(alertChannelFactory); List<PluginParams> params = alertChannelFactory.getParams(); String nameEn = alertChannelFactory.getName(); String paramsJson = PluginParamsTransfer.transferParamsToJson(params); PluginDefine pluginDefine = new PluginDefine(nameEn, PluginType.ALERT.getDesc(), paramsJson); int id = pluginDao.addOrUpdatePluginDefine(pluginDefine); pluginDefineMap.put(id, pluginDefine.getPluginName()); } } }
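As a quick check on the fix direction, here is the issue's own reproduction sample with the placeholder corrected to Guava's %s form; the Test class is a throwaway demo, and the same one-token change applies to the checkState call in loadAlertChannel above ("Alert Plugin %s is not registered", name).

```java
import static com.google.common.base.Preconditions.checkState;

public class Test {
    public static void main(String[] args) {
        String word = "letters";
        // word is non-empty, so checkState throws IllegalStateException;
        // with %s the message renders as "Alert Plugin letters is not null"
        checkState(word.isEmpty(), "Alert Plugin %s is not null", word);
    }
}
```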
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,710
[Improvement][ALERT-LOG] The usage of Preconditions.checkState in Guava is wrong
The usage of Preconditions.checkState in Guava is wrong: the placeholder should use the %s form instead of {}, as in [DolphinPluginLoader.java#L107](https://github.com/apache/dolphinscheduler/blob/3d8d1ebe67cc3331ff5fda649675b5c61a04df5f/dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/plugin/DolphinPluginLoader.java#L107) ```java private void loadPlugin(URLClassLoader pluginClassLoader) { ServiceLoader<DolphinSchedulerPlugin> serviceLoader = ServiceLoader.load(DolphinSchedulerPlugin.class, pluginClassLoader); List<DolphinSchedulerPlugin> plugins = ImmutableList.copyOf(serviceLoader); Preconditions.checkState(!plugins.isEmpty(), "No service providers the plugin {}",DolphinSchedulerPlugin.class.getName()); for (DolphinSchedulerPlugin plugin : plugins) { logger.info("Installing {}", plugin.getClass().getName()); for (AbstractDolphinPluginManager dolphinPluginManager : dolphinPluginManagerList) { dolphinPluginManager.installPlugin(plugin); } } } ``` We can reproduce this with a simple sample: ```java public class Test { public static void main(String[] args) { String word = "letters"; checkState(word.isEmpty(), "Alert Plugin {} is not null", word); } } ``` The result is: **Alert Plugin {} is not null [letters]**, but we actually want the message to read: **Alert Plugin letters is not null** (the one-line fix for the linked call site is sketched after this record)
https://github.com/apache/dolphinscheduler/issues/5710
https://github.com/apache/dolphinscheduler/pull/5711
3d8d1ebe67cc3331ff5fda649675b5c61a04df5f
239cfe5027b558abe1f17b1e7dd38504f10ab3e0
2021-06-28T16:34:24Z
java
2021-06-28T17:03:52Z
dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/plugin/DolphinPluginLoader.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.spi.plugin; import static java.lang.String.format; import static java.util.Objects.requireNonNull; import org.apache.dolphinscheduler.spi.DolphinSchedulerPlugin; import org.apache.dolphinscheduler.spi.classloader.ThreadContextClassLoader; import java.io.File; import java.io.IOException; import java.net.URL; import java.net.URLClassLoader; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.ServiceLoader; import java.util.Set; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.sonatype.aether.artifact.Artifact; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.Ordering; import io.airlift.resolver.ArtifactResolver; /** * Plugin Loader * Load Plugin from pom when development and run server in IDE * Load Plugin from the plugin directory when running on the server */ public class DolphinPluginLoader { private static final Logger logger = LoggerFactory.getLogger(DolphinPluginLoader.class); /** * All third-party jar packages used in the classes which in spi package need to be add */ private static final ImmutableList<String> DOLPHIN_SPI_PACKAGES = ImmutableList.<String>builder() .add("org.apache.dolphinscheduler.spi.") .add("com.fasterxml.jackson.") .build(); private final File installedPluginsDir; private final List<String> configPlugins; private ArtifactResolver resolver = null; private final List<AbstractDolphinPluginManager> dolphinPluginManagerList; public DolphinPluginLoader(DolphinPluginManagerConfig config, List<AbstractDolphinPluginManager> dolphinPluginManagerList) { installedPluginsDir = config.getInstalledPluginsDir(); if (config.getPlugins() == null) { this.configPlugins = ImmutableList.of(); } else { this.configPlugins = ImmutableList.copyOf(config.getPlugins()); } this.dolphinPluginManagerList = requireNonNull(dolphinPluginManagerList, "dolphinPluginManagerList is null"); if (configPlugins != null && configPlugins.size() > 0) { this.resolver = new ArtifactResolver(config.getMavenLocalRepository(), config.getMavenRemoteRepository()); } } public void loadPlugins() throws Exception { for (File file : listPluginDirs(installedPluginsDir)) { if (file.isDirectory()) { loadPlugin(file.getAbsolutePath()); } } for (String plugin : configPlugins) { loadPlugin(plugin); } } private void loadPlugin(String plugin) throws Exception { logger.info("-- Loading plugin {} --", plugin); URLClassLoader pluginClassLoader = buildPluginClassLoader(plugin); try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(pluginClassLoader)) { loadPlugin(pluginClassLoader); } logger.info("-- Finished loading plugin {} --", plugin); } private void loadPlugin(URLClassLoader 
pluginClassLoader) { ServiceLoader<DolphinSchedulerPlugin> serviceLoader = ServiceLoader.load(DolphinSchedulerPlugin.class, pluginClassLoader); List<DolphinSchedulerPlugin> plugins = ImmutableList.copyOf(serviceLoader); Preconditions.checkState(!plugins.isEmpty(), "No service providers the plugin {}", DolphinSchedulerPlugin.class.getName()); for (DolphinSchedulerPlugin plugin : plugins) { logger.info("Installing {}", plugin.getClass().getName()); for (AbstractDolphinPluginManager dolphinPluginManager : dolphinPluginManagerList) { dolphinPluginManager.installPlugin(plugin); } } } private URLClassLoader buildPluginClassLoader(String plugin) throws Exception { File file = new File(plugin); if (!file.isDirectory() && (file.getName().equals("pom.xml") || file.getName().endsWith(".pom"))) { return buildPluginClassLoaderFromPom(file); } if (file.isDirectory()) { return buildPluginClassLoaderFromDirectory(file); } else { throw new IllegalArgumentException(format("plugin must be a pom file or directory %s .", plugin)); } } private URLClassLoader buildPluginClassLoaderFromPom(File pomFile) throws Exception { List<Artifact> artifacts = resolver.resolvePom(pomFile); URLClassLoader classLoader = createClassLoader(artifacts, pomFile.getPath()); Artifact artifact = artifacts.get(0); Set<String> plugins = DolphinPluginDiscovery.discoverPluginsFromArtifact(artifact, classLoader); if (!plugins.isEmpty()) { DolphinPluginDiscovery.writePluginServices(plugins, artifact.getFile()); } return classLoader; } private URLClassLoader buildPluginClassLoaderFromDirectory(File dir) throws Exception { logger.info("Classpath for {}:", dir.getName()); List<URL> urls = new ArrayList<>(); for (File file : listPluginDirs(dir)) { logger.info(" {}", file); urls.add(file.toURI().toURL()); } return createClassLoader(urls); } private URLClassLoader createClassLoader(List<Artifact> artifacts, String name) throws IOException { logger.info("Classpath for {}:", name); List<URL> urls = new ArrayList<>(); for (Artifact artifact : sortArtifacts(artifacts)) { if (artifact.getFile() == null) { throw new RuntimeException("Could not resolve artifact: " + artifact); } File file = artifact.getFile().getCanonicalFile(); logger.info(" {}", file); urls.add(file.toURI().toURL()); } return createClassLoader(urls); } private URLClassLoader createClassLoader(List<URL> urls) { ClassLoader parent = getClass().getClassLoader(); return new DolphinPluginClassLoader(urls, parent, DOLPHIN_SPI_PACKAGES); } private static List<File> listPluginDirs(File installedPluginsDir) { if (installedPluginsDir != null && installedPluginsDir.isDirectory()) { File[] files = installedPluginsDir.listFiles(); if (files != null) { Arrays.sort(files); return ImmutableList.copyOf(files); } } return ImmutableList.of(); } private static List<Artifact> sortArtifacts(List<Artifact> artifacts) { List<Artifact> list = new ArrayList<>(artifacts); list.sort(Ordering.natural().nullsLast().onResultOf(Artifact::getFile)); return list; } }
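For the DolphinPluginLoader call site the issue links to, the correction is the same placeholder swap; a sketch of the single statement inside loadPlugin, with the rest of the method unchanged:

```java
// inside loadPlugin(URLClassLoader pluginClassLoader): %s instead of {}
Preconditions.checkState(!plugins.isEmpty(),
        "No service providers the plugin %s", DolphinSchedulerPlugin.class.getName());
```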
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,701
[Bug][UI][DAO] When deleting a user, the accessToken associated with the user should also be deleted
When deleting a user, the accessToken associated with the user should also be deleted. 1. create a normal user ![image](https://user-images.githubusercontent.com/52202080/123464117-0029c600-d61f-11eb-84ba-bd4717ca5ada.png) 2. add an accessToken associated with the user created in step 1. ![image (1)](https://user-images.githubusercontent.com/52202080/123464155-0c158800-d61f-11eb-8c4a-f41c58f7a441.png) 3. delete the user created in step 1, then have a look at the accessToken: ![image (2)](https://user-images.githubusercontent.com/52202080/123464249-2c454700-d61f-11eb-8790-2c21ef2fccbb.png) **Which version of Dolphin Scheduler:** 1.3.7-release
https://github.com/apache/dolphinscheduler/issues/5701
https://github.com/apache/dolphinscheduler/pull/5697
239cfe5027b558abe1f17b1e7dd38504f10ab3e0
8d7d3a816672c196cf827055ce0221efcac68b55
2021-06-26T13:17:23Z
java
2021-06-28T17:27:39Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/UsersServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent; import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.UsersService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.EncryptionUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.entity.AlertGroup; import org.apache.dolphinscheduler.dao.entity.DatasourceUser; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.ResourcesUser; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UDFUser; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UDFUserMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils; import java.io.IOException; import java.text.MessageFormat; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import 
org.springframework.transaction.annotation.Transactional; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; /** * users service impl */ @Service public class UsersServiceImpl extends BaseServiceImpl implements UsersService { private static final Logger logger = LoggerFactory.getLogger(UsersServiceImpl.class); @Autowired private UserMapper userMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectUserMapper projectUserMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private DataSourceUserMapper datasourceUserMapper; @Autowired private UDFUserMapper udfUserMapper; @Autowired private AlertGroupMapper alertGroupMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProjectMapper projectMapper; /** * create user, only system admin have permission * * @param loginUser login user * @param userName user name * @param userPassword user password * @param email email * @param tenantId tenant id * @param phone phone * @param queue queue * @return create result code * @throws Exception exception */ @Override @Transactional(rollbackFor = Exception.class) public Map<String, Object> createUser(User loginUser, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws IOException { Map<String, Object> result = new HashMap<>(); //check all user params String msg = this.checkUserParams(userName, userPassword, email, phone); if (!StringUtils.isEmpty(msg)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, msg); return result; } if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (!checkTenantExists(tenantId)) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } User user = createUser(userName, userPassword, email, tenantId, phone, queue, state); Tenant tenant = tenantMapper.queryById(tenantId); // resource upload startup if (PropertyUtils.getResUploadStartupState()) { // if tenant not exists if (!HadoopUtils.getInstance().exists(HadoopUtils.getHdfsTenantDir(tenant.getTenantCode()))) { createTenantDirIfNotExists(tenant.getTenantCode()); } String userPath = HadoopUtils.getHdfsUserDir(tenant.getTenantCode(), user.getId()); HadoopUtils.getInstance().mkdir(userPath); } putMsg(result, Status.SUCCESS); return result; } @Override @Transactional(rollbackFor = RuntimeException.class) public User createUser(String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) { User user = new User(); Date now = new Date(); user.setUserName(userName); user.setUserPassword(EncryptionUtils.getMd5(userPassword)); user.setEmail(email); user.setTenantId(tenantId); user.setPhone(phone); user.setState(state); // create general users, administrator users are currently built-in user.setUserType(UserType.GENERAL_USER); user.setCreateTime(now); user.setUpdateTime(now); if (StringUtils.isEmpty(queue)) { queue = ""; } user.setQueue(queue); // save user userMapper.insert(user); return user; } /*** * create User for ldap login */ @Override @Transactional(rollbackFor = Exception.class) public User createUser(UserType userType, String userId, String email) { User user = new User(); Date now = new Date(); user.setUserName(userId); user.setEmail(email); // create general users, administrator users are currently built-in user.setUserType(userType); user.setCreateTime(now); 
user.setUpdateTime(now); user.setQueue(""); // save user userMapper.insert(user); return user; } /** * get user by user name * * @param userName user name * @return exist user or null */ @Override public User getUserByUserName(String userName) { return userMapper.queryByUserNameAccurately(userName); } /** * query user by id * * @param id id * @return user info */ @Override public User queryUser(int id) { return userMapper.selectById(id); } @Override public List<User> queryUser(List<Integer> ids) { if (CollectionUtils.isEmpty(ids)) { return new ArrayList<>(); } return userMapper.selectByIds(ids); } /** * query user * * @param name name * @return user info */ @Override public User queryUser(String name) { return userMapper.queryByUserNameAccurately(name); } /** * query user * * @param name name * @param password password * @return user info */ @Override public User queryUser(String name, String password) { String md5 = EncryptionUtils.getMd5(password); return userMapper.queryUserByNamePassword(name, md5); } /** * get user id by user name * * @param name user name * @return if name empty 0, user not exists -1, user exist user id */ @Override public int getUserIdByName(String name) { //executor name query int executorId = 0; if (StringUtils.isNotEmpty(name)) { User executor = queryUser(name); if (null != executor) { executorId = executor.getId(); } else { executorId = -1; } } return executorId; } /** * query user list * * @param loginUser login user * @param pageNo page number * @param searchVal search avlue * @param pageSize page size * @return user list page */ @Override public Map<String, Object> queryUserList(User loginUser, String searchVal, Integer pageNo, Integer pageSize) { Map<String, Object> result = new HashMap<>(); if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } Page<User> page = new Page<>(pageNo, pageSize); IPage<User> scheduleList = userMapper.queryUserPaging(page, searchVal); PageInfo<User> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotalCount((int) scheduleList.getTotal()); pageInfo.setLists(scheduleList.getRecords()); result.put(Constants.DATA_LIST, pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * updateProcessInstance user * * * @param loginUser * @param userId user id * @param userName user name * @param userPassword user password * @param email email * @param tenantId tennat id * @param phone phone * @param queue queue * @return update result code * @throws Exception exception */ @Override public Map<String, Object> updateUser(User loginUser, int userId, String userName, String userPassword, String email, int tenantId, String phone, String queue, int state) throws IOException { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (check(result, !hasPerm(loginUser, userId), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } if (StringUtils.isNotEmpty(userName)) { if (!CheckUtils.checkUserName(userName)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } User tempUser = userMapper.queryByUserNameAccurately(userName); if (tempUser != null && tempUser.getId() != userId) { putMsg(result, Status.USER_NAME_EXIST); return result; } user.setUserName(userName); } if (StringUtils.isNotEmpty(userPassword)) { if (!CheckUtils.checkPassword(userPassword)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userPassword); 
return result; } user.setUserPassword(EncryptionUtils.getMd5(userPassword)); } if (StringUtils.isNotEmpty(email)) { if (!CheckUtils.checkEmail(email)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, email); return result; } user.setEmail(email); } if (StringUtils.isNotEmpty(phone) && !CheckUtils.checkPhone(phone)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, phone); return result; } user.setPhone(phone); user.setQueue(queue); user.setState(state); Date now = new Date(); user.setUpdateTime(now); //if user switches the tenant, the user's resources need to be copied to the new tenant if (user.getTenantId() != tenantId) { Tenant oldTenant = tenantMapper.queryById(user.getTenantId()); //query tenant Tenant newTenant = tenantMapper.queryById(tenantId); if (newTenant != null) { // if hdfs startup if (PropertyUtils.getResUploadStartupState() && oldTenant != null) { String newTenantCode = newTenant.getTenantCode(); String oldResourcePath = HadoopUtils.getHdfsResDir(oldTenant.getTenantCode()); String oldUdfsPath = HadoopUtils.getHdfsUdfDir(oldTenant.getTenantCode()); // if old tenant dir exists if (HadoopUtils.getInstance().exists(oldResourcePath)) { String newResourcePath = HadoopUtils.getHdfsResDir(newTenantCode); String newUdfsPath = HadoopUtils.getHdfsUdfDir(newTenantCode); //file resources list List<Resource> fileResourcesList = resourceMapper.queryResourceList( null, userId, ResourceType.FILE.ordinal()); if (CollectionUtils.isNotEmpty(fileResourcesList)) { ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(fileResourcesList); ResourceComponent resourceComponent = resourceTreeVisitor.visit(); copyResourceFiles(resourceComponent, oldResourcePath, newResourcePath); } //udf resources List<Resource> udfResourceList = resourceMapper.queryResourceList( null, userId, ResourceType.UDF.ordinal()); if (CollectionUtils.isNotEmpty(udfResourceList)) { ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(udfResourceList); ResourceComponent resourceComponent = resourceTreeVisitor.visit(); copyResourceFiles(resourceComponent, oldUdfsPath, newUdfsPath); } //Delete the user from the old tenant directory String oldUserPath = HadoopUtils.getHdfsUserDir(oldTenant.getTenantCode(), userId); HadoopUtils.getInstance().delete(oldUserPath, true); } else { // if old tenant dir not exists , create createTenantDirIfNotExists(oldTenant.getTenantCode()); } if (HadoopUtils.getInstance().exists(HadoopUtils.getHdfsTenantDir(newTenant.getTenantCode()))) { //create user in the new tenant directory String newUserPath = HadoopUtils.getHdfsUserDir(newTenant.getTenantCode(), user.getId()); HadoopUtils.getInstance().mkdir(newUserPath); } else { // if new tenant dir not exists , create createTenantDirIfNotExists(newTenant.getTenantCode()); } } } user.setTenantId(tenantId); } // updateProcessInstance user userMapper.updateById(user); putMsg(result, Status.SUCCESS); return result; } /** * delete user * * @param loginUser login user * @param id user id * @return delete result code * @throws Exception exception when operate hdfs */ @Override public Map<String, Object> deleteUserById(User loginUser, int id) throws IOException { Map<String, Object> result = new HashMap<>(); //only admin can operate if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM, id); return result; } //check exist User tempUser = userMapper.selectById(id); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, id); return result; } // check if is a project owner List<Project> projects = 
projectMapper.queryProjectCreatedByUser(id); if (CollectionUtils.isNotEmpty(projects)) { String projectNames = projects.stream().map(Project::getName).collect(Collectors.joining(",")); putMsg(result, Status.TRANSFORM_PROJECT_OWNERSHIP, projectNames); return result; } // delete user User user = userMapper.queryTenantCodeByUserId(id); if (user != null) { if (PropertyUtils.getResUploadStartupState()) { String userPath = HadoopUtils.getHdfsUserDir(user.getTenantCode(), id); if (HadoopUtils.getInstance().exists(userPath)) { HadoopUtils.getInstance().delete(userPath, true); } } } userMapper.deleteById(id); putMsg(result, Status.SUCCESS); return result; } /** * grant project * * @param loginUser login user * @param userId user id * @param projectIds project id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantProject(User loginUser, int userId, String projectIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } //check exist User tempUser = userMapper.selectById(userId); if (tempUser == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } //if the selected projectIds are empty, delete all items associated with the user if (check(result, StringUtils.isEmpty(projectIds), Status.SUCCESS)) { projectUserMapper.deleteProjectRelation(0, userId); return result; } String[] projectIdArr = projectIds.split(","); for (String projectId : projectIdArr) { Date now = new Date(); ProjectUser projectUser = new ProjectUser(); projectUser.setUserId(userId); projectUser.setProjectId(Integer.parseInt(projectId)); projectUser.setPerm(7); projectUser.setCreateTime(now); projectUser.setUpdateTime(now); projectUserMapper.insert(projectUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant resource * * @param loginUser login user * @param userId user id * @param resourceIds resource id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantResources(User loginUser, int userId, String resourceIds) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } Set<Integer> needAuthorizeResIds = new HashSet<>(); if (StringUtils.isNotBlank(resourceIds)) { String[] resourceFullIdArr = resourceIds.split(","); // need authorize resource id set for (String resourceFullId : resourceFullIdArr) { String[] resourceIdArr = resourceFullId.split("-"); for (int i = 0; i <= resourceIdArr.length - 1; i++) { int resourceIdValue = Integer.parseInt(resourceIdArr[i]); needAuthorizeResIds.add(resourceIdValue); } } } //get the authorized resource id list by user id List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, Constants.AUTHORIZE_WRITABLE_PERM); List<Resource> oldAuthorizedRes = CollectionUtils.isEmpty(resIds) ? 
new ArrayList<>() : resourceMapper.queryResourceListById(resIds); //if resource type is UDF,need check whether it is bound by UDF function Set<Integer> oldAuthorizedResIds = oldAuthorizedRes.stream().map(Resource::getId).collect(Collectors.toSet()); //get the unauthorized resource id list oldAuthorizedResIds.removeAll(needAuthorizeResIds); if (CollectionUtils.isNotEmpty(oldAuthorizedResIds)) { // get all resource id of process definitions those is released List<Map<String, Object>> list = processDefinitionMapper.listResourcesByUser(userId); Map<Integer, Set<Long>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list); Set<Integer> resourceIdSet = resourceProcessMap.keySet(); resourceIdSet.retainAll(oldAuthorizedResIds); if (CollectionUtils.isNotEmpty(resourceIdSet)) { logger.error("can't be deleted,because it is used of process definition"); for (Integer resId : resourceIdSet) { logger.error("resource id:{} is used of process definition {}", resId, resourceProcessMap.get(resId)); } putMsg(result, Status.RESOURCE_IS_USED); return result; } } resourceUserMapper.deleteResourceUser(userId, 0); if (check(result, StringUtils.isEmpty(resourceIds), Status.SUCCESS)) { return result; } for (int resourceIdValue : needAuthorizeResIds) { Resource resource = resourceMapper.selectById(resourceIdValue); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } Date now = new Date(); ResourcesUser resourcesUser = new ResourcesUser(); resourcesUser.setUserId(userId); resourcesUser.setResourcesId(resourceIdValue); if (resource.isDirectory()) { resourcesUser.setPerm(Constants.AUTHORIZE_READABLE_PERM); } else { resourcesUser.setPerm(Constants.AUTHORIZE_WRITABLE_PERM); } resourcesUser.setCreateTime(now); resourcesUser.setUpdateTime(now); resourceUserMapper.insert(resourcesUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant udf function * * @param loginUser login user * @param userId user id * @param udfIds udf id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantUDFFunction(User loginUser, int userId, String udfIds) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userId); return result; } udfUserMapper.deleteByUserId(userId); if (check(result, StringUtils.isEmpty(udfIds), Status.SUCCESS)) { return result; } String[] resourcesIdArr = udfIds.split(","); for (String udfId : resourcesIdArr) { Date now = new Date(); UDFUser udfUser = new UDFUser(); udfUser.setUserId(userId); udfUser.setUdfId(Integer.parseInt(udfId)); udfUser.setPerm(7); udfUser.setCreateTime(now); udfUser.setUpdateTime(now); udfUserMapper.insert(udfUser); } putMsg(result, Status.SUCCESS); return result; } /** * grant datasource * * @param loginUser login user * @param userId user id * @param datasourceIds data source id array * @return grant result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> grantDataSource(User loginUser, int userId, String datasourceIds) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } User user = userMapper.selectById(userId); if (user == null) { 
putMsg(result, Status.USER_NOT_EXIST, userId); return result; } datasourceUserMapper.deleteByUserId(userId); if (check(result, StringUtils.isEmpty(datasourceIds), Status.SUCCESS)) { return result; } String[] datasourceIdArr = datasourceIds.split(","); for (String datasourceId : datasourceIdArr) { Date now = new Date(); DatasourceUser datasourceUser = new DatasourceUser(); datasourceUser.setUserId(userId); datasourceUser.setDatasourceId(Integer.parseInt(datasourceId)); datasourceUser.setPerm(7); datasourceUser.setCreateTime(now); datasourceUser.setUpdateTime(now); datasourceUserMapper.insert(datasourceUser); } putMsg(result, Status.SUCCESS); return result; } /** * query user info * * @param loginUser login user * @return user info */ @Override public Map<String, Object> getUserInfo(User loginUser) { Map<String, Object> result = new HashMap<>(); User user = null; if (loginUser.getUserType() == UserType.ADMIN_USER) { user = loginUser; } else { user = userMapper.queryDetailsById(loginUser.getId()); List<AlertGroup> alertGroups = alertGroupMapper.queryByUserId(loginUser.getId()); StringBuilder sb = new StringBuilder(); if (alertGroups != null && !alertGroups.isEmpty()) { for (int i = 0; i < alertGroups.size() - 1; i++) { sb.append(alertGroups.get(i).getGroupName() + ","); } sb.append(alertGroups.get(alertGroups.size() - 1)); user.setAlertGroup(sb.toString()); } } result.put(Constants.DATA_LIST, user); putMsg(result, Status.SUCCESS); return result; } /** * query user list * * @param loginUser login user * @return user list */ @Override public Map<String, Object> queryAllGeneralUsers(User loginUser) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.queryAllGeneralUser(); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * query user list * * @param loginUser login user * @return user list */ @Override public Map<String, Object> queryUserList(User loginUser) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.selectList(null); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * verify user name exists * * @param userName user name * @return true if user name not exists, otherwise return false */ @Override public Result<Object> verifyUserName(String userName) { Result<Object> result = new Result<>(); User user = userMapper.queryByUserNameAccurately(userName); if (user != null) { putMsg(result, Status.USER_NAME_EXIST); } else { putMsg(result, Status.SUCCESS); } return result; } /** * unauthorized user * * @param loginUser login user * @param alertgroupId alert group id * @return unauthorize result code */ @Override public Map<String, Object> unauthorizedUser(User loginUser, Integer alertgroupId) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.selectList(null); List<User> resultUsers = new ArrayList<>(); Set<User> userSet = null; if (userList != null && !userList.isEmpty()) { userSet = new HashSet<>(userList); List<User> authedUserList = userMapper.queryUserListByAlertGroupId(alertgroupId); Set<User> authedUserSet = null; if (authedUserList != null && !authedUserList.isEmpty()) { 
authedUserSet = new HashSet<>(authedUserList); userSet.removeAll(authedUserSet); } resultUsers = new ArrayList<>(userSet); } result.put(Constants.DATA_LIST, resultUsers); putMsg(result, Status.SUCCESS); return result; } /** * authorized user * * @param loginUser login user * @param alertgroupId alert group id * @return authorized result code */ @Override public Map<String, Object> authorizedUser(User loginUser, Integer alertgroupId) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (check(result, !isAdmin(loginUser), Status.USER_NO_OPERATION_PERM)) { return result; } List<User> userList = userMapper.queryUserListByAlertGroupId(alertgroupId); result.put(Constants.DATA_LIST, userList); putMsg(result, Status.SUCCESS); return result; } /** * @param tenantId tenant id * @return true if tenant exists, otherwise return false */ private boolean checkTenantExists(int tenantId) { return tenantMapper.queryById(tenantId) != null; } /** * @return if check failed return the field, otherwise return null */ private String checkUserParams(String userName, String password, String email, String phone) { String msg = null; if (!CheckUtils.checkUserName(userName)) { msg = userName; } else if (!CheckUtils.checkPassword(password)) { msg = password; } else if (!CheckUtils.checkEmail(email)) { msg = email; } else if (!CheckUtils.checkPhone(phone)) { msg = phone; } return msg; } /** * copy resource files * * @param resourceComponent resource component * @param srcBasePath src base path * @param dstBasePath dst base path * @throws IOException io exception */ private void copyResourceFiles(ResourceComponent resourceComponent, String srcBasePath, String dstBasePath) throws IOException { List<ResourceComponent> components = resourceComponent.getChildren(); if (CollectionUtils.isNotEmpty(components)) { for (ResourceComponent component : components) { // verify whether exist if (!HadoopUtils.getInstance().exists(String.format("%s/%s", srcBasePath, component.getFullName()))) { logger.error("resource file: {} not exist,copy error", component.getFullName()); throw new ServiceException(Status.RESOURCE_NOT_EXIST); } if (!component.isDirctory()) { // copy it to dst HadoopUtils.getInstance().copy(String.format("%s/%s", srcBasePath, component.getFullName()), String.format("%s/%s", dstBasePath, component.getFullName()), false, true); continue; } if (CollectionUtils.isEmpty(component.getChildren())) { // if not exist,need create it if (!HadoopUtils.getInstance().exists(String.format("%s/%s", dstBasePath, component.getFullName()))) { HadoopUtils.getInstance().mkdir(String.format("%s/%s", dstBasePath, component.getFullName())); } } else { copyResourceFiles(component, srcBasePath, dstBasePath); } } } } /** * registry user, default state is 0, default tenant_id is 1, no phone, no queue * * @param userName user name * @param userPassword user password * @param repeatPassword repeat password * @param email email * @return registry result code * @throws Exception exception */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> registerUser(String userName, String userPassword, String repeatPassword, String email) { Map<String, Object> result = new HashMap<>(); //check user params String msg = this.checkUserParams(userName, userPassword, email, ""); if (!StringUtils.isEmpty(msg)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, msg); return result; } if (!userPassword.equals(repeatPassword)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "two passwords are not 
same"); return result; } User user = createUser(userName, userPassword, email, 1, "", "", Flag.NO.ordinal()); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, user); return result; } /** * activate user, only system admin have permission, change user state code 0 to 1 * * @param loginUser login user * @param userName user name * @return create result code */ @Override public Map<String, Object> activateUser(User loginUser, String userName) { Map<String, Object> result = new HashMap<>(); result.put(Constants.STATUS, false); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (!CheckUtils.checkUserName(userName)) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } User user = userMapper.queryByUserNameAccurately(userName); if (user == null) { putMsg(result, Status.USER_NOT_EXIST, userName); return result; } if (user.getState() != Flag.NO.ordinal()) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, userName); return result; } user.setState(Flag.YES.ordinal()); Date now = new Date(); user.setUpdateTime(now); userMapper.updateById(user); User responseUser = userMapper.queryByUserNameAccurately(userName); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, responseUser); return result; } /** * activate user, only system admin have permission, change users state code 0 to 1 * * @param loginUser login user * @param userNames user name * @return create result code */ @Override public Map<String, Object> batchActivateUser(User loginUser, List<String> userNames) { Map<String, Object> result = new HashMap<>(); if (!isAdmin(loginUser)) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } int totalSuccess = 0; List<String> successUserNames = new ArrayList<>(); Map<String, Object> successRes = new HashMap<>(); int totalFailed = 0; List<Map<String, String>> failedInfo = new ArrayList<>(); Map<String, Object> failedRes = new HashMap<>(); for (String userName : userNames) { Map<String, Object> tmpResult = activateUser(loginUser, userName); if (tmpResult.get(Constants.STATUS) != Status.SUCCESS) { totalFailed++; Map<String, String> failedBody = new HashMap<>(); failedBody.put("userName", userName); Status status = (Status) tmpResult.get(Constants.STATUS); String errorMessage = MessageFormat.format(status.getMsg(), userName); failedBody.put("msg", errorMessage); failedInfo.add(failedBody); } else { totalSuccess++; successUserNames.add(userName); } } successRes.put("sum", totalSuccess); successRes.put("userName", successUserNames); failedRes.put("sum", totalFailed); failedRes.put("info", failedInfo); Map<String, Object> res = new HashMap<>(); res.put("success", successRes); res.put("failed", failedRes); putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, res); return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,701
[Bug][UI][DAO]When deleting a user, the accessToken associated with the user should also be deleted
When deleting a user, the accessToken associated with the user should also be deleted. 1. create a normal user ![image](https://user-images.githubusercontent.com/52202080/123464117-0029c600-d61f-11eb-84ba-bd4717ca5ada.png) 2. add an accessToken associated with the user created in step 1. ![image (1)](https://user-images.githubusercontent.com/52202080/123464155-0c158800-d61f-11eb-8c4a-f41c58f7a441.png) 3. delete the user created in step 1, then have a look at the accessToken: ![image (2)](https://user-images.githubusercontent.com/52202080/123464249-2c454700-d61f-11eb-8790-2c21ef2fccbb.png) **Which version of Dolphin Scheduler:** 1.3.7-release
https://github.com/apache/dolphinscheduler/issues/5701
https://github.com/apache/dolphinscheduler/pull/5697
239cfe5027b558abe1f17b1e7dd38504f10ab3e0
8d7d3a816672c196cf827055ce0221efcac68b55
2021-06-26T13:17:23Z
java
2021-06-28T17:27:39Z
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/UsersServiceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.impl.UsersServiceImpl; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.EncryptionUtils; import org.apache.dolphinscheduler.dao.entity.AlertGroup; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UDFUserMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import java.util.ArrayList; import java.util.List; import java.util.Map; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * users service test */ @RunWith(MockitoJUnitRunner.class) public class UsersServiceTest { private static final Logger logger = LoggerFactory.getLogger(UsersServiceTest.class); @InjectMocks private UsersServiceImpl usersService; @Mock private UserMapper userMapper; @Mock private TenantMapper tenantMapper; @Mock private ResourceMapper resourceMapper; @Mock private AlertGroupMapper alertGroupMapper; @Mock private DataSourceUserMapper datasourceUserMapper; @Mock private ProjectUserMapper projectUserMapper; @Mock private ResourceUserMapper resourceUserMapper; @Mock private UDFUserMapper udfUserMapper; @Mock private ProjectMapper projectMapper; private 
String queueName = "UsersServiceTestQueue"; @Before public void before() { } @After public void after() { } @Test public void testCreateUserForLdap() { String userName = "user1"; String email = "[email protected]"; User user = usersService.createUser(UserType.ADMIN_USER, userName, email); Assert.assertNotNull(user); } @Test public void testCreateUser() { User user = new User(); user.setUserType(UserType.ADMIN_USER); String userName = "userTest0001~"; String userPassword = "userTest"; String email = "[email protected]"; int tenantId = Integer.MAX_VALUE; String phone = "13456432345"; int state = 1; try { //userName error Map<String, Object> result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); userName = "userTest0001"; userPassword = "userTest000111111111111111"; //password error result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); userPassword = "userTest0001"; email = "1q.com"; //email error result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); email = "[email protected]"; phone = "2233"; //phone error result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); phone = "13456432345"; //tenantId not exists result = usersService.createUser(user, userName, userPassword, email, tenantId, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.TENANT_NOT_EXIST, result.get(Constants.STATUS)); //success Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant()); result = usersService.createUser(user, userName, userPassword, email, 1, phone, queueName, state); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { logger.error(Status.CREATE_USER_ERROR.getMsg(), e); Assert.assertTrue(false); } } @Test public void testQueryUser() { String userName = "userTest0001"; String userPassword = "userTest0001"; when(userMapper.queryUserByNamePassword(userName, EncryptionUtils.getMd5(userPassword))).thenReturn(getGeneralUser()); User queryUser = usersService.queryUser(userName, userPassword); logger.info(queryUser.toString()); Assert.assertTrue(queryUser != null); } @Test public void testSelectByIds() { List<Integer> ids = new ArrayList<>(); List<User> users = usersService.queryUser(ids); Assert.assertTrue(users.isEmpty()); ids.add(1); List<User> userList = new ArrayList<>(); userList.add(new User()); when(userMapper.selectByIds(ids)).thenReturn(userList); List<User> userList1 = usersService.queryUser(ids); Assert.assertFalse(userList1.isEmpty()); } @Test public void testGetUserIdByName() { User user = new User(); user.setId(1); user.setUserType(UserType.ADMIN_USER); user.setUserName("test_user"); //user name null int userId = usersService.getUserIdByName(""); Assert.assertEquals(0, userId); //user not exist when(usersService.queryUser(user.getUserName())).thenReturn(null); int userNotExistId = 
usersService.getUserIdByName(user.getUserName()); Assert.assertEquals(-1, userNotExistId); //user exist when(usersService.queryUser(user.getUserName())).thenReturn(user); int userExistId = usersService.getUserIdByName(user.getUserName()); Assert.assertEquals(user.getId(), userExistId); } @Test public void testQueryUserList() { User user = new User(); //no operate Map<String, Object> result = usersService.queryUserList(user); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success user.setUserType(UserType.ADMIN_USER); when(userMapper.selectList(null)).thenReturn(getUserList()); result = usersService.queryUserList(user); List<User> userList = (List<User>) result.get(Constants.DATA_LIST); Assert.assertTrue(userList.size() > 0); } @Test public void testQueryUserListPage() { User user = new User(); IPage<User> page = new Page<>(1, 10); page.setRecords(getUserList()); when(userMapper.queryUserPaging(any(Page.class), eq("userTest"))).thenReturn(page); //no operate Map<String, Object> result = usersService.queryUserList(user, "userTest", 1, 10); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success user.setUserType(UserType.ADMIN_USER); result = usersService.queryUserList(user, "userTest", 1, 10); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); PageInfo<User> pageInfo = (PageInfo<User>) result.get(Constants.DATA_LIST); Assert.assertTrue(pageInfo.getLists().size() > 0); } @Test public void testUpdateUser() { String userName = "userTest0001"; String userPassword = "userTest0001"; try { //user not exist Map<String, Object> result = usersService.updateUser(getLoginUser(), 0,userName,userPassword,"[email protected]",1,"13457864543","queue", 1); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); logger.info(result.toString()); //success when(userMapper.selectById(1)).thenReturn(getUser()); result = usersService.updateUser(getLoginUser(), 1,userName,userPassword,"[email protected]",1,"13457864543","queue", 1); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { logger.error("update user error", e); Assert.assertTrue(false); } } @Test public void testDeleteUserById() { User loginUser = new User(); try { when(userMapper.queryTenantCodeByUserId(1)).thenReturn(getUser()); when(userMapper.selectById(1)).thenReturn(getUser()); //no operate Map<String, Object> result = usersService.deleteUserById(loginUser, 3); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); // user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.deleteUserById(loginUser, 3); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); // user is project owner Mockito.when(projectMapper.queryProjectCreatedByUser(1)).thenReturn(Lists.newArrayList(new Project())); result = usersService.deleteUserById(loginUser, 1); Assert.assertEquals(Status.TRANSFORM_PROJECT_OWNERSHIP, result.get(Constants.STATUS)); //success Mockito.when(projectMapper.queryProjectCreatedByUser(1)).thenReturn(null); result = usersService.deleteUserById(loginUser, 1); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { logger.error("delete user error", e); Assert.assertTrue(false); } } @Test public void testGrantProject() { 
when(userMapper.selectById(1)).thenReturn(getUser()); User loginUser = new User(); String projectIds = "100000,120000"; Map<String, Object> result = usersService.grantProject(loginUser, 1, projectIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.grantProject(loginUser, 2, projectIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //success result = usersService.grantProject(loginUser, 1, projectIds); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } @Test public void testGrantResources() { String resourceIds = "100000,120000"; when(userMapper.selectById(1)).thenReturn(getUser()); User loginUser = new User(); Map<String, Object> result = usersService.grantResources(loginUser, 1, resourceIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.grantResources(loginUser, 2, resourceIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //success when(resourceMapper.selectById(Mockito.anyInt())).thenReturn(getResource()); when(resourceUserMapper.deleteResourceUser(1, 0)).thenReturn(1); result = usersService.grantResources(loginUser, 1, resourceIds); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } @Test public void testGrantUDFFunction() { String udfIds = "100000,120000"; when(userMapper.selectById(1)).thenReturn(getUser()); User loginUser = new User(); Map<String, Object> result = usersService.grantUDFFunction(loginUser, 1, udfIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.grantUDFFunction(loginUser, 2, udfIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //success when(udfUserMapper.deleteByUserId(1)).thenReturn(1); result = usersService.grantUDFFunction(loginUser, 1, udfIds); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } @Test public void testGrantDataSource() { String datasourceIds = "100000,120000"; when(userMapper.selectById(1)).thenReturn(getUser()); User loginUser = new User(); Map<String, Object> result = usersService.grantDataSource(loginUser, 1, datasourceIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //user not exist loginUser.setUserType(UserType.ADMIN_USER); result = usersService.grantDataSource(loginUser, 2, datasourceIds); logger.info(result.toString()); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //success when(datasourceUserMapper.deleteByUserId(Mockito.anyInt())).thenReturn(1); result = usersService.grantDataSource(loginUser, 1, datasourceIds); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } private User getLoginUser(){ User loginUser = new User(); loginUser.setId(1); loginUser.setUserType(UserType.ADMIN_USER); return loginUser; } @Test public void getUserInfo() { User loginUser = new User(); loginUser.setUserName("admin"); 
loginUser.setUserType(UserType.ADMIN_USER); // get admin user Map<String, Object> result = usersService.getUserInfo(loginUser); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); User tempUser = (User) result.get(Constants.DATA_LIST); //check userName Assert.assertEquals("admin", tempUser.getUserName()); //get general user loginUser.setUserType(null); loginUser.setId(1); when(userMapper.queryDetailsById(1)).thenReturn(getGeneralUser()); when(alertGroupMapper.queryByUserId(1)).thenReturn(getAlertGroups()); result = usersService.getUserInfo(loginUser); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); tempUser = (User) result.get(Constants.DATA_LIST); //check userName Assert.assertEquals("userTest0001", tempUser.getUserName()); } @Test public void testQueryAllGeneralUsers() { User loginUser = new User(); //no operate Map<String, Object> result = usersService.queryAllGeneralUsers(loginUser); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success loginUser.setUserType(UserType.ADMIN_USER); when(userMapper.queryAllGeneralUser()).thenReturn(getUserList()); result = usersService.queryAllGeneralUsers(loginUser); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); List<User> userList = (List<User>) result.get(Constants.DATA_LIST); Assert.assertTrue(CollectionUtils.isNotEmpty(userList)); } @Test public void testVerifyUserName() { //not exist user Result result = usersService.verifyUserName("admin89899"); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg()); //exist user when(userMapper.queryByUserNameAccurately("userTest0001")).thenReturn(getUser()); result = usersService.verifyUserName("userTest0001"); logger.info(result.toString()); Assert.assertEquals(Status.USER_NAME_EXIST.getMsg(), result.getMsg()); } @Test public void testUnauthorizedUser() { User loginUser = new User(); when(userMapper.selectList(null)).thenReturn(getUserList()); when(userMapper.queryUserListByAlertGroupId(2)).thenReturn(getUserList()); //no operate Map<String, Object> result = usersService.unauthorizedUser(loginUser, 2); logger.info(result.toString()); loginUser.setUserType(UserType.ADMIN_USER); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success result = usersService.unauthorizedUser(loginUser, 2); logger.info(result.toString()); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } @Test public void testAuthorizedUser() { User loginUser = new User(); when(userMapper.queryUserListByAlertGroupId(2)).thenReturn(getUserList()); //no operate Map<String, Object> result = usersService.authorizedUser(loginUser, 2); logger.info(result.toString()); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //success loginUser.setUserType(UserType.ADMIN_USER); result = usersService.authorizedUser(loginUser, 2); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); List<User> userList = (List<User>) result.get(Constants.DATA_LIST); logger.info(result.toString()); Assert.assertTrue(CollectionUtils.isNotEmpty(userList)); } @Test public void testRegisterUser() { String userName = "userTest0002~"; String userPassword = "userTest"; String repeatPassword = "userTest"; String email = "[email protected]"; try { //userName error Map<String, Object> result = usersService.registerUser(userName, 
userPassword, repeatPassword, email); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); userName = "userTest0002"; userPassword = "userTest000111111111111111"; //password error result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); userPassword = "userTest0002"; email = "1q.com"; //email error result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); //repeatPassword error email = "[email protected]"; repeatPassword = "userPassword"; result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); //success repeatPassword = "userTest0002"; result = usersService.registerUser(userName, userPassword, repeatPassword, email); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { Assert.assertTrue(false); } } @Test public void testActivateUser() { User user = new User(); user.setUserType(UserType.GENERAL_USER); String userName = "userTest0002~"; try { //not admin Map<String, Object> result = usersService.activateUser(user, userName); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //userName error user.setUserType(UserType.ADMIN_USER); result = usersService.activateUser(user, userName); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); //user not exist userName = "userTest10013"; result = usersService.activateUser(user, userName); Assert.assertEquals(Status.USER_NOT_EXIST, result.get(Constants.STATUS)); //user state error userName = "userTest0001"; when(userMapper.queryByUserNameAccurately(userName)).thenReturn(getUser()); result = usersService.activateUser(user, userName); Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, result.get(Constants.STATUS)); //success when(userMapper.queryByUserNameAccurately(userName)).thenReturn(getDisabledUser()); result = usersService.activateUser(user, userName); Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { Assert.assertTrue(false); } } @Test public void testBatchActivateUser() { User user = new User(); user.setUserType(UserType.GENERAL_USER); List<String> userNames = new ArrayList<>(); userNames.add("userTest0001"); userNames.add("userTest0002"); userNames.add("userTest0003~"); userNames.add("userTest0004"); try { //not admin Map<String, Object> result = usersService.batchActivateUser(user, userNames); Assert.assertEquals(Status.USER_NO_OPERATION_PERM, result.get(Constants.STATUS)); //batch activate user names user.setUserType(UserType.ADMIN_USER); when(userMapper.queryByUserNameAccurately("userTest0001")).thenReturn(getUser()); when(userMapper.queryByUserNameAccurately("userTest0002")).thenReturn(getDisabledUser()); result = usersService.batchActivateUser(user, userNames); Map<String, Object> responseData = (Map<String, Object>) result.get(Constants.DATA_LIST); Map<String, Object> successData = (Map<String, Object>) responseData.get("success"); int totalSuccess = (Integer) successData.get("sum"); Map<String, Object> failedData = (Map<String, Object>) responseData.get("failed"); int totalFailed = (Integer) failedData.get("sum"); Assert.assertEquals(1, totalSuccess); Assert.assertEquals(3, totalFailed); 
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS)); } catch (Exception e) { Assert.assertTrue(false); } } /** * get disabled user */ private User getDisabledUser() { User user = new User(); user.setUserType(UserType.GENERAL_USER); user.setUserName("userTest0001"); user.setUserPassword("userTest0001"); user.setState(0); return user; } /** * get user */ private User getGeneralUser() { User user = new User(); user.setUserType(UserType.GENERAL_USER); user.setUserName("userTest0001"); user.setUserPassword("userTest0001"); return user; } private List<User> getUserList() { List<User> userList = new ArrayList<>(); userList.add(getGeneralUser()); return userList; } /** * get user */ private User getUser() { User user = new User(); user.setUserType(UserType.ADMIN_USER); user.setUserName("userTest0001"); user.setUserPassword("userTest0001"); user.setState(1); return user; } /** * get tenant * * @return tenant */ private Tenant getTenant() { Tenant tenant = new Tenant(); tenant.setId(1); return tenant; } /** * get resource * * @return resource */ private Resource getResource() { Resource resource = new Resource(); resource.setPid(-1); resource.setUserId(1); resource.setDescription("ResourcesServiceTest.jar"); resource.setAlias("ResourcesServiceTest.jar"); resource.setFullName("/ResourcesServiceTest.jar"); resource.setType(ResourceType.FILE); return resource; } private List<AlertGroup> getAlertGroups() { List<AlertGroup> alertGroups = new ArrayList<>(); AlertGroup alertGroup = new AlertGroup(); alertGroups.add(alertGroup); return alertGroups; } }
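The test class above exercises deleteUserById, which is where the fix for this issue lands: when the user row is removed, the user's access tokens have to be removed as well, otherwise they survive as orphans (the behavior shown in the screenshots). A minimal sketch of the service-side change, assuming an injected token mapper and a hypothetical deleteAccessTokenByUserId method; the exact name shipped in PR #5697 may differ:

    import org.apache.ibatis.annotations.Param;

    // Hypothetical mapper addition; the merged fix may use a different name.
    interface AccessTokenCleanup {
        int deleteAccessTokenByUserId(@Param("userId") int userId);
    }

    // Minimal stand-in for the real UserMapper.
    interface UserDao {
        int deleteById(int id);
    }

    class DeleteUserSketch {
        private final AccessTokenCleanup accessTokenMapper;
        private final UserDao userMapper;

        DeleteUserSketch(AccessTokenCleanup accessTokenMapper, UserDao userMapper) {
            this.accessTokenMapper = accessTokenMapper;
            this.userMapper = userMapper;
        }

        /** Sketch of the fixed tail of deleteUserById: tokens first, then the user row. */
        void deleteUserAndTokens(int userId) {
            // remove every access token owned by this user, so no orphaned
            // tokens survive the user deletion (issue #5701)
            accessTokenMapper.deleteAccessTokenByUserId(userId);
            // then remove the user row itself, exactly as before
            userMapper.deleteById(userId);
        }
    }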
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,701
[Bug][UI][DAO]When deleting a user, the accessToken associated with the user should also be deleted
When deleting a user, the accessToken associated with the user should also be deleted. 1. create a normal user ![image](https://user-images.githubusercontent.com/52202080/123464117-0029c600-d61f-11eb-84ba-bd4717ca5ada.png) 2. add an accessToken associated with the user created in step 1. ![image (1)](https://user-images.githubusercontent.com/52202080/123464155-0c158800-d61f-11eb-8c4a-f41c58f7a441.png) 3. delete the user created in step 1, then have a look at the accessToken: ![image (2)](https://user-images.githubusercontent.com/52202080/123464249-2c454700-d61f-11eb-8790-2c21ef2fccbb.png) **Which version of Dolphin Scheduler:** 1.3.7-release
https://github.com/apache/dolphinscheduler/issues/5701
https://github.com/apache/dolphinscheduler/pull/5697
239cfe5027b558abe1f17b1e7dd38504f10ab3e0
8d7d3a816672c196cf827055ce0221efcac68b55
2021-06-26T13:17:23Z
java
2021-06-28T17:27:39Z
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/AccessTokenMapper.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.mapper; import org.apache.dolphinscheduler.dao.entity.AccessToken; import com.baomidou.mybatisplus.core.mapper.BaseMapper; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import org.apache.ibatis.annotations.Param; /** * accesstoken mapper interface */ public interface AccessTokenMapper extends BaseMapper<AccessToken> { /** * access token page * @param page page * @param userName userName * @param userId userId * @return access token Ipage */ IPage<AccessToken> selectAccessTokenPage(Page page, @Param("userName") String userName, @Param("userId") int userId ); }
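The mapper interface above only exposes the paged query, so the fix needs a new delete operation keyed by user id. A sketch of that addition, keeping the interface's existing @Param style; the method name is an assumption, not taken from the merged PR:

    import org.apache.ibatis.annotations.Param;

    public interface AccessTokenMapperSketch {
        /**
         * delete all access tokens owned by one user
         * (hypothetical addition for issue #5701; the shipped name may differ)
         *
         * @param userId user id
         * @return number of deleted rows
         */
        int deleteAccessTokenByUserId(@Param("userId") int userId);
    }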
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,701
[Bug][UI][DAO]When deleting a user, the accessToken associated with the user should also be deleted
When deleting a user, the accessToken associated with the user should also be deleted. 1. create a normal user ![image](https://user-images.githubusercontent.com/52202080/123464117-0029c600-d61f-11eb-84ba-bd4717ca5ada.png) 2. add an accessToken associated with the user created in step 1. ![image (1)](https://user-images.githubusercontent.com/52202080/123464155-0c158800-d61f-11eb-8c4a-f41c58f7a441.png) 3. delete the user created in step 1, then have a look at the accessToken: ![image (2)](https://user-images.githubusercontent.com/52202080/123464249-2c454700-d61f-11eb-8790-2c21ef2fccbb.png) **Which version of Dolphin Scheduler:** 1.3.7-release
https://github.com/apache/dolphinscheduler/issues/5701
https://github.com/apache/dolphinscheduler/pull/5697
239cfe5027b558abe1f17b1e7dd38504f10ab3e0
8d7d3a816672c196cf827055ce0221efcac68b55
2021-06-26T13:17:23Z
java
2021-06-28T17:27:39Z
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/AccessTokenMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper"> <select id="selectAccessTokenPage" resultType="org.apache.dolphinscheduler.dao.entity.AccessToken"> select t.id, t.user_id, t.token, t.expire_time, t.create_time, t.update_time, u.user_name from t_ds_access_token t left join t_ds_user u on t.user_id = u.id where 1 = 1 <if test="userName != null and userName != ''"> and u.user_name like concat ('%', #{userName}, '%') </if> <if test="userId != 0"> and t.user_id = #{userId} </if> order by t.update_time desc </select> </mapper>
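The selectAccessTokenPage statement above left-joins t_ds_user to resolve the owner's name and treats userId = 0 as "no user filter". A small usage sketch of how a caller typically drives it through MyBatis-Plus pagination; the helper class and method here are illustrative, not part of the codebase:

    import com.baomidou.mybatisplus.core.metadata.IPage;
    import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
    import org.apache.dolphinscheduler.dao.entity.AccessToken;
    import org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper;

    class AccessTokenPageSketch {
        /** Fetches one page of tokens; userId == 0 disables the user_id filter, per the <if> guard in the XML. */
        static IPage<AccessToken> firstPage(AccessTokenMapper mapper, String userName) {
            // page 1, 10 records per page
            Page<AccessToken> page = new Page<>(1, 10);
            // userName is matched with a like '%...%' filter in the statement
            return mapper.selectAccessTokenPage(page, userName, 0);
        }
    }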
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,701
[Bug][UI][DAO]When deleting a user, the accessToken associated with the user should also be deleted
When deleting a user, the accessToken associated with the user should also be deleted. 1. create a normal user ![image](https://user-images.githubusercontent.com/52202080/123464117-0029c600-d61f-11eb-84ba-bd4717ca5ada.png) 2. add an accessToken associated with the user created in step 1. ![image (1)](https://user-images.githubusercontent.com/52202080/123464155-0c158800-d61f-11eb-8c4a-f41c58f7a441.png) 3. delete the user created in step 1, then have a look at the accessToken: ![image (2)](https://user-images.githubusercontent.com/52202080/123464249-2c454700-d61f-11eb-8790-2c21ef2fccbb.png) **Which version of Dolphin Scheduler:** 1.3.7-release
https://github.com/apache/dolphinscheduler/issues/5701
https://github.com/apache/dolphinscheduler/pull/5697
239cfe5027b558abe1f17b1e7dd38504f10ab3e0
8d7d3a816672c196cf827055ce0221efcac68b55
2021-06-26T13:17:23Z
java
2021-06-28T17:27:39Z
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/AccessTokenMapperTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.mapper; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.dao.entity.AccessToken; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import org.apache.dolphinscheduler.dao.entity.User; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.annotation.Rollback; import org.springframework.test.context.junit4.SpringRunner; import org.springframework.transaction.annotation.Transactional; import javax.annotation.Resource; import java.text.SimpleDateFormat; import java.util.*; import java.util.concurrent.ThreadLocalRandom; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.greaterThan; import static org.junit.Assert.*; /** * AccessToken mapper test */ @RunWith(SpringRunner.class) @SpringBootTest @Transactional @Rollback public class AccessTokenMapperTest { @Autowired AccessTokenMapper accessTokenMapper; @Autowired UserMapper userMapper; /** * test insert * * @throws Exception */ @Test public void testInsert() throws Exception { Integer userId = 1; AccessToken accessToken = createAccessToken(userId); assertThat(accessToken.getId(), greaterThan(0)); } /** * test select by id * * @throws Exception */ @Test public void testSelectById() throws Exception { Integer userId = 1; AccessToken accessToken = createAccessToken(userId); AccessToken resultAccessToken = accessTokenMapper.selectById(accessToken.getId()); assertEquals(accessToken, resultAccessToken); } /** * test hashCode method * * @throws Exception */ @Test public void testHashCodeMethod() throws Exception { Integer userId = 1; AccessToken accessToken = createAccessToken(userId); AccessToken resultAccessToken = accessTokenMapper.selectById(accessToken.getId()); boolean flag = accessToken.equals(resultAccessToken); assertTrue(flag); } /** * test equals method * * @throws Exception */ @Test public void testEqualsMethod() throws Exception { Integer userId = 1; AccessToken accessToken = createAccessToken(userId); int result = accessToken.hashCode(); assertNotNull(result); } /** * test page */ @Test public void testSelectAccessTokenPage() throws Exception { Integer count = 4; String userName = "zhangsan"; Integer offset = 2; Integer size = 2; Map<Integer, AccessToken> accessTokenMap = createAccessTokens(count, userName); Page page = new Page(offset, size); IPage<AccessToken> accessTokenPage = accessTokenMapper.selectAccessTokenPage(page, userName, 0); 
assertEquals(Integer.valueOf(accessTokenPage.getRecords().size()), size); for (AccessToken accessToken : accessTokenPage.getRecords()) { AccessToken resultAccessToken = accessTokenMap.get(accessToken.getId()); assertEquals(accessToken, resultAccessToken); } } /** * test update */ @Test public void testUpdate() throws Exception { Integer userId = 1; AccessToken accessToken = createAccessToken(userId); //update accessToken.setToken("56789"); accessToken.setExpireTime(DateUtils.getCurrentDate()); accessToken.setUpdateTime(DateUtils.getCurrentDate()); int status = accessTokenMapper.updateById(accessToken); if (status != 1) { Assert.fail("update access token fail"); } AccessToken resultAccessToken = accessTokenMapper.selectById(accessToken.getId()); assertEquals(accessToken, resultAccessToken); } /** * test delete */ @Test public void testDelete() throws Exception { Integer userId = 1; AccessToken accessToken = createAccessToken(userId); int status = accessTokenMapper.deleteById(accessToken.getId()); if (status != 1) { Assert.fail("delete access token data fail"); } AccessToken resultAccessToken = accessTokenMapper.selectById(accessToken.getId()); assertNull(resultAccessToken); } /** * create accessTokens * * @param count create accessToken count * @param userName username * @return accessToken map * @throws Exception */ private Map<Integer, AccessToken> createAccessTokens( Integer count, String userName) throws Exception { User user = createUser(userName); Map<Integer, AccessToken> accessTokenMap = new HashMap<>(); for (int i = 1; i <= count; i++) { AccessToken accessToken = createAccessToken(user.getId(), userName); accessTokenMap.put(accessToken.getId(), accessToken); } return accessTokenMap; } /** * create user * * @param userName userName * @return user * @throws Exception */ private User createUser(String userName) throws Exception { User user = new User(); user.setUserName(userName); user.setUserPassword("123"); user.setUserType(UserType.GENERAL_USER); user.setEmail("[email protected]"); user.setPhone("13102557272"); user.setTenantId(1); user.setCreateTime(DateUtils.getCurrentDate()); user.setUpdateTime(DateUtils.getCurrentDate()); user.setQueue("default"); int status = userMapper.insert(user); if (status != 1) { Assert.fail("insert user data error"); } return user; } /** * create access token * * @param userId userId * @param userName userName * @return accessToken * @throws Exception */ private AccessToken createAccessToken(Integer userId, String userName) throws Exception { //insertOne AccessToken accessToken = new AccessToken(); accessToken.setUserName(userName); accessToken.setUserId(userId); accessToken.setToken(String.valueOf(ThreadLocalRandom.current().nextLong())); accessToken.setCreateTime(DateUtils.getCurrentDate()); accessToken.setUpdateTime(DateUtils.getCurrentDate()); accessToken.setExpireTime(DateUtils.getCurrentDate()); int status = accessTokenMapper.insert(accessToken); if (status != 1) { Assert.fail("insert data error"); } return accessToken; } /** * create access token * * @param userId userId * @return accessToken * @throws Exception */ private AccessToken createAccessToken(Integer userId) throws Exception { return createAccessToken(userId, null); } }
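Once a delete-by-user operation exists on the mapper, a companion test in the style of testDelete above would pin the fix down: create a token for a user, delete by user id, and assert the token can no longer be selected. A sketch, reusing this class's helpers and the hypothetical deleteAccessTokenByUserId method assumed earlier:

    // Sketch of a companion test for AccessTokenMapperTest above.
    // deleteAccessTokenByUserId is the hypothetical mapper method assumed earlier.
    @Test
    public void testDeleteAccessTokenByUserId() throws Exception {
        Integer userId = 1;
        AccessToken accessToken = createAccessToken(userId);

        // delete every token belonging to the user, as the user-deletion fix requires
        int deleted = accessTokenMapper.deleteAccessTokenByUserId(userId);
        assertThat(deleted, greaterThan(0));

        // the token created above must no longer be resolvable
        AccessToken resultAccessToken = accessTokenMapper.selectById(accessToken.getId());
        assertNull(resultAccessToken);
    }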
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,622
[Improvement][project management] Modify the title
*For better global communication, please give priority to using English description, thx!* *Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describing an issue.* **Describe the question** In the "项目管理" (Project Management) module, the "项目首页" (Project Home) menu title bears little relation to the content of the page. **What are the current deficiencies and the benefits of improvement** - Change the "项目首页" (Project Home) text to "工作流监控" (Workflow Monitoring). **Which version of DolphinScheduler:** -[1.3.7] **Describe alternatives you've considered** A clear and concise description of any alternative improvement solutions you've considered.
https://github.com/apache/dolphinscheduler/issues/5622
https://github.com/apache/dolphinscheduler/pull/5723
ee0a4391e7a21ef7773eba015fa783ff616ac4a6
2ba569acd028c00c22f4853de7c58251ac72816c
2021-06-10T13:47:51Z
java
2021-06-30T05:23:06Z
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': 'User Name', 'Please enter user name': 'Please enter user name', Password: 'Password', 'Please enter your password': 'Please enter your password', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22', Login: 'Login', Home: 'Home', 'Failed to create node to save': 'Failed to create node to save', 'Global parameters': 'Global parameters', 'Local parameters': 'Local parameters', 'Copy success': 'Copy success', 'The browser does not support automatic copying': 'The browser does not support automatic copying', 'Whether to save the DAG graph': 'Whether to save the DAG graph', 'Current node settings': 'Current node settings', 'View history': 'View history', 'View log': 'View log', 'Force success': 'Force success', 'Enter this child node': 'Enter this child node', 'Node name': 'Node name', 'Please enter name (required)': 'Please enter name (required)', 'Run flag': 'Run flag', Normal: 'Normal', 'Prohibition execution': 'Prohibition execution', 'Please enter description': 'Please enter description', 'Number of failed retries': 'Number of failed retries', Times: 'Times', 'Failed retry interval': 'Failed retry interval', Minute: 'Minute', 'Delay execution time': 'Delay execution time', 'Delay execution': 'Delay execution', 'Forced success': 'Forced success', Cancel: 'Cancel', 'Confirm add': 'Confirm add', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process', 'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process', 'Name already exists': 'Name already exists', 'Download Log': 'Download Log', 'Refresh Log': 'Refresh Log', 'Enter full screen': 'Enter full screen', 'Cancel full screen': 'Cancel full screen', Close: 'Close', 'Update log success': 'Update log success', 'No more logs': 'No more logs', 'No log': 'No log', 'Loading Log...': 'Loading Log...', 'Set the DAG diagram name': 'Set the DAG diagram name', 'Please enter description(optional)': 'Please enter description(optional)', 'Set global': 'Set global', 'Whether to go online the process definition': 'Whether to go online the process definition', 'Whether to update the process definition': 'Whether to update the process definition', Add: 'Add', 'DAG graph name cannot be empty': 'DAG graph name cannot be empty', 'Create Datasource': 'Create Datasource', 'Project Home': 'Project Home', 'Project Manage': 'Project', 'Create Project': 'Create Project', 'Cron Manage': 'Cron 
Manage', 'Copy Workflow': 'Copy Workflow', 'Tenant Manage': 'Tenant Manage', 'Create Tenant': 'Create Tenant', 'User Manage': 'User Manage', 'Create User': 'Create User', 'User Information': 'User Information', 'Edit Password': 'Edit Password', Success: 'Success', Failed: 'Failed', Delete: 'Delete', 'Please choose': 'Please choose', 'Please enter a positive integer': 'Please enter a positive integer', 'Program Type': 'Program Type', 'Main Class': 'Main Class', 'Main Jar Package': 'Main Jar Package', 'Please enter main jar package': 'Please enter main jar package', 'Please enter main class': 'Please enter main class', 'Main Arguments': 'Main Arguments', 'Please enter main arguments': 'Please enter main arguments', 'Option Parameters': 'Option Parameters', 'Please enter option parameters': 'Please enter option parameters', Resources: 'Resources', 'Custom Parameters': 'Custom Parameters', 'Custom template': 'Custom template', Datasource: 'Datasource', methods: 'methods', 'Please enter the procedure method': 'Please enter the procedure script \n\ncall procedure:{call <procedure-name>[(<arg1>,<arg2>, ...)]}\n\ncall function:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': 'example:{call <procedure-name>[(?,?, ...)]} or {?= call <procedure-name>[(?,?, ...)]}', Script: 'Script', 'Please enter script(required)': 'Please enter script(required)', 'Deploy Mode': 'Deploy Mode', 'Driver Cores': 'Driver Cores', 'Please enter Driver cores': 'Please enter Driver cores', 'Driver Memory': 'Driver Memory', 'Please enter Driver memory': 'Please enter Driver memory', 'Executor Number': 'Executor Number', 'Please enter Executor number': 'Please enter Executor number', 'The Executor number should be a positive integer': 'The Executor number should be a positive integer', 'Executor Memory': 'Executor Memory', 'Please enter Executor memory': 'Please enter Executor memory', 'Executor Cores': 'Executor Cores', 'Please enter Executor cores': 'Please enter Executor cores', 'Memory should be a positive integer': 'Memory should be a positive integer', 'Core number should be positive integer': 'Core number should be positive integer', 'Flink Version': 'Flink Version', 'JobManager Memory': 'JobManager Memory', 'Please enter JobManager memory': 'Please enter JobManager memory', 'TaskManager Memory': 'TaskManager Memory', 'Please enter TaskManager memory': 'Please enter TaskManager memory', 'Slot Number': 'Slot Number', 'Please enter Slot number': 'Please enter Slot number', Parallelism: 'Parallelism', 'Please enter Parallelism': 'Please enter Parallelism', 'TaskManager Number': 'TaskManager Number', 'Please enter TaskManager number': 'Please enter TaskManager number', 'App Name': 'App Name', 'Please enter app name(optional)': 'Please enter app name(optional)', 'SQL Type': 'SQL Type', 'Send Email': 'Send Email', 'Log display': 'Log display', 'Max Numbers Return': 'Number of records to return', 'Max Numbers Return placeholder': 'Default is 10000, a large value may cause high pressure on the memory', 'Max Numbers Return required': 'Number of records to return parameter must be a number in the range of 0 - 2147483647', 'rows of result': 'rows of result', Title: 'Title', 'Please enter the title of email': 'Please enter the title of email', Table: 'Table', TableMode: 'Table', Attachment: 'Attachment', 'SQL Parameter': 'SQL Parameter', 'SQL Statement': 'SQL Statement', 'UDF Function': 'UDF Function', 'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)', 
'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)', 'One form or attachment must be selected': 'One form or attachment must be selected', 'Mail subject required': 'Mail subject required', 'Child Node': 'Child Node', 'Please select a sub-Process': 'Please select a sub-Process', Edit: 'Edit', 'Switch To This Version': 'Switch To This Version', 'Datasource Name': 'Datasource Name', 'Please enter datasource name': 'Please enter datasource name', IP: 'IP', 'Please enter IP': 'Please enter IP', Port: 'Port', 'Please enter port': 'Please enter port', 'Database Name': 'Database Name', 'Please enter database name': 'Please enter database name', 'Oracle Connect Type': 'ServiceName or SID', 'Oracle Service Name': 'ServiceName', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc connect parameters', 'Test Connect': 'Test Connect', 'Please enter resource name': 'Please enter resource name', 'Please enter resource folder name': 'Please enter resource folder name', 'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement', 'Please enter IP/hostname': 'Please enter IP/hostname', 'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format', '#': '#', 'Datasource Type': 'Datasource Type', 'Datasource Parameter': 'Datasource Parameter', 'Create Time': 'Create Time', 'Update Time': 'Update Time', Operation: 'Operation', 'Current Version': 'Current Version', 'Click to view': 'Click to view', 'Delete?': 'Delete?', 'Switch Version Successfully': 'Switch Version Successfully', 'Confirm Switch To This Version?': 'Confirm Switch To This Version?', Confirm: 'Confirm', 'Task status statistics': 'Task Status Statistics', Number: 'Number', State: 'State', 'Process Status Statistics': 'Process Status Statistics', 'Process Definition Statistics': 'Process Definition Statistics', 'Project Name': 'Project Name', 'Please enter name': 'Please enter name', 'Owned Users': 'Owned Users', 'Process Pid': 'Process Pid', 'Zk registration directory': 'Zk registration directory', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': 'Last heartbeat time', 'Edit Tenant': 'Edit Tenant', 'OS Tenant Code': 'OS Tenant Code', 'Tenant Name': 'Tenant Name', Queue: 'Yarn Queue', 'Please select a queue': 'default is tenant association queue', 'Please enter the os tenant code in English': 'Please enter the os tenant code in English', 'Please enter os tenant code in English': 'Please enter os tenant code in English', 'Please enter os tenant code': 'Please enter os tenant code', 'Please enter tenant Name': 'Please enter tenant Name', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': 'The os tenant code. 
Only letters or a combination of letters and numbers are allowed', 'Edit User': 'Edit User', Tenant: 'Tenant', Email: 'Email', Phone: 'Phone', 'User Type': 'User Type', 'Please enter phone number': 'Please enter phone number', 'Please enter email': 'Please enter email', 'Please enter the correct email format': 'Please enter the correct email format', 'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format', Project: 'Project', Authorize: 'Authorize', 'File resources': 'File resources', 'UDF resources': 'UDF resources', 'UDF resources directory': 'UDF resources directory', 'Please select UDF resources directory': 'Please select UDF resources directory', 'Alarm group': 'Alarm group', 'Alarm group required': 'Alarm group required', 'Edit alarm group': 'Edit alarm group', 'Create alarm group': 'Create alarm group', 'Create Alarm Instance': 'Create Alarm Instance', 'Edit Alarm Instance': 'Edit Alarm Instance', 'Group Name': 'Group Name', 'Alarm instance name': 'Alarm instance name', 'Alarm plugin name': 'Alarm plugin name', 'Select plugin': 'Select plugin', 'Select Alarm plugin': 'Please select an Alarm plugin', 'Please enter group name': 'Please enter group name', 'Instance parameter exception': 'Instance parameter exception', 'Group Type': 'Group Type', 'Alarm plugin instance': 'Alarm plugin instance', 'Select Alarm plugin instance': 'Please select an Alarm plugin instance', Remarks: 'Remarks', SMS: 'SMS', 'Managing Users': 'Managing Users', Permission: 'Permission', Administrator: 'Administrator', 'Confirm Password': 'Confirm Password', 'Please enter confirm password': 'Please enter confirm password', 'Password cannot be in Chinese': 'Password cannot be in Chinese', 'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password', 'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese', 'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password', 'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password', 'Please select the datasource': 'Please select the datasource', 'Please select resources': 'Please select resources', Query: 'Query', 'Non Query': 'Non Query', 'prop(required)': 'prop(required)', 'value(optional)': 'value(optional)', 'value(required)': 'value(required)', 'prop is empty': 'prop is empty', 'value is empty': 'value is empty', 'prop is repeat': 'prop is repeat', 'Start Time': 'Start Time', 'End Time': 'End Time', crontab: 'crontab', 'Failure Strategy': 'Failure Strategy', online: 'online', offline: 'offline', 'Task Status': 'Task Status', 'Process Instance': 'Process Instance', 'Task Instance': 'Task Instance', 'Select date range': 'Select date range', startDate: 'startDate', endDate: 'endDate', Date: 'Date', Waiting: 'Waiting', Execution: 'Execution', Finish: 'Finish', 'Create File': 'Create File', 'Create folder': 'Create folder', 'File Name': 'File Name', 'Folder Name': 'Folder Name', 'File Format': 'File Format', 'Folder Format': 'Folder Format', 'File Content': 'File Content', 'Upload File Size': 'Upload File size cannot exceed 1g', Create: 'Create', 'Please enter the resource content': 'Please enter the resource content', 'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines', 'File Details': 'File Details', 'Download Details': 'Download Details', Return: 'Return', Save: 'Save', 'File Manage': 
'File Manage', 'Upload Files': 'Upload Files', 'Create UDF Function': 'Create UDF Function', 'Upload UDF Resources': 'Upload UDF Resources', 'Service-Master': 'Service-Master', 'Service-Worker': 'Service-Worker', 'Process Name': 'Process Name', Executor: 'Executor', 'Run Type': 'Run Type', 'Scheduling Time': 'Scheduling Time', 'Run Times': 'Run Times', host: 'host', 'fault-tolerant sign': 'fault-tolerant sign', Rerun: 'Rerun', 'Recovery Failed': 'Recovery Failed', Stop: 'Stop', Pause: 'Pause', 'Recovery Suspend': 'Recovery Suspend', Gantt: 'Gantt', 'Node Type': 'Node Type', 'Submit Time': 'Submit Time', Duration: 'Duration', 'Retry Count': 'Retry Count', 'Task Name': 'Task Name', 'Task Date': 'Task Date', 'Source Table': 'Source Table', 'Record Number': 'Record Number', 'Target Table': 'Target Table', 'Online viewing type is not supported': 'Online viewing type is not supported', Size: 'Size', Rename: 'Rename', Download: 'Download', Export: 'Export', 'Version Info': 'Version Info', Submit: 'Submit', 'Edit UDF Function': 'Edit UDF Function', type: 'type', 'UDF Function Name': 'UDF Function Name', FILE: 'FILE', UDF: 'UDF', 'File Subdirectory': 'File Subdirectory', 'Please enter a function name': 'Please enter a function name', 'Package Name': 'Package Name', 'Please enter a Package name': 'Please enter a Package name', Parameter: 'Parameter', 'Please enter a parameter': 'Please enter a parameter', 'UDF Resources': 'UDF Resources', 'Upload Resources': 'Upload Resources', Instructions: 'Instructions', 'Please enter a instructions': 'Please enter a instructions', 'Please enter a UDF function name': 'Please enter a UDF function name', 'Select UDF Resources': 'Select UDF Resources', 'Class Name': 'Class Name', 'Jar Package': 'Jar Package', 'Library Name': 'Library Name', 'UDF Resource Name': 'UDF Resource Name', 'File Size': 'File Size', Description: 'Description', 'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items', 'Select Line Connection': 'Select Line Connection', 'Delete selected lines or nodes': 'Delete selected lines or nodes', 'Full Screen': 'Full Screen', Unpublished: 'Unpublished', 'Start Process': 'Start Process', 'Execute from the current node': 'Execute from the current node', 'Recover tolerance fault process': 'Recover tolerance fault process', 'Resume the suspension process': 'Resume the suspension process', 'Execute from the failed nodes': 'Execute from the failed nodes', 'Complement Data': 'Complement Data', 'Scheduling execution': 'Scheduling execution', 'Recovery waiting thread': 'Recovery waiting thread', 'Submitted successfully': 'Submitted successfully', Executing: 'Executing', 'Ready to pause': 'Ready to pause', 'Ready to stop': 'Ready to stop', 'Need fault tolerance': 'Need fault tolerance', Kill: 'Kill', 'Waiting for thread': 'Waiting for thread', 'Waiting for dependence': 'Waiting for dependence', Start: 'Start', Copy: 'Copy', 'Copy name': 'Copy name', 'Copy path': 'Copy path', 'Please enter keyword': 'Please enter keyword', 'File Upload': 'File Upload', 'Drag the file into the current upload window': 'Drag the file into the current upload window', 'Drag area upload': 'Drag area upload', Upload: 'Upload', 'ReUpload File': 'ReUpload File', 'Please enter file name': 'Please enter file name', 'Please select the file to upload': 'Please select the file to upload', 'Resources manage': 'Resources', Security: 'Security', Logout: 'Logout', 'No data': 'No data', 'Uploading...': 'Uploading...', 'Loading...': 'Loading...', List: 'List', 'Unable to download without 
proper url': 'Unable to download without proper url', Process: 'Process', 'Process definition': 'Process definition', 'Task record': 'Task record', 'Warning group manage': 'Warning group manage', 'Warning instance manage': 'Warning instance manage', 'Servers manage': 'Servers manage', 'UDF manage': 'UDF manage', 'Resource manage': 'Resource manage', 'Function manage': 'Function manage', 'Edit password': 'Edit password', 'Ordinary users': 'Ordinary users', 'Create process': 'Create process', 'Import process': 'Import process', 'Timing state': 'Timing state', Timing: 'Timing', Timezone: 'Timezone', TreeView: 'TreeView', 'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat', 'Mailbox input is illegal': 'Mailbox input is illegal', 'Please set the parameters before starting': 'Please set the parameters before starting', Continue: 'Continue', End: 'End', 'Node execution': 'Node execution', 'Backward execution': 'Backward execution', 'Forward execution': 'Forward execution', 'Execute only the current node': 'Execute only the current node', 'Notification strategy': 'Notification strategy', 'Notification group': 'Notification group', 'Please select a notification group': 'Please select a notification group', receivers: 'receivers', receiverCcs: 'receiverCcs', 'Whether it is a complement process?': 'Whether it is a complement process?', 'Schedule date': 'Schedule date', 'Mode of execution': 'Mode of execution', 'Serial execution': 'Serial execution', 'Parallel execution': 'Parallel execution', 'Set parameters before timing': 'Set parameters before timing', 'Start and stop time': 'Start and stop time', 'Please select time': 'Please select time', 'Please enter crontab': 'Please enter crontab', none_1: 'none', success_1: 'success', failure_1: 'failure', All_1: 'All', Toolbar: 'Toolbar', 'View variables': 'View variables', 'Format DAG': 'Format DAG', 'Refresh DAG status': 'Refresh DAG status', Return_1: 'Return', 'Please enter format': 'Please enter format', 'connection parameter': 'connection parameter', 'Process definition details': 'Process definition details', 'Create process definition': 'Create process definition', 'Scheduled task list': 'Scheduled task list', 'Process instance details': 'Process instance details', 'Create Resource': 'Create Resource', 'User Center': 'User Center', AllStatus: 'All', None: 'None', Name: 'Name', 'Process priority': 'Process priority', 'Task priority': 'Task priority', 'Task timeout alarm': 'Task timeout alarm', 'Timeout strategy': 'Timeout strategy', 'Timeout alarm': 'Timeout alarm', 'Timeout failure': 'Timeout failure', 'Timeout period': 'Timeout period', 'Waiting Dependent complete': 'Waiting Dependent complete', 'Waiting Dependent start': 'Waiting Dependent start', 'Check interval': 'Check interval', 'Timeout must be longer than check interval': 'Timeout must be longer than check interval', 'Timeout strategy must be selected': 'Timeout strategy must be selected', 'Timeout must be a positive integer': 'Timeout must be a positive integer', 'Add dependency': 'Add dependency', and: 'and', or: 'or', month: 'month', week: 'week', day: 'day', hour: 'hour', Running: 'Running', 'Waiting for dependency to complete': 'Waiting for dependency to complete', Selected: 'Selected', CurrentHour: 'CurrentHour', Last1Hour: 'Last1Hour', Last2Hours: 'Last2Hours', Last3Hours: 'Last3Hours', Last24Hours: 'Last24Hours', today: 'today', Last1Days: 'Last1Days', Last2Days: 'Last2Days', Last3Days: 'Last3Days', Last7Days: 
'Last7Days', ThisWeek: 'ThisWeek', LastWeek: 'LastWeek', LastMonday: 'LastMonday', LastTuesday: 'LastTuesday', LastWednesday: 'LastWednesday', LastThursday: 'LastThursday', LastFriday: 'LastFriday', LastSaturday: 'LastSaturday', LastSunday: 'LastSunday', ThisMonth: 'ThisMonth', LastMonth: 'LastMonth', LastMonthBegin: 'LastMonthBegin', LastMonthEnd: 'LastMonthEnd', 'Refresh status succeeded': 'Refresh status succeeded', 'Queue manage': 'Yarn Queue manage', 'Create queue': 'Create queue', 'Edit queue': 'Edit queue', 'Datasource manage': 'Datasource', 'History task record': 'History task record', 'Please go online': 'Please go online', 'Queue value': 'Queue value', 'Please enter queue value': 'Please enter queue value', 'Worker group manage': 'Worker group manage', 'Create worker group': 'Create worker group', 'Edit worker group': 'Edit worker group', 'Token manage': 'Token manage', 'Create token': 'Create token', 'Edit token': 'Edit token', Addresses: 'Addresses', 'Worker Addresses': 'Worker Addresses', 'Please select the worker addresses': 'Please select the worker addresses', 'Failure time': 'Failure time', 'Expiration time': 'Expiration time', User: 'User', 'Please enter token': 'Please enter token', 'Generate token': 'Generate token', Monitor: 'Monitor', Group: 'Group', 'Queue statistics': 'Queue statistics', 'Command status statistics': 'Command status statistics', 'Task kill': 'Task Kill', 'Task queue': 'Task queue', 'Error command count': 'Error command count', 'Normal command count': 'Normal command count', Manage: ' Manage', 'Number of connections': 'Number of connections', Sent: 'Sent', Received: 'Received', 'Min latency': 'Min latency', 'Avg latency': 'Avg latency', 'Max latency': 'Max latency', 'Node count': 'Node count', 'Query time': 'Query time', 'Node self-test status': 'Node self-test status', 'Health status': 'Health status', 'Max connections': 'Max connections', 'Threads connections': 'Threads connections', 'Max used connections': 'Max used connections', 'Threads running connections': 'Threads running connections', 'Worker group': 'Worker group', 'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0', 'Pre Statement': 'Pre Statement', 'Post Statement': 'Post Statement', 'Statement cannot be empty': 'Statement cannot be empty', 'Process Define Count': 'Work flow Define Count', 'Process Instance Running Count': 'Process Instance Running Count', 'command number of waiting for running': 'command number of waiting for running', 'failure command number': 'failure command number', 'tasks number of waiting running': 'tasks number of waiting running', 'task number of ready to kill': 'task number of ready to kill', 'Statistics manage': 'Statistics Manage', statistics: 'Statistics', 'select tenant': 'select tenant', 'Please enter Principal': 'Please enter Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': 'Please enter the kerberos authentication parameter java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': 'Please enter the kerberos authentication parameter login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': 'Please enter the kerberos authentication parameter login.user.keytab.path', 'The start time must not be the same as the end': 'The start time must not be the same as the end', 'Startup parameter': 'Startup parameter', 'Startup type': 'Startup type', 'warning of timeout': 'warning of 
timeout', 'Next five execution times': 'Next five execution times', 'Execute time': 'Execute time', 'Complement range': 'Complement range', 'Http Url': 'Http Url', 'Http Method': 'Http Method', 'Http Parameters': 'Http Parameters', 'Http Parameters Key': 'Http Parameters Key', 'Http Parameters Position': 'Http Parameters Position', 'Http Parameters Value': 'Http Parameters Value', 'Http Check Condition': 'Http Check Condition', 'Http Condition': 'Http Condition', 'Please Enter Http Url': 'Please Enter Http Url(required)', 'Please Enter Http Condition': 'Please Enter Http Condition', 'There is no data for this period of time': 'There is no data for this period of time', 'Worker addresses cannot be empty': 'Worker addresses cannot be empty', 'Please generate token': 'Please generate token', 'Spark Version': 'Spark Version', TargetDataBase: 'target database', TargetTable: 'target table', 'Please enter the table of target': 'Please enter the table of target', 'Please enter a Target Table(required)': 'Please enter a Target Table(required)', SpeedByte: 'speed(byte count)', SpeedRecord: 'speed(record count)', '0 means unlimited by byte': '0 means unlimited', '0 means unlimited by count': '0 means unlimited', 'Modify User': 'Modify User', 'Whether directory': 'Whether directory', Yes: 'Yes', No: 'No', 'Hadoop Custom Params': 'Hadoop Params', 'Sqoop Advanced Parameters': 'Sqoop Params', 'Sqoop Job Name': 'Job Name', 'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)', 'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)', 'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)', 'Please enter Target Dir(required)': 'Please enter Target Dir(required)', 'Please enter Export Dir(required)': 'Please enter Export Dir(required)', 'Please enter Hive Database(required)': 'Please enter Hive Databasec(required)', 'Please enter Hive Table(required)': 'Please enter Hive Table(required)', 'Please enter Hive Partition Keys': 'Please enter Hive Partition Key', 'Please enter Hive Partition Values': 'Please enter Partition Value', 'Please enter Replace Delimiter': 'Please enter Replace Delimiter', 'Please enter Fields Terminated': 'Please enter Fields Terminated', 'Please enter Lines Terminated': 'Please enter Lines Terminated', 'Please enter Concurrency': 'Please enter Concurrency', 'Please enter Update Key': 'Please enter Update Key', 'Please enter Job Name(required)': 'Please enter Job Name(required)', 'Please enter Custom Shell(required)': 'Please enter Custom Shell(required)', Direct: 'Direct', Type: 'Type', ModelType: 'ModelType', ColumnType: 'ColumnType', Database: 'Database', Column: 'Column', 'Map Column Hive': 'Map Column Hive', 'Map Column Java': 'Map Column Java', 'Export Dir': 'Export Dir', 'Hive partition Keys': 'Hive partition Keys', 'Hive partition Values': 'Hive partition Values', FieldsTerminated: 'FieldsTerminated', LinesTerminated: 'LinesTerminated', IsUpdate: 'IsUpdate', UpdateKey: 'UpdateKey', UpdateMode: 'UpdateMode', 'Target Dir': 'Target Dir', DeleteTargetDir: 'DeleteTargetDir', FileType: 'FileType', CompressionCodec: 'CompressionCodec', CreateHiveTable: 'CreateHiveTable', DropDelimiter: 'DropDelimiter', OverWriteSrc: 'OverWriteSrc', ReplaceDelimiter: 'ReplaceDelimiter', Concurrency: 'Concurrency', Form: 'Form', OnlyUpdate: 'OnlyUpdate', AllowInsert: 'AllowInsert', 'Data Source': 'Data Source', 'Data Target': 'Data Target', 'All Columns': 'All Columns', 'Some Columns': 'Some Columns', 'Branch flow': 'Branch flow', 
'Custom Job': 'Custom Job', 'Custom Script': 'Custom Script', 'Cannot select the same node for successful branch flow and failed branch flow': 'Cannot select the same node for successful branch flow and failed branch flow', 'Successful branch flow and failed branch flow are required': 'conditions node Successful and failed branch flow are required', 'No resources exist': 'No resources exist', 'Please delete all non-existing resources': 'Please delete all non-existing resources', 'Unauthorized or deleted resources': 'Unauthorized or deleted resources', 'Please delete all non-existent resources': 'Please delete all non-existent resources', Kinship: 'Workflow relationship', Reset: 'Reset', KinshipStateActive: 'Active', KinshipState1: 'Online', KinshipState0: 'Workflow is not online', KinshipState10: 'Scheduling is not online', 'Dag label display control': 'Dag label display control', Enable: 'Enable', Disable: 'Disable', 'The Worker group no longer exists, please select the correct Worker group!': 'The Worker group no longer exists, please select the correct Worker group!', 'Please confirm whether the workflow has been saved before downloading': 'Please confirm whether the workflow has been saved before downloading', 'User name length is between 3 and 39': 'User name length is between 3 and 39', 'Timeout Settings': 'Timeout Settings', 'Connect Timeout': 'Connect Timeout', 'Socket Timeout': 'Socket Timeout', 'Connect timeout be a positive integer': 'Connect timeout be a positive integer', 'Socket Timeout be a positive integer': 'Socket Timeout be a positive integer', ms: 'ms', 'Please Enter Url': 'Please Enter Url eg. 127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': 'Please select the waterdrop resources', zkDirectory: 'zkDirectory', 'Directory detail': 'Directory detail', 'Connection name': 'Connection name', 'Current connection settings': 'Current connection settings', 'Please save the DAG before formatting': 'Please save the DAG before formatting', 'Batch copy': 'Batch copy', 'Related items': 'Related items', 'Project name is required': 'Project name is required', 'Batch move': 'Batch move', Version: 'Version', 'Pre tasks': 'Pre tasks', 'Running Memory': 'Running Memory', 'Max Memory': 'Max Memory', 'Min Memory': 'Min Memory', 'The workflow canvas is abnormal and cannot be saved, please recreate': 'The workflow canvas is abnormal and cannot be saved, please recreate', Info: 'Info', 'Datasource userName': 'owner', 'Resource userName': 'owner' }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,622
[Improvement][project management] Modify the title
*For better global communication, please give priority to using English description, thx!* *Please review https://dolphinscheduler.apache.org/en-us/community/development/issue.html when describing an issue.* **Describe the question** In the "Project Management" (项目管理) module, the "Project Home" (项目首页) menu has little to do with what the page actually shows. **What are the current deficiencies and the benefits of improvement** - Change the "项目首页" (Project Home) text to "工作流监控" (Workflow Monitoring). **Which version of DolphinScheduler:** -[1.3.7] **Describe alternatives you've considered** A clear and concise description of any alternative improvement solutions you've considered.
https://github.com/apache/dolphinscheduler/issues/5622
https://github.com/apache/dolphinscheduler/pull/5723
ee0a4391e7a21ef7773eba015fa783ff616ac4a6
2ba569acd028c00c22f4853de7c58251ac72816c
2021-06-10T13:47:51Z
java
2021-06-30T05:23:06Z
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': '用户名', 'Please enter user name': '请输入用户名', Password: '密码', 'Please enter your password': '请输入密码', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间', Login: '登录', Home: '首页', 'Failed to create node to save': '未创建节点保存失败', 'Global parameters': '全局参数', 'Local parameters': '局部参数', 'Copy success': '复制成功', 'The browser does not support automatic copying': '该浏览器不支持自动复制', 'Whether to save the DAG graph': '是否保存DAG图', 'Current node settings': '当前节点设置', 'View history': '查看历史', 'View log': '查看日志', 'Force success': '强制成功', 'Enter this child node': '进入该子节点', 'Node name': '节点名称', 'Please enter name (required)': '请输入名称(必填)', 'Run flag': '运行标志', Normal: '正常', 'Prohibition execution': '禁止执行', 'Please enter description': '请输入描述', 'Number of failed retries': '失败重试次数', Times: '次', 'Failed retry interval': '失败重试间隔', Minute: '分', 'Delay execution time': '延时执行时间', 'Delay execution': '延时执行', 'Forced success': '强制成功', Cancel: '取消', 'Confirm add': '确认添加', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流', 'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流', 'Name already exists': '名称已存在请重新输入', 'Download Log': '下载日志', 'Refresh Log': '刷新日志', 'Enter full screen': '进入全屏', 'Cancel full screen': '取消全屏', Close: '关闭', 'Update log success': '更新日志成功', 'No more logs': '暂无更多日志', 'No log': '暂无日志', 'Loading Log...': '正在努力请求日志中...', 'Set the DAG diagram name': '设置DAG图名称', 'Please enter description(optional)': '请输入描述(选填)', 'Set global': '设置全局', 'Whether to go online the process definition': '是否上线流程定义', 'Whether to update the process definition': '是否更新流程定义', Add: '添加', 'DAG graph name cannot be empty': 'DAG图名称不能为空', 'Create Datasource': '创建数据源', 'Project Home': '项目首页', 'Project Manage': '项目管理', 'Create Project': '创建项目', 'Cron Manage': '定时管理', 'Copy Workflow': '复制工作流', 'Tenant Manage': '租户管理', 'Create Tenant': '创建租户', 'User Manage': '用户管理', 'Create User': '创建用户', 'User Information': '用户信息', 'Edit Password': '密码修改', Success: '成功', Failed: '失败', Delete: '删除', 'Please choose': '请选择', 'Please enter a positive integer': '请输入正整数', 'Program Type': '程序类型', 'Main Class': '主函数的Class', 'Main Jar Package': '主Jar包', 'Please enter main jar package': '请选择主Jar包', 'Please enter main class': '请填写主函数的Class', 'Main Arguments': '主程序参数', 'Please enter main arguments': '请输入主程序参数', 'Option Parameters': '选项参数', 'Please enter option parameters': '请输入选项参数', Resources: '资源', 'Custom Parameters': '自定义参数', 'Custom template': '自定义模版', Datasource: '数据源', methods: '方法', 'Please enter the procedure method': '请输入存储脚本 \n\n调用存储过程:{call <procedure-name>[(<arg1>,<arg2>, 
...)]}\n\n调用存储函数:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': '示例:{call <procedure-name>[(?,?, ...)]} 或 {?= call <procedure-name>[(?,?, ...)]}', Script: '脚本', 'Please enter script(required)': '请输入脚本(必填)', 'Deploy Mode': '部署方式', 'Driver Cores': 'Driver核心数', 'Please enter Driver cores': '请输入Driver核心数', 'Driver Memory': 'Driver内存数', 'Please enter Driver memory': '请输入Driver内存数', 'Executor Number': 'Executor数量', 'Please enter Executor number': '请输入Executor数量', 'The Executor number should be a positive integer': 'Executor数量为正整数', 'Executor Memory': 'Executor内存数', 'Please enter Executor memory': '请输入Executor内存数', 'Executor Cores': 'Executor核心数', 'Please enter Executor cores': '请输入Executor核心数', 'Memory should be a positive integer': '内存数为数字', 'Core number should be positive integer': '核心数为正整数', 'Flink Version': 'Flink版本', 'JobManager Memory': 'JobManager内存数', 'Please enter JobManager memory': '请输入JobManager内存数', 'TaskManager Memory': 'TaskManager内存数', 'Please enter TaskManager memory': '请输入TaskManager内存数', 'Slot Number': 'Slot数量', 'Please enter Slot number': '请输入Slot数量', Parallelism: '并行度', 'Please enter Parallelism': '请输入并行度', 'TaskManager Number': 'TaskManager数量', 'Please enter TaskManager number': '请输入TaskManager数量', 'App Name': '任务名称', 'Please enter app name(optional)': '请输入任务名称(选填)', 'SQL Type': 'sql类型', 'Send Email': '发送邮件', 'Log display': '日志显示', 'rows of result': '行查询结果', 'Max Numbers Return': '返回的记录行数', 'Max Numbers Return placeholder': '默认值10000,如果值过大可能会对内存造成较大压力', 'Max Numbers Return required': '返回的记录行数值必须是一个在0-2147483647范围内的整数', Title: '主题', 'Please enter the title of email': '请输入邮件主题', Table: '表名', TableMode: '表格', Attachment: '附件', 'SQL Parameter': 'sql参数', 'SQL Statement': 'sql语句', 'UDF Function': 'UDF函数', 'Please enter a SQL Statement(required)': '请输入sql语句(必填)', 'Please enter a JSON Statement(required)': '请输入json语句(必填)', 'One form or attachment must be selected': '表格、附件必须勾选一个', 'Mail subject required': '邮件主题必填', 'Child Node': '子节点', 'Please select a sub-Process': '请选择子工作流', Edit: '编辑', 'Switch To This Version': '切换到该版本', 'Datasource Name': '数据源名称', 'Please enter datasource name': '请输入数据源名称', IP: 'IP主机名', 'Please enter IP': '请输入IP主机名', Port: '端口', 'Please enter port': '请输入端口', 'Database Name': '数据库名', 'Please enter database name': '请输入数据库名', 'Oracle Connect Type': '服务名或SID', 'Oracle Service Name': '服务名', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc连接参数', 'Test Connect': '测试连接', 'Please enter resource name': '请输入数据源名称', 'Please enter resource folder name': '请输入资源文件夹名称', 'Please enter a non-query SQL statement': '请输入非查询sql语句', 'Please enter IP/hostname': '请输入IP/主机名', 'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式', '#': '编号', 'Datasource Type': '数据源类型', 'Datasource Parameter': '数据源参数', 'Create Time': '创建时间', 'Update Time': '更新时间', Operation: '操作', 'Current Version': '当前版本', 'Click to view': '点击查看', 'Delete?': '确定删除吗?', 'Switch Version Successfully': '切换版本成功', 'Confirm Switch To This Version?': '确定切换到该版本吗?', Confirm: '确定', 'Task status statistics': '任务状态统计', Number: '数量', State: '状态', 'Process Status Statistics': '流程状态统计', 'Process Definition Statistics': '流程定义统计', 'Project Name': '项目名称', 'Please enter name': '请输入名称', 'Owned Users': '所属用户', 'Process Pid': '进程Pid', 'Zk registration directory': 'zk注册目录', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': '最后心跳时间', 'Edit Tenant': '编辑租户', 'OS Tenant Code': '操作系统租户', 'Tenant Name': '租户名称', Queue: '队列', 'Please select a queue': 
'默认为租户关联队列', 'Please enter the os tenant code in English': '请输入操作系统租户只允许英文', 'Please enter os tenant code in English': '请输入英文操作系统租户', 'Please enter os tenant code': '请输入操作系统租户', 'Please enter tenant Name': '请输入租户名称', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': '操作系统租户只允许字母或字母与数字组合', 'Edit User': '编辑用户', Tenant: '租户', Email: '邮件', Phone: '手机', 'User Type': '用户类型', 'Please enter phone number': '请输入手机', 'Please enter email': '请输入邮箱', 'Please enter the correct email format': '请输入正确的邮箱格式', 'Please enter the correct mobile phone format': '请输入正确的手机格式', Project: '项目', Authorize: '授权', 'File resources': '文件资源', 'UDF resources': 'UDF资源', 'UDF resources directory': 'UDF资源目录', 'Please select UDF resources directory': '请选择UDF资源目录', 'Alarm group': '告警组', 'Alarm group required': '告警组必填', 'Edit alarm group': '编辑告警组', 'Create alarm group': '创建告警组', 'Create Alarm Instance': '创建告警实例', 'Edit Alarm Instance': '编辑告警实例', 'Group Name': '组名称', 'Alarm instance name': '告警实例名称', 'Alarm plugin name': '告警插件名称', 'Select plugin': '选择插件', 'Select Alarm plugin': '请选择告警插件', 'Please enter group name': '请输入组名称', 'Instance parameter exception': '实例参数异常', 'Group Type': '组类型', 'Alarm plugin instance': '告警插件实例', 'Select Alarm plugin instance': '请选择告警插件实例', Remarks: '备注', SMS: '短信', 'Managing Users': '管理用户', Permission: '权限', Administrator: '管理员', 'Confirm Password': '确认密码', 'Please enter confirm password': '请输入确认密码', 'Password cannot be in Chinese': '密码不能为中文', 'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码', 'Confirmation password cannot be in Chinese': '确认密码不能为中文', 'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码', 'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认', 'Please select the datasource': '请选择数据源', 'Please select resources': '请选择资源', Query: '查询', 'Non Query': '非查询', 'prop(required)': 'prop(必填)', 'value(optional)': 'value(选填)', 'value(required)': 'value(必填)', 'prop is empty': 'prop不能为空', 'value is empty': 'value不能为空', 'prop is repeat': 'prop中有重复', 'Start Time': '开始时间', 'End Time': '结束时间', crontab: 'crontab', 'Failure Strategy': '失败策略', online: '上线', offline: '下线', 'Task Status': '任务状态', 'Process Instance': '工作流实例', 'Task Instance': '任务实例', 'Select date range': '选择日期区间', startDate: '开始日期', endDate: '结束日期', Date: '日期', Waiting: '等待', Execution: '执行中', Finish: '完成', 'Create File': '创建文件', 'Create folder': '创建文件夹', 'File Name': '文件名称', 'Folder Name': '文件夹名称', 'File Format': '文件格式', 'Folder Format': '文件夹格式', 'File Content': '文件内容', 'Upload File Size': '文件大小不能超过1G', Create: '创建', 'Please enter the resource content': '请输入资源内容', 'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行', 'File Details': '文件详情', 'Download Details': '下载详情', Return: '返回', Save: '保存', 'File Manage': '文件管理', 'Upload Files': '上传文件', 'Create UDF Function': '创建UDF函数', 'Upload UDF Resources': '上传UDF资源', 'Service-Master': '服务管理-Master', 'Service-Worker': '服务管理-Worker', 'Process Name': '工作流名称', Executor: '执行用户', 'Run Type': '运行类型', 'Scheduling Time': '调度时间', 'Run Times': '运行次数', host: 'host', 'fault-tolerant sign': '容错标识', Rerun: '重跑', 'Recovery Failed': '恢复失败', Stop: '停止', Pause: '暂停', 'Recovery Suspend': '恢复运行', Gantt: '甘特图', 'Node Type': '节点类型', 'Submit Time': '提交时间', Duration: '运行时长', 'Retry Count': '重试次数', 'Task Name': '任务名称', 'Task Date': '任务日期', 'Source Table': '源表', 'Record Number': '记录数', 'Target Table': '目标表', 'Online viewing type is not supported': '不支持在线查看类型', Size: '大小', Rename: '重命名', Download: '下载', Export: 
'导出', 'Version Info': '版本信息', Submit: '提交', 'Edit UDF Function': '编辑UDF函数', type: '类型', 'UDF Function Name': 'UDF函数名称', FILE: '文件', UDF: 'UDF', 'File Subdirectory': '文件子目录', 'Please enter a function name': '请输入函数名', 'Package Name': '包名类名', 'Please enter a Package name': '请输入包名类名', Parameter: '参数', 'Please enter a parameter': '请输入参数', 'UDF Resources': 'UDF资源', 'Upload Resources': '上传资源', Instructions: '使用说明', 'Please enter a instructions': '请输入使用说明', 'Please enter a UDF function name': '请输入UDF函数名称', 'Select UDF Resources': '请选择UDF资源', 'Class Name': '类名', 'Jar Package': 'jar包', 'Library Name': '库名', 'UDF Resource Name': 'UDF资源名称', 'File Size': '文件大小', Description: '描述', 'Drag Nodes and Selected Items': '拖动节点和选中项', 'Select Line Connection': '选择线条连接', 'Delete selected lines or nodes': '删除选中的线或节点', 'Full Screen': '全屏', Unpublished: '未发布', 'Start Process': '启动工作流', 'Execute from the current node': '从当前节点开始执行', 'Recover tolerance fault process': '恢复被容错的工作流', 'Resume the suspension process': '恢复运行流程', 'Execute from the failed nodes': '从失败节点开始执行', 'Complement Data': '补数', 'Scheduling execution': '调度执行', 'Recovery waiting thread': '恢复等待线程', 'Submitted successfully': '提交成功', Executing: '正在执行', 'Ready to pause': '准备暂停', 'Ready to stop': '准备停止', 'Need fault tolerance': '需要容错', Kill: 'Kill', 'Waiting for thread': '等待线程', 'Waiting for dependence': '等待依赖', Start: '运行', Copy: '复制节点', 'Copy name': '复制名称', 'Copy path': '复制路径', 'Please enter keyword': '请输入关键词', 'File Upload': '文件上传', 'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!', 'Drag area upload': '拖动区域上传', Upload: '上传', 'ReUpload File': '重新上传文件', 'Please enter file name': '请输入文件名', 'Please select the file to upload': '请选择要上传的文件', 'Resources manage': '资源中心', Security: '安全中心', Logout: '退出', 'No data': '查询无数据', 'Uploading...': '文件上传中', 'Loading...': '正在努力加载中...', List: '列表', 'Unable to download without proper url': '无下载url无法下载', Process: '工作流', 'Process definition': '工作流定义', 'Task record': '任务记录', 'Warning group manage': '告警组管理', 'Warning instance manage': '告警实例管理', 'Servers manage': '服务管理', 'UDF manage': 'UDF管理', 'Resource manage': '资源管理', 'Function manage': '函数管理', 'Edit password': '修改密码', 'Ordinary users': '普通用户', 'Create process': '创建工作流', 'Import process': '导入工作流', 'Timing state': '定时状态', Timing: '定时', Timezone: '时区', TreeView: '树形图', 'Mailbox already exists! 
Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复', 'Mailbox input is illegal': '邮箱输入不合法', 'Please set the parameters before starting': '启动前请先设置参数', Continue: '继续', End: '结束', 'Node execution': '节点执行', 'Backward execution': '向后执行', 'Forward execution': '向前执行', 'Execute only the current node': '仅执行当前节点', 'Notification strategy': '通知策略', 'Notification group': '通知组', 'Please select a notification group': '请选择通知组', receivers: '收件人', receiverCcs: '抄送人', 'Whether it is a complement process?': '是否补数', 'Schedule date': '调度日期', 'Mode of execution': '执行方式', 'Serial execution': '串行执行', 'Parallel execution': '并行执行', 'Set parameters before timing': '定时前请先设置参数', 'Start and stop time': '起止时间', 'Please select time': '请选择时间', 'Please enter crontab': '请输入crontab', none_1: '都不发', success_1: '成功发', failure_1: '失败发', All_1: '成功或失败都发', Toolbar: '工具栏', 'View variables': '查看变量', 'Format DAG': '格式化DAG', 'Refresh DAG status': '刷新DAG状态', Return_1: '返回上一节点', 'Please enter format': '请输入格式为', 'connection parameter': '连接参数', 'Process definition details': '流程定义详情', 'Create process definition': '创建流程定义', 'Scheduled task list': '定时任务列表', 'Process instance details': '流程实例详情', 'Create Resource': '创建资源', 'User Center': '用户中心', AllStatus: '全部状态', None: '无', Name: '名称', 'Process priority': '流程优先级', 'Task priority': '任务优先级', 'Task timeout alarm': '任务超时告警', 'Timeout strategy': '超时策略', 'Timeout alarm': '超时告警', 'Timeout failure': '超时失败', 'Timeout period': '超时时长', 'Waiting Dependent complete': '等待依赖完成', 'Waiting Dependent start': '等待依赖启动', 'Check interval': '检查间隔', 'Timeout must be longer than check interval': '超时时间必须比检查间隔长', 'Timeout strategy must be selected': '超时策略必须选一个', 'Timeout must be a positive integer': '超时时长必须为正整数', 'Add dependency': '添加依赖', and: '且', or: '或', month: '月', week: '周', day: '日', hour: '时', Running: '正在运行', 'Waiting for dependency to complete': '等待依赖完成', Selected: '已选', CurrentHour: '当前小时', Last1Hour: '前1小时', Last2Hours: '前2小时', Last3Hours: '前3小时', Last24Hours: '前24小时', today: '今天', Last1Days: '昨天', Last2Days: '前两天', Last3Days: '前三天', Last7Days: '前七天', ThisWeek: '本周', LastWeek: '上周', LastMonday: '上周一', LastTuesday: '上周二', LastWednesday: '上周三', LastThursday: '上周四', LastFriday: '上周五', LastSaturday: '上周六', LastSunday: '上周日', ThisMonth: '本月', LastMonth: '上月', LastMonthBegin: '上月初', LastMonthEnd: '上月末', 'Refresh status succeeded': '刷新状态成功', 'Queue manage': 'Yarn 队列管理', 'Create queue': '创建队列', 'Edit queue': '编辑队列', 'Datasource manage': '数据源中心', 'History task record': '历史任务记录', 'Please go online': '不要忘记上线', 'Queue value': '队列值', 'Please enter queue value': '请输入队列值', 'Worker group manage': 'Worker分组管理', 'Create worker group': '创建Worker分组', 'Edit worker group': '编辑Worker分组', 'Token manage': '令牌管理', 'Create token': '创建令牌', 'Edit token': '编辑令牌', Addresses: '地址', 'Worker Addresses': 'Worker地址', 'Please select the worker addresses': '请选择Worker地址', 'Failure time': '失效时间', 'Expiration time': '失效时间', User: '用户', 'Please enter token': '请输入令牌', 'Generate token': '生成令牌', Monitor: '监控中心', Group: '分组', 'Queue statistics': '队列统计', 'Command status statistics': '命令状态统计', 'Task kill': '等待kill任务', 'Task queue': '等待执行任务', 'Error command count': '错误指令数', 'Normal command count': '正确指令数', Manage: '管理', 'Number of connections': '连接数', Sent: '发送量', Received: '接收量', 'Min latency': '最低延时', 'Avg latency': '平均延时', 'Max latency': '最大延时', 'Node count': '节点数', 'Query time': '当前查询时间', 'Node self-test status': '节点自检状态', 'Health status': '健康状态', 'Max connections': '最大连接数', 'Threads connections': '当前连接数', 'Max used connections': '同时使用连接最大数', 
'Threads running connections': '数据库当前活跃连接数', 'Worker group': 'Worker分组', 'Please enter a positive integer greater than 0': '请输入大于 0 的正整数', 'Pre Statement': '前置sql', 'Post Statement': '后置sql', 'Statement cannot be empty': '语句不能为空', 'Process Define Count': '工作流定义数', 'Process Instance Running Count': '正在运行的流程数', 'command number of waiting for running': '待执行的命令数', 'failure command number': '执行失败的命令数', 'tasks number of waiting running': '待运行任务数', 'task number of ready to kill': '待杀死任务数', 'Statistics manage': '统计管理', statistics: '统计', 'select tenant': '选择租户', 'Please enter Principal': '请输入Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': '请输入kerberos认证参数 java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': '请输入kerberos认证参数 login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': '请输入kerberos认证参数 login.user.keytab.path', 'The start time must not be the same as the end': '开始时间和结束时间不能相同', 'Startup parameter': '启动参数', 'Startup type': '启动类型', 'warning of timeout': '超时告警', 'Next five execution times': '接下来五次执行时间', 'Execute time': '执行时间', 'Complement range': '补数范围', 'Http Url': '请求地址', 'Http Method': '请求类型', 'Http Parameters': '请求参数', 'Http Parameters Key': '参数名', 'Http Parameters Position': '参数位置', 'Http Parameters Value': '参数值', 'Http Check Condition': '校验条件', 'Http Condition': '校验内容', 'Please Enter Http Url': '请填写请求地址(必填)', 'Please Enter Http Condition': '请填写校验内容', 'There is no data for this period of time': '该时间段无数据', 'Worker addresses cannot be empty': 'Worker地址不能为空', 'Please generate token': '请生成Token', 'Spark Version': 'Spark版本', TargetDataBase: '目标库', TargetTable: '目标表', 'Please enter the table of target': '请输入目标表名', 'Please enter a Target Table(required)': '请输入目标表(必填)', SpeedByte: '限流(字节数)', SpeedRecord: '限流(记录数)', '0 means unlimited by byte': 'KB,0代表不限制', '0 means unlimited by count': '0代表不限制', 'Modify User': '修改用户', 'Whether directory': '是否文件夹', Yes: '是', No: '否', 'Hadoop Custom Params': 'Hadoop参数', 'Sqoop Advanced Parameters': 'Sqoop参数', 'Sqoop Job Name': '任务名称', 'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)', 'Please enter Mysql Table(required)': '请输入Mysql表名(必填)', 'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开', 'Please enter Target Dir(required)': '请输入目标路径(必填)', 'Please enter Export Dir(required)': '请输入数据源路径(必填)', 'Please enter Hive Database(required)': '请输入Hive数据库(必填)', 'Please enter Hive Table(required)': '请输入Hive表名(必填)', 'Please enter Hive Partition Keys': '请输入分区键', 'Please enter Hive Partition Values': '请输入分区值', 'Please enter Replace Delimiter': '请输入替换分隔符', 'Please enter Fields Terminated': '请输入列分隔符', 'Please enter Lines Terminated': '请输入行分隔符', 'Please enter Concurrency': '请输入并发度', 'Please enter Update Key': '请输入更新列', 'Please enter Job Name(required)': '请输入任务名称(必填)', 'Please enter Custom Shell(required)': '请输入自定义脚本', Direct: '流向', Type: '类型', ModelType: '模式', ColumnType: '列类型', Database: '数据库', Column: '列', 'Map Column Hive': 'Hive类型映射', 'Map Column Java': 'Java类型映射', 'Export Dir': '数据源路径', 'Hive partition Keys': 'Hive 分区键', 'Hive partition Values': 'Hive 分区值', FieldsTerminated: '列分隔符', LinesTerminated: '行分隔符', IsUpdate: '是否更新', UpdateKey: '更新列', UpdateMode: '更新类型', 'Target Dir': '目标路径', DeleteTargetDir: '是否删除目录', FileType: '保存格式', CompressionCodec: '压缩类型', CreateHiveTable: '是否创建新表', DropDelimiter: '是否删除分隔符', OverWriteSrc: '是否覆盖数据源', ReplaceDelimiter: '替换分隔符', Concurrency: '并发度', Form: '表单', OnlyUpdate: '只更新', AllowInsert: 
'无更新便插入', 'Data Source': '数据来源', 'Data Target': '数据目的', 'All Columns': '全表导入', 'Some Columns': '选择列', 'Branch flow': '分支流转', 'Custom Job': '自定义任务', 'Custom Script': '自定义脚本', 'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点', 'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填', 'No resources exist': '不存在资源', 'Please delete all non-existing resources': '请删除所有不存在资源', 'Unauthorized or deleted resources': '未授权或已删除资源', 'Please delete all non-existent resources': '请删除所有未授权或已删除资源', Kinship: '工作流关系', Reset: '重置', KinshipStateActive: '当前选择', KinshipState1: '已上线', KinshipState0: '工作流未上线', KinshipState10: '调度未上线', 'Dag label display control': 'Dag节点名称显隐', Enable: '启用', Disable: '停用', 'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!', 'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存', 'User name length is between 3 and 39': '用户名长度在3~39之间', 'Timeout Settings': '超时设置', 'Connect Timeout': '连接超时', 'Socket Timeout': 'Socket超时', 'Connect timeout be a positive integer': '连接超时必须为数字', 'Socket Timeout be a positive integer': 'Socket超时必须为数字', ms: '毫秒', 'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': '请选择waterdrop配置文件', zkDirectory: 'zk注册目录', 'Directory detail': '查看目录详情', 'Connection name': '连线名', 'Current connection settings': '当前连线设置', 'Please save the DAG before formatting': '格式化前请先保存DAG', 'Batch copy': '批量复制', 'Related items': '关联项目', 'Project name is required': '项目名称必填', 'Batch move': '批量移动', Version: '版本', 'Pre tasks': '前置任务', 'Running Memory': '运行内存', 'Max Memory': '最大内存', 'Min Memory': '最小内存', 'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建', Info: '提示', 'Datasource userName': '所属用户', 'Resource userName': '所属用户' }
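Concretely, the rename requested in issue #5622 amounts to a locale-value edit in the zh_CN.js file above: the 'Project Home' key, whose value is '项目首页', would instead carry the value '工作流监控', with en_US.js given a matching English label (something like 'Workflow Monitor' — that English wording is only an assumption here; the merged text is whatever PR #5723 settled on).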
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,714
[Improvement][Plugin] When updating an existing alarm instance, the `creation time` shouldn't be updated.
When updating an existing alarm instance, the `creation time` is overwritten, so it always ends up the same as the `modification time`. + When creating a new alarm instance, the `creation time` and `update time` are the same, which is correct. ![image](https://user-images.githubusercontent.com/52202080/123791406-f8fe0300-d911-11eb-8d6d-25e81b6f5deb.png) + When I update an existing alarm instance, after the update succeeds the `creation time` changes, which is wrong: ![image](https://user-images.githubusercontent.com/52202080/123791564-306caf80-d912-11eb-9195-eac521ab11f9.png)
https://github.com/apache/dolphinscheduler/issues/5714
https://github.com/apache/dolphinscheduler/pull/5715
2ba569acd028c00c22f4853de7c58251ac72816c
cf99df3de00ef63ee96b7ab00427c7385c42720a
2021-06-29T11:45:35Z
java
2021-07-04T15:46:47Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/AlertPluginInstanceServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.AlertPluginInstanceService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.vo.AlertPluginInstanceVO;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.utils.BooleanUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.AlertPluginInstance;
import org.apache.dolphinscheduler.dao.entity.PluginDefine;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper;
import org.apache.dolphinscheduler.dao.mapper.AlertPluginInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.PluginDefineMapper;
import org.apache.dolphinscheduler.spi.params.PluginParamsTransfer;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Service;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;

/**
 * alert plugin instance service impl
 */
@Service
@Lazy
public class AlertPluginInstanceServiceImpl extends BaseServiceImpl implements AlertPluginInstanceService {

    @Autowired
    private AlertPluginInstanceMapper alertPluginInstanceMapper;

    @Autowired
    private PluginDefineMapper pluginDefineMapper;

    @Autowired
    private AlertGroupMapper alertGroupMapper;

    /**
     * create alert plugin instance
     *
     * @param loginUser login user
     * @param pluginDefineId plugin define id
     * @param instanceName instance name
     * @param pluginInstanceParams plugin instance params
     */
    @Override
    public Map<String, Object> create(User loginUser, int pluginDefineId, String instanceName, String pluginInstanceParams) {
        AlertPluginInstance alertPluginInstance = new AlertPluginInstance();
        String paramsMapJson = parsePluginParamsMap(pluginInstanceParams);
        alertPluginInstance.setPluginInstanceParams(paramsMapJson);
        alertPluginInstance.setInstanceName(instanceName);
        alertPluginInstance.setPluginDefineId(pluginDefineId);

        Map<String, Object> result = new HashMap<>();
        if (BooleanUtils.isTrue(alertPluginInstanceMapper.existInstanceName(alertPluginInstance.getInstanceName()))) {
            putMsg(result, Status.PLUGIN_INSTANCE_ALREADY_EXIT);
            return result;
        }

        int i = alertPluginInstanceMapper.insert(alertPluginInstance);
        if (i > 0) {
            putMsg(result, Status.SUCCESS);
            return result;
        }
        putMsg(result, Status.SAVE_ERROR);
        return result;
    }

    /**
     * update alert plugin instance
     *
     * @param loginUser login user
     * @param pluginInstanceId plugin instance id
     * @param instanceName instance name
     * @param pluginInstanceParams plugin instance params
     */
    @Override
    public Map<String, Object> update(User loginUser, int pluginInstanceId, String instanceName, String pluginInstanceParams) {
        AlertPluginInstance alertPluginInstance = new AlertPluginInstance();
        String paramsMapJson = parsePluginParamsMap(pluginInstanceParams);
        alertPluginInstance.setPluginInstanceParams(paramsMapJson);
        alertPluginInstance.setInstanceName(instanceName);
        alertPluginInstance.setId(pluginInstanceId);
        Map<String, Object> result = new HashMap<>();
        int i = alertPluginInstanceMapper.updateById(alertPluginInstance);
        if (i > 0) {
            putMsg(result, Status.SUCCESS);
            return result;
        }
        putMsg(result, Status.SAVE_ERROR);
        return result;
    }

    /**
     * delete alert plugin instance
     *
     * @param loginUser login user
     * @param id id
     * @return result
     */
    @Override
    public Map<String, Object> delete(User loginUser, int id) {
        Map<String, Object> result = new HashMap<>();
        // check if there is an associated alert group
        boolean hasAssociatedAlertGroup = checkHasAssociatedAlertGroup(String.valueOf(id));
        if (hasAssociatedAlertGroup) {
            putMsg(result, Status.DELETE_ALERT_PLUGIN_INSTANCE_ERROR_HAS_ALERT_GROUP_ASSOCIATED);
            return result;
        }

        int i = alertPluginInstanceMapper.deleteById(id);
        if (i > 0) {
            putMsg(result, Status.SUCCESS);
        }

        return result;
    }

    /**
     * get alert plugin instance
     *
     * @param loginUser login user
     * @param id get id
     * @return alert plugin
     */
    @Override
    public Map<String, Object> get(User loginUser, int id) {
        Map<String, Object> result = new HashMap<>();
        AlertPluginInstance alertPluginInstance = alertPluginInstanceMapper.selectById(id);
        if (null != alertPluginInstance) {
            putMsg(result, Status.SUCCESS);
            result.put(Constants.DATA_LIST, alertPluginInstance);
        }
        return result;
    }

    @Override
    public Map<String, Object> queryAll() {
        Map<String, Object> result = new HashMap<>();
        List<AlertPluginInstance> alertPluginInstances = alertPluginInstanceMapper.queryAllAlertPluginInstanceList();
        List<AlertPluginInstanceVO> alertPluginInstanceVOS = buildPluginInstanceVOList(alertPluginInstances);
        if (null != alertPluginInstances) {
            putMsg(result, Status.SUCCESS);
            result.put(Constants.DATA_LIST, alertPluginInstanceVOS);
        }
        return result;
    }

    @Override
    public boolean checkExistPluginInstanceName(String pluginInstanceName) {
        return BooleanUtils.isTrue(alertPluginInstanceMapper.existInstanceName(pluginInstanceName));
    }

    @Override
    public Map<String, Object> queryPluginPage(int pageIndex, int pageSize) {
        IPage<AlertPluginInstance> pluginInstanceIPage = new Page<>(pageIndex, pageSize);
        pluginInstanceIPage = alertPluginInstanceMapper.selectPage(pluginInstanceIPage, null);
        PageInfo<AlertPluginInstanceVO> pageInfo = new PageInfo<>(pageIndex, pageSize);
        pageInfo.setTotalCount((int) pluginInstanceIPage.getTotal());
        pageInfo.setLists(buildPluginInstanceVOList(pluginInstanceIPage.getRecords()));
        Map<String, Object> result = new HashMap<>();
        result.put(Constants.DATA_LIST, pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    private List<AlertPluginInstanceVO> buildPluginInstanceVOList(List<AlertPluginInstance> alertPluginInstances) {
        if (CollectionUtils.isEmpty(alertPluginInstances)) {
            return null;
        }
        List<PluginDefine> pluginDefineList = pluginDefineMapper.queryAllPluginDefineList();
        if (CollectionUtils.isEmpty(pluginDefineList)) {
            return null;
        }
        Map<Integer, PluginDefine> pluginDefineMap = pluginDefineList.stream().collect(Collectors.toMap(PluginDefine::getId, Function.identity()));
        List<AlertPluginInstanceVO> alertPluginInstanceVOS = new ArrayList<>();
        alertPluginInstances.forEach(alertPluginInstance -> {
            AlertPluginInstanceVO alertPluginInstanceVO = new AlertPluginInstanceVO();
            alertPluginInstanceVO.setCreateTime(alertPluginInstance.getCreateTime());
            alertPluginInstanceVO.setUpdateTime(alertPluginInstance.getUpdateTime());
            alertPluginInstanceVO.setPluginDefineId(alertPluginInstance.getPluginDefineId());
            alertPluginInstanceVO.setInstanceName(alertPluginInstance.getInstanceName());
            alertPluginInstanceVO.setId(alertPluginInstance.getId());
            PluginDefine pluginDefine = pluginDefineMap.get(alertPluginInstance.getPluginDefineId());
            // FIXME: this happens when the user removes the plugin; maybe a new field should indicate that the plugin has expired?
            if (null == pluginDefine) {
                return;
            }
            alertPluginInstanceVO.setAlertPluginName(pluginDefine.getPluginName());
            // TODO: list pages should not return this parameter
            String pluginParamsMapString = alertPluginInstance.getPluginInstanceParams();
            String uiPluginParams = parseToPluginUiParams(pluginParamsMapString, pluginDefine.getPluginParams());
            alertPluginInstanceVO.setPluginInstanceParams(uiPluginParams);
            alertPluginInstanceVOS.add(alertPluginInstanceVO);
        });
        return alertPluginInstanceVOS;
    }

    /**
     * Get the parameters actually needed by the plugin
     *
     * @param pluginParams complete parameters (including ui)
     * @return k, v (json string)
     */
    private String parsePluginParamsMap(String pluginParams) {
        Map<String, String> paramsMap = PluginParamsTransfer.getPluginParamsMap(pluginParams);
        return JSONUtils.toJsonString(paramsMap);
    }

    /**
     * parse to plugin ui params
     *
     * @param pluginParamsMapString k-v data
     * @param pluginUiParams complete parameters (including ui)
     * @return complete parameters list (including ui)
     */
    private String parseToPluginUiParams(String pluginParamsMapString, String pluginUiParams) {
        List<Map<String, Object>> pluginParamsList = PluginParamsTransfer.generatePluginParams(pluginParamsMapString, pluginUiParams);
        return JSONUtils.toJsonString(pluginParamsList);
    }

    private boolean checkHasAssociatedAlertGroup(String id) {
        List<String> idsList = alertGroupMapper.queryInstanceIdsList();
        if (CollectionUtils.isEmpty(idsList)) {
            return false;
        }
        Optional<String> first = idsList.stream().filter(k -> null != k && Arrays.asList(k.split(",")).contains(id)).findFirst();
        return first.isPresent();
    }
}
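The mechanism behind issue #5714 is visible in update() above: it instantiates a fresh AlertPluginInstance, whose no-arg constructor stamps createTime with new Date(), and MyBatis-Plus updateById() writes every non-null field under its default NOT_NULL strategy, so create_time is rewritten on every update. Below is a minimal sketch of one way to break that chain, assuming the surrounding service class, mapper, and a java.util.Date import; it illustrates the idea rather than the exact patch merged in PR #5715.

    @Override
    public Map<String, Object> update(User loginUser, int pluginInstanceId, String instanceName, String pluginInstanceParams) {
        AlertPluginInstance alertPluginInstance = new AlertPluginInstance();
        alertPluginInstance.setPluginInstanceParams(parsePluginParamsMap(pluginInstanceParams));
        alertPluginInstance.setInstanceName(instanceName);
        alertPluginInstance.setId(pluginInstanceId);
        // The no-arg constructor stamped createTime with "now"; null it out so the
        // default NOT_NULL update strategy omits create_time from the UPDATE statement.
        alertPluginInstance.setCreateTime(null);
        // update_time should still reflect this modification.
        alertPluginInstance.setUpdateTime(new Date());
        Map<String, Object> result = new HashMap<>();
        if (alertPluginInstanceMapper.updateById(alertPluginInstance) > 0) {
            putMsg(result, Status.SUCCESS);
            return result;
        }
        putMsg(result, Status.SAVE_ERROR);
        return result;
    }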
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,714
[Improvement][Plugin] When updating an existing alarm instance, the `creation time` shouldn't be updated.
When updating an existing alarm instance, the `creation time` always ends up equal to the `modification time`. + When creating a new alarm instance, the `creation time` and `update time` are the same, which is correct. ![image](https://user-images.githubusercontent.com/52202080/123791406-f8fe0300-d911-11eb-8d6d-25e81b6f5deb.png) + When updating an existing alarm instance, the `creation time` changes after the update succeeds, which is wrong: ![image](https://user-images.githubusercontent.com/52202080/123791564-306caf80-d912-11eb-9195-eac521ab11f9.png)
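One way to prevent this class of bug at the ORM layer is a hedged sketch, not necessarily the fix the project shipped: reuse the MyBatis-Plus `FieldStrategy` already imported by the entity in this record to exclude `create_time` from generated UPDATE statements. The class name below is hypothetical, for illustration only.

```java
import java.util.Date;

import com.baomidou.mybatisplus.annotation.FieldStrategy;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableName;

@TableName("t_ds_alert_plugin_instance")
public class AlertPluginInstanceSketch {

    // NEVER tells MyBatis-Plus to omit this column from UPDATE statements,
    // so updateById() can no longer overwrite the original creation time.
    @TableField(value = "create_time", updateStrategy = FieldStrategy.NEVER)
    private Date createTime;

    // update_time is still written on every update.
    @TableField("update_time")
    private Date updateTime;
}
```

With this strategy the column keeps whatever value was written at insert time, regardless of what the service layer happens to set on the entity.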
https://github.com/apache/dolphinscheduler/issues/5714
https://github.com/apache/dolphinscheduler/pull/5715
2ba569acd028c00c22f4853de7c58251ac72816c
cf99df3de00ef63ee96b7ab00427c7385c42720a
2021-06-29T11:45:35Z
java
2021-07-04T15:46:47Z
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/AlertPluginInstance.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.dao.entity;

import java.util.Date;

import com.baomidou.mybatisplus.annotation.FieldStrategy;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableField;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;

/**
 * t_ds_alert_plugin_instance
 */
@TableName("t_ds_alert_plugin_instance")
public class AlertPluginInstance {

    /**
     * id
     */
    @TableId(value = "id", type = IdType.AUTO)
    private int id;

    /**
     * plugin_define_id
     */
    @TableField(value = "plugin_define_id", updateStrategy = FieldStrategy.NEVER)
    private int pluginDefineId;

    /**
     * alert plugin instance name
     */
    @TableField("instance_name")
    private String instanceName;

    /**
     * plugin_instance_params
     */
    @TableField("plugin_instance_params")
    private String pluginInstanceParams;

    /**
     * create_time
     */
    @TableField("create_time")
    private Date createTime;

    /**
     * update_time
     */
    @TableField("update_time")
    private Date updateTime;

    public AlertPluginInstance() {
        this.createTime = new Date();
        this.updateTime = new Date();
    }

    public AlertPluginInstance(int pluginDefineId, String pluginInstanceParams, String instanceName) {
        this.pluginDefineId = pluginDefineId;
        this.pluginInstanceParams = pluginInstanceParams;
        this.createTime = new Date();
        this.updateTime = new Date();
        this.instanceName = instanceName;
    }

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public int getPluginDefineId() {
        return pluginDefineId;
    }

    public void setPluginDefineId(int pluginDefineId) {
        this.pluginDefineId = pluginDefineId;
    }

    public String getPluginInstanceParams() {
        return pluginInstanceParams;
    }

    public void setPluginInstanceParams(String pluginInstanceParams) {
        this.pluginInstanceParams = pluginInstanceParams;
    }

    public Date getCreateTime() {
        return createTime;
    }

    public void setCreateTime(Date createTime) {
        this.createTime = createTime;
    }

    public Date getUpdateTime() {
        return updateTime;
    }

    public void setUpdateTime(Date updateTime) {
        this.updateTime = updateTime;
    }

    public String getInstanceName() {
        return instanceName;
    }

    public void setInstanceName(String instanceName) {
        this.instanceName = instanceName;
    }

}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,714
[Improvement][Plugin] When updating an existing alarm instance, the `creation time` shouldn't be updated.
When updating an existing alarm instance, the `creation time` always ends up equal to the `modification time`. + When creating a new alarm instance, the `creation time` and `update time` are the same, which is correct. ![image](https://user-images.githubusercontent.com/52202080/123791406-f8fe0300-d911-11eb-8d6d-25e81b6f5deb.png) + When updating an existing alarm instance, the `creation time` changes after the update succeeds, which is wrong: ![image](https://user-images.githubusercontent.com/52202080/123791564-306caf80-d912-11eb-9195-eac521ab11f9.png)
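A complementary service-side mitigation is also conceivable; again only a sketch, and it relies on MyBatis-Plus's default `NOT_NULL` update strategy, under which null fields are skipped in the generated UPDATE. The idea is to leave `createTime` unset on the entity passed to `updateById`:

```java
// Inside an update method such as AlertPluginInstanceServiceImpl#update:
AlertPluginInstance alertPluginInstance = new AlertPluginInstance();
alertPluginInstance.setId(pluginInstanceId);
alertPluginInstance.setInstanceName(instanceName);
alertPluginInstance.setPluginInstanceParams(parsePluginParamsMap(pluginInstanceParams));
// The no-arg constructor stamps createTime with "now"; clear it so the
// default NOT_NULL strategy drops create_time from the UPDATE statement.
alertPluginInstance.setCreateTime(null);
alertPluginInstance.setUpdateTime(new Date()); // refresh update_time only
alertPluginInstanceMapper.updateById(alertPluginInstance);
```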
https://github.com/apache/dolphinscheduler/issues/5714
https://github.com/apache/dolphinscheduler/pull/5715
2ba569acd028c00c22f4853de7c58251ac72816c
cf99df3de00ef63ee96b7ab00427c7385c42720a
2021-06-29T11:45:35Z
java
2021-07-04T15:46:47Z
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/AlertPluginInstanceMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertPluginInstanceMapper"> <select id="queryAllAlertPluginInstanceList" resultType="org.apache.dolphinscheduler.dao.entity.AlertPluginInstance"> select * from t_ds_alert_plugin_instance where 1 = 1 </select> <select id="queryByIds" resultType="org.apache.dolphinscheduler.dao.entity.AlertPluginInstance"> select * from t_ds_alert_plugin_instance where id in <foreach item="item" index="index" collection="ids" open="(" separator="," close=")"> #{item} </foreach> </select> <select id="queryByInstanceName" resultType="org.apache.dolphinscheduler.dao.entity.AlertPluginInstance"> select * from t_ds_alert_plugin_instance where instance_name = #{instanceName} </select> <select id="existInstanceName" resultType="java.lang.Boolean"> select 1 from t_ds_alert_plugin_instance where instance_name = #{instanceName} limit 1 </select> </mapper>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,511
[Feature][JsonSplit-api] schedule update interface
From #5498: remove the request parameter workerGroupId, including from the front end and the controller interface.
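For illustration, a hedged sketch of what the controller-side change amounts to, modeled on the `createSchedule` signature in the controller file in this record (the "before" line is hypothetical; the exact final signature may differ):

```java
// Hypothetical "before": the update endpoint accepted a numeric id.
// @RequestParam(value = "workerGroupId", required = false) int workerGroupId

// "After", as in the controller in this record: worker group is passed by name.
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup
```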
https://github.com/apache/dolphinscheduler/issues/5511
https://github.com/apache/dolphinscheduler/pull/5761
d382a7ba8c454b41944958c6e42692843a765234
cfa22d7c89bcd8e35b8a286b39b67b9b36b3b4dc
2021-05-18T13:58:16Z
java
2021-07-07T10:15:19Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/SchedulerController.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.controller;

import static org.apache.dolphinscheduler.api.enums.Status.CREATE_SCHEDULE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.OFFLINE_SCHEDULE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.PREVIEW_SCHEDULE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.PUBLISH_SCHEDULE_ONLINE_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_SCHEDULE_LIST_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.QUERY_SCHEDULE_LIST_PAGING_ERROR;
import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_SCHEDULE_ERROR;
import static org.apache.dolphinscheduler.common.Constants.SESSION_USER;

import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;

import java.util.Map;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestAttribute;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.ResponseStatus;
import org.springframework.web.bind.annotation.RestController;

import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import io.swagger.annotations.ApiParam;
import springfox.documentation.annotations.ApiIgnore;

/**
 * scheduler controller
 */
@Api(tags = "SCHEDULER_TAG")
@RestController
@RequestMapping("/projects/{projectCode}/schedule")
public class SchedulerController extends BaseController {

    public static final String DEFAULT_WARNING_TYPE = "NONE";
    public static final String DEFAULT_NOTIFY_GROUP_ID = "1";
    public static final String DEFAULT_FAILURE_POLICY = "CONTINUE";
    public static final String DEFAULT_PROCESS_INSTANCE_PRIORITY = "MEDIUM";

    @Autowired
    private SchedulerService schedulerService;

    /**
     * create schedule
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processDefinitionCode process definition code
     * @param schedule scheduler
     * @param warningType warning type
     * @param warningGroupId warning group id
     * @param failureStrategy failure strategy
     * @param processInstancePriority process instance priority
     * @param workerGroup worker group
     * @return create result code
     */
    @ApiOperation(value = "createSchedule", notes = "CREATE_SCHEDULE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "processDefinitionCode", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "100"),
        @ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','timezoneId':'America/Phoenix','crontab':'0 0 3/6 * * ? *'}"),
        @ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", type = "WarningType"),
        @ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", type = "FailureStrategy"),
        @ApiImplicitParam(name = "workerGroupId", value = "WORKER_GROUP_ID", dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "processInstancePriority", value = "PROCESS_INSTANCE_PRIORITY", type = "Priority"),
    })
    @PostMapping("/create")
    @ResponseStatus(HttpStatus.CREATED)
    @ApiException(CREATE_SCHEDULE_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result createSchedule(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
                                 @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                 @RequestParam(value = "processDefinitionCode") long processDefinitionCode,
                                 @RequestParam(value = "schedule") String schedule,
                                 @RequestParam(value = "warningType", required = false, defaultValue = DEFAULT_WARNING_TYPE) WarningType warningType,
                                 @RequestParam(value = "warningGroupId", required = false, defaultValue = DEFAULT_NOTIFY_GROUP_ID) int warningGroupId,
                                 @RequestParam(value = "failureStrategy", required = false, defaultValue = DEFAULT_FAILURE_POLICY) FailureStrategy failureStrategy,
                                 @RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
                                 @RequestParam(value = "processInstancePriority", required = false, defaultValue = DEFAULT_PROCESS_INSTANCE_PRIORITY) Priority processInstancePriority) {
        Map<String, Object> result = schedulerService.insertSchedule(loginUser, projectCode, processDefinitionCode, schedule,
                warningType, warningGroupId, failureStrategy, processInstancePriority, workerGroup);
        return returnDataList(result);
    }

    /**
     * updateProcessInstance schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param id scheduler id
     * @param schedule scheduler
     * @param warningType warning type
     * @param warningGroupId warning group id
     * @param failureStrategy failure strategy
     * @param workerGroup worker group
     * @param processInstancePriority process instance priority
     * @return update result code
     */
    @ApiOperation(value = "updateSchedule", notes = "UPDATE_SCHEDULE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"),
        @ApiImplicitParam(name = "warningType", value = "WARNING_TYPE", type = "WarningType"),
        @ApiImplicitParam(name = "warningGroupId", value = "WARNING_GROUP_ID", dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "failureStrategy", value = "FAILURE_STRATEGY", type = "FailureStrategy"),
        @ApiImplicitParam(name = "workerGroupId", value = "WORKER_GROUP_ID", dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "processInstancePriority", value = "PROCESS_INSTANCE_PRIORITY", type = "Priority"),
    })
    @PostMapping("/update")
    @ApiException(UPDATE_SCHEDULE_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result updateSchedule(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
                                 @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                                 @RequestParam(value = "id") Integer id,
                                 @RequestParam(value = "schedule") String schedule,
                                 @RequestParam(value = "warningType", required = false, defaultValue = DEFAULT_WARNING_TYPE) WarningType warningType,
                                 @RequestParam(value = "warningGroupId", required = false) int warningGroupId,
                                 @RequestParam(value = "failureStrategy", required = false, defaultValue = "END") FailureStrategy failureStrategy,
                                 @RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
                                 @RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority) {
        Map<String, Object> result = schedulerService.updateSchedule(loginUser, projectName, id, schedule,
                warningType, warningGroupId, failureStrategy, null, processInstancePriority, workerGroup);
        return returnDataList(result);
    }

    /**
     * publish schedule setScheduleState
     *
     * @param loginUser login user
     * @param projectName project name
     * @param id scheduler id
     * @return publish result code
     */
    @ApiOperation(value = "online", notes = "ONLINE_SCHEDULE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100")
    })
    @PostMapping("/online")
    @ApiException(PUBLISH_SCHEDULE_ONLINE_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result online(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
                         @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                         @RequestParam("id") Integer id) {
        Map<String, Object> result = schedulerService.setScheduleState(loginUser, projectName, id, ReleaseState.ONLINE);
        return returnDataList(result);
    }

    /**
     * offline schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param id schedule id
     * @return operation result code
     */
    @ApiOperation(value = "offline", notes = "OFFLINE_SCHEDULE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "id", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100")
    })
    @PostMapping("/offline")
    @ApiException(OFFLINE_SCHEDULE_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result offline(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
                          @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                          @RequestParam("id") Integer id) {
        Map<String, Object> result = schedulerService.setScheduleState(loginUser, projectName, id, ReleaseState.OFFLINE);
        return returnDataList(result);
    }

    /**
     * query schedule list paging
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processDefinitionId process definition id
     * @param pageNo page number
     * @param pageSize page size
     * @param searchVal search value
     * @return schedule list page
     */
    @ApiOperation(value = "queryScheduleListPaging", notes = "QUERY_SCHEDULE_LIST_PAGING_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "processDefinitionId", value = "PROCESS_DEFINITION_ID", required = true, dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", type = "String"),
        @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "100"),
        @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "100")
    })
    @GetMapping("/list-paging")
    @ApiException(QUERY_SCHEDULE_LIST_PAGING_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result queryScheduleListPaging(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
                                          @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                                          @RequestParam Integer processDefinitionId,
                                          @RequestParam(value = "searchVal", required = false) String searchVal,
                                          @RequestParam("pageNo") Integer pageNo,
                                          @RequestParam("pageSize") Integer pageSize) {
        Map<String, Object> result = checkPageParams(pageNo, pageSize);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return returnDataListPaging(result);
        }
        searchVal = ParameterUtils.handleEscapes(searchVal);
        result = schedulerService.querySchedule(loginUser, projectName, processDefinitionId, searchVal, pageNo, pageSize);
        return returnDataListPaging(result);
    }

    /**
     * delete schedule by id
     *
     * @param loginUser login user
     * @param projectName project name
     * @param scheduleId schedule id
     * @return delete result code
     */
    @ApiOperation(value = "deleteScheduleById", notes = "OFFLINE_SCHEDULE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "scheduleId", value = "SCHEDULE_ID", required = true, dataType = "Int", example = "100")
    })
    @GetMapping(value = "/delete")
    @ResponseStatus(HttpStatus.OK)
    @ApiException(DELETE_SCHEDULE_CRON_BY_ID_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result deleteScheduleById(@RequestAttribute(value = SESSION_USER) User loginUser,
                                     @PathVariable String projectName,
                                     @RequestParam("scheduleId") Integer scheduleId) {
        Map<String, Object> result = schedulerService.deleteScheduleById(loginUser, projectName, scheduleId);
        return returnDataList(result);
    }

    /**
     * query schedule list
     *
     * @param loginUser login user
     * @param projectName project name
     * @return schedule list
     */
    @ApiOperation(value = "queryScheduleList", notes = "QUERY_SCHEDULE_LIST_NOTES")
    @PostMapping("/list")
    @ApiException(QUERY_SCHEDULE_LIST_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result queryScheduleList(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
                                    @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName) {
        Map<String, Object> result = schedulerService.queryScheduleList(loginUser, projectName);
        return returnDataList(result);
    }

    /**
     * preview schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param schedule schedule expression
     * @return the next five fire times
     */
    @ApiOperation(value = "previewSchedule", notes = "PREVIEW_SCHEDULE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "schedule", value = "SCHEDULE", dataType = "String", example = "{'startTime':'2019-06-10 00:00:00','endTime':'2019-06-13 00:00:00','crontab':'0 0 3/6 * * ? *'}"),
    })
    @PostMapping("/preview")
    @ResponseStatus(HttpStatus.CREATED)
    @ApiException(PREVIEW_SCHEDULE_ERROR)
    @AccessLogAnnotation(ignoreRequestArgs = "loginUser")
    public Result previewSchedule(@ApiIgnore @RequestAttribute(value = SESSION_USER) User loginUser,
                                  @ApiParam(name = "projectName", value = "PROJECT_NAME", required = true) @PathVariable String projectName,
                                  @RequestParam(value = "schedule") String schedule) {
        Map<String, Object> result = schedulerService.previewSchedule(loginUser, projectName, schedule);
        return returnDataList(result);
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,511
[Feature][JsonSplit-api] schedule update interface
From #5498: remove the request parameter workerGroupId, including from the front end and the controller interface.
https://github.com/apache/dolphinscheduler/issues/5511
https://github.com/apache/dolphinscheduler/pull/5761
d382a7ba8c454b41944958c6e42692843a765234
cfa22d7c89bcd8e35b8a286b39b67b9b36b3b4dc
2021-05-18T13:58:16Z
java
2021-07-07T10:15:19Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/SchedulerService.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.dao.entity.User;

import java.util.Map;

/**
 * scheduler service
 */
public interface SchedulerService {

    /**
     * save schedule
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processDefineCode process definition code
     * @param schedule scheduler
     * @param warningType warning type
     * @param warningGroupId warning group id
     * @param failureStrategy failure strategy
     * @param processInstancePriority process instance priority
     * @param workerGroup worker group
     * @return create result code
     */
    Map<String, Object> insertSchedule(User loginUser,
                                       long projectCode,
                                       long processDefineCode,
                                       String schedule,
                                       WarningType warningType,
                                       int warningGroupId,
                                       FailureStrategy failureStrategy,
                                       Priority processInstancePriority,
                                       String workerGroup);

    /**
     * updateProcessInstance schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param id scheduler id
     * @param scheduleExpression scheduler
     * @param warningType warning type
     * @param warningGroupId warning group id
     * @param failureStrategy failure strategy
     * @param workerGroup worker group
     * @param processInstancePriority process instance priority
     * @param scheduleStatus schedule status
     * @return update result code
     */
    Map<String, Object> updateSchedule(User loginUser,
                                       String projectName,
                                       Integer id,
                                       String scheduleExpression,
                                       WarningType warningType,
                                       int warningGroupId,
                                       FailureStrategy failureStrategy,
                                       ReleaseState scheduleStatus,
                                       Priority processInstancePriority,
                                       String workerGroup);

    /**
     * set schedule online or offline
     *
     * @param loginUser login user
     * @param projectName project name
     * @param id scheduler id
     * @param scheduleStatus schedule status
     * @return publish result code
     */
    Map<String, Object> setScheduleState(User loginUser,
                                         String projectName,
                                         Integer id,
                                         ReleaseState scheduleStatus);

    /**
     * query schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processDefineId process definition id
     * @param pageNo page number
     * @param pageSize page size
     * @param searchVal search value
     * @return schedule list page
     */
    Map<String, Object> querySchedule(User loginUser,
                                      String projectName,
                                      Integer processDefineId,
                                      String searchVal,
                                      Integer pageNo,
                                      Integer pageSize);

    /**
     * query schedule list
     *
     * @param loginUser login user
     * @param projectName project name
     * @return schedule list
     */
    Map<String, Object> queryScheduleList(User loginUser, String projectName);

    /**
     * delete schedule
     *
     * @param projectId project id
     * @param scheduleId schedule id
     * @throws RuntimeException runtime exception
     */
    void deleteSchedule(int projectId, int scheduleId);

    /**
     * delete schedule by id
     *
     * @param loginUser login user
     * @param projectName project name
     * @param scheduleId schedule id
     * @return delete result code
     */
    Map<String, Object> deleteScheduleById(User loginUser, String projectName, Integer scheduleId);

    /**
     * preview schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param schedule schedule expression
     * @return the next five fire times
     */
    Map<String, Object> previewSchedule(User loginUser, String projectName, String schedule);
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,511
[Feature][JsonSplit-api] schedule update interface
From #5498: remove the request parameter workerGroupId, including from the front end and the controller interface.
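At the service layer the same change shows up as a `String workerGroup` parameter. A minimal excerpt of the updated signature, taken from the interface file in this record:

```java
// workerGroup carries the worker group *name*; the old workerGroupId int is gone.
Map<String, Object> updateSchedule(User loginUser,
                                   String projectName,
                                   Integer id,
                                   String scheduleExpression,
                                   WarningType warningType,
                                   int warningGroupId,
                                   FailureStrategy failureStrategy,
                                   ReleaseState scheduleStatus,
                                   Priority processInstancePriority,
                                   String workerGroup);
```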
https://github.com/apache/dolphinscheduler/issues/5511
https://github.com/apache/dolphinscheduler/pull/5761
d382a7ba8c454b41944958c6e42692843a765234
cfa22d7c89bcd8e35b8a286b39b67b9b36b3b4dc
2021-05-18T13:58:16Z
java
2021-07-07T10:15:19Z
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/SchedulerServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import org.apache.dolphinscheduler.api.dto.ScheduleParam;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.MonitorService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.ProcessScheduleJob;
import org.apache.dolphinscheduler.service.quartz.QuartzExecutors;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;

import java.text.ParseException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;

/**
 * scheduler service impl
 */
@Service
public class SchedulerServiceImpl extends BaseServiceImpl implements SchedulerService {

    private static final Logger logger = LoggerFactory.getLogger(SchedulerServiceImpl.class);

    @Autowired
    private ProjectService projectService;

    @Autowired
    private ExecutorService executorService;

    @Autowired
    private MonitorService monitorService;

    @Autowired
    private ProcessService processService;

    @Autowired
    private ScheduleMapper scheduleMapper;

    @Autowired
    private ProjectMapper projectMapper;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;

    /**
     * save schedule
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processDefineCode process definition code
     * @param schedule scheduler
     * @param warningType warning type
     * @param warningGroupId warning group id
     * @param failureStrategy failure strategy
     * @param processInstancePriority process instance priority
     * @param workerGroup worker group
     * @return create result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> insertSchedule(User loginUser,
                                              long projectCode,
                                              long processDefineCode,
                                              String schedule,
                                              WarningType warningType,
                                              int warningGroupId,
                                              FailureStrategy failureStrategy,
                                              Priority processInstancePriority,
                                              String workerGroup) {

        Map<String, Object> result = new HashMap<>();

        Project project = projectMapper.queryByCode(projectCode);

        // check project auth
        boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
        if (!hasProjectAndPerm) {
            return result;
        }

        // check work flow define release state
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefineCode);
        result = executorService.checkProcessDefinitionValid(processDefinition, processDefineCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }

        Schedule scheduleObj = new Schedule();
        Date now = new Date();

        scheduleObj.setProjectName(project.getName());
        scheduleObj.setProcessDefinitionId(processDefinition.getId());
        scheduleObj.setProcessDefinitionName(processDefinition.getName());

        ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
        if (DateUtils.differSec(scheduleParam.getStartTime(), scheduleParam.getEndTime()) == 0) {
            logger.warn("The start time must not be the same as the end time");
            putMsg(result, Status.SCHEDULE_START_TIME_END_TIME_SAME);
            return result;
        }
        scheduleObj.setStartTime(scheduleParam.getStartTime());
        scheduleObj.setEndTime(scheduleParam.getEndTime());
        if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
            logger.error("{} verify failure", scheduleParam.getCrontab());
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, scheduleParam.getCrontab());
            return result;
        }
        scheduleObj.setCrontab(scheduleParam.getCrontab());
        scheduleObj.setTimezoneId(scheduleParam.getTimezoneId());
        scheduleObj.setWarningType(warningType);
        scheduleObj.setWarningGroupId(warningGroupId);
        scheduleObj.setFailureStrategy(failureStrategy);
        scheduleObj.setCreateTime(now);
        scheduleObj.setUpdateTime(now);
        scheduleObj.setUserId(loginUser.getId());
        scheduleObj.setUserName(loginUser.getUserName());
        scheduleObj.setReleaseState(ReleaseState.OFFLINE);
        scheduleObj.setProcessInstancePriority(processInstancePriority);
        scheduleObj.setWorkerGroup(workerGroup);
        scheduleMapper.insert(scheduleObj);

        /**
         * updateProcessInstance receivers and cc by process definition id
         */
        processDefinition.setWarningGroupId(warningGroupId);
        processDefinitionMapper.updateById(processDefinition);

        // return scheduler object with ID
        result.put(Constants.DATA_LIST, scheduleMapper.selectById(scheduleObj.getId()));
        putMsg(result, Status.SUCCESS);

        result.put("scheduleId", scheduleObj.getId());
        return result;
    }

    /**
     * updateProcessInstance schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param id scheduler id
     * @param scheduleExpression scheduler
     * @param warningType warning type
     * @param warningGroupId warning group id
     * @param failureStrategy failure strategy
     * @param workerGroup worker group
     * @param processInstancePriority process instance priority
     * @param scheduleStatus schedule status
     * @return update result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> updateSchedule(User loginUser,
                                              String projectName,
                                              Integer id,
                                              String scheduleExpression,
                                              WarningType warningType,
                                              int warningGroupId,
                                              FailureStrategy failureStrategy,
                                              ReleaseState scheduleStatus,
                                              Priority processInstancePriority,
                                              String workerGroup) {
        Map<String, Object> result = new HashMap<>();

        Project project = projectMapper.queryByName(projectName);

        // check project auth
        boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
        if (!hasProjectAndPerm) {
            return result;
        }

        // check schedule exists
        Schedule schedule = scheduleMapper.selectById(id);

        if (schedule == null) {
            putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, id);
            return result;
        }

        ProcessDefinition processDefinition = processService.findProcessDefineById(schedule.getProcessDefinitionId());
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, schedule.getProcessDefinitionId());
            return result;
        }

        /**
         * scheduling on-line status forbid modification
         */
        if (checkValid(result, schedule.getReleaseState() == ReleaseState.ONLINE, Status.SCHEDULE_CRON_ONLINE_FORBID_UPDATE)) {
            return result;
        }

        Date now = new Date();

        // updateProcessInstance param
        if (StringUtils.isNotEmpty(scheduleExpression)) {
            ScheduleParam scheduleParam = JSONUtils.parseObject(scheduleExpression, ScheduleParam.class);
            if (DateUtils.differSec(scheduleParam.getStartTime(), scheduleParam.getEndTime()) == 0) {
                logger.warn("The start time must not be the same as the end time");
                putMsg(result, Status.SCHEDULE_START_TIME_END_TIME_SAME);
                return result;
            }
            schedule.setStartTime(scheduleParam.getStartTime());
            schedule.setEndTime(scheduleParam.getEndTime());
            if (!org.quartz.CronExpression.isValidExpression(scheduleParam.getCrontab())) {
                putMsg(result, Status.SCHEDULE_CRON_CHECK_FAILED, scheduleParam.getCrontab());
                return result;
            }
            schedule.setCrontab(scheduleParam.getCrontab());
            schedule.setTimezoneId(scheduleParam.getTimezoneId());
        }

        if (warningType != null) {
            schedule.setWarningType(warningType);
        }

        schedule.setWarningGroupId(warningGroupId);

        if (failureStrategy != null) {
            schedule.setFailureStrategy(failureStrategy);
        }

        if (scheduleStatus != null) {
            schedule.setReleaseState(scheduleStatus);
        }
        schedule.setWorkerGroup(workerGroup);
        schedule.setUpdateTime(now);
        schedule.setProcessInstancePriority(processInstancePriority);
        scheduleMapper.updateById(schedule);

        /**
         * updateProcessInstance recipients and cc by process definition ID
         */
        processDefinition.setWarningGroupId(warningGroupId);

        processDefinitionMapper.updateById(processDefinition);

        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * set schedule online or offline
     *
     * @param loginUser login user
     * @param projectName project name
     * @param id scheduler id
     * @param scheduleStatus schedule status
     * @return publish result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> setScheduleState(User loginUser,
                                                String projectName,
                                                Integer id,
                                                ReleaseState scheduleStatus) {
        Map<String, Object> result = new HashMap<>();

        Project project = projectMapper.queryByName(projectName);
        // check project auth
        boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
        if (!hasProjectAndPerm) {
            return result;
        }

        // check schedule exists
        Schedule scheduleObj = scheduleMapper.selectById(id);

        if (scheduleObj == null) {
            putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, id);
            return result;
        }
        // check schedule release state
        if (scheduleObj.getReleaseState() == scheduleStatus) {
            logger.info("schedule release is already {}, no need to change schedule id: {} from {} to {}",
                    scheduleObj.getReleaseState(), scheduleObj.getId(), scheduleObj.getReleaseState(), scheduleStatus);
            putMsg(result, Status.SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE, scheduleStatus);
            return result;
        }
        ProcessDefinition processDefinition = processService.findProcessDefineById(scheduleObj.getProcessDefinitionId());
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, scheduleObj.getProcessDefinitionId());
            return result;
        }

        if (scheduleStatus == ReleaseState.ONLINE) {
            // check process definition release state
            if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
                logger.info("not release process definition id: {} , name : {}",
                        processDefinition.getId(), processDefinition.getName());
                putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinition.getName());
                return result;
            }
            // check sub process definition release state
            List<Integer> subProcessDefineIds = new ArrayList<>();
            processService.recurseFindSubProcessId(scheduleObj.getProcessDefinitionId(), subProcessDefineIds);
            Integer[] idArray = subProcessDefineIds.toArray(new Integer[subProcessDefineIds.size()]);
            if (!subProcessDefineIds.isEmpty()) {
                List<ProcessDefinition> subProcessDefinitionList =
                        processDefinitionMapper.queryDefinitionListByIdList(idArray);
                if (subProcessDefinitionList != null && !subProcessDefinitionList.isEmpty()) {
                    for (ProcessDefinition subProcessDefinition : subProcessDefinitionList) {
                        /**
                         * if there is no online process, exit directly
                         */
                        if (subProcessDefinition.getReleaseState() != ReleaseState.ONLINE) {
                            logger.info("not release process definition id: {} , name : {}",
                                    subProcessDefinition.getId(), subProcessDefinition.getName());
                            putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, subProcessDefinition.getId());
                            return result;
                        }
                    }
                }
            }
        }

        // check master server exists
        List<Server> masterServers = monitorService.getServerListFromRegistry(true);

        if (masterServers.isEmpty()) {
            putMsg(result, Status.MASTER_NOT_EXISTS);
            return result;
        }

        // set status
        scheduleObj.setReleaseState(scheduleStatus);

        scheduleMapper.updateById(scheduleObj);

        try {
            switch (scheduleStatus) {
                case ONLINE:
                    logger.info("Call master client set schedule online, project id: {}, flow id: {}, host: {}",
                            project.getId(), processDefinition.getId(), masterServers);
                    setSchedule(project.getId(), scheduleObj);
                    break;
                case OFFLINE:
                    logger.info("Call master client set schedule offline, project id: {}, flow id: {}, host: {}",
                            project.getId(), processDefinition.getId(), masterServers);
                    deleteSchedule(project.getId(), id);
                    break;
                default:
                    putMsg(result, Status.SCHEDULE_STATUS_UNKNOWN, scheduleStatus.toString());
                    return result;
            }
        } catch (Exception e) {
            result.put(Constants.MSG, scheduleStatus == ReleaseState.ONLINE ? "set online failure" : "set offline failure");
            throw new ServiceException(result.get(Constants.MSG).toString());
        }

        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param processDefineId process definition id
     * @param pageNo page number
     * @param pageSize page size
     * @param searchVal search value
     * @return schedule list page
     */
    @Override
    public Map<String, Object> querySchedule(User loginUser, String projectName, Integer processDefineId,
                                             String searchVal, Integer pageNo, Integer pageSize) {
        HashMap<String, Object> result = new HashMap<>();

        Project project = projectMapper.queryByName(projectName);

        // check project auth
        boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
        if (!hasProjectAndPerm) {
            return result;
        }

        ProcessDefinition processDefinition = processService.findProcessDefineById(processDefineId);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefineId);
            return result;
        }
        Page<Schedule> page = new Page<>(pageNo, pageSize);
        IPage<Schedule> scheduleIPage = scheduleMapper.queryByProcessDefineIdPaging(page, processDefineId, searchVal);

        PageInfo<Schedule> pageInfo = new PageInfo<>(pageNo, pageSize);
        pageInfo.setTotalCount((int) scheduleIPage.getTotal());
        pageInfo.setLists(scheduleIPage.getRecords());
        result.put(Constants.DATA_LIST, pageInfo);
        putMsg(result, Status.SUCCESS);

        return result;
    }

    /**
     * query schedule list
     *
     * @param loginUser login user
     * @param projectName project name
     * @return schedule list
     */
    @Override
    public Map<String, Object> queryScheduleList(User loginUser, String projectName) {
        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        // check project auth
        boolean hasProjectAndPerm = projectService.hasProjectAndPerm(loginUser, project, result);
        if (!hasProjectAndPerm) {
            return result;
        }

        List<Schedule> schedules = scheduleMapper.querySchedulerListByProjectName(projectName);

        result.put(Constants.DATA_LIST, schedules);
        putMsg(result, Status.SUCCESS);

        return result;
    }

    public void setSchedule(int projectId, Schedule schedule) {
        logger.info("set schedule, project id: {}, scheduleId: {}", projectId, schedule.getId());
        QuartzExecutors.getInstance().addJob(ProcessScheduleJob.class, projectId, schedule);
    }

    /**
     * delete schedule
     *
     * @param projectId project id
     * @param scheduleId schedule id
     * @throws RuntimeException runtime exception
     */
    @Override
    public void deleteSchedule(int projectId, int scheduleId) {
        logger.info("delete schedules of project id:{}, schedule id:{}", projectId, scheduleId);

        String jobName = QuartzExecutors.buildJobName(scheduleId);
        String jobGroupName = QuartzExecutors.buildJobGroupName(projectId);

        if (!QuartzExecutors.getInstance().deleteJob(jobName, jobGroupName)) {
            logger.warn("set offline failure: projectId:{}, scheduleId:{}", projectId, scheduleId);
            throw new ServiceException("set offline failure");
        }
    }

    /**
     * check valid
     *
     * @param result result
     * @param bool bool
     * @param status status
     * @return check result code
     */
    private boolean checkValid(Map<String, Object> result, boolean bool, Status status) {
        // timeout is valid
        if (bool) {
            putMsg(result, status);
            return true;
        }
        return false;
    }

    /**
     * delete schedule by id
     *
     * @param loginUser login user
     * @param projectName project name
     * @param scheduleId schedule id
     * @return delete result code
     */
    @Override
    public Map<String, Object> deleteScheduleById(User loginUser, String projectName, Integer scheduleId) {

        Map<String, Object> result = new HashMap<>();
        Project project = projectMapper.queryByName(projectName);

        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            return checkResult;
        }

        Schedule schedule = scheduleMapper.selectById(scheduleId);

        if (schedule == null) {
            putMsg(result, Status.SCHEDULE_CRON_NOT_EXISTS, scheduleId);
            return result;
        }

        // Determine if the login user is the owner of the schedule
        if (loginUser.getId() != schedule.getUserId()
                && loginUser.getUserType() != UserType.ADMIN_USER) {
            putMsg(result, Status.USER_NO_OPERATION_PERM);
            return result;
        }

        // check schedule is already online
        if (schedule.getReleaseState() == ReleaseState.ONLINE) {
            putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
            return result;
        }

        int delete = scheduleMapper.deleteById(scheduleId);

        if (delete > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
        }
        return result;
    }

    /**
     * preview schedule
     *
     * @param loginUser login user
     * @param projectName project name
     * @param schedule schedule expression
     * @return the next five fire times
     */
    @Override
    public Map<String, Object> previewSchedule(User loginUser, String projectName, String schedule) {
        Map<String, Object> result = new HashMap<>();
        CronExpression cronExpression;
        ScheduleParam scheduleParam = JSONUtils.parseObject(schedule, ScheduleParam.class);
        Date now = new Date();

        Date startTime = now.after(scheduleParam.getStartTime()) ? now : scheduleParam.getStartTime();
        Date endTime = scheduleParam.getEndTime();
        try {
            cronExpression = CronUtils.parse2CronExpression(scheduleParam.getCrontab());
        } catch (ParseException e) {
            logger.error(e.getMessage(), e);
            putMsg(result, Status.PARSE_TO_CRON_EXPRESSION_ERROR);
            return result;
        }
        List<Date> selfFireDateList = CronUtils.getSelfFireDateList(startTime, endTime, cronExpression, Constants.PREVIEW_SCHEDULE_EXECUTE_COUNT);
        result.put(Constants.DATA_LIST, selfFireDateList.stream().map(DateUtils::dateToString));
        putMsg(result, Status.SUCCESS);
        return result;
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,511
[Feature][JsonSplit-api] schedule update interface
From #5498: remove the request parameter workerGroupId, including from the front end and the controller interface.
https://github.com/apache/dolphinscheduler/issues/5511
https://github.com/apache/dolphinscheduler/pull/5761
d382a7ba8c454b41944958c6e42692843a765234
cfa22d7c89bcd8e35b8a286b39b67b9b36b3b4dc
2021-05-18T13:58:16Z
java
2021-07-07T10:15:19Z
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/AbstractControllerTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.controller;

import static org.mockito.Mockito.doNothing;

import java.text.MessageFormat;
import java.util.Map;

import org.apache.dolphinscheduler.api.ApiApplicationServer;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.SessionService;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.service.registry.RegistryClient;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import org.springframework.web.context.WebApplicationContext;

/**
 * abstract controller test
 */
@RunWith(SpringRunner.class)
@SpringBootTest(classes = ApiApplicationServer.class)
public class AbstractControllerTest {

    public static final String SESSION_ID = "sessionId";

    protected MockMvc mockMvc;

    @Autowired
    private WebApplicationContext webApplicationContext;

    @Autowired
    private SessionService sessionService;

    protected User user;

    protected String sessionId;

    @MockBean
    RegistryClient registryClient;

    @Before
    public void setUp() {
        doNothing().when(registryClient).init();
        mockMvc = MockMvcBuilders.webAppContextSetup(webApplicationContext).build();
        createSession();
    }

    @After
    public void after() throws Exception {
        sessionService.signOut("127.0.0.1", user);
    }

    private void createSession() {
        User loginUser = new User();
        loginUser.setId(1);
        loginUser.setUserType(UserType.GENERAL_USER);

        user = loginUser;

        String session = sessionService.createSession(loginUser, "127.0.0.1");
        sessionId = session;
        Assert.assertTrue(StringUtils.isNotEmpty(session));
    }

    public void putMsg(Map<String, Object> result, Status status, Object... statusParams) {
        result.put(Constants.STATUS, status);
        if (statusParams != null && statusParams.length > 0) {
            result.put(Constants.MSG, MessageFormat.format(status.getMsg(), statusParams));
        } else {
            result.put(Constants.MSG, status.getMsg());
        }
    }
}