status (stringclasses, 1 value) | repo_name (stringclasses, 31 values) | repo_url (stringclasses, 31 values) | issue_id (int64, 1-104k) | title (stringlengths, 4-369) | body (stringlengths, 0-254k, ⌀ nullable) | issue_url (stringlengths, 37-56) | pull_url (stringlengths, 37-54) | before_fix_sha (stringlengths, 40) | after_fix_sha (stringlengths, 40) | report_datetime (timestamp[us, tz=UTC]) | language (stringclasses, 5 values) | commit_datetime (timestamp[us, tz=UTC]) | updated_file (stringlengths, 4-188) | file_content (stringlengths, 0-5.12M)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,463 |
[Bug][api] rename the udf resource file associated with the udf function, Failed to execute hive task
|
1. Rename the udf resource file associated with the udf function


2. Executing the Hive task fails

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3463
|
https://github.com/apache/dolphinscheduler/pull/3482
|
a678c827600d44623f30311574b8226c1c59ace2
|
c8322482bbd021a89c407809abfcdd50cf3b2dc6
| 2020-08-11T03:37:09Z |
java
| 2020-08-13T08:19:11Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* resources service
*/
@Service
public class ResourcesService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesService.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirecotry(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
// check whether the resource name exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String fullName, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
if (resources != null && resources.size() > 0) {
return true;
}
return false;
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @param file resource file
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type,
MultipartFile file) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
//check whether the resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
if (file != null) {
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(FileUtils.suffix(originFullName))) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
if (!resource.isDirectory()) {
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
Date now = new Date();
resource.setAlias(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
if (file != null) {
resource.setFileName(file.getOriginalFilename());
resource.setSize(file.getSize());
}
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory()) {
List<Integer> childrenResource = listAllChildren(resource,false);
if (CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
}
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>(5);
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name) && file == null) {
return result;
}
if (file != null) {
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
if (!fullName.equals(originFullName)) {
try {
HadoopUtils.getInstance().delete(originHdfsFileName,false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(String.format("delete resource: %s failed.", originFullName));
}
}
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
public Map<String, Object> queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(5);
Page<Resource> page = new Page(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId= 0;
}
if (direcotryId != -1) {
Resource directory = resourcesMapper.selectById(direcotryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId,direcotryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//get resource and hdfs path
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// get all resource ids of process definitions that have been released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
//if resource type is UDF, need to check whether it is bound by a UDF function
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if(tenant != null){
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
}else{
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
* verify resource by full name or pid and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return true if the resource full name or pid not exists, otherwise return false
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check preview or not by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* update resource content
*
* @param resourceId resource id
* @param content content
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check can edit by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// write file fail
logger.error("file {} fail, content is {}", localFilename, content);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws Exception exception
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if(user == null){
logger.error("user id {} not exists", userId);
throw new RuntimeException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if(tenant == null){
logger.error("tenant id {} not exists", user.getTenantId());
throw new RuntimeException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* list all file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list ;
if (resourceList != null && resourceList.size() > 0) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
}else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
//only admin can operate
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet = null;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)){
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get authorized resource list
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet = null;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return tenant code, or null if the user or tenant does not exist
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null){
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether add self to children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
List<Integer> children = resourcesMapper.listChildren(resourceId);
for (int childId : children) {
childList.add(childId);
listAllChildren(childId, childList);
}
}
}
|
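Note on the row above: `updateResource` renames the resource row and rewrites the full names of child resources, but nothing in this file updates the UDF functions bound to a renamed UDF resource, which matches the reported symptom (the Hive task still references the old resource name). The sketch below is a minimal, hypothetical illustration of how the new full name could be propagated, reusing only calls that appear in this issue's rows (`listUdfByResourceId`, mybatis-plus `updateById`, and the `UdfFunc` setters shown in the test file). It is not necessarily the change made in PR #3482; the class and method names are invented, and it assumes `resource_name` stores the resource full name.

```java
import java.util.Date;
import java.util.List;

import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;

public class UdfResourceRenameSketch {

    /**
     * Keep the resource_name stored on bound UDF functions in sync after a
     * UDF-type resource has been renamed (hypothetical helper).
     */
    public static void propagateResourceRename(UdfFuncMapper udfFuncMapper,
                                               int resourceId,
                                               String newFullName) {
        // listUdfByResourceId is declared in UdfFuncMapper (see the next row)
        List<UdfFunc> boundUdfFuncs = udfFuncMapper.listUdfByResourceId(new Integer[]{resourceId});
        if (boundUdfFuncs == null || boundUdfFuncs.isEmpty()) {
            return; // no UDF function references this resource
        }
        Date now = new Date();
        for (UdfFunc udfFunc : boundUdfFuncs) {
            // assumption: resource_name holds the resource full name
            udfFunc.setResourceName(newFullName);
            udfFunc.setUpdateTime(now);
            udfFuncMapper.updateById(udfFunc); // inherited from mybatis-plus BaseMapper
        }
    }
}
```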
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,463 |
[Bug][api] rename the udf resource file associated with the udf function, Failed to execute hive task
|
1. Rename the udf resource file associated with the udf function


2. Executing the Hive task fails

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3463
|
https://github.com/apache/dolphinscheduler/pull/3482
|
a678c827600d44623f30311574b8226c1c59ace2
|
c8322482bbd021a89c407809abfcdd50cf3b2dc6
| 2020-08-11T03:37:09Z |
java
| 2020-08-13T08:19:11Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import org.apache.ibatis.annotations.Param;
import java.util.List;
/**
* udf function mapper interface
*/
public interface UdfFuncMapper extends BaseMapper<UdfFunc> {
/**
* select udf by id
* @param id udf id
* @return UdfFunc
*/
UdfFunc selectUdfById(@Param("id") int id);
/**
* query udf function by ids and function name
* @param ids ids
* @param funcNames funcNames
* @return udf function list
*/
List<UdfFunc> queryUdfByIdStr(@Param("ids") int[] ids,
@Param("funcNames") String funcNames);
/**
* udf function page
* @param page page
* @param userId userId
* @param searchVal searchVal
* @return udf function IPage
*/
IPage<UdfFunc> queryUdfFuncPaging(IPage<UdfFunc> page,
@Param("userId") int userId,
@Param("searchVal") String searchVal);
/**
* query udf function by type
* @param userId userId
* @param type type
* @return udf function list
*/
List<UdfFunc> getUdfFuncByType(@Param("userId") int userId,
@Param("type") Integer type);
/**
* query udf function except userId
* @param userId userId
* @return udf function list
*/
List<UdfFunc> queryUdfFuncExceptUserId(@Param("userId") int userId);
/**
* query authed udf function
* @param userId userId
* @return udf function list
*/
List<UdfFunc> queryAuthedUdfFunc(@Param("userId") int userId);
/**
* list authorized UDF function
* @param userId userId
* @param udfIds UDF function id array
* @return UDF function list
*/
<T> List<UdfFunc> listAuthorizedUdfFunc (@Param("userId") int userId,@Param("udfIds")T[] udfIds);
/**
* list UDF by resource id
* @param resourceIds resource id array
* @return UDF function list
*/
List<UdfFunc> listUdfByResourceId(@Param("resourceIds") Integer[] resourceIds);
/**
* list authorized UDF by resource id
* @param resourceIds resource id array
* @return UDF function list
*/
List<UdfFunc> listAuthorizedUdfByResourceId(@Param("userId") int userId,@Param("resourceIds") int[] resourceIds);
}
|
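If a batch update were preferred over per-row `updateById` calls, the mapper interface above could be extended along the lines of the sketch below. This is an assumption for illustration only: the method name `batchUpdateUdfFunc` and its signature are invented here, not taken from PR #3482, and it would need a matching `<update>` statement with the same id in `UdfFuncMapper.xml`.

```java
import java.util.List;

import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.ibatis.annotations.Param;

/**
 * Hypothetical extension of UdfFuncMapper, shown as a separate interface so
 * the original mapper above stays as-is.
 */
public interface UdfFuncMapperExtensionSketch {

    /**
     * batch update UDF functions, e.g. to rewrite resource_name for every
     * function bound to a renamed resource
     *
     * @param udfFuncList udf functions to update
     * @return affected row count
     */
    int batchUpdateUdfFunc(@Param("udfFuncList") List<UdfFunc> udfFuncList);
}
```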
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,463 |
[Bug][api] rename the udf resource file associated with the udf function, Failed to execute hive task
|
1. Rename the udf resource file associated with the udf function


2. Executing the Hive task fails

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3463
|
https://github.com/apache/dolphinscheduler/pull/3482
|
a678c827600d44623f30311574b8226c1c59ace2
|
c8322482bbd021a89c407809abfcdd50cf3b2dc6
| 2020-08-11T03:37:09Z |
java
| 2020-08-13T08:19:11Z |
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml
|
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper">
<select id="selectUdfById" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where id = #{id}
</select>
<select id="queryUdfByIdStr" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where 1 = 1
<if test="ids != null and ids != ''">
and id in
<foreach collection="ids" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
<if test="funcNames != null and funcNames != ''">
and func_name = #{funcNames}
</if>
order by id asc
</select>
<select id="queryUdfFuncPaging" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where 1=1
<if test="searchVal!= null and searchVal != ''">
and func_name like concat('%', #{searchVal}, '%')
</if>
<if test="userId != 0">
and id in (
select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
union select id as udf_id from t_ds_udfs where user_id=#{userId})
</if>
order by create_time desc
</select>
<select id="getUdfFuncByType" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where type=#{type}
<if test="userId != 0">
and id in (
select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
union select id as udf_id from t_ds_udfs where user_id=#{userId})
</if>
</select>
<select id="queryUdfFuncExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where user_id <![CDATA[ <> ]]> #{userId}
</select>
<select id="queryAuthedUdfFunc" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
SELECT u.*
from t_ds_udfs u,t_ds_relation_udfs_user rel
WHERE u.id = rel.udf_id
AND rel.user_id = #{userId}
</select>
<select id="listAuthorizedUdfFunc" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where
id in (select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
union select id as udf_id from t_ds_udfs where user_id=#{userId})
<if test="udfIds != null and udfIds != ''">
and id in
<foreach collection="udfIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<select id="listUdfByResourceId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where 1=1
<if test="resourceIds != null and resourceIds != ''">
and resource_id in
<foreach collection="resourceIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
<select id="listAuthorizedUdfByResourceId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
select *
from t_ds_udfs
where
id in (select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
union select id as udf_id from t_ds_udfs where user_id=#{userId})
<if test="resourceIds != null and resourceIds != ''">
and resource_id in
<foreach collection="resourceIds" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
</select>
</mapper>
|
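One caveat when calling the statements above from Java: the `<if test="resourceIds != null and resourceIds != ''">` guards do not filter out a zero-length array, and an empty `<foreach>` would render an `in ()` clause that most databases reject. The wrapper below is a defensive sketch under that assumption (class and method names are invented); it simply skips the query when there is nothing to look up.

```java
import java.util.Collections;
import java.util.List;

import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;

public class UdfLookupSketch {

    /**
     * Only hit listUdfByResourceId when there is at least one resource id,
     * so the generated SQL never contains an empty "in ()" list.
     */
    public static List<UdfFunc> safeListUdfByResourceId(UdfFuncMapper udfFuncMapper,
                                                        Integer[] resourceIds) {
        if (resourceIds == null || resourceIds.length == 0) {
            return Collections.emptyList();
        }
        return udfFuncMapper.listUdfByResourceId(resourceIds);
    }
}
```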
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,463 |
[Bug][api] rename the udf resource file associated with the udf function, Failed to execute hive task
|
1. Rename the udf resource file associated with the udf function


2. Executing the Hive task fails

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3463
|
https://github.com/apache/dolphinscheduler/pull/3482
|
a678c827600d44623f30311574b8226c1c59ace2
|
c8322482bbd021a89c407809abfcdd50cf3b2dc6
| 2020-08-11T03:37:09Z |
java
| 2020-08-13T08:19:11Z |
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapperTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.dao.entity.UDFUser;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.annotation.Transactional;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import static java.util.stream.Collectors.toList;
@RunWith(SpringRunner.class)
@SpringBootTest
@Transactional
@Rollback(true)
public class UdfFuncMapperTest {
@Autowired
private UserMapper userMapper;
@Autowired
UdfFuncMapper udfFuncMapper;
@Autowired
UDFUserMapper udfUserMapper;
/**
* insert one udf
* @return UdfFunc
*/
private UdfFunc insertOne(){
UdfFunc udfFunc = new UdfFunc();
udfFunc.setUserId(1);
udfFunc.setFuncName("dolphin_udf_func");
udfFunc.setClassName("org.apache.dolphinscheduler.test.mr");
udfFunc.setType(UdfType.HIVE);
udfFunc.setResourceId(1);
udfFunc.setResourceName("dolphin_resource");
udfFunc.setCreateTime(new Date());
udfFunc.setUpdateTime(new Date());
udfFuncMapper.insert(udfFunc);
return udfFunc;
}
/**
* insert one udf
* @return
*/
private UdfFunc insertOne(User user){
UdfFunc udfFunc = new UdfFunc();
udfFunc.setUserId(user.getId());
udfFunc.setFuncName("dolphin_udf_func");
udfFunc.setClassName("org.apache.dolphinscheduler.test.mr");
udfFunc.setType(UdfType.HIVE);
udfFunc.setResourceId(1);
udfFunc.setResourceName("dolphin_resource");
udfFunc.setCreateTime(new Date());
udfFunc.setUpdateTime(new Date());
udfFuncMapper.insert(udfFunc);
return udfFunc;
}
/**
* insert one user
* @return User
*/
private User insertOneUser(){
User user = new User();
user.setUserName("user1");
user.setUserPassword("1");
user.setEmail("[email protected]");
user.setUserType(UserType.GENERAL_USER);
user.setCreateTime(new Date());
user.setTenantId(1);
user.setUpdateTime(new Date());
userMapper.insert(user);
return user;
}
/**
* insert one user
* @return User
*/
private User insertOneUser(String userName){
User user = new User();
user.setUserName(userName);
user.setUserPassword("1");
user.setEmail("[email protected]");
user.setUserType(UserType.GENERAL_USER);
user.setCreateTime(new Date());
user.setTenantId(1);
user.setUpdateTime(new Date());
userMapper.insert(user);
return user;
}
/**
* insert UDFUser
* @param user user
* @param udfFunc udf func
* @return UDFUser
*/
private UDFUser insertOneUDFUser(User user,UdfFunc udfFunc){
UDFUser udfUser = new UDFUser();
udfUser.setUdfId(udfFunc.getId());
udfUser.setUserId(user.getId());
udfUser.setCreateTime(new Date());
udfUser.setUpdateTime(new Date());
udfUserMapper.insert(udfUser);
return udfUser;
}
/**
* create general user
* @return User
*/
private User createGeneralUser(String userName){
User user = new User();
user.setUserName(userName);
user.setUserPassword("1");
user.setEmail("[email protected]");
user.setUserType(UserType.GENERAL_USER);
user.setCreateTime(new Date());
user.setTenantId(1);
user.setUpdateTime(new Date());
userMapper.insert(user);
return user;
}
/**
* test update
*/
@Test
public void testUpdate(){
//insertOne
UdfFunc udfFunc = insertOne();
udfFunc.setResourceName("dolphin_resource_update");
udfFunc.setResourceId(2);
udfFunc.setClassName("org.apache.dolphinscheduler.test.mrUpdate");
udfFunc.setUpdateTime(new Date());
//update
int update = udfFuncMapper.updateById(udfFunc);
Assert.assertEquals(update, 1);
}
/**
* test delete
*/
@Test
public void testDelete(){
//insertOne
UdfFunc udfFunc = insertOne();
//delete
int delete = udfFuncMapper.deleteById(udfFunc.getId());
Assert.assertEquals(delete, 1);
}
/**
* test query
*/
@Test
public void testQuery(){
//insertOne
UdfFunc udfFunc = insertOne();
//query
List<UdfFunc> udfFuncList = udfFuncMapper.selectList(null);
Assert.assertNotEquals(udfFuncList.size(), 0);
}
/**
* test query udf by ids
*/
@Test
public void testQueryUdfByIdStr() {
//insertOne
UdfFunc udfFunc = insertOne();
//insertOne
UdfFunc udfFunc1 = insertOne();
int[] idArray = new int[]{udfFunc.getId(),udfFunc1.getId()};
//queryUdfByIdStr
List<UdfFunc> udfFuncList = udfFuncMapper.queryUdfByIdStr(idArray,"");
Assert.assertNotEquals(udfFuncList.size(), 0);
}
/**
* test page
*/
@Test
public void testQueryUdfFuncPaging() {
//insertOneUser
User user = insertOneUser();
//insertOne
UdfFunc udfFunc = insertOne(user);
//queryUdfFuncPaging
Page<UdfFunc> page = new Page(1,3);
IPage<UdfFunc> udfFuncIPage = udfFuncMapper.queryUdfFuncPaging(page,user.getId(),"");
Assert.assertNotEquals(udfFuncIPage.getTotal(), 0);
}
/**
* test get udffunc by type
*/
@Test
public void testGetUdfFuncByType() {
//insertOneUser
User user = insertOneUser();
//insertOne
UdfFunc udfFunc = insertOne(user);
//getUdfFuncByType
List<UdfFunc> udfFuncList = udfFuncMapper.getUdfFuncByType(user.getId(), udfFunc.getType().ordinal());
Assert.assertNotEquals(udfFuncList.size(), 0);
}
/**
* test query udf func except user id
*/
@Test
public void testQueryUdfFuncExceptUserId() {
//insertOneUser
User user1 = insertOneUser();
User user2 = insertOneUser("user2");
//insertOne
UdfFunc udfFunc1 = insertOne(user1);
UdfFunc udfFunc2 = insertOne(user2);
List<UdfFunc> udfFuncList = udfFuncMapper.queryUdfFuncExceptUserId(user1.getId());
Assert.assertNotEquals(udfFuncList.size(), 0);
}
/**
* test query authed udffunc
*/
@Test
public void testQueryAuthedUdfFunc() {
//insertOneUser
User user = insertOneUser();
//insertOne
UdfFunc udfFunc = insertOne(user);
//insertOneUDFUser
UDFUser udfUser = insertOneUDFUser(user, udfFunc);
//queryAuthedUdfFunc
List<UdfFunc> udfFuncList = udfFuncMapper.queryAuthedUdfFunc(user.getId());
Assert.assertNotEquals(udfFuncList.size(), 0);
}
@Test
public void testListAuthorizedUdfFunc(){
//create general user
User generalUser1 = createGeneralUser("user1");
User generalUser2 = createGeneralUser("user2");
//create udf function
UdfFunc udfFunc = insertOne(generalUser1);
UdfFunc unauthorizedUdfFunc = insertOne(generalUser2);
//udf function ids
Integer[] udfFuncIds = new Integer[]{udfFunc.getId(),unauthorizedUdfFunc.getId()};
List<UdfFunc> authorizedUdfFunc = udfFuncMapper.listAuthorizedUdfFunc(generalUser1.getId(), udfFuncIds);
Assert.assertEquals(generalUser1.getId(),udfFunc.getUserId());
Assert.assertNotEquals(generalUser1.getId(),unauthorizedUdfFunc.getUserId());
Assert.assertFalse(authorizedUdfFunc.stream().map(t -> t.getId()).collect(toList()).containsAll(Arrays.asList(udfFuncIds)));
//authorize object unauthorizedUdfFunc to generalUser1
insertOneUDFUser(generalUser1,unauthorizedUdfFunc);
authorizedUdfFunc = udfFuncMapper.listAuthorizedUdfFunc(generalUser1.getId(), udfFuncIds);
Assert.assertTrue(authorizedUdfFunc.stream().map(t -> t.getId()).collect(toList()).containsAll(Arrays.asList(udfFuncIds)));
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,324 |
[Bug][Master] there exists some problems in checking task dependency
|
**Describe the bug**
There are some problems in the Master-Server execution logic. Some nodes can execute normally even though their dependencies are not satisfied. As the screenshots below show, if you build the same workflow you will find the result is wrong.
**Screenshots**
As you can see, task B should not be executed, which is the opposite of the actual result. When task X is followed by a condition-type node, the tasks that depend on X will probably be executed anyway.

**Which version of Dolphin Scheduler:**
-[1.3.2-snapshot]
**Probable reason**
- it goes wrong because of the code in the red rectangle below; a simplified sketch of the expected dependency rule follows the screenshot.

|
https://github.com/apache/dolphinscheduler/issues/3324
|
https://github.com/apache/dolphinscheduler/pull/3473
|
927985c012342f0c9f1dde397d66b5737571fed2
|
5756d6b02910d139f00b4b412078f32c5ca14c54
| 2020-07-27T15:35:45Z |
java
| 2020-08-13T11:02:14Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import com.alibaba.fastjson.JSON;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.task.conditions.ConditionsParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.AlertManager;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* master exec thread, split dag
*/
public class MasterExecThread implements Runnable {
/**
* logger of MasterExecThread
*/
private static final Logger logger = LoggerFactory.getLogger(MasterExecThread.class);
/**
* process instance
*/
private ProcessInstance processInstance;
/**
* running TaskNode
*/
private final Map<MasterBaseTaskExecThread,Future<Boolean>> activeTaskNode = new ConcurrentHashMap<>();
/**
* task exec service
*/
private final ExecutorService taskExecService;
/**
* submit failure nodes
*/
private boolean taskFailedSubmit = false;
/**
* recover node id list
*/
private List<TaskInstance> recoverNodeIdList = new ArrayList<>();
/**
* error task list
*/
private Map<String,TaskInstance> errorTaskList = new ConcurrentHashMap<>();
/**
* complete task list
*/
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
/**
* ready to submit task list
*/
private Map<String, TaskInstance> readyToSubmitTaskList = new ConcurrentHashMap<>();
/**
* depend failed task map
*/
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
/**
* forbidden task map
*/
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
/**
* skip task map
*/
private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();
/**
* recover tolerance fault task list
*/
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
/**
* alert manager
*/
private AlertManager alertManager = new AlertManager();
/**
* the object of DAG
*/
private DAG<String,TaskNode,TaskNodeRelation> dag;
/**
* process service
*/
private ProcessService processService;
/**
* master config
*/
private MasterConfig masterConfig;
/**
* netty remoting client
*/
private NettyRemotingClient nettyRemotingClient;
/**
* constructor of MasterExecThread
* @param processInstance processInstance
* @param processService processService
* @param nettyRemotingClient nettyRemotingClient
*/
public MasterExecThread(ProcessInstance processInstance, ProcessService processService, NettyRemotingClient nettyRemotingClient){
this.processService = processService;
this.processInstance = processInstance;
this.masterConfig = SpringApplicationContext.getBean(MasterConfig.class);
int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread",
masterTaskExecNum);
this.nettyRemotingClient = nettyRemotingClient;
}
@Override
public void run() {
// process instance is null
if (processInstance == null){
logger.info("process instance is not exists");
return;
}
// check to see if it's done
if (processInstance.getState().typeIsFinished()){
logger.info("process instance is done : {}",processInstance.getId());
return;
}
try {
if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()){
// sub process complement data
executeComplementProcess();
}else{
// execute flow
executeProcess();
}
}catch (Exception e){
logger.error("master exec thread exception", e);
logger.error("process execute failed, process id:{}", processInstance.getId());
processInstance.setState(ExecutionStatus.FAILURE);
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
}finally {
taskExecService.shutdown();
// post handle
postHandle();
}
}
/**
* execute process
* @throws Exception exception
*/
private void executeProcess() throws Exception {
prepareProcess();
runProcess();
endProcess();
}
/**
* execute complement process
* @throws Exception exception
*/
private void executeComplementProcess() throws Exception {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
processService.saveProcessInstance(processInstance);
// get schedules
int processDefinitionId = processInstance.getProcessDefinitionId();
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
List<Date> listDate = Lists.newLinkedList();
if(!CollectionUtils.isEmpty(schedules)){
for (Schedule schedule : schedules) {
listDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedule.getCrontab()));
}
}
// get first fire date
Iterator<Date> iterator = null;
Date scheduleDate = null;
if(!CollectionUtils.isEmpty(listDate)) {
iterator = listDate.iterator();
scheduleDate = iterator.next();
processInstance.setScheduleTime(scheduleDate);
processService.updateProcessInstance(processInstance);
}else{
scheduleDate = processInstance.getScheduleTime();
if(scheduleDate == null){
scheduleDate = startDate;
}
}
while(Stopper.isRunning()){
logger.info("process {} start to complement {} data",
processInstance.getId(), DateUtils.dateToString(scheduleDate));
// prepare dag and other info
prepareProcess();
if(dag == null){
logger.error("process {} dag is null, please check out parameters",
processInstance.getId());
processInstance.setState(ExecutionStatus.SUCCESS);
processService.updateProcessInstance(processInstance);
return;
}
// execute process ,waiting for end
runProcess();
endProcess();
// process instance failure ,no more complements
if(!processInstance.getState().typeIsSuccess()){
logger.info("process {} state {}, complement not completely!",
processInstance.getId(), processInstance.getState());
break;
}
// current process instance success ,next execute
if(null == iterator){
// loop by day
scheduleDate = DateUtils.getSomeDay(scheduleDate, 1);
if(scheduleDate.after(endDate)){
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
}else{
// loop by schedule date
if(!iterator.hasNext()){
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
scheduleDate = iterator.next();
}
// flow end
// execute next process instance complement data
processInstance.setScheduleTime(scheduleDate);
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_START_NODE_STRING)){
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
}
processInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processInstance.getProcessDefinition().getGlobalParamMap(),
processInstance.getProcessDefinition().getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processInstance.setId(0);
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processService.saveProcessInstance(processInstance);
}
}
/**
* prepare process parameter
* @throws Exception exception
*/
private void prepareProcess() throws Exception {
// init task queue
initTaskQueue();
// gen process dag
buildFlowDag();
logger.info("prepare process :{} end", processInstance.getId());
}
/**
* process end handle
*/
private void endProcess() {
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
if(processInstance.getState().typeIsWaitingThread()){
processService.createRecoveryWaitingThreadCommand(null, processInstance);
}
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
alertManager.sendAlertProcessInstance(processInstance, taskInstances);
}
/**
* generate process dag
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
forbiddenTaskList = DagHelper.getForbiddenTaskNodeMaps(processInstance.getProcessInstanceJson());
// generate process to get DAG info
List<String> recoveryNameList = getRecoveryNodeNameList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(processInstance.getProcessInstanceJson(),
startNodeNameList, recoveryNameList, processInstance.getTaskDependType());
if(processDag == null){
logger.error("processDag is null");
return;
}
// generate process dag
dag = DagHelper.buildDagGraph(processDag);
}
/**
* init task queue
*/
private void initTaskQueue(){
taskFailedSubmit = false;
activeTaskNode.clear();
dependFailedTask.clear();
completeTaskList.clear();
errorTaskList.clear();
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance task : taskInstanceList){
if(task.isTaskComplete()){
completeTaskList.put(task.getName(), task);
}
if(task.getState().typeIsFailure() && !task.taskCanRetry()){
errorTaskList.put(task.getName(), task);
}
}
}
/**
* process post handle
*/
private void postHandle() {
logger.info("develop mode is: {}", CommonUtils.isDevelopMode());
if (!CommonUtils.isDevelopMode()) {
// get exec dir
String execLocalPath = org.apache.dolphinscheduler.common.utils.FileUtils
.getProcessExecDir(processInstance.getProcessDefinition().getProjectId(),
processInstance.getProcessDefinitionId(),
processInstance.getId());
try {
FileUtils.deleteDirectory(new File(execLocalPath));
} catch (IOException e) {
logger.error("delete exec dir failed ", e);
}
}
}
/**
* submit task to execute
* @param taskInstance task instance
* @return TaskInstance
*/
private TaskInstance submitTaskExec(TaskInstance taskInstance) {
MasterBaseTaskExecThread abstractExecThread = null;
if(taskInstance.isSubProcess()){
abstractExecThread = new SubProcessTaskExecThread(taskInstance);
}else if(taskInstance.isDependTask()){
abstractExecThread = new DependentTaskExecThread(taskInstance);
}else if(taskInstance.isConditionsTask()){
abstractExecThread = new ConditionsTaskExecThread(taskInstance);
}else {
abstractExecThread = new MasterTaskExecThread(taskInstance);
}
Future<Boolean> future = taskExecService.submit(abstractExecThread);
activeTaskNode.putIfAbsent(abstractExecThread, future);
return abstractExecThread.getTaskInstance();
}
/**
* find task instance in db.
* in case more than one task with the same name is submitted at the same time.
* @param taskName task name
* @return TaskInstance
*/
private TaskInstance findTaskIfExists(String taskName){
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
for(TaskInstance taskInstance : taskInstanceList){
if(taskInstance.getName().equals(taskName)){
return taskInstance;
}
}
return null;
}
/**
* encapsulate a task node into a task instance
* @param processInstance process instance
* @param nodeName node name
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, String nodeName,
TaskNode taskNode) {
TaskInstance taskInstance = findTaskIfExists(nodeName);
if(taskInstance == null){
taskInstance = new TaskInstance();
// task name
taskInstance.setName(nodeName);
// process instance define id
taskInstance.setProcessDefinitionId(processInstance.getProcessDefinitionId());
// task instance state
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
// process instance id
taskInstance.setProcessInstanceId(processInstance.getId());
// task instance node json
taskInstance.setTaskJson(JSON.toJSONString(taskNode));
// task instance type
taskInstance.setTaskType(taskNode.getType());
// task instance whether alert
taskInstance.setAlertFlag(Flag.NO);
// task instance start time
taskInstance.setStartTime(new Date());
// task instance flag
taskInstance.setFlag(Flag.YES);
// task instance retry times
taskInstance.setRetryTimes(0);
// max task instance retry times
taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
// retry task instance interval
taskInstance.setRetryInterval(taskNode.getRetryInterval());
// task instance priority
if(taskNode.getTaskInstancePriority() == null){
taskInstance.setTaskInstancePriority(Priority.MEDIUM);
}else{
taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
}
String processWorkerGroup = processInstance.getWorkerGroup();
processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
taskInstance.setWorkerGroup(processWorkerGroup);
}else {
taskInstance.setWorkerGroup(taskWorkerGroup);
}
}
return taskInstance;
}
/**
* if all of the task's dependencies are skipped, skip it too.
* @param taskNode
* @return
*/
private boolean isTaskNodeNeedSkip(TaskNode taskNode){
if(CollectionUtils.isEmpty(taskNode.getDepList())){
return false;
}
for(String depNode : taskNode.getDepList()){
if(!skipTaskNodeList.containsKey(depNode)){
return false;
}
}
return true;
}
/**
* mark task nodes as skipped if all of their dependencies are skipped
* @param taskNodesSkipList
*/
private void setTaskNodeSkip(List<String> taskNodesSkipList){
for(String skipNode : taskNodesSkipList){
skipTaskNodeList.putIfAbsent(skipNode, dag.getNode(skipNode));
Collection<String> postNodeList = DagHelper.getStartVertex(skipNode, dag, completeTaskList);
List<String> postSkipList = new ArrayList<>();
for(String post : postNodeList){
TaskNode postNode = dag.getNode(post);
if(isTaskNodeNeedSkip(postNode)){
postSkipList.add(post);
}
}
setTaskNodeSkip(postSkipList);
}
}
/**
* parse the condition task and find the branch to run;
* set the skip flag for the other branch.
* @param nodeName
* @return
*/
private List<String> parseConditionTask(String nodeName){
List<String> conditionTaskList = new ArrayList<>();
TaskNode taskNode = dag.getNode(nodeName);
if(!taskNode.isConditionsTask()){
return conditionTaskList;
}
ConditionsParameters conditionsParameters =
JSONUtils.parseObject(taskNode.getConditionResult(), ConditionsParameters.class);
TaskInstance taskInstance = completeTaskList.get(nodeName);
if(taskInstance == null){
logger.error("task instance {} cannot find, please check it!", nodeName);
return conditionTaskList;
}
if(taskInstance.getState().typeIsSuccess()){
conditionTaskList = conditionsParameters.getSuccessNode();
setTaskNodeSkip(conditionsParameters.getFailedNode());
}else if(taskInstance.getState().typeIsFailure()){
conditionTaskList = conditionsParameters.getFailedNode();
setTaskNodeSkip(conditionsParameters.getSuccessNode());
}else{
conditionTaskList.add(nodeName);
}
return conditionTaskList;
}
/**
* parse post node list of previous node
* if it is a condition node, return the branch according to the condition settings
* if a post node has completed, return the post nodes of that completed node
* @param previousNodeName
* @return
*/
private List<String> parsePostNodeList(String previousNodeName){
List<String> postNodeList = new ArrayList<>();
TaskNode taskNode = dag.getNode(previousNodeName);
if(taskNode != null && taskNode.isConditionsTask()){
return parseConditionTask(previousNodeName);
}
Collection<String> postNodeCollection = DagHelper.getStartVertex(previousNodeName, dag, completeTaskList);
List<String> postSkipList = new ArrayList<>();
// skip nodes that have already succeeded and parse their post nodes
// if it is a conditions node:
// 1. parse the branch to run according to the conditions setting
// 2. set the skip flag on the other branch
for(String postNode : postNodeCollection){
if(completeTaskList.containsKey(postNode)){
TaskInstance postTaskInstance = completeTaskList.get(postNode);
if(dag.getNode(postNode).isConditionsTask()){
List<String> conditionTaskNodeList = parseConditionTask(postNode);
for(String conditions : conditionTaskNodeList){
postNodeList.addAll(parsePostNodeList(conditions));
}
}else if(postTaskInstance.getState().typeIsSuccess()){
postNodeList.addAll(parsePostNodeList(postNode));
}else{
postNodeList.add(postNode);
}
}else if(isTaskNodeNeedSkip(dag.getNode(postNode))){
postSkipList.add(postNode);
setTaskNodeSkip(postSkipList);
postSkipList.clear();
}else{
postNodeList.add(postNode);
}
}
return postNodeList;
}
/**
* submit post node
* @param parentNodeName parent node name
*/
private void submitPostNode(String parentNodeName){
List<String> submitTaskNodeList = parsePostNodeList(parentNodeName);
List<TaskInstance> taskInstances = new ArrayList<>();
for(String taskNode : submitTaskNodeList){
taskInstances.add(createTaskInstance(processInstance, taskNode,
dag.getNode(taskNode)));
}
// if previous node success , post node submit
for(TaskInstance task : taskInstances){
if(readyToSubmitTaskList.containsKey(task.getName())){
continue;
}
if(completeTaskList.containsKey(task.getName())){
logger.info("task {} has already run success", task.getName());
continue;
}
if(task.getState().typeIsPause() || task.getState().typeIsCancel()){
logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
}else{
addTaskToStandByList(task);
}
}
}
/**
* determine whether the dependencies of the task node are complete
* @return DependResult
*/
private DependResult isTaskDepsComplete(String taskName) {
Collection<String> startNodes = dag.getBeginNode();
// if it is a start vertex, return success directly
if(startNodes.contains(taskName)){
return DependResult.SUCCESS;
}
TaskNode taskNode = dag.getNode(taskName);
List<String> depNameList = taskNode.getDepList();
for(String depsNode : depNameList ){
if(!dag.containsNode(depsNode)
|| forbiddenTaskList.containsKey(depsNode)){
continue;
}
if(skipTaskNodeList.containsKey(depsNode)){
return DependResult.FAILED;
}
// all the dependencies must be completed
if(!completeTaskList.containsKey(depsNode)){
return DependResult.WAITING;
}
ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
if(depTaskState.typeIsPause() || depTaskState.typeIsCancel()){
return DependResult.WAITING;
}
// ignore task state if current task is condition
if(taskNode.isConditionsTask()){
continue;
}
if(!dependTaskSuccess(depsNode, taskName)){
return DependResult.FAILED;
}
}
logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray()));
return DependResult.SUCCESS;
}
/**
* the dependent node has completed, but we still need to check whether the next node is on the branch chosen by a condition task
* @param dependNodeName
* @param nextNodeName
* @return
*/
private boolean dependTaskSuccess(String dependNodeName, String nextNodeName){
if(dag.getNode(dependNodeName).isConditionsTask()){
//condition task need check the branch to run
List<String> nextTaskList = parseConditionTask(dependNodeName);
if(!nextTaskList.contains(nextNodeName)){
return false;
}
}else {
ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState();
if(depTaskState.typeIsFailure()){
return false;
}
}
return true;
}
/**
* query task instance by complete state
* @param state state
* @return task instance list
*/
private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state){
List<TaskInstance> resultList = new ArrayList<>();
for (Map.Entry<String, TaskInstance> entry: completeTaskList.entrySet()) {
if(entry.getValue().getState() == state){
resultList.add(entry.getValue());
}
}
return resultList;
}
/**
* whether there are ongoing tasks
* @param state state
* @return ExecutionStatus
*/
private ExecutionStatus runningState(ExecutionStatus state){
if(state == ExecutionStatus.READY_STOP ||
state == ExecutionStatus.READY_PAUSE ||
state == ExecutionStatus.WAITTING_THREAD){
// if the running task is not completed, the state remains unchanged
return state;
}else{
return ExecutionStatus.RUNNING_EXEUTION;
}
}
/**
* whether a failed task exists, including submit failure, dependency failure and execution failure (after retries)
*
* @return Boolean whether has failed task
*/
private boolean hasFailedTask(){
if(this.taskFailedSubmit){
return true;
}
if(this.errorTaskList.size() > 0){
return true;
}
return this.dependFailedTask.size() > 0;
}
/**
* process instance failure
*
* @return Boolean whether process instance failed
*/
private boolean processFailed(){
if(hasFailedTask()) {
if(processInstance.getFailureStrategy() == FailureStrategy.END){
return true;
}
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
return readyToSubmitTaskList.size() == 0 || activeTaskNode.size() == 0;
}
}
return false;
}
/**
* whether task for waiting thread
* @return Boolean whether has waiting thread task
*/
private boolean hasWaitingThreadTask(){
List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITTING_THREAD);
return CollectionUtils.isNotEmpty(waitingList);
}
/**
* prepare for pause
* 1. if a failed retry task exists in the standby queue, return failure directly
* 2. if a paused task exists, the complement is not finished, or tasks are still pending submission, return pause
* 3. otherwise, success
* @return ExecutionStatus
*/
private ExecutionStatus processReadyPause(){
if(hasRetryTaskInStandBy()){
return ExecutionStatus.FAILURE;
}
List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
if(CollectionUtils.isNotEmpty(pauseList)
|| !isComplementEnd()
|| readyToSubmitTaskList.size() > 0){
return ExecutionStatus.PAUSE;
}else{
return ExecutionStatus.SUCCESS;
}
}
/**
* generate the latest process instance status by the tasks state
* @return process instance execution status
*/
private ExecutionStatus getProcessInstanceState(){
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
ExecutionStatus state = instance.getState();
if(activeTaskNode.size() > 0 || retryTaskExists()){
// active task and retry task exists
return runningState(state);
}
// process failure
if(processFailed()){
return ExecutionStatus.FAILURE;
}
// waiting thread
if(hasWaitingThreadTask()){
return ExecutionStatus.WAITTING_THREAD;
}
// pause
if(state == ExecutionStatus.READY_PAUSE){
return processReadyPause();
}
// stop
if(state == ExecutionStatus.READY_STOP){
List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
if(CollectionUtils.isNotEmpty(stopList)
|| CollectionUtils.isNotEmpty(killList)
|| !isComplementEnd()){
return ExecutionStatus.STOP;
}else{
return ExecutionStatus.SUCCESS;
}
}
// success
if(state == ExecutionStatus.RUNNING_EXEUTION){
List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL);
if(readyToSubmitTaskList.size() > 0){
// tasks are pending submission with no retries, which means their dependencies are still being waited on
return ExecutionStatus.RUNNING_EXEUTION;
}else if(CollectionUtils.isNotEmpty(killTasks)){
// tasks may have been killed manually
return ExecutionStatus.FAILURE;
}else{
// if the waiting queue is empty and the status is in progress, then success
return ExecutionStatus.SUCCESS;
}
}
return state;
}
/**
* whether the standby task list has retry tasks
* @return
*/
private boolean retryTaskExists() {
boolean result = false;
for(String taskName : readyToSubmitTaskList.keySet()){
TaskInstance task = readyToSubmitTaskList.get(taskName);
if(task.getState().typeIsFailure()){
result = true;
break;
}
}
return result;
}
/**
* whether complement end
* @return Boolean whether is complement end
*/
private boolean isComplementEnd() {
if(!processInstance.isComplementData()){
return true;
}
try {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
return processInstance.getScheduleTime().equals(endTime);
} catch (Exception e) {
logger.error("complement end failed ",e);
return false;
}
}
/**
* update the process instance state
* after each batch of tasks is executed, the status of the process instance is updated
*/
private void updateProcessInstanceState() {
ExecutionStatus state = getProcessInstanceState();
if(processInstance.getState() != state){
logger.info(
"work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}",
processInstance.getId(), processInstance.getName(),
processInstance.getState(), state,
processInstance.getCommandType());
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
instance.setState(state);
instance.setProcessDefinition(processInstance.getProcessDefinition());
processService.updateProcessInstance(instance);
processInstance = instance;
}
}
/**
* get task dependency result
* @param taskInstance task instance
* @return DependResult
*/
private DependResult getDependResultForTask(TaskInstance taskInstance){
return isTaskDepsComplete(taskInstance.getName());
}
/**
* add task to standby list
* @param taskInstance task instance
*/
private void addTaskToStandByList(TaskInstance taskInstance){
logger.info("add task to stand by list: {}", taskInstance.getName());
readyToSubmitTaskList.putIfAbsent(taskInstance.getName(), taskInstance);
}
/**
* remove task from stand by list
* @param taskInstance task instance
*/
private void removeTaskFromStandbyList(TaskInstance taskInstance){
logger.info("remove task from stand by list: {}", taskInstance.getName());
readyToSubmitTaskList.remove(taskInstance.getName());
}
/**
* has retry task in standby
* @return Boolean whether has retry task in standby
*/
private boolean hasRetryTaskInStandBy(){
for (Map.Entry<String, TaskInstance> entry: readyToSubmitTaskList.entrySet()) {
if(entry.getValue().getState().typeIsFailure()){
return true;
}
}
return false;
}
/**
* submit and watch the tasks until the work flow stops
*/
private void runProcess(){
// submit start node
submitPostNode(null);
boolean sendTimeWarning = false;
while(!processInstance.isProcessInstanceStop()){
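// main scheduling loop: poll the active task threads, handle finished tasks,
// submit standby tasks whose dependencies are satisfied and refresh the
// process instance state until the instance reaches a stop state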
// send warning email if process time out.
if(!sendTimeWarning && checkProcessTimeOut(processInstance) ){
alertManager.sendProcessTimeoutAlert(processInstance,
processService.findProcessDefineById(processInstance.getProcessDefinitionId()));
sendTimeWarning = true;
}
for(Map.Entry<MasterBaseTaskExecThread,Future<Boolean>> entry: activeTaskNode.entrySet()) {
Future<Boolean> future = entry.getValue();
TaskInstance task = entry.getKey().getTaskInstance();
if(!future.isDone()){
continue;
}
// node monitor thread complete
task = this.processService.findTaskInstanceById(task.getId());
if(task == null){
this.taskFailedSubmit = true;
activeTaskNode.remove(entry.getKey());
continue;
}
// node monitor thread complete
if(task.getState().typeIsFinished()){
activeTaskNode.remove(entry.getKey());
}
logger.info("task :{}, id:{} complete, state is {} ",
task.getName(), task.getId(), task.getState());
// node success , post node submit
if(task.getState() == ExecutionStatus.SUCCESS){
completeTaskList.put(task.getName(), task);
submitPostNode(task.getName());
continue;
}
// node fails, retry first, and then execute the failure process
if(task.getState().typeIsFailure()){
if(task.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE){
this.recoverToleranceFaultTaskList.add(task);
}
if(task.taskCanRetry()){
addTaskToStandByList(task);
}else{
completeTaskList.put(task.getName(), task);
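// a failed task that is itself a conditions node, or that has a conditions node
// after it in the dag, still submits its post nodes so that the condition branch
// can be evaluated; otherwise the task is recorded as an error task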
if( task.isConditionsTask()
|| DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
submitPostNode(task.getName());
}else{
errorTaskList.put(task.getName(), task);
if(processInstance.getFailureStrategy() == FailureStrategy.END){
killTheOtherTasks();
}
}
}
continue;
}
// other status stop/pause
completeTaskList.put(task.getName(), task);
}
// send alert
if(CollectionUtils.isNotEmpty(this.recoverToleranceFaultTaskList)){
alertManager.sendAlertWorkerToleranceFault(processInstance, recoverToleranceFaultTaskList);
this.recoverToleranceFaultTaskList.clear();
}
// update the status of completed tasks
// failure priority is higher than pause
// if a task fails, other paused tasks need to be reset to kill
if(errorTaskList.size() > 0){
for(Map.Entry<String, TaskInstance> entry: completeTaskList.entrySet()) {
TaskInstance completeTask = entry.getValue();
if(completeTask.getState()== ExecutionStatus.PAUSE){
completeTask.setState(ExecutionStatus.KILL);
completeTaskList.put(entry.getKey(), completeTask);
processService.updateTaskInstance(completeTask);
}
}
}
if(canSubmitTaskToQueue()){
submitStandByTask();
}
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (InterruptedException e) {
logger.error(e.getMessage(),e);
}
updateProcessInstanceState();
}
logger.info("process:{} end, state :{}", processInstance.getId(), processInstance.getState());
}
/**
* check whether the process instance has timed out
* @param processInstance process instance
* @return true if the running time of the process instance exceeds its timeout
*/
private boolean checkProcessTimeOut(ProcessInstance processInstance) {
if(processInstance.getTimeout() == 0 ){
return false;
}
Date now = new Date();
long runningTime = DateUtils.diffMin(now, processInstance.getStartTime());
return runningTime > processInstance.getTimeout();
}
/**
* whether can submit task to queue
* @return boolean
*/
private boolean canSubmitTaskToQueue() {
return OSUtils.checkResource(masterConfig.getMasterMaxCpuloadAvg(), masterConfig.getMasterReservedMemory());
}
/**
* close the ongoing tasks
*/
private void killTheOtherTasks() {
logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(),
activeTaskNode.size());
for (Map.Entry<MasterBaseTaskExecThread, Future<Boolean>> entry : activeTaskNode.entrySet()) {
MasterBaseTaskExecThread taskExecThread = entry.getKey();
Future<Boolean> future = entry.getValue();
TaskInstance taskInstance = taskExecThread.getTaskInstance();
taskInstance = processService.findTaskInstanceById(taskInstance.getId());
if(taskInstance != null && taskInstance.getState().typeIsFinished()){
continue;
}
if (!future.isDone()) {
// record kill info
logger.info("kill process instance, id: {}, task: {}", processInstance.getId(), taskExecThread.getTaskInstance().getId());
// kill node
taskExecThread.kill();
}
}
}
/**
* whether the retry interval is timed out
* @param taskInstance task instance
* @return Boolean
*/
private boolean retryTaskIntervalOverTime(TaskInstance taskInstance){
if(taskInstance.getState() != ExecutionStatus.FAILURE){
return true;
}
if(taskInstance.getId() == 0 ||
taskInstance.getMaxRetryTimes() ==0 ||
taskInstance.getRetryInterval() == 0 ){
return true;
}
Date now = new Date();
long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime());
// return true only when the time since the task failed exceeds the configured retry interval
return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval;
}
/**
* handling the list of tasks to be submitted
*/
private void submitStandByTask(){
for(Map.Entry<String, TaskInstance> entry: readyToSubmitTaskList.entrySet()) {
TaskInstance task = entry.getValue();
DependResult dependResult = getDependResultForTask(task);
if(DependResult.SUCCESS == dependResult){
if(retryTaskIntervalOverTime(task)){
submitTaskExec(task);
removeTaskFromStandbyList(task);
}
}else if(DependResult.FAILED == dependResult){
// if the dependency fails, the current node is not submitted and the state changes to failure.
dependFailedTask.put(entry.getKey(), task);
removeTaskFromStandbyList(task);
logger.info("task {},id:{} depend result : {}",task.getName(), task.getId(), dependResult);
}
}
}
/**
* get recovery task instance
* @param taskId task id
* @return recovery task instance
*/
private TaskInstance getRecoveryTaskInstance(String taskId){
if(StringUtils.isEmpty(taskId)){
return null;
}
try {
Integer intId = Integer.valueOf(taskId);
TaskInstance task = processService.findTaskInstanceById(intId);
if(task == null){
logger.error("start node id cannot be found: {}", taskId);
}else {
return task;
}
}catch (Exception e){
logger.error("get recovery task instance failed ",e);
}
return null;
}
/**
* get start task instance list
* @param cmdParam command param
* @return task instance list
*/
private List<TaskInstance> getStartTaskInstanceList(String cmdParam){
List<TaskInstance> instanceList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if(paramMap != null && paramMap.containsKey(CMDPARAM_RECOVERY_START_NODE_STRING)){
String[] idList = paramMap.get(CMDPARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA);
for(String nodeId : idList){
TaskInstance task = getRecoveryTaskInstance(nodeId);
if(task != null){
instanceList.add(task);
}
}
}
return instanceList;
}
/**
* parse "StartNodeNameList" from cmd param
* @param cmdParam command param
* @return start node name list
*/
private List<String> parseStartNodeName(String cmdParam){
List<String> startNodeNameList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if(paramMap == null){
return startNodeNameList;
}
if(paramMap.containsKey(CMDPARAM_START_NODE_NAMES)){
startNodeNameList = Arrays.asList(paramMap.get(CMDPARAM_START_NODE_NAMES).split(Constants.COMMA));
}
return startNodeNameList;
}
/**
* generate the recovery node name list from the recovered task instances;
* if "StartNodeIdList" exists in the command param, the corresponding task names are returned
* @return recovery node name list
*/
private List<String> getRecoveryNodeNameList(){
List<String> recoveryNodeNameList = new ArrayList<>();
if(CollectionUtils.isNotEmpty(recoverNodeIdList)) {
for (TaskInstance task : recoverNodeIdList) {
recoveryNodeNameList.add(task.getName());
}
}
return recoveryNodeNameList;
}
/**
* generate flow dag
* @param processDefinitionJson process definition json
* @param startNodeNameList start node name list
* @param recoveryNodeNameList recovery node name list
* @param depNodeType depend node type
* @return ProcessDag process dag
* @throws Exception exception
*/
public ProcessDag generateFlowDag(String processDefinitionJson,
List<String> startNodeNameList,
List<String> recoveryNodeNameList,
TaskDependType depNodeType)throws Exception{
return DagHelper.generateFlowDag(processDefinitionJson, startNodeNameList, recoveryNodeNameList, depNodeType);
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,469 |
[Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
|
**Describe the question**

**Which version of DolphinScheduler:**
-[1.3.0]
**Additional context**
When the program type of the Spark node is set to PYTHON, how should the main jar package be selected?
Because, on a Linux machine, the command "spark-submit *.py" can run Python files on Spark (a rough sketch of this idea follows below).
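As a rough illustration only (the class and method names here are made up and this is not DolphinScheduler's actual SparkTask implementation), a scheduler could build the spark-submit arguments differently per program type, so a PYTHON task would take a .py entry file instead of a main class plus main jar:

```java
import java.util.ArrayList;
import java.util.List;

public class SparkSubmitArgsSketch {

    enum ProgramType { JAVA, SCALA, PYTHON }

    // build the spark-submit argument list for the given program type
    static List<String> buildArgs(ProgramType type, String mainClass, String mainFile) {
        List<String> args = new ArrayList<>();
        args.add("spark-submit");
        if (type == ProgramType.PYTHON) {
            // PYTHON: no main class and no main jar; the .py file itself is the entry point
            args.add(mainFile);                  // e.g. wordcount.py
        } else {
            // JAVA / SCALA: a main class plus the main jar are required
            args.add("--class");
            args.add(mainClass);                 // e.g. com.example.WordCount
            args.add(mainFile);                  // e.g. wordcount.jar
        }
        return args;
    }

    public static void main(String[] args) {
        System.out.println(buildArgs(ProgramType.PYTHON, null, "wordcount.py"));
        System.out.println(buildArgs(ProgramType.SCALA, "com.example.WordCount", "wordcount.jar"));
    }
}
```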
**Requirement or improvement**
no
|
https://github.com/apache/dolphinscheduler/issues/3469
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-12T02:09:30Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.service.UdfFuncService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import springfox.documentation.annotations.ApiIgnore;
import java.util.Map;
import static org.apache.dolphinscheduler.api.enums.Status.*;
/**
* resources controller
*/
@Api(tags = "RESOURCES_TAG", position = 1)
@RestController
@RequestMapping("resources")
public class ResourcesController extends BaseController {
private static final Logger logger = LoggerFactory.getLogger(ResourcesController.class);
@Autowired
private ResourcesService resourceService;
@Autowired
private UdfFuncService udfFuncService;
/**
* create directory
*
* @param loginUser login user
* @param type type
* @param alias alias
* @param description description
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@ApiOperation(value = "createDirctory", notes = "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/directory/create")
@ApiException(CREATE_RESOURCE_ERROR)
public Result createDirectory(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir) {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(), type, alias, description, pid, currentDir);
return resourceService.createDirectory(loginUser, alias, description, type, pid, currentDir);
}
/**
* create resource
*
* @param loginUser login user
* @param alias alias
* @param description description
* @param type type
* @param file file
* @return create result code
*/
@ApiOperation(value = "createResource", notes = "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/create")
@ApiException(CREATE_RESOURCE_ERROR)
public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam("file") MultipartFile file,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir) {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(), type, alias, description, file.getName(), file.getOriginalFilename());
return resourceService.createResource(loginUser, alias, description, type, file, pid, currentDir);
}
/**
* update resource
*
* @param loginUser login user
* @param alias alias
* @param resourceId resource id
* @param type resource type
* @param description description
* @param file resource file
* @return update result code
*/
@ApiOperation(value = "updateResource", notes = "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/update")
@ApiException(UPDATE_RESOURCE_ERROR)
public Result updateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "file" ,required = false) MultipartFile file) {
logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}, file: {}",
loginUser.getUserName(), type, alias, description, file);
return resourceService.updateResource(loginUser, resourceId, alias, description, type, file);
}
/**
* query resources list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceList", notes = "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType")
})
@GetMapping(value = "/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_ERROR)
public Result queryResourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type
) {
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type);
Map<String, Object> result = resourceService.queryResourceList(loginUser, type);
return returnDataList(result);
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
@ApiOperation(value = "queryResourceListPaging", notes = "QUERY_RESOURCE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "int"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
})
@GetMapping(value = "/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_PAGING)
public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "id") int id,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
) {
logger.info("query resource list, login user:{}, resource type:{}, search value:{}",
loginUser.getUserName(), type, searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = resourceService.queryResourceListPaging(loginUser, id, type, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
*/
@ApiOperation(value = "deleteResource", notes = "DELETE_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_RESOURCE_ERROR)
public Result deleteResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId
) throws Exception {
logger.info("login user {}, delete resource id: {}",
loginUser.getUserName(), resourceId);
return resourceService.delete(loginUser, resourceId);
}
/**
* verify resource by alias and type
*
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name does not exist, otherwise false
*/
@ApiOperation(value = "verifyResourceName", notes = "VERIFY_RESOURCE_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/verify-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR)
public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "fullName") String fullName,
@RequestParam(value = "type") ResourceType type
) {
logger.info("login user {}, verfiy resource alias: {},resource type: {}",
loginUser.getUserName(), fullName, type);
return resourceService.verifyResourceName(fullName, type, loginUser);
}
/**
* query resources jar list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceJarList", notes = "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType")
})
@GetMapping(value = "/list/jar")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_ERROR)
public Result queryResourceJarList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type
) {
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
Map<String, Object> result = resourceService.queryResourceJarList(loginUser, type);
return returnDataList(result);
}
/**
* query resource by full name and type
*
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return the queried resource result
*/
@ApiOperation(value = "queryResource", notes = "QUERY_BY_RESOURCE_NAME")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/queryResource")
@ResponseStatus(HttpStatus.OK)
@ApiException(RESOURCE_NOT_EXIST)
public Result queryResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "fullName", required = false) String fullName,
@RequestParam(value = "id", required = false) Integer id,
@RequestParam(value = "type") ResourceType type
) {
logger.info("login user {}, query resource by full name: {} or id: {},resource type: {}",
loginUser.getUserName(), fullName, id, type);
return resourceService.queryResource(fullName, id, type);
}
/**
* view resource file online
*
* @param loginUser login user
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
@ApiOperation(value = "viewResource", notes = "VIEW_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "skipLineNum", value = "SKIP_LINE_NUM", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "limit", value = "LIMIT", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/view")
@ApiException(VIEW_RESOURCE_FILE_ON_LINE_ERROR)
public Result viewResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "skipLineNum") int skipLineNum,
@RequestParam(value = "limit") int limit
) {
logger.info("login user {}, view resource : {}, skipLineNum {} , limit {}",
loginUser.getUserName(), resourceId, skipLineNum, limit);
return resourceService.readResource(resourceId, skipLineNum, limit);
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param description description
* @param content content
* @return create result code
*/
@ApiOperation(value = "onlineCreateResource", notes = "ONLINE_CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fileName", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "SUFFIX", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "content", value = "CONTENT", required = true, dataType = "String")
})
@PostMapping(value = "/online-create")
@ApiException(CREATE_RESOURCE_FILE_ON_LINE_ERROR)
public Result onlineCreateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "fileName") String fileName,
@RequestParam(value = "suffix") String fileSuffix,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "content") String content,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir
) {
logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {},desc : {},content : {}",
loginUser.getUserName(), fileName, type, fileSuffix, description, content, pid, currentDir);
if (StringUtils.isEmpty(content)) {
logger.error("resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.onlineCreateResource(loginUser, type, fileName, fileSuffix, description, content, pid, currentDir);
}
/**
* edit resource file online
*
* @param loginUser login user
* @param resourceId resource id
* @param content content
* @return update result code
*/
@ApiOperation(value = "updateResourceContent", notes = "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "content", value = "CONTENT", required = true, dataType = "String")
})
@PostMapping(value = "/update-content")
@ApiException(EDIT_RESOURCE_FILE_ON_LINE_ERROR)
public Result updateResourceContent(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "content") String content
) {
logger.info("login user {}, updateProcessInstance resource : {}",
loginUser.getUserName(), resourceId);
if (StringUtils.isEmpty(content)) {
logger.error("The resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.updateResourceContent(resourceId, content);
}
/**
* download resource file
*
* @param loginUser login user
* @param resourceId resource id
* @return resource content
*/
@ApiOperation(value = "downloadResource", notes = "DOWNLOAD_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/download")
@ResponseBody
@ApiException(DOWNLOAD_RESOURCE_FILE_ERROR)
public ResponseEntity downloadResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId) throws Exception {
logger.info("login user {}, download resource : {}",
loginUser.getUserName(), resourceId);
Resource file = resourceService.downloadResource(resourceId);
if (file == null) {
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.RESOURCE_NOT_EXIST.getMsg());
}
return ResponseEntity
.ok()
.header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
.body(file);
}
/**
* create udf function
*
* @param loginUser login user
* @param type udf type
* @param funcName function name
* @param argTypes argument types
* @param database database
* @param description description
* @param className class name
* @param resourceId resource id
* @return create result code
*/
@ApiOperation(value = "createUdfFunc", notes = "CREATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
@ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
@ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
@ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/udf-func/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_UDF_FUNCTION_ERROR)
public Result createUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") UdfType type,
@RequestParam(value = "funcName") String funcName,
@RequestParam(value = "className") String className,
@RequestParam(value = "argTypes", required = false) String argTypes,
@RequestParam(value = "database", required = false) String database,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "resourceId") int resourceId) {
logger.info("login user {}, create udf function, type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(), type, funcName, argTypes, database, description, resourceId);
return udfFuncService.createUdfFunction(loginUser, funcName, className, argTypes, database, description, type, resourceId);
}
/**
* view udf function
*
* @param loginUser login user
* @param id udf function id
* @return udf function detail
*/
@ApiOperation(value = "viewUIUdfFunction", notes = "VIEW_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/udf-func/update-ui")
@ResponseStatus(HttpStatus.OK)
@ApiException(VIEW_UDF_FUNCTION_ERROR)
public Result viewUIUdfFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("id") int id) {
logger.info("login user {}, query udf{}",
loginUser.getUserName(), id);
Map<String, Object> map = udfFuncService.queryUdfFuncDetail(id);
return returnDataList(map);
}
/**
* update udf function
*
* @param loginUser login user
* @param type resource type
* @param funcName function name
* @param argTypes argument types
* @param database database name
* @param description description
* @param resourceId resource id
* @param className class name
* @param udfFuncId udf function id
* @return update result code
*/
@ApiOperation(value = "updateUdfFunc", notes = "UPDATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
@ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
@ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/udf-func/update")
@ApiException(UPDATE_UDF_FUNCTION_ERROR)
public Result updateUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int udfFuncId,
@RequestParam(value = "type") UdfType type,
@RequestParam(value = "funcName") String funcName,
@RequestParam(value = "className") String className,
@RequestParam(value = "argTypes", required = false) String argTypes,
@RequestParam(value = "database", required = false) String database,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "resourceId") int resourceId) {
logger.info("login user {}, updateProcessInstance udf function id: {},type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(), udfFuncId, type, funcName, argTypes, database, description, resourceId);
Map<String, Object> result = udfFuncService.updateUdfFunc(udfFuncId, funcName, className, argTypes, database, description, type, resourceId);
return returnDataList(result);
}
/**
* query udf function list paging
*
* @param loginUser login user
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return udf function list page
*/
@ApiOperation(value = "queryUdfFuncListPaging", notes = "QUERY_UDF_FUNCTION_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
})
@GetMapping(value = "/udf-func/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_UDF_FUNCTION_LIST_PAGING_ERROR)
public Result<Object> queryUdfFuncListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
) {
logger.info("query udf functions list, login user:{},search value:{}",
loginUser.getUserName(), searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}
result = udfFuncService.queryUdfFuncListPaging(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}
/**
* query udf func list by type
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryUdfFuncList", notes = "QUERY_UDF_FUNC_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType")
})
@GetMapping(value = "/udf-func/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_DATASOURCE_BY_TYPE_ERROR)
public Result<Object> queryUdfFuncList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("type") UdfType type) {
String userName = loginUser.getUserName();
userName = userName.replaceAll("[\n|\r|\t]", "_");
logger.info("query udf func list, user:{}, type:{}", userName, type);
Map<String, Object> result = udfFuncService.queryUdfFuncList(loginUser, type.ordinal());
return returnDataList(result);
}
/**
* verify udf function name can use or not
*
* @param loginUser login user
* @param name name
* @return true if the name can be used, otherwise false
*/
@ApiOperation(value = "verifyUdfFuncName", notes = "VERIFY_UDF_FUNCTION_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "name", value = "FUNC_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/udf-func/verify-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_UDF_FUNCTION_NAME_ERROR)
public Result verifyUdfFuncName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "name") String name
) {
logger.info("login user {}, verfiy udf function name: {}",
loginUser.getUserName(), name);
return udfFuncService.verifyUdfFuncByName(name);
}
/**
* delete udf function
*
* @param loginUser login user
* @param udfFuncId udf function id
* @return delete result code
*/
@ApiOperation(value = "deleteUdfFunc", notes = "DELETE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/udf-func/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_UDF_FUNCTION_ERROR)
public Result deleteUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int udfFuncId
) {
logger.info("login user {}, delete udf function id: {}", loginUser.getUserName(), udfFuncId);
return udfFuncService.delete(udfFuncId);
}
/**
* authorized file resource list
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
@ApiOperation(value = "authorizedFile", notes = "AUTHORIZED_FILE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authed-file")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZED_FILE_RESOURCE_ERROR)
public Result authorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("authorized file resource, user: {}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizedFile(loginUser, userId);
return returnDataList(result);
}
/**
* unauthorized file resource list
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "authorizeResourceTree", notes = "AUTHORIZE_RESOURCE_TREE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authorize-resource-tree")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZE_RESOURCE_TREE)
public Result authorizeResourceTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("all resource file, user:{}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizeResourceTree(loginUser, userId);
return returnDataList(result);
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "unauthUDFFunc", notes = "UNAUTHORIZED_UDF_FUNC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/unauth-udf-func")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(UNAUTHORIZED_UDF_FUNCTION_ERROR)
public Result unauthUDFFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("unauthorized udf function, login user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.unauthorizedUDFFunction(loginUser, userId);
return returnDataList(result);
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
@ApiOperation(value = "authUDFFunc", notes = "AUTHORIZED_UDF_FUNC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authed-udf-func")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZED_UDF_FUNCTION_ERROR)
public Result authorizedUDFFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("auth udf function, login user:{}, auth user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizedUDFFunction(loginUser, userId);
return returnDataList(result);
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,469 |
[Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
|
**Describe the question**

**Which version of DolphinScheduler:**
-[1.3.0]
**Additional context**
When the program type of the Spark node is set to PYTHON, how should the main jar package be selected?
On a Linux machine, the command "spark-submit *.py" can be used to run Python files with Spark.
**Requirement or improvement**
no
|
https://github.com/apache/dolphinscheduler/issues/3469
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-12T02:09:30Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/**
* resource filter
*/
public class ResourceFilter implements IFilter {
/**
* resource suffix
*/
private String suffix;
/**
* resource list
*/
private List<Resource> resourceList;
/**
* parent list
*/
//Set<Resource> parentList = new HashSet<>();
/**
* constructor
* @param suffix resource suffix
* @param resourceList resource list
*/
public ResourceFilter(String suffix, List<Resource> resourceList) {
this.suffix = suffix;
this.resourceList = resourceList;
}
/**
* file filter
* @return file filtered by suffix
*/
public Set<Resource> fileFilter(){
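        // keep only the resources whose alias ends with the configured suffix (e.g. ".jar" when listing jar resources)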
Set<Resource> resources = resourceList.stream().filter(t -> {
String alias = t.getAlias();
return alias.endsWith(suffix);
}).collect(Collectors.toSet());
return resources;
}
/**
* list all parent dir
* @return parent resource dir set
*/
Set<Resource> listAllParent(){
Set<Resource> parentList = new HashSet<>();
Set<Resource> filterFileList = fileFilter();
for(Resource file:filterFileList){
parentList.add(file);
setAllParent(file,parentList);
}
return parentList;
}
/**
 * add all parent dirs of the given resource to the parent list
 * @param resource resource
 * @param parentList parent resource dir set
*/
private void setAllParent(Resource resource,Set<Resource> parentList){
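        // walk up the tree: any resource whose id equals the current resource's pid is a parent; recurse until no parent is found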
for (Resource resourceTemp : resourceList) {
if (resourceTemp.getId() == resource.getPid()) {
parentList.add(resourceTemp);
setAllParent(resourceTemp,parentList);
}
}
}
@Override
public List<Resource> filter() {
return new ArrayList<>(listAllParent());
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,469 |
[Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
|
**Describe the question**

**Which version of DolphinScheduler:**
-[1.3.0]
**Additional context**
When the program type of the Spark node is set to PYTHON, how should the main jar package be selected?
On a Linux machine, the command "spark-submit *.py" can be used to run Python files with Spark.
**Requirement or improvement**
no
|
https://github.com/apache/dolphinscheduler/issues/3469
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-12T02:09:30Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* resources service
*/
@Service
public class ResourcesService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesService.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirecotry(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
// check whether the resource name already exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String fullName, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
if (resources != null && resources.size() > 0) {
return true;
}
return false;
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @param file resource file
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type,
MultipartFile file) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
// check whether the resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
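        // build the new full name: keep the parent directory of the origin full name and replace the last segment with the new alias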
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
if (file != null) {
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(FileUtils.suffix(originFullName))) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
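        // for files (not directories): changing the file suffix is not allowed once the resource has been authorized to other users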
if (!resource.isDirectory()) {
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
Date now = new Date();
resource.setAlias(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
if (file != null) {
resource.setFileName(file.getOriginalFilename());
resource.setSize(file.getSize());
}
try {
resourcesMapper.updateById(resource);
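            // propagate the rename: for a directory, update the full name of every child resource; for a UDF resource, keep the resourceName of the bound UDF functions in sync with the new full name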
if (resource.isDirectory()) {
List<Integer> childrenResource = listAllChildren(resource,false);
if (CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList = new ArrayList<>();
Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
}
} else if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(fullName);
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>(5);
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name) && file == null) {
return result;
}
if (file != null) {
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
if (!fullName.equals(originFullName)) {
try {
HadoopUtils.getInstance().delete(originHdfsFileName,false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(String.format("delete resource: %s failed.", originFullName));
}
}
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
public Map<String, Object> queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(5);
Page<Resource> page = new Page(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId= 0;
}
if (direcotryId != -1) {
Resource directory = resourcesMapper.selectById(direcotryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId,direcotryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* create directory on hdfs
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
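            // stage the uploaded file on the local disk first, then copy it to HDFS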
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
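        // keep only the .jar files together with their parent directories so the returned tree structure stays intact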
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//get resource and hdfs path
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// get all resource id of process definitions those is released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
// if the resource type is UDF, check whether it is bound by any UDF function
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
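        // refuse to delete while the parent id or any id in the resource tree (including the resource itself) is still referenced by a process definition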
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if(tenant != null){
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
}else{
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
* verify resource by full name or pid and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return true if the resource full name or pid not exists, otherwise return false
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check preview or not by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
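        // build the full name under the current directory and make sure no resource with the same name already exists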
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* updateProcessInstance resource
*
* @param resourceId resource id
* @param content content
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check can edit by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// write file fail
logger.error("file {} fail, content is {}", localFilename, content);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws Exception exception
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if(user == null){
logger.error("user id {} not exists", userId);
throw new RuntimeException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if(tenant == null){
logger.error("tenant id {} not exists", user.getTenantId());
throw new RuntimeException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* list all file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list ;
if (resourceList != null && resourceList.size() > 0) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
}else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
//only admin can operate
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet = null;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)){
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get authorized resource list
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
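// remove every resource the target user is already authorized for from the candidate set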
Set<?> authedResourceSet = null;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return tenant code, or null if the user or tenant does not exist
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null){
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether add self to children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
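// directories contribute all of their descendants as well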
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
List<Integer> children = resourcesMapper.listChildren(resourceId);
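// depth-first traversal: record each child id, then descend into its own children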
for(int childId:children){
childList.add(childId);
listAllChildren(childId,childList);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,469 |
[Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
|
**Describe the question**

**Which version of DolphinScheduler:**
-[1.3.0]
**Additional context**
When the program type of the Spark node is set to PYTHON, how should the main jar package be selected?
Because on a Linux machine, the command "spark-submit *.py" can use Spark to run Python files.
**Requirement or improvement**
no
|
https://github.com/apache/dolphinscheduler/issues/3469
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-12T02:09:30Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ResourcesControllerTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import com.alibaba.fastjson.JSONObject;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
/**
* resources controller test
*/
public class ResourcesControllerTest extends AbstractControllerTest{
private static Logger logger = LoggerFactory.getLogger(ResourcesControllerTest.class);
@Test
    public void testQueryResourceList() throws Exception {
MvcResult mvcResult = mockMvc.perform(get("/resources/list")
.header(SESSION_ID, sessionId)
.param("type", ResourceType.FILE.name()))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryResourceListPaging() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(ResourceType.FILE));
paramsMap.add("pageNo", "1");
paramsMap.add("searchVal", "test");
paramsMap.add("pageSize", "1");
MvcResult mvcResult = mockMvc.perform(get("/resources/list-paging")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testVerifyResourceName() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("name","list_resources_1.sh");
paramsMap.add("type","FILE");
MvcResult mvcResult = mockMvc.perform(get("/resources/verify-name")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testViewResource() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id","5");
paramsMap.add("skipLineNum","2");
paramsMap.add("limit","100");
MvcResult mvcResult = mockMvc.perform(get("/resources/view")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testOnlineCreateResource() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(ResourceType.FILE));
paramsMap.add("fileName","test_file_1");
paramsMap.add("suffix","sh");
paramsMap.add("description","test");
paramsMap.add("content","echo 1111");
MvcResult mvcResult = mockMvc.perform(post("/resources/online-create")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUpdateResourceContent() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "1");
paramsMap.add("content","echo test_1111");
MvcResult mvcResult = mockMvc.perform(post("/resources/update-content")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testDownloadResource() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "5");
MvcResult mvcResult = mockMvc.perform(get("/resources/download")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testCreateUdfFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(UdfType.HIVE));
paramsMap.add("funcName", "test_udf");
paramsMap.add("className", "com.test.word.contWord");
paramsMap.add("argTypes", "argTypes");
paramsMap.add("database", "database");
paramsMap.add("description", "description");
paramsMap.add("resourceId", "1");
MvcResult mvcResult = mockMvc.perform(post("/resources/udf-func/create")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testViewUIUdfFunction() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "1");
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/update-ui")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUpdateUdfFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "1");
paramsMap.add("type", String.valueOf(UdfType.HIVE));
paramsMap.add("funcName", "update_duf");
paramsMap.add("className", "com.test.word.contWord");
paramsMap.add("argTypes", "argTypes");
paramsMap.add("database", "database");
paramsMap.add("description", "description");
paramsMap.add("resourceId", "1");
MvcResult mvcResult = mockMvc.perform(post("/resources/udf-func/update")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryUdfFuncList() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("pageNo", "1");
paramsMap.add("searchVal", "udf");
paramsMap.add("pageSize", "1");
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/list-paging")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
    public void testQueryUdfFuncListByType() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(UdfType.HIVE));
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/list")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testVerifyUdfFuncName() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("name", "test");
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/verify-name")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testAuthorizedFile() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "2");
MvcResult mvcResult = mockMvc.perform(get("/resources/authed-file")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUnauthorizedFile() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "2");
MvcResult mvcResult = mockMvc.perform(get("/resources/unauth-file")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testAuthorizedUDFFunction() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "2");
MvcResult mvcResult = mockMvc.perform(get("/resources/authed-udf-func")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUnauthUDFFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "2");
MvcResult mvcResult = mockMvc.perform(get("/resources/unauth-udf-func")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testDeleteUdfFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "1");
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/delete")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testDeleteResource() throws Exception {
MvcResult mvcResult = mockMvc.perform(get("/resources/delete")
.header(SESSION_ID, sessionId)
.param("id", "2"))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,469 |
[Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
|
**Describe the question**

**Which version of DolphinScheduler:**
-[1.3.0]
**Additional context**
When the program type of the Spark node is set to PYTHON, how should the main jar package be selected?
Because on a Linux machine, the command "spark-submit *.py" can use Spark to run Python files.
**Requirement or improvement**
no
|
https://github.com/apache/dolphinscheduler/issues/3469
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-12T02:09:30Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
* resource filter test
*/
public class ResourceFilterTest {
private static Logger logger = LoggerFactory.getLogger(ResourceFilterTest.class);
@Test
public void filterTest(){
List<Resource> allList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
allList.add(resource1);
allList.add(resource2);
allList.add(resource3);
allList.add(resource4);
allList.add(resource5);
allList.add(resource6);
allList.add(resource7);
ResourceFilter resourceFilter = new ResourceFilter(".jar",allList);
List<Resource> resourceList = resourceFilter.filter();
Assert.assertNotNull(resourceList);
resourceList.stream().forEach(t-> logger.info(t.toString()));
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,469 |
[Bug] The program type of spark node is selected as PYTHON, how should the main jar package be selected?
|
**Describe the question**

**Which version of DolphinScheduler:**
-[1.3.0]
**Additional context**
When the program type of the Spark node is set to PYTHON, how should the main jar package be selected?
Because on a Linux machine, the command "spark-submit *.py" can use Spark to run Python files.
**Requirement or improvement**
no
|
https://github.com/apache/dolphinscheduler/issues/3469
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-12T02:09:30Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/upgrade/UpgradeDao.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.upgrade;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.AbstractBaseDao;
import org.apache.dolphinscheduler.dao.datasource.ConnectionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.sql.DataSource;
import java.io.*;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public abstract class UpgradeDao extends AbstractBaseDao {
public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class);
private static final String T_VERSION_NAME = "t_escheduler_version";
private static final String T_NEW_VERSION_NAME = "t_ds_version";
private static final String rootDir = System.getProperty("user.dir");
protected static final DataSource dataSource = getDataSource();
private static final DbType dbType = getCurrentDbType();
@Override
protected void init() {
}
/**
* get datasource
* @return DruidDataSource
*/
public static DataSource getDataSource(){
return ConnectionFactory.getInstance().getDataSource();
}
/**
* get db type
* @return dbType
*/
public static DbType getDbType(){
return dbType;
}
/**
* get current dbType
* @return
*/
private static DbType getCurrentDbType(){
Connection conn = null;
try {
conn = dataSource.getConnection();
String name = conn.getMetaData().getDatabaseProductName().toUpperCase();
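// the JDBC product name (e.g. "MySQL", "PostgreSQL") is upper-cased to match the DbType enum; unsupported products fall into the catch block and return null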
return DbType.valueOf(name);
} catch (Exception e) {
logger.error(e.getMessage(),e);
return null;
}finally {
ConnectionUtils.releaseResource(conn);
}
}
/**
* init schema
*/
public void initSchema(){
DbType dbType = getDbType();
String initSqlPath = "";
if (dbType != null) {
switch (dbType) {
case MYSQL:
initSqlPath = "/sql/create/release-1.0.0_schema/mysql/";
initSchema(initSqlPath);
break;
case POSTGRESQL:
initSqlPath = "/sql/create/release-1.2.0_schema/postgresql/";
initSchema(initSqlPath);
break;
default:
logger.error("not support sql type: {},can't upgrade", dbType);
throw new IllegalArgumentException("not support sql type,can't upgrade");
}
}
}
/**
* init schema
* @param initSqlPath initSqlPath
*/
public void initSchema(String initSqlPath) {
// Execute the dolphinscheduler DDL, it cannot be rolled back
runInitDDL(initSqlPath);
// Execute the dolphinscheduler DML, it can be rolled back
runInitDML(initSqlPath);
}
/**
* run DML
* @param initSqlPath initSqlPath
*/
private void runInitDML(String initSqlPath) {
Connection conn = null;
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_dml.sql";
try {
conn = dataSource.getConnection();
conn.setAutoCommit(false);
// Execute the dolphinscheduler_dml.sql script to import related data of dolphinscheduler
ScriptRunner initScriptRunner = new ScriptRunner(conn, false, true);
Reader initSqlReader = new FileReader(new File(mysqlSQLFilePath));
initScriptRunner.runScript(initSqlReader);
conn.commit();
} catch (IOException e) {
try {
conn.rollback();
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (Exception e) {
try {
if (null != conn) {
conn.rollback();
}
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
ConnectionUtils.releaseResource(conn);
}
}
/**
* run DDL
* @param initSqlPath initSqlPath
*/
private void runInitDDL(String initSqlPath) {
Connection conn = null;
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
//String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql";
String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_ddl.sql";
try {
conn = dataSource.getConnection();
// Execute the dolphinscheduler_ddl.sql script to create the table structure of dolphinscheduler
ScriptRunner initScriptRunner = new ScriptRunner(conn, true, true);
Reader initSqlReader = new FileReader(new File(mysqlSQLFilePath));
initScriptRunner.runScript(initSqlReader);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (Exception e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
ConnectionUtils.releaseResource(conn);
}
}
/**
* determines whether a table exists
* @param tableName tableName
* @return if table exist return true,else return false
*/
public abstract boolean isExistsTable(String tableName);
/**
* determines whether a field exists in the specified table
* @param tableName tableName
* @param columnName columnName
* @return if column name exist return true,else return false
*/
public abstract boolean isExistsColumn(String tableName,String columnName);
/**
* get current version
* @param versionName versionName
* @return version
*/
public String getCurrentVersion(String versionName) {
String sql = String.format("select version from %s",versionName);
Connection conn = null;
ResultSet rs = null;
PreparedStatement pstmt = null;
String version = null;
try {
conn = dataSource.getConnection();
pstmt = conn.prepareStatement(sql);
rs = pstmt.executeQuery();
if (rs.next()) {
version = rs.getString(1);
}
return version;
} catch (SQLException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException("sql: " + sql, e);
} finally {
ConnectionUtils.releaseResource(rs, pstmt, conn);
}
}
/**
* upgrade DolphinScheduler
* @param schemaDir schema dir
*/
public void upgradeDolphinScheduler(String schemaDir) {
upgradeDolphinSchedulerDDL(schemaDir);
upgradeDolphinSchedulerDML(schemaDir);
}
/**
* upgrade DolphinScheduler worker group
* ds-1.3.0 modify the worker group for process definition json
*/
public void upgradeDolphinSchedulerWorkerGroup() {
updateProcessDefinitionJsonWorkerGroup();
}
/**
* upgrade DolphinScheduler resource list
* ds-1.3.2 modify the resource list for process definition json
*/
public void upgradeDolphinSchedulerResourceList() {
updateProcessDefinitionJsonResourceList();
}
/**
* updateProcessDefinitionJsonWorkerGroup
*/
protected void updateProcessDefinitionJsonWorkerGroup(){
WorkerGroupDao workerGroupDao = new WorkerGroupDao();
ProcessDefinitionDao processDefinitionDao = new ProcessDefinitionDao();
Map<Integer,String> replaceProcessDefinitionMap = new HashMap<>();
try {
Map<Integer, String> oldWorkerGroupMap = workerGroupDao.queryAllOldWorkerGroup(dataSource.getConnection());
Map<Integer,String> processDefinitionJsonMap = processDefinitionDao.queryAllProcessDefinition(dataSource.getConnection());
for (Map.Entry<Integer,String> entry : processDefinitionJsonMap.entrySet()){
JSONObject jsonObject = JSONObject.parseObject(entry.getValue());
JSONArray tasks = JSONArray.parseArray(jsonObject.getString("tasks"));
for (int i = 0 ;i < tasks.size() ; i++){
JSONObject task = tasks.getJSONObject(i);
Integer workerGroupId = task.getInteger("workerGroupId");
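// process definition json written before ds-1.3.0 stores a numeric workerGroupId: -1 meant the default group, other ids map to the old worker group names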
if (workerGroupId != null) {
if (workerGroupId == -1) {
task.put("workerGroup", "default");
} else {
task.put("workerGroup", oldWorkerGroupMap.get(workerGroupId));
}
}
}
jsonObject.remove("tasks");
jsonObject.put("tasks",tasks);
replaceProcessDefinitionMap.put(entry.getKey(),jsonObject.toJSONString());
}
if (replaceProcessDefinitionMap.size() > 0){
processDefinitionDao.updateProcessDefinitionJson(dataSource.getConnection(),replaceProcessDefinitionMap);
}
}catch (Exception e){
logger.error("update process definition json workergroup error",e);
}
}
/**
* updateProcessDefinitionJsonResourceList
*/
protected void updateProcessDefinitionJsonResourceList(){
ResourceDao resourceDao = new ResourceDao();
ProcessDefinitionDao processDefinitionDao = new ProcessDefinitionDao();
Map<Integer,String> replaceProcessDefinitionMap = new HashMap<>();
try {
Map<String,Integer> resourcesMap = resourceDao.listAllResources(dataSource.getConnection());
Map<Integer,String> processDefinitionJsonMap = processDefinitionDao.queryAllProcessDefinition(dataSource.getConnection());
for (Map.Entry<Integer,String> entry : processDefinitionJsonMap.entrySet()){
JSONObject jsonObject = JSONObject.parseObject(entry.getValue());
JSONArray tasks = JSONArray.parseArray(jsonObject.getString("tasks"));
for (int i = 0 ;i < tasks.size() ; i++){
JSONObject task = tasks.getJSONObject(i);
JSONObject param = (JSONObject) task.get("params");
if (param != null) {
List<ResourceInfo> resourceList = JSONUtils.toList(param.getString("resourceList"), ResourceInfo.class);
if (CollectionUtils.isNotEmpty(resourceList)) {
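// task json written by older versions may reference a resource with id 0; resolve the real id by its full name when it still exists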
List<ResourceInfo> newResourceList = resourceList.stream().map(resInfo -> {
String fullName = resInfo.getRes().startsWith("/") ? resInfo.getRes() : String.format("/%s",resInfo.getRes());
if (resInfo.getId() == 0 && resourcesMap.containsKey(fullName)) {
resInfo.setId(resourcesMap.get(fullName));
}
return resInfo;
}).collect(Collectors.toList());
param.put("resourceList",JSONArray.parse(JSONObject.toJSONString(newResourceList)));
}
}
task.put("params",param);
}
jsonObject.remove("tasks");
jsonObject.put("tasks",tasks);
replaceProcessDefinitionMap.put(entry.getKey(),jsonObject.toJSONString());
}
if (replaceProcessDefinitionMap.size() > 0){
processDefinitionDao.updateProcessDefinitionJson(dataSource.getConnection(),replaceProcessDefinitionMap);
}
}catch (Exception e){
logger.error("update process definition json resource list error",e);
}
}
/**
* upgradeDolphinScheduler DML
* @param schemaDir schemaDir
*/
private void upgradeDolphinSchedulerDML(String schemaDir) {
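// the schema dir is expected to be named like <version>_schema, so the prefix before the first '_' is used as the new version value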
String schemaVersion = schemaDir.split("_")[0];
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
String sqlFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_dml.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
logger.info("sqlSQLFilePath"+sqlFilePath);
Connection conn = null;
PreparedStatement pstmt = null;
try {
conn = dataSource.getConnection();
conn.setAutoCommit(false);
// Execute the upgraded dolphinscheduler dml
ScriptRunner scriptRunner = new ScriptRunner(conn, false, true);
Reader sqlReader = new FileReader(new File(sqlFilePath));
scriptRunner.runScript(sqlReader);
if (isExistsTable(T_VERSION_NAME)) {
// Change version in the version table to the new version
String upgradeSQL = String.format("update %s set version = ?",T_VERSION_NAME);
pstmt = conn.prepareStatement(upgradeSQL);
pstmt.setString(1, schemaVersion);
pstmt.executeUpdate();
}else if (isExistsTable(T_NEW_VERSION_NAME)) {
// Change version in the version table to the new version
String upgradeSQL = String.format("update %s set version = ?",T_NEW_VERSION_NAME);
pstmt = conn.prepareStatement(upgradeSQL);
pstmt.setString(1, schemaVersion);
pstmt.executeUpdate();
}
conn.commit();
} catch (FileNotFoundException e) {
try {
conn.rollback();
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException("sql file not found ", e);
} catch (IOException e) {
try {
conn.rollback();
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (SQLException e) {
try {
if (null != conn) {
conn.rollback();
}
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (Exception e) {
try {
if (null != conn) {
conn.rollback();
}
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
ConnectionUtils.releaseResource(pstmt, conn);
}
}
/**
* upgradeDolphinScheduler DDL
* @param schemaDir schemaDir
*/
private void upgradeDolphinSchedulerDDL(String schemaDir) {
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
String sqlFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_ddl.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
Connection conn = null;
PreparedStatement pstmt = null;
try {
conn = dataSource.getConnection();
String dbName = conn.getCatalog();
logger.info(dbName);
conn.setAutoCommit(true);
// Execute the dolphinscheduler ddl.sql for the upgrade
ScriptRunner scriptRunner = new ScriptRunner(conn, true, true);
Reader sqlReader = new FileReader(new File(sqlFilePath));
scriptRunner.runScript(sqlReader);
} catch (FileNotFoundException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException("sql file not found ", e);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (SQLException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (Exception e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
ConnectionUtils.releaseResource(pstmt, conn);
}
}
/**
* update version
* @param version version
*/
public void updateVersion(String version) {
// Change version in the version table to the new version
String versionName = T_VERSION_NAME;
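// releases from 1.2.0 onwards keep the version in t_ds_version; older releases still use the legacy t_escheduler_version table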
if(!SchemaUtils.isAGreatVersion("1.2.0" , version)){
versionName = "t_ds_version";
}
String upgradeSQL = String.format("update %s set version = ?",versionName);
PreparedStatement pstmt = null;
Connection conn = null;
try {
conn = dataSource.getConnection();
pstmt = conn.prepareStatement(upgradeSQL);
pstmt.setString(1, version);
pstmt.executeUpdate();
} catch (SQLException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException("sql: " + upgradeSQL, e);
} finally {
ConnectionUtils.releaseResource(pstmt, conn);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,431 |
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
|
1. Upgrade from 1.2.0 to 1.3.2 and re-upload the file in the resource center

2. View the task resources of the workflow definition; multiple deleted resource directories are displayed

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3431
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-07T03:58:50Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.service.UdfFuncService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import springfox.documentation.annotations.ApiIgnore;
import java.util.Map;
import static org.apache.dolphinscheduler.api.enums.Status.*;
/**
* resources controller
*/
@Api(tags = "RESOURCES_TAG", position = 1)
@RestController
@RequestMapping("resources")
public class ResourcesController extends BaseController {
private static final Logger logger = LoggerFactory.getLogger(ResourcesController.class);
@Autowired
private ResourcesService resourceService;
@Autowired
private UdfFuncService udfFuncService;
/**
* create directory
*
* @param loginUser login user
* @param type type
* @param alias alias
* @param description description
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@ApiOperation(value = "createDirctory", notes = "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/directory/create")
@ApiException(CREATE_RESOURCE_ERROR)
public Result createDirectory(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir) {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(), type, alias, description, pid, currentDir);
return resourceService.createDirectory(loginUser, alias, description, type, pid, currentDir);
}
/**
* create resource
*
* @param loginUser login user
* @param alias alias
* @param description description
* @param type type
* @param file file
* @return create result code
*/
@ApiOperation(value = "createResource", notes = "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/create")
@ApiException(CREATE_RESOURCE_ERROR)
public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam("file") MultipartFile file,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir) {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(), type, alias, description, file.getName(), file.getOriginalFilename());
return resourceService.createResource(loginUser, alias, description, type, file, pid, currentDir);
}
/**
* update resource
*
* @param loginUser login user
* @param alias alias
* @param resourceId resource id
* @param type resource type
* @param description description
* @param file resource file
* @return update result code
*/
@ApiOperation(value = "updateResource", notes = "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/update")
@ApiException(UPDATE_RESOURCE_ERROR)
public Result updateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "file" ,required = false) MultipartFile file) {
logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}, file: {}",
loginUser.getUserName(), type, alias, description, file);
return resourceService.updateResource(loginUser, resourceId, alias, description, type, file);
}
/**
* query resources list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceList", notes = "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType")
})
@GetMapping(value = "/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_ERROR)
public Result queryResourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type
) {
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type);
Map<String, Object> result = resourceService.queryResourceList(loginUser, type);
return returnDataList(result);
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
@ApiOperation(value = "queryResourceListPaging", notes = "QUERY_RESOURCE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "int"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
})
@GetMapping(value = "/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_PAGING)
public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "id") int id,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
) {
logger.info("query resource list, login user:{}, resource type:{}, search value:{}",
loginUser.getUserName(), type, searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = resourceService.queryResourceListPaging(loginUser, id, type, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
*/
@ApiOperation(value = "deleteResource", notes = "DELETE_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_RESOURCE_ERROR)
public Result deleteResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId
) throws Exception {
logger.info("login user {}, delete resource id: {}",
loginUser.getUserName(), resourceId);
return resourceService.delete(loginUser, resourceId);
}
/**
* verify resource by alias and type
*
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@ApiOperation(value = "verifyResourceName", notes = "VERIFY_RESOURCE_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/verify-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR)
public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "fullName") String fullName,
@RequestParam(value = "type") ResourceType type
) {
logger.info("login user {}, verfiy resource alias: {},resource type: {}",
loginUser.getUserName(), fullName, type);
return resourceService.verifyResourceName(fullName, type, loginUser);
}
/**
* query resources jar list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceJarList", notes = "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType")
})
@GetMapping(value = "/list/jar")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_ERROR)
public Result queryResourceJarList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type
) {
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
Map<String, Object> result = resourceService.queryResourceJarList(loginUser, type);
return returnDataList(result);
}
/**
* query resource by full name and type
*
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@ApiOperation(value = "queryResource", notes = "QUERY_BY_RESOURCE_NAME")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/queryResource")
@ResponseStatus(HttpStatus.OK)
@ApiException(RESOURCE_NOT_EXIST)
public Result queryResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "fullName", required = false) String fullName,
@RequestParam(value = "id", required = false) Integer id,
@RequestParam(value = "type") ResourceType type
) {
logger.info("login user {}, query resource by full name: {} or id: {},resource type: {}",
loginUser.getUserName(), fullName, id, type);
return resourceService.queryResource(fullName, id, type);
}
/**
* view resource file online
*
* @param loginUser login user
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
@ApiOperation(value = "viewResource", notes = "VIEW_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "skipLineNum", value = "SKIP_LINE_NUM", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "limit", value = "LIMIT", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/view")
@ApiException(VIEW_RESOURCE_FILE_ON_LINE_ERROR)
public Result viewResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "skipLineNum") int skipLineNum,
@RequestParam(value = "limit") int limit
) {
logger.info("login user {}, view resource : {}, skipLineNum {} , limit {}",
loginUser.getUserName(), resourceId, skipLineNum, limit);
return resourceService.readResource(resourceId, skipLineNum, limit);
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param description description
* @param content content
* @return create result code
*/
@ApiOperation(value = "onlineCreateResource", notes = "ONLINE_CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fileName", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "SUFFIX", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "content", value = "CONTENT", required = true, dataType = "String")
})
@PostMapping(value = "/online-create")
@ApiException(CREATE_RESOURCE_FILE_ON_LINE_ERROR)
public Result onlineCreateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "fileName") String fileName,
@RequestParam(value = "suffix") String fileSuffix,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "content") String content,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir
) {
logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {},desc : {},content : {}",
loginUser.getUserName(), fileName, type, fileSuffix, description, content, pid, currentDir);
if (StringUtils.isEmpty(content)) {
logger.error("resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.onlineCreateResource(loginUser, type, fileName, fileSuffix, description, content, pid, currentDir);
}
/**
* edit resource file online
*
* @param loginUser login user
* @param resourceId resource id
* @param content content
* @return update result code
*/
@ApiOperation(value = "updateResourceContent", notes = "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "content", value = "CONTENT", required = true, dataType = "String")
})
@PostMapping(value = "/update-content")
@ApiException(EDIT_RESOURCE_FILE_ON_LINE_ERROR)
public Result updateResourceContent(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "content") String content
) {
logger.info("login user {}, updateProcessInstance resource : {}",
loginUser.getUserName(), resourceId);
if (StringUtils.isEmpty(content)) {
logger.error("The resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.updateResourceContent(resourceId, content);
}
/**
* download resource file
*
* @param loginUser login user
* @param resourceId resource id
* @return resource content
*/
@ApiOperation(value = "downloadResource", notes = "DOWNLOAD_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/download")
@ResponseBody
@ApiException(DOWNLOAD_RESOURCE_FILE_ERROR)
public ResponseEntity downloadResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId) throws Exception {
logger.info("login user {}, download resource : {}",
loginUser.getUserName(), resourceId);
Resource file = resourceService.downloadResource(resourceId);
if (file == null) {
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.RESOURCE_NOT_EXIST.getMsg());
}
return ResponseEntity
.ok()
.header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
.body(file);
}
/**
* create udf function
*
* @param loginUser login user
* @param type udf type
* @param funcName function name
* @param argTypes argument types
* @param database database
* @param description description
* @param className class name
* @param resourceId resource id
* @return create result code
*/
@ApiOperation(value = "createUdfFunc", notes = "CREATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
@ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
@ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
@ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/udf-func/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_UDF_FUNCTION_ERROR)
public Result createUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") UdfType type,
@RequestParam(value = "funcName") String funcName,
@RequestParam(value = "className") String className,
@RequestParam(value = "argTypes", required = false) String argTypes,
@RequestParam(value = "database", required = false) String database,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "resourceId") int resourceId) {
logger.info("login user {}, create udf function, type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(), type, funcName, argTypes, database, description, resourceId);
return udfFuncService.createUdfFunction(loginUser, funcName, className, argTypes, database, description, type, resourceId);
}
/**
* view udf function
*
* @param loginUser login user
     * @param id udf function id
* @return udf function detail
*/
@ApiOperation(value = "viewUIUdfFunction", notes = "VIEW_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/udf-func/update-ui")
@ResponseStatus(HttpStatus.OK)
@ApiException(VIEW_UDF_FUNCTION_ERROR)
public Result viewUIUdfFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("id") int id) {
logger.info("login user {}, query udf{}",
loginUser.getUserName(), id);
Map<String, Object> map = udfFuncService.queryUdfFuncDetail(id);
return returnDataList(map);
}
/**
* update udf function
*
* @param loginUser login user
     * @param type udf type
* @param funcName function name
* @param argTypes argument types
     * @param database database name
* @param description description
* @param resourceId resource id
* @param className class name
* @param udfFuncId udf function id
* @return update result code
*/
@ApiOperation(value = "updateUdfFunc", notes = "UPDATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
@ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
@ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/udf-func/update")
@ApiException(UPDATE_UDF_FUNCTION_ERROR)
public Result updateUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int udfFuncId,
@RequestParam(value = "type") UdfType type,
@RequestParam(value = "funcName") String funcName,
@RequestParam(value = "className") String className,
@RequestParam(value = "argTypes", required = false) String argTypes,
@RequestParam(value = "database", required = false) String database,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "resourceId") int resourceId) {
logger.info("login user {}, updateProcessInstance udf function id: {},type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(), udfFuncId, type, funcName, argTypes, database, description, resourceId);
Map<String, Object> result = udfFuncService.updateUdfFunc(udfFuncId, funcName, className, argTypes, database, description, type, resourceId);
return returnDataList(result);
}
/**
* query udf function list paging
*
* @param loginUser login user
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return udf function list page
*/
@ApiOperation(value = "queryUdfFuncListPaging", notes = "QUERY_UDF_FUNCTION_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
})
@GetMapping(value = "/udf-func/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_UDF_FUNCTION_LIST_PAGING_ERROR)
public Result<Object> queryUdfFuncListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
) {
logger.info("query udf functions list, login user:{},search value:{}",
loginUser.getUserName(), searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}
result = udfFuncService.queryUdfFuncListPaging(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}
/**
* query udf func list by type
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryUdfFuncList", notes = "QUERY_UDF_FUNC_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType")
})
@GetMapping(value = "/udf-func/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_DATASOURCE_BY_TYPE_ERROR)
public Result<Object> queryUdfFuncList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("type") UdfType type) {
String userName = loginUser.getUserName();
userName = userName.replaceAll("[\n|\r|\t]", "_");
logger.info("query udf func list, user:{}, type:{}", userName, type);
Map<String, Object> result = udfFuncService.queryUdfFuncList(loginUser, type.ordinal());
return returnDataList(result);
}
/**
* verify udf function name can use or not
*
* @param loginUser login user
* @param name name
     * @return true if the name can be used, otherwise false
*/
@ApiOperation(value = "verifyUdfFuncName", notes = "VERIFY_UDF_FUNCTION_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "name", value = "FUNC_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/udf-func/verify-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_UDF_FUNCTION_NAME_ERROR)
public Result verifyUdfFuncName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "name") String name
) {
logger.info("login user {}, verfiy udf function name: {}",
loginUser.getUserName(), name);
return udfFuncService.verifyUdfFuncByName(name);
}
/**
* delete udf function
*
* @param loginUser login user
* @param udfFuncId udf function id
* @return delete result code
*/
@ApiOperation(value = "deleteUdfFunc", notes = "DELETE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/udf-func/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_UDF_FUNCTION_ERROR)
public Result deleteUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int udfFuncId
) {
logger.info("login user {}, delete udf function id: {}", loginUser.getUserName(), udfFuncId);
return udfFuncService.delete(udfFuncId);
}
/**
* authorized file resource list
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
@ApiOperation(value = "authorizedFile", notes = "AUTHORIZED_FILE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authed-file")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZED_FILE_RESOURCE_ERROR)
public Result authorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("authorized file resource, user: {}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizedFile(loginUser, userId);
return returnDataList(result);
}
/**
* unauthorized file resource list
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "authorizeResourceTree", notes = "AUTHORIZE_RESOURCE_TREE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authorize-resource-tree")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZE_RESOURCE_TREE)
public Result authorizeResourceTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("all resource file, user:{}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizeResourceTree(loginUser, userId);
return returnDataList(result);
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "unauthUDFFunc", notes = "UNAUTHORIZED_UDF_FUNC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/unauth-udf-func")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(UNAUTHORIZED_UDF_FUNCTION_ERROR)
public Result unauthUDFFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("unauthorized udf function, login user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.unauthorizedUDFFunction(loginUser, userId);
return returnDataList(result);
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
@ApiOperation(value = "authUDFFunc", notes = "AUTHORIZED_UDF_FUNC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authed-udf-func")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZED_UDF_FUNCTION_ERROR)
public Result authorizedUDFFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("auth udf function, login user:{}, auth user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizedUDFFunction(loginUser, userId);
return returnDataList(result);
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,431 |
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
|
1.Upgrade from 1.2.0 to 1.3.2, Re-upload the file in the resource center

2. View the task resources of the workflow definition,display multiple deleted resources directories

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3431
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-07T03:58:50Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
/**
* resource filter
*/
public class ResourceFilter implements IFilter {
/**
* resource suffix
*/
private String suffix;
/**
* resource list
*/
private List<Resource> resourceList;
/**
* constructor
* @param suffix resource suffix
* @param resourceList resource list
*/
public ResourceFilter(String suffix, List<Resource> resourceList) {
this.suffix = suffix;
this.resourceList = resourceList;
}
/**
* file filter
* @return file filtered by suffix
*/
public Set<Resource> fileFilter(){
Set<Resource> resources = resourceList.stream().filter(t -> {
String alias = t.getAlias();
return alias.endsWith(suffix);
}).collect(Collectors.toSet());
return resources;
}
/**
* list all parent dir
* @return parent resource dir set
*/
Set<Resource> listAllParent(){
Set<Resource> parentList = new HashSet<>();
Set<Resource> filterFileList = fileFilter();
for(Resource file:filterFileList){
parentList.add(file);
setAllParent(file,parentList);
}
return parentList;
}
/**
* list all parent dir
* @param resource resource
     * @param parentList parent resource dir set
*/
private void setAllParent(Resource resource,Set<Resource> parentList){
for (Resource resourceTemp : resourceList) {
if (resourceTemp.getId() == resource.getPid()) {
parentList.add(resourceTemp);
setAllParent(resourceTemp,parentList);
}
}
}
@Override
public List<Resource> filter() {
return new ArrayList<>(listAllParent());
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,431 |
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
|
1.Upgrade from 1.2.0 to 1.3.2, Re-upload the file in the resource center

2. View the task resources of the workflow definition,display multiple deleted resources directories

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3431
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-07T03:58:50Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* resources service
*/
@Service
public class ResourcesService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesService.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
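        // a directory created under root ("/") must not get a double slash; otherwise join currentDir and name with "/"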
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirecotry(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
        // check whether the resource name already exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
    private boolean checkResourceExists(String fullName, int userId, int type) {
        List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
        return CollectionUtils.isNotEmpty(resources);
    }
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @param file resource file
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type,
MultipartFile file) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
        // check whether the resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
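        // build the new full name by replacing only the last path segment of the original full name with the new alias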
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
if (file != null) {
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(FileUtils.suffix(originFullName))) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
if (!resource.isDirectory()) {
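            // for plain files a suffix change is rejected when the resource has already been authorized to other users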
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
Date now = new Date();
resource.setAlias(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
if (file != null) {
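            // a replacement file was uploaded together with the rename: refresh the stored file name and size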
resource.setFileName(file.getOriginalFilename());
resource.setSize(file.getSize());
}
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory()) {
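            // renaming a directory: rewrite the full name of every child resource and of any UDF functions bound to them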
List<Integer> childrenResource = listAllChildren(resource,false);
if (CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
                    Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
                    List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
                    List<Resource> childResourceList = resourceList.stream().map(t -> {
                        t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
                        t.setUpdateTime(now);
                        return t;
                    }).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
}
} else if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(fullName);
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>(5);
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name) && file == null) {
return result;
}
if (file != null) {
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
if (!fullName.equals(originFullName)) {
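            // the resource was renamed: remove the old file from HDFS now that the new one has been uploaded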
try {
HadoopUtils.getInstance().delete(originHdfsFileName,false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(String.format("delete resource: %s failed.", originFullName));
}
}
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
    public Map<String, Object> queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
        HashMap<String, Object> result = new HashMap<>(5);
        Page<Resource> page = new Page(pageNo, pageSize);
        int userId = loginUser.getId();
        if (isAdmin(loginUser)) {
            userId = 0;
        }
        if (directoryId != -1) {
            Resource directory = resourcesMapper.selectById(directoryId);
            if (directory == null) {
                putMsg(result, Status.RESOURCE_NOT_EXIST);
                return result;
            }
        }
        IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
                userId, directoryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
     * create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
     * @param file file
     * @param type resource type
     * @return true if the upload succeeds, otherwise false
     */
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
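        // keep only jar files plus the directories on their path, then rebuild the resource tree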
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//get resource and hdfs path
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// get all resource id of process definitions those is released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
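        // containSelf is true, so the array covers the resource itself as well as its whole subtree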
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
        // if resource type is UDF, check whether it is bound by any UDF function
        if (resource.getType() == ResourceType.UDF) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
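        // any id left after the intersection means a resource in this subtree is referenced by a released process definition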
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
     * @return verify result, SUCCESS if the resource name does not exist
*/
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if(tenant != null){
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
}else{
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
     * query resource by full name or id and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
     * @return the resource if queried by full name, or its parent resource if queried by id
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check preview or not by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
     * update resource content
     *
     * @param resourceId resource id
     * @param content content
     * @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check can edit by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
    /**
     * upload content to hdfs
     *
     * @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
String localFilename = "";
String hdfsFileName = "";
try {
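            // write the content to a randomly named local file first, then replace the target file on HDFS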
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// write file fail
logger.error("file {} fail, content is {}", localFilename, content);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws Exception exception
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if(user == null){
logger.error("user id {} not exists", userId);
throw new RuntimeException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if(tenant == null){
logger.error("tenant id {} not exists", user.getTenantId());
throw new RuntimeException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
     * list all resources that can be authorized to the specified user
     *
     * @param loginUser login user
     * @param userId user id
     * @return authorizable resource tree
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list ;
        if (CollectionUtils.isNotEmpty(resourceList)) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
}else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
//only admin can operate
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet = null;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)){
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* remove the already-authorized resources from the given set, leaving only the unauthorized ones
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet = null;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return tenant code, or null if the user or tenant does not exist
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null){
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether to include the resource itself in the returned list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
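// depth-first traversal: collect the id of every descendant of the given resource into childList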
List<Integer> children = resourcesMapper.listChildren(resourceId);
for (int childId : children) {
childList.add(childId);
listAllChildren(childId, childList);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,431 |
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
|
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the resource center

2. View the task resources of the workflow definition; multiple deleted resource directories are displayed

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3431
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-07T03:58:50Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ResourcesControllerTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import com.alibaba.fastjson.JSONObject;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
/**
* resources controller test
*/
public class ResourcesControllerTest extends AbstractControllerTest{
private static Logger logger = LoggerFactory.getLogger(ResourcesControllerTest.class);
@Test
public void testQuerytResourceList() throws Exception {
MvcResult mvcResult = mockMvc.perform(get("/resources/list")
.header(SESSION_ID, sessionId)
.param("type", ResourceType.FILE.name()))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryResourceListPaging() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(ResourceType.FILE));
paramsMap.add("pageNo", "1");
paramsMap.add("searchVal", "test");
paramsMap.add("pageSize", "1");
MvcResult mvcResult = mockMvc.perform(get("/resources/list-paging")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testVerifyResourceName() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("name","list_resources_1.sh");
paramsMap.add("type","FILE");
MvcResult mvcResult = mockMvc.perform(get("/resources/verify-name")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testViewResource() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id","5");
paramsMap.add("skipLineNum","2");
paramsMap.add("limit","100");
MvcResult mvcResult = mockMvc.perform(get("/resources/view")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testOnlineCreateResource() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(ResourceType.FILE));
paramsMap.add("fileName","test_file_1");
paramsMap.add("suffix","sh");
paramsMap.add("description","test");
paramsMap.add("content","echo 1111");
MvcResult mvcResult = mockMvc.perform(post("/resources/online-create")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUpdateResourceContent() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "1");
paramsMap.add("content","echo test_1111");
MvcResult mvcResult = mockMvc.perform(post("/resources/update-content")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testDownloadResource() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "5");
MvcResult mvcResult = mockMvc.perform(get("/resources/download")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testCreateUdfFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(UdfType.HIVE));
paramsMap.add("funcName", "test_udf");
paramsMap.add("className", "com.test.word.contWord");
paramsMap.add("argTypes", "argTypes");
paramsMap.add("database", "database");
paramsMap.add("description", "description");
paramsMap.add("resourceId", "1");
MvcResult mvcResult = mockMvc.perform(post("/resources/udf-func/create")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testViewUIUdfFunction() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "1");
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/update-ui")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUpdateUdfFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "1");
paramsMap.add("type", String.valueOf(UdfType.HIVE));
paramsMap.add("funcName", "update_duf");
paramsMap.add("className", "com.test.word.contWord");
paramsMap.add("argTypes", "argTypes");
paramsMap.add("database", "database");
paramsMap.add("description", "description");
paramsMap.add("resourceId", "1");
MvcResult mvcResult = mockMvc.perform(post("/resources/udf-func/update")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assert.assertEquals(Status.TENANT_NOT_EXIST.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryUdfFuncList() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("pageNo", "1");
paramsMap.add("searchVal", "udf");
paramsMap.add("pageSize", "1");
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/list-paging")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testQueryResourceList() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("type", String.valueOf(UdfType.HIVE));
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/list")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testVerifyUdfFuncName() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("name", "test");
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/verify-name")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testAuthorizedFile() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "2");
MvcResult mvcResult = mockMvc.perform(get("/resources/authed-file")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUnauthorizedFile() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "2");
MvcResult mvcResult = mockMvc.perform(get("/resources/unauth-file")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testAuthorizedUDFFunction() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "2");
MvcResult mvcResult = mockMvc.perform(get("/resources/authed-udf-func")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testUnauthUDFFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("userId", "2");
MvcResult mvcResult = mockMvc.perform(get("/resources/unauth-udf-func")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isCreated())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testDeleteUdfFunc() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("id", "1");
MvcResult mvcResult = mockMvc.perform(get("/resources/udf-func/delete")
.header(SESSION_ID, sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
@Test
public void testDeleteResource() throws Exception {
MvcResult mvcResult = mockMvc.perform(get("/resources/delete")
.header(SESSION_ID, sessionId)
.param("id", "2"))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON_UTF8))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
result.getCode().equals(Status.SUCCESS.getCode());
JSONObject object = (JSONObject) JSON.parse(mvcResult.getResponse().getContentAsString());
Assert.assertEquals(Status.SUCCESS.getCode(),result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,431 |
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
|
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the resource center

2. View the task resources of the workflow definition; multiple deleted resource directories are displayed

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3431
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-07T03:58:50Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.dto.resources.filter;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
* resource filter test
*/
public class ResourceFilterTest {
private static Logger logger = LoggerFactory.getLogger(ResourceFilterTest.class);
@Test
public void filterTest(){
List<Resource> allList = new ArrayList<>();
Resource resource1 = new Resource(3,-1,"b","/b",true);
Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
Resource resource5 = new Resource(7,-1,"b2","/b2",true);
Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
allList.add(resource1);
allList.add(resource2);
allList.add(resource3);
allList.add(resource4);
allList.add(resource5);
allList.add(resource6);
allList.add(resource7);
ResourceFilter resourceFilter = new ResourceFilter(".jar",allList);
List<Resource> resourceList = resourceFilter.filter();
Assert.assertNotNull(resourceList);
resourceList.stream().forEach(t-> logger.info(t.toString()));
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,431 |
[Bug][api] After the resource is re-uploaded, the deleted resource directory displayed in the workflow definition is incorrect
|
1. Upgrade from 1.2.0 to 1.3.2, then re-upload the file in the resource center

2. View the task resources of the workflow definition; multiple deleted resource directories are displayed

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3431
|
https://github.com/apache/dolphinscheduler/pull/3498
|
e367f90bb73c9682739308a0a98887a1c0f407ef
|
5f5c08402fdcecca8c35f4dc3021cc089949ef13
| 2020-08-07T03:58:50Z |
java
| 2020-08-14T08:47:01Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/upgrade/UpgradeDao.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.upgrade;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.AbstractBaseDao;
import org.apache.dolphinscheduler.dao.datasource.ConnectionFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.sql.DataSource;
import java.io.*;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public abstract class UpgradeDao extends AbstractBaseDao {
public static final Logger logger = LoggerFactory.getLogger(UpgradeDao.class);
private static final String T_VERSION_NAME = "t_escheduler_version";
private static final String T_NEW_VERSION_NAME = "t_ds_version";
private static final String rootDir = System.getProperty("user.dir");
protected static final DataSource dataSource = getDataSource();
private static final DbType dbType = getCurrentDbType();
@Override
protected void init() {
}
/**
* get datasource
* @return DataSource
*/
public static DataSource getDataSource(){
return ConnectionFactory.getInstance().getDataSource();
}
/**
* get db type
* @return dbType
*/
public static DbType getDbType(){
return dbType;
}
/**
* get current dbType
* @return the DbType of the connected database, or null if it cannot be determined
*/
private static DbType getCurrentDbType(){
Connection conn = null;
try {
conn = dataSource.getConnection();
String name = conn.getMetaData().getDatabaseProductName().toUpperCase();
return DbType.valueOf(name);
} catch (Exception e) {
logger.error(e.getMessage(),e);
return null;
}finally {
ConnectionUtils.releaseResource(conn);
}
}
/**
* init schema
*/
public void initSchema(){
DbType dbType = getDbType();
String initSqlPath = "";
if (dbType != null) {
switch (dbType) {
case MYSQL:
initSqlPath = "/sql/create/release-1.0.0_schema/mysql/";
initSchema(initSqlPath);
break;
case POSTGRESQL:
initSqlPath = "/sql/create/release-1.2.0_schema/postgresql/";
initSchema(initSqlPath);
break;
default:
logger.error("not support sql type: {},can't upgrade", dbType);
throw new IllegalArgumentException("not support sql type,can't upgrade");
}
}
}
/**
* init schema
* @param initSqlPath initSqlPath
*/
public void initSchema(String initSqlPath) {
// Execute the dolphinscheduler DDL, it cannot be rolled back
runInitDDL(initSqlPath);
// Execute the dolphinscheduler DML, it can be rolled back
runInitDML(initSqlPath);
}
/**
* run DML
* @param initSqlPath initSqlPath
*/
private void runInitDML(String initSqlPath) {
Connection conn = null;
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_dml.sql";
try {
conn = dataSource.getConnection();
conn.setAutoCommit(false);
// Execute the dolphinscheduler_dml.sql script to import related data of dolphinscheduler
ScriptRunner initScriptRunner = new ScriptRunner(conn, false, true);
Reader initSqlReader = new FileReader(new File(mysqlSQLFilePath));
initScriptRunner.runScript(initSqlReader);
conn.commit();
} catch (IOException e) {
try {
conn.rollback();
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (Exception e) {
try {
if (null != conn) {
conn.rollback();
}
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
ConnectionUtils.releaseResource(conn);
}
}
/**
* run DDL
* @param initSqlPath initSqlPath
*/
private void runInitDDL(String initSqlPath) {
Connection conn = null;
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
//String mysqlSQLFilePath = rootDir + "/sql/create/release-1.0.0_schema/mysql/dolphinscheduler_ddl.sql";
String mysqlSQLFilePath = rootDir + initSqlPath + "dolphinscheduler_ddl.sql";
try {
conn = dataSource.getConnection();
// Execute the dolphinscheduler_ddl.sql script to create the table structure of dolphinscheduler
ScriptRunner initScriptRunner = new ScriptRunner(conn, true, true);
Reader initSqlReader = new FileReader(new File(mysqlSQLFilePath));
initScriptRunner.runScript(initSqlReader);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (Exception e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
ConnectionUtils.releaseResource(conn);
}
}
/**
* determines whether a table exists
* @param tableName tableName
* @return true if the table exists, otherwise false
*/
public abstract boolean isExistsTable(String tableName);
/**
* determines whether a field exists in the specified table
* @param tableName tableName
* @param columnName columnName
* @return true if the column exists, otherwise false
*/
public abstract boolean isExistsColumn(String tableName,String columnName);
/**
* get current version
* @param versionName versionName
* @return version
*/
public String getCurrentVersion(String versionName) {
String sql = String.format("select version from %s",versionName);
Connection conn = null;
ResultSet rs = null;
PreparedStatement pstmt = null;
String version = null;
try {
conn = dataSource.getConnection();
pstmt = conn.prepareStatement(sql);
rs = pstmt.executeQuery();
if (rs.next()) {
version = rs.getString(1);
}
return version;
} catch (SQLException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException("sql: " + sql, e);
} finally {
ConnectionUtils.releaseResource(rs, pstmt, conn);
}
}
/**
* upgrade DolphinScheduler
* @param schemaDir schema dir
*/
public void upgradeDolphinScheduler(String schemaDir) {
upgradeDolphinSchedulerDDL(schemaDir);
upgradeDolphinSchedulerDML(schemaDir);
}
/**
* upgrade DolphinScheduler worker group
* ds-1.3.0 modify the worker group for process definition json
*/
public void upgradeDolphinSchedulerWorkerGroup() {
updateProcessDefinitionJsonWorkerGroup();
}
/**
* upgrade DolphinScheduler resource list
* ds-1.3.2 modify the resource list for process definition json
*/
public void upgradeDolphinSchedulerResourceList() {
updateProcessDefinitionJsonResourceList();
}
/**
* updateProcessDefinitionJsonWorkerGroup
*/
protected void updateProcessDefinitionJsonWorkerGroup(){
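// walk every process definition json and replace the legacy numeric workerGroupId on each task with a named workerGroup ("default" when the id is -1)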
WorkerGroupDao workerGroupDao = new WorkerGroupDao();
ProcessDefinitionDao processDefinitionDao = new ProcessDefinitionDao();
Map<Integer,String> replaceProcessDefinitionMap = new HashMap<>();
try {
Map<Integer, String> oldWorkerGroupMap = workerGroupDao.queryAllOldWorkerGroup(dataSource.getConnection());
Map<Integer,String> processDefinitionJsonMap = processDefinitionDao.queryAllProcessDefinition(dataSource.getConnection());
for (Map.Entry<Integer,String> entry : processDefinitionJsonMap.entrySet()){
JSONObject jsonObject = JSONObject.parseObject(entry.getValue());
JSONArray tasks = JSONArray.parseArray(jsonObject.getString("tasks"));
for (int i = 0 ;i < tasks.size() ; i++){
JSONObject task = tasks.getJSONObject(i);
Integer workerGroupId = task.getInteger("workerGroupId");
if (workerGroupId != null) {
if (workerGroupId == -1) {
task.put("workerGroup", "default");
} else {
task.put("workerGroup", oldWorkerGroupMap.get(workerGroupId));
}
}
}
jsonObject.remove(jsonObject.getString("tasks"));
jsonObject.put("tasks",tasks);
replaceProcessDefinitionMap.put(entry.getKey(),jsonObject.toJSONString());
}
if (replaceProcessDefinitionMap.size() > 0){
processDefinitionDao.updateProcessDefinitionJson(dataSource.getConnection(),replaceProcessDefinitionMap);
}
}catch (Exception e){
logger.error("update process definition json workergroup error",e);
}
}
/**
* updateProcessDefinitionJsonResourceList
*/
protected void updateProcessDefinitionJsonResourceList(){
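// walk every process definition json and, for each task's resourceList, fill in missing resource ids (id == 0) by matching the resource full name against the resources already stored in the database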
ResourceDao resourceDao = new ResourceDao();
ProcessDefinitionDao processDefinitionDao = new ProcessDefinitionDao();
Map<Integer,String> replaceProcessDefinitionMap = new HashMap<>();
try {
Map<String,Integer> resourcesMap = resourceDao.listAllResources(dataSource.getConnection());
Map<Integer,String> processDefinitionJsonMap = processDefinitionDao.queryAllProcessDefinition(dataSource.getConnection());
for (Map.Entry<Integer,String> entry : processDefinitionJsonMap.entrySet()){
JSONObject jsonObject = JSONObject.parseObject(entry.getValue());
JSONArray tasks = JSONArray.parseArray(jsonObject.getString("tasks"));
for (int i = 0 ;i < tasks.size() ; i++){
JSONObject task = tasks.getJSONObject(i);
JSONObject param = (JSONObject) task.get("params");
if (param != null) {
List<ResourceInfo> resourceList = JSONUtils.toList(param.getString("resourceList"), ResourceInfo.class);
if (CollectionUtils.isNotEmpty(resourceList)) {
List<ResourceInfo> newResourceList = resourceList.stream().map(resInfo -> {
String fullName = resInfo.getRes().startsWith("/") ? resInfo.getRes() : String.format("/%s",resInfo.getRes());
if (resInfo.getId() == 0 && resourcesMap.containsKey(fullName)) {
resInfo.setId(resourcesMap.get(fullName));
}
return resInfo;
}).collect(Collectors.toList());
param.put("resourceList",JSONArray.parse(JSONObject.toJSONString(newResourceList)));
}
}
task.put("params",param);
}
jsonObject.remove(jsonObject.getString("tasks"));
jsonObject.put("tasks",tasks);
replaceProcessDefinitionMap.put(entry.getKey(),jsonObject.toJSONString());
}
if (replaceProcessDefinitionMap.size() > 0){
processDefinitionDao.updateProcessDefinitionJson(dataSource.getConnection(),replaceProcessDefinitionMap);
}
}catch (Exception e){
logger.error("update process definition json resource list error",e);
}
}
/**
* upgradeDolphinScheduler DML
* @param schemaDir schemaDir
*/
private void upgradeDolphinSchedulerDML(String schemaDir) {
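// schemaDir looks like <version>_<suffix>; the part before the first underscore is the schema version written back to the version table after the DML script runs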
String schemaVersion = schemaDir.split("_")[0];
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
String sqlFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_dml.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
logger.info("sqlSQLFilePath"+sqlFilePath);
Connection conn = null;
PreparedStatement pstmt = null;
try {
conn = dataSource.getConnection();
conn.setAutoCommit(false);
// Execute the upgraded dolphinscheduler dml
ScriptRunner scriptRunner = new ScriptRunner(conn, false, true);
Reader sqlReader = new FileReader(new File(sqlFilePath));
scriptRunner.runScript(sqlReader);
if (isExistsTable(T_VERSION_NAME)) {
// Change version in the version table to the new version
String upgradeSQL = String.format("update %s set version = ?",T_VERSION_NAME);
pstmt = conn.prepareStatement(upgradeSQL);
pstmt.setString(1, schemaVersion);
pstmt.executeUpdate();
}else if (isExistsTable(T_NEW_VERSION_NAME)) {
// Change version in the version table to the new version
String upgradeSQL = String.format("update %s set version = ?",T_NEW_VERSION_NAME);
pstmt = conn.prepareStatement(upgradeSQL);
pstmt.setString(1, schemaVersion);
pstmt.executeUpdate();
}
conn.commit();
} catch (FileNotFoundException e) {
try {
conn.rollback();
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException("sql file not found ", e);
} catch (IOException e) {
try {
conn.rollback();
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (SQLException e) {
try {
if (null != conn) {
conn.rollback();
}
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (Exception e) {
try {
if (null != conn) {
conn.rollback();
}
} catch (SQLException e1) {
logger.error(e1.getMessage(),e1);
}
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
ConnectionUtils.releaseResource(pstmt, conn);
}
}
/**
* upgradeDolphinScheduler DDL
* @param schemaDir schemaDir
*/
private void upgradeDolphinSchedulerDDL(String schemaDir) {
if (StringUtils.isEmpty(rootDir)) {
throw new RuntimeException("Environment variable user.dir not found");
}
String sqlFilePath = MessageFormat.format("{0}/sql/upgrade/{1}/{2}/dolphinscheduler_ddl.sql",rootDir,schemaDir,getDbType().name().toLowerCase());
Connection conn = null;
PreparedStatement pstmt = null;
try {
conn = dataSource.getConnection();
String dbName = conn.getCatalog();
logger.info(dbName);
conn.setAutoCommit(true);
// Execute the dolphinscheduler ddl.sql for the upgrade
ScriptRunner scriptRunner = new ScriptRunner(conn, true, true);
Reader sqlReader = new FileReader(new File(sqlFilePath));
scriptRunner.runScript(sqlReader);
} catch (FileNotFoundException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException("sql file not found ", e);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (SQLException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} catch (Exception e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage(),e);
} finally {
ConnectionUtils.releaseResource(pstmt, conn);
}
}
/**
* update version
* @param version version
*/
public void updateVersion(String version) {
// Change version in the version table to the new version
String versionName = T_VERSION_NAME;
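// schemas older than 1.2.0 keep the version in t_escheduler_version; 1.2.0 and later use t_ds_version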
if(!SchemaUtils.isAGreatVersion("1.2.0" , version)){
versionName = "t_ds_version";
}
String upgradeSQL = String.format("update %s set version = ?",versionName);
PreparedStatement pstmt = null;
Connection conn = null;
try {
conn = dataSource.getConnection();
pstmt = conn.prepareStatement(upgradeSQL);
pstmt.setString(1, version);
pstmt.executeUpdate();
} catch (SQLException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException("sql: " + upgradeSQL, e);
} finally {
ConnectionUtils.releaseResource(pstmt, conn);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,480 |
[Bug][Server] With a multi-level zookeeper root directory, tasks cannot be assigned
|
**Describe the bug**
When the zookeeper root is a multi-level directory, tasks cannot be assigned to a worker.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Modify the configuration file zookeeper.properties:
zookeeper.dolphinscheduler.root=/path/to/dolphinscheduler
2. Start the master and worker normally
3. Create a new process definition with a shell node that runs echo "hello world"
**Expected behavior**
1. The console shows the registered master and worker as expected
2. After the task starts, the master reports the error: "fail to execute : xxx due to no suitable worker, current task need to yyy worker group execute"
**Which version of Dolphin Scheduler:**
-[1.3.1.release]
**Additional context**
1. ExecutorDispatcher#dispatch // hostManager.select uses the worker group to find, among the registered workers, the ones allowed to execute the task
2. ZookeeperNodeManager$WorkerGroupNodeListener#dataChanged listens for worker node changes
3. The faulty code:
> `
if (event.getType() == TreeCacheEvent.Type.NODE_ADDED) {
logger.info("worker group node : {} added.", path);
String group = parseGroup(path); // parsing the group from the path fails here, so syncWorkerGroupNodes is never refreshed
Set<String> workerNodes = workerGroupNodes.getOrDefault(group, new HashSet<>());
Set<String> previousNodes = new HashSet<>(workerNodes);
Set<String> currentNodes = registryCenter.getWorkerGroupNodesDirectly(group);
logger.info("currentNodes : {}", currentNodes);
syncWorkerGroupNodes(group, currentNodes);
}
`
> `
private String parseGroup(String path){
String[] parts = path.split("\\/");
if(parts.length != 6){
throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path));
}
String group = parts[4]; // only paths like /dolphinscheduler/nodes/worker/default match this code
return group;
}
`
4. Temporary workaround (a fuller sketch follows below):
`String group = parts[parts.length-2]; // temporary fix`
5. Root cause: workerGroupNodes holds the registered worker nodes and is only refreshed through dataChanged; because parseGroup throws for multi-level roots, the map is never updated, so ExecutorDispatcher#dispatch cannot find any worker and the task cannot proceed.
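A minimal sketch of the workaround in item 4, assuming the worker node path always ends with `<group>/<host:port>` so the group is the second-to-last segment regardless of how deep the configured zookeeper root is; this only illustrates the idea and is not necessarily the change merged in the linked PR:

```java
private String parseGroup(String path) {
    // e.g. /path/to/dolphinscheduler/nodes/worker/default/192.168.0.1:1234
    String[] parts = path.split("/");
    if (parts.length < 6) {
        throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path));
    }
    // the group sits right before the host:port segment, whatever the root depth
    return parts[parts.length - 2];
}
```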
|
https://github.com/apache/dolphinscheduler/issues/3480
|
https://github.com/apache/dolphinscheduler/pull/3532
|
ad89f433f16d65ddcab3186baa84e14762d03e52
|
0b7b6d4e2a600bb3af36363c4814da6b169317bf
| 2020-08-13T06:13:48Z |
java
| 2020-08-17T23:09:39Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/registry/ZookeeperNodeManager.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.registry;
import org.apache.commons.collections.CollectionUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.service.zk.AbstractListener;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
/**
* zookeeper node manager
*/
@Service
public class ZookeeperNodeManager implements InitializingBean {
private final Logger logger = LoggerFactory.getLogger(ZookeeperNodeManager.class);
/**
* master lock
*/
private final Lock masterLock = new ReentrantLock();
/**
* worker group lock
*/
private final Lock workerGroupLock = new ReentrantLock();
/**
* worker group nodes
*/
private final ConcurrentHashMap<String, Set<String>> workerGroupNodes = new ConcurrentHashMap<>();
/**
* master nodes
*/
private final Set<String> masterNodes = new HashSet<>();
/**
* zookeeper registry center
*/
@Autowired
private ZookeeperRegistryCenter registryCenter;
/**
* alert dao
*/
@Autowired
private AlertDao alertDao;
/**
* init listener
* @throws Exception if error throws Exception
*/
@Override
public void afterPropertiesSet() throws Exception {
/**
* load nodes from zookeeper
*/
load();
/**
* init MasterNodeListener listener
*/
registryCenter.getZookeeperCachedOperator().addListener(new MasterNodeListener());
/**
* init WorkerNodeListener listener
*/
registryCenter.getZookeeperCachedOperator().addListener(new WorkerGroupNodeListener());
}
/**
* load nodes from zookeeper
*/
private void load(){
/**
* master nodes from zookeeper
*/
Set<String> masterNodes = registryCenter.getMasterNodesDirectly();
syncMasterNodes(masterNodes);
/**
* worker group nodes from zookeeper
*/
Set<String> workerGroups = registryCenter.getWorkerGroupDirectly();
for(String workerGroup : workerGroups){
syncWorkerGroupNodes(workerGroup, registryCenter.getWorkerGroupNodesDirectly(workerGroup));
}
}
/**
* worker group node listener
*/
class WorkerGroupNodeListener extends AbstractListener {
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
if(registryCenter.isWorkerPath(path)){
try {
if (event.getType() == TreeCacheEvent.Type.NODE_ADDED) {
logger.info("worker group node : {} added.", path);
String group = parseGroup(path);
Set<String> currentNodes = registryCenter.getWorkerGroupNodesDirectly(group);
logger.info("currentNodes : {}", currentNodes);
syncWorkerGroupNodes(group, currentNodes);
} else if (event.getType() == TreeCacheEvent.Type.NODE_REMOVED) {
logger.info("worker group node : {} down.", path);
String group = parseGroup(path);
Set<String> currentNodes = registryCenter.getWorkerGroupNodesDirectly(group);
syncWorkerGroupNodes(group, currentNodes);
alertDao.sendServerStopedAlert(1, path, "WORKER");
}
} catch (IllegalArgumentException ignore) {
logger.warn(ignore.getMessage());
} catch (Exception ex) {
logger.error("WorkerGroupListener capture data change and get data failed", ex);
}
}
}
private String parseGroup(String path){
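// NOTE: assumes the default single-level zookeeper root, i.e. a path like /dolphinscheduler/nodes/worker/<group>/<host:port> that splits into exactly 6 segments; a multi-level root (see issue #3480 above) produces more segments and is rejected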
String[] parts = path.split("\\/");
if(parts.length != 6){
throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path));
}
String group = parts[4];
return group;
}
}
/**
* master node listener
*/
class MasterNodeListener extends AbstractListener {
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
if (registryCenter.isMasterPath(path)) {
try {
if (event.getType() == TreeCacheEvent.Type.NODE_ADDED) {
logger.info("master node : {} added.", path);
Set<String> currentNodes = registryCenter.getMasterNodesDirectly();
syncMasterNodes(currentNodes);
} else if (event.getType() == TreeCacheEvent.Type.NODE_REMOVED) {
logger.info("master node : {} down.", path);
Set<String> currentNodes = registryCenter.getMasterNodesDirectly();
syncMasterNodes(currentNodes);
alertDao.sendServerStopedAlert(1, path, "MASTER");
}
} catch (Exception ex) {
logger.error("MasterNodeListener capture data change and get data failed.", ex);
}
}
}
}
/**
* get master nodes
* @return master nodes
*/
public Set<String> getMasterNodes() {
masterLock.lock();
try {
return Collections.unmodifiableSet(masterNodes);
} finally {
masterLock.unlock();
}
}
/**
* sync master nodes
* @param nodes master nodes
*/
private void syncMasterNodes(Set<String> nodes){
masterLock.lock();
try {
masterNodes.clear();
masterNodes.addAll(nodes);
} finally {
masterLock.unlock();
}
}
/**
* sync worker group nodes
* @param workerGroup worker group
* @param nodes worker nodes
*/
private void syncWorkerGroupNodes(String workerGroup, Set<String> nodes){
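// normalize the group name to lower case and replace the whole node set for that group under the lock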
workerGroupLock.lock();
try {
workerGroup = workerGroup.toLowerCase();
Set<String> workerNodes = workerGroupNodes.getOrDefault(workerGroup, new HashSet<>());
workerNodes.clear();
workerNodes.addAll(nodes);
workerGroupNodes.put(workerGroup, workerNodes);
} finally {
workerGroupLock.unlock();
}
}
public Map<String, Set<String>> getWorkerGroupNodes(){
return Collections.unmodifiableMap(workerGroupNodes);
}
/**
* get worker group nodes
* @param workerGroup workerGroup
* @return worker nodes
*/
public Set<String> getWorkerGroupNodes(String workerGroup){
workerGroupLock.lock();
try {
if(StringUtils.isEmpty(workerGroup)){
workerGroup = DEFAULT_WORKER_GROUP;
}
workerGroup = workerGroup.toLowerCase();
Set<String> nodes = workerGroupNodes.get(workerGroup);
if(CollectionUtils.isNotEmpty(nodes)){
return Collections.unmodifiableSet(nodes);
}
return nodes;
} finally {
workerGroupLock.unlock();
}
}
/**
* close
*/
public void close(){
registryCenter.close();
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,536 |
[Bug][API] If the user has no tenant, creating a resource throws an NPE.
|
**Describe the bug**
[Bug][API] If the user has no tenant, creating a resource throws an NPE.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Which version of Dolphin Scheduler:**
-[1.3.2-release]
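A minimal sketch of the kind of guard this report asks for, written as if it sat inside the `ResourcesService` shown below (so `tenantMapper`, `putMsg`, `logger`, `Tenant`, `User` and `Status.TENANT_NOT_EXIST` come from that class and its imports); `checkTenantExists` is a hypothetical helper name, not the actual fix in the linked PR:

```java
// hypothetical helper: verify the login user has a valid tenant before touching storage
private boolean checkTenantExists(User loginUser, Result result) {
    Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
    if (tenant == null) {
        logger.error("tenant not exists for user {}", loginUser.getId());
        putMsg(result, Status.TENANT_NOT_EXIST);
        return false;
    }
    return true;
}
```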
|
https://github.com/apache/dolphinscheduler/issues/3536
|
https://github.com/apache/dolphinscheduler/pull/3537
|
a3f61238f31412f3c1b54e8644a675487eb0132b
|
0505ebf45d93fc1518386804ceffa6b36595f9c5
| 2020-08-18T05:23:28Z |
java
| 2020-08-18T05:48:54Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* resources service
*/
@Service
public class ResourcesService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesService.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirecotry(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
// check whether the resource name already exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String fullName, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
if (resources != null && resources.size() > 0) {
return true;
}
return false;
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @param file resource file
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type,
MultipartFile file) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
//check whether the resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
if (file != null) {
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(FileUtils.suffix(originFullName))) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
if (!resource.isDirectory()) {
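// for a plain file, work out whether the rename would change the suffix; a suffix change is rejected below if the resource has been authorized to other users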
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// updateResource data
Date now = new Date();
resource.setAlias(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
if (file != null) {
resource.setFileName(file.getOriginalFilename());
resource.setSize(file.getSize());
}
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory()) {
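// renaming a directory: rewrite the fullName of every descendant resource, and of any UDF functions bound to those descendants, so they follow the new path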
List<Integer> childrenResource = listAllChildren(resource,false);
if (CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList = new ArrayList<>();
Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
}
} else if (ResourceType.UDF.equals(resource.getType())) {
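// renaming a single UDF resource: point every UDF function bound to it at the new full name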
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(fullName);
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>(5);
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name) && file == null) {
return result;
}
if (file != null) {
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
if (!fullName.equals(originFullName)) {
try {
HadoopUtils.getInstance().delete(originHdfsFileName,false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(String.format("delete resource: %s failed.", originFullName));
}
}
return result;
}
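// no new file was uploaded but the name changed: copy the existing HDFS file to the path matching the new full name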
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param directoryId directory id
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
public Map<String, Object> queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(5);
Page<Resource> page = new Page(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId = 0;
}
if (directoryId != -1) {
Resource directory = resourcesMapper.selectById(directoryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId, directoryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// verify that the suffix of the uploaded file matches the target full name
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query resource list by program type
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceByProgramType(User loginUser, ResourceType type, ProgramType programType) {
Map<String, Object> result = new HashMap<>(5);
String suffix = ".jar";
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
if (programType != null) {
switch (programType) {
case JAVA:
break;
case SCALA:
break;
case PYTHON:
suffix = ".py";
break;
}
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
List<Resource> resources = new ResourceFilter(suffix,new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//get resource and hdfs path
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// get all resource id of process definitions those is released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
// if the resource type is UDF, check whether it is bound to any UDF function
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
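// keep only the descendant ids that are referenced by released process definitions; any remaining id blocks the deletion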
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if(tenant != null){
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
}else{
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
* verify resource by full name or pid and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return true if the resource full name or pid not exists, otherwise return false
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check preview or not by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
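// write the content to HDFS; a failure below throws so that the surrounding @Transactional rolls back the insert above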
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* updateProcessInstance resource
*
* @param resourceId resource id
* @param content content
* @return update result cod
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check can edit by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
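// overwrite the file content on HDFS; on failure the thrown RuntimeException lets the surrounding transaction roll back the metadata update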
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// write file fail
logger.error("file {} fail, content is {}", localFilename, content);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
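// overwrite semantics: remove any existing HDFS file before copying the freshly written local file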
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws Exception exception
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if(user == null){
logger.error("user id {} not exists", userId);
throw new RuntimeException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if(tenant == null){
logger.error("tenant id {} not exists", user.getTenantId());
throw new RuntimeException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* list all file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list ;
if (resourceList != null && resourceList.size() > 0) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
}else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
//only admin can operate
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet = null;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)){
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get authorized resource list
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet = null;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null){
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether add self to children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
List<Integer> children = resourcesMapper.listChildren(resourceId);
for (int childId : children) {
childList.add(childId);
listAllChildren(childId, childList);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,536 |
[Bug][API] If the user does not have a tenant, creating a resource throws an NPE.
|
**Describe the bug**
[Bug][API] If the user does not have a tenant, creating a resource throws an NPE.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Which version of Dolphin Scheduler:**
-[1.3.2-release]
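The NPE comes from call sites such as `upload` and `createDirecotry` in `ResourcesService`, which dereference `tenantMapper.queryById(loginUser.getTenantId()).getTenantCode()` without a null check. A minimal guard sketch (illustrative only, not necessarily the shipped patch) could look like this:
```java
// Hypothetical guard, assumed to run before any HDFS access in createResource():
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if (tenant == null) {
    // the user has no tenant bound: report TENANT_NOT_EXIST instead of crashing with an NPE
    putMsg(result, Status.TENANT_NOT_EXIST);
    return result;
}
String tenantCode = tenant.getTenantCode();
```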
|
https://github.com/apache/dolphinscheduler/issues/3536
|
https://github.com/apache/dolphinscheduler/pull/3537
|
a3f61238f31412f3c1b54e8644a675487eb0132b
|
0505ebf45d93fc1518386804ceffa6b36595f9c5
| 2020-08-18T05:23:28Z |
java
| 2020-08-18T05:48:54Z |
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.mock.web.MockMultipartFile;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({"sun.security.*", "javax.net.*"})
@PrepareForTest({HadoopUtils.class,PropertyUtils.class, FileUtils.class,org.apache.dolphinscheduler.api.utils.FileUtils.class})
public class ResourcesServiceTest {
private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceTest.class);
@InjectMocks
private ResourcesService resourcesService;
@Mock
private ResourceMapper resourcesMapper;
@Mock
private TenantMapper tenantMapper;
@Mock
private ResourceUserMapper resourceUserMapper;
@Mock
private HadoopUtils hadoopUtils;
@Mock
private UserMapper userMapper;
@Mock
private UdfFuncMapper udfFunctionMapper;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Before
public void setUp() {
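// mock the static utility classes so the tests never touch a real HDFS or the local filesystem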
PowerMockito.mockStatic(HadoopUtils.class);
PowerMockito.mockStatic(FileUtils.class);
PowerMockito.mockStatic(org.apache.dolphinscheduler.api.utils.FileUtils.class);
try {
// new HadoopUtils
PowerMockito.whenNew(HadoopUtils.class).withNoArguments().thenReturn(hadoopUtils);
} catch (Exception e) {
e.printStackTrace();
}
PowerMockito.when(HadoopUtils.getInstance()).thenReturn(hadoopUtils);
PowerMockito.mockStatic(PropertyUtils.class);
}
@Test
public void testCreateResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_FILE_IS_EMPTY
MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf",new String().getBytes());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_FORBID_CHANGE
mockMultipartFile = new MockMultipartFile("test.pdf","test.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("test.pdf")).thenReturn("pdf");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(),result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf","ResourcesServiceTest.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.pdf")).thenReturn("pdf");
result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(),result.getMsg());
}
@Test
public void testCreateDirectory(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//PARENT_RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.queryResourceList("/directoryTest", 0, 0)).thenReturn(getResourceList());
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
}
@Test
public void testUpdateResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.updateResource(user,1,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.updateResource(user,0,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//USER_NO_OPERATION_PERM
result = resourcesService.updateResource(user,1,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
user.setId(1);
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsFileName(Mockito.any(), Mockito.any(),Mockito.anyString())).thenReturn("test1");
try {
Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF,null);
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
user.setId(1);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(true);
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
//RESOURCE_EXIST
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
//USER_NOT_EXIST
Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF,null);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF,null);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test");
try {
PowerMockito.when(HadoopUtils.getInstance().copy(Mockito.anyString(),Mockito.anyString(),true,true)).thenReturn(true);
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF,null);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testQueryResourceListPaging(){
User loginUser = new User();
loginUser.setUserType(UserType.ADMIN_USER);
IPage<Resource> resourcePage = new Page<>(1,10);
resourcePage.setTotal(1);
resourcePage.setRecords(getResourceList());
Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class),
Mockito.eq(0),Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,-1,ResourceType.FILE,"test",1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
PageInfo pageInfo = (PageInfo) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(pageInfo.getLists()));
}
@Test
public void testQueryResourceList(){
User loginUser = new User();
loginUser.setId(0);
loginUser.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceListAuthored(0, 0,0)).thenReturn(getResourceList());
Map<String, Object> result = resourcesService.queryResourceList(loginUser, ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
List<Resource> resourceList = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resourceList));
}
@Test
public void testDelete(){
User loginUser = new User();
loginUser.setId(0);
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
// HDFS_NOT_STARTUP
Result result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
result = resourcesService.delete(loginUser,2);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
// USER_NO_OPERATION_PERM
result = resourcesService.delete(loginUser,2);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setTenantId(2);
Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(loginUser);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//SUCCESS
loginUser.setTenantId(1);
Mockito.when(hadoopUtils.delete(Mockito.anyString(), Mockito.anyBoolean())).thenReturn(true);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
} catch (Exception e) {
logger.error("delete error",e);
Assert.assertTrue(false);
}
}
@Test
public void testVerifyResourceName(){
User user = new User();
user.setId(1);
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest.jar", 0, 0)).thenReturn(getResourceList());
Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
String unExistFullName = "/test.jar";
try {
Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.verifyResourceName("/test.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_FILE_EXIST
user.setTenantId(1);
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
logger.error("hadoop error",e);
}
PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test");
result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_EXIST.getCode()==result.getCode());
//SUCCESS
result = resourcesService.verifyResourceName("test2",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testReadResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
//HDFS_NOT_STARTUP
Result result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.readResource(2,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//USER_NOT_EXIST
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode()==result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_FILE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
Mockito.when(hadoopUtils.exists(Mockito.anyString())).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_FILE_NOT_EXIST.getCode()==result.getCode());
//SUCCESS
try {
Mockito.when(hadoopUtils.exists(null)).thenReturn(true);
Mockito.when(hadoopUtils.catFile(null,1,10)).thenReturn(getContent());
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testOnlineCreateResource() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
PowerMockito.when(HadoopUtils.getHdfsResDir("hdfsdDir")).thenReturn("hdfsDir");
PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir");
User user = getUser();
//HDFS_NOT_STARTUP
Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//RuntimeException
try {
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content",-1,"/");
}catch (RuntimeException ex){
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage());
}
//SUCCESS
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testUpdateResourceContent(){
User loginUser = new User();
loginUser.setId(0);
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
// HDFS_NOT_STARTUP
Result result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
result = resourcesService.updateResourceContent(2,"content");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//USER_NOT_EXIST
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertTrue(Status.TENANT_NOT_EXIST.getCode() == result.getCode());
//SUCCESS
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testDownloadResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
org.springframework.core.io.Resource resourceMock = Mockito.mock(org.springframework.core.io.Resource.class);
try {
//resource null
org.springframework.core.io.Resource resource = resourcesService.downloadResource(1);
Assert.assertNull(resource);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(Mockito.any())).thenReturn(resourceMock);
resource = resourcesService.downloadResource(1);
Assert.assertNotNull(resource);
} catch (Exception e) {
logger.error("DownloadResource error",e);
Assert.assertTrue(false);
}
}
@Test
public void testUnauthorizedFile(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.unauthorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceExceptUserId(1)).thenReturn(getResourceList());
result = resourcesService.unauthorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resources));
}
@Test
public void testUnauthorizedUDFFunction(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.unauthorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(udfFunctionMapper.queryUdfFuncExceptUserId(1)).thenReturn(getUdfFuncList());
result = resourcesService.unauthorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs));
}
@Test
public void testAuthorizedUDFFunction(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.authorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(udfFunctionMapper.queryAuthedUdfFunc(1)).thenReturn(getUdfFuncList());
result = resourcesService.authorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs));
}
@Test
public void testAuthorizedFile(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.authorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryAuthorizedResourceList(1)).thenReturn(getResourceList());
result = resourcesService.authorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resources));
}
private List<Resource> getResourceList(){
List<Resource> resources = new ArrayList<>();
resources.add(getResource());
return resources;
}
private Tenant getTenant() {
Tenant tenant = new Tenant();
tenant.setTenantCode("123");
return tenant;
}
private Resource getResource(){
Resource resource = new Resource();
resource.setPid(-1);
resource.setUserId(1);
resource.setDescription("ResourcesServiceTest.jar");
resource.setAlias("ResourcesServiceTest.jar");
resource.setFullName("/ResourcesServiceTest.jar");
resource.setType(ResourceType.FILE);
return resource;
}
private Resource getUdfResource(){
Resource resource = new Resource();
resource.setUserId(1);
resource.setDescription("udfTest");
resource.setAlias("udfTest.jar");
resource.setFullName("/udfTest.jar");
resource.setType(ResourceType.UDF);
return resource;
}
private UdfFunc getUdfFunc(){
UdfFunc udfFunc = new UdfFunc();
udfFunc.setId(1);
return udfFunc;
}
private List<UdfFunc> getUdfFuncList(){
List<UdfFunc> udfFuncs = new ArrayList<>();
udfFuncs.add(getUdfFunc());
return udfFuncs;
}
private User getUser(){
User user = new User();
user.setId(1);
user.setTenantId(1);
return user;
}
private List<String> getContent(){
List<String> contentList = new ArrayList<>();
contentList.add("test");
return contentList;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,299 |
[Bug][Alert] when master or worker shuts down, alert cannot notify
|
This error log:
`[ERROR] 2020-07-22 11:00:22.209 org.apache.dolphinscheduler.common.utils.JSONUtils:[145] - parse list exception!
com.fasterxml.jackson.databind.JsonMappingException: Unexpected character (''' (code 39)): was expecting double-quote to start field name
at [Source: (String)"[{'type':'WORKER','host':'/dolphinscheduler/nodes/worker/hadoop/192.168.0.3:5678','event':'server down','warning level':'serious'}]"; line: 1, column: 2] (through reference chain: java.util.ArrayList[0])
at com.fasterxml.jackson.databind.JsonMappingException.wrapWithPath(JsonMappingException.java:394)
at com.fasterxml.jackson.databind.JsonMappingException.wrapWithPath(JsonMappingException.java:365)
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:302)
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:245)
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:27)
at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:4013)
at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:3042)
at org.apache.dolphinscheduler.common.utils.JSONUtils.toList(JSONUtils.java:143)`
I think the reason for this is that Jackson can't parse JSON fields with single quotes.
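Since the stack trace shows `JSONUtils.toList` delegating to Jackson's `ObjectMapper`, one way to avoid the failure is to stop hand-formatting the payload with single quotes and let Jackson serialize it instead. A minimal sketch, illustrative only and not necessarily the merged fix:

```java
// Illustrative sketch: build the alert content from maps and let Jackson serialize it,
// so the payload uses double quotes that JSONUtils can parse back.
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class AlertContentSketch {
    public static void main(String[] args) throws Exception {
        Map<String, String> item = new LinkedHashMap<>();
        item.put("type", "WORKER");
        item.put("host", "/dolphinscheduler/nodes/worker/hadoop/192.168.0.3:5678");
        item.put("event", "server down");
        item.put("warning level", "serious");

        List<Map<String, String>> content = new ArrayList<>();
        content.add(item);

        // Prints: [{"type":"WORKER","host":"...","event":"server down","warning level":"serious"}]
        String json = new ObjectMapper().writeValueAsString(content);
        System.out.println(json);
    }
}
```

Serializing a `LinkedHashMap` list this way yields standard double-quoted JSON, which the existing `JSONUtils.toList` call can read back.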
|
https://github.com/apache/dolphinscheduler/issues/3299
|
https://github.com/apache/dolphinscheduler/pull/3552
|
d30bc7bcf3dda45672845130e04212dfec9f0133
|
2f0102580268ff045e6042c3a691f1dee4c49962
| 2020-07-24T06:06:25Z |
java
| 2020-08-19T08:27:39Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/AlertDao.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.apache.dolphinscheduler.common.enums.AlertStatus;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.dao.datasource.ConnectionFactory;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.AlertMapper;
import org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Date;
import java.util.List;
@Component
public class AlertDao extends AbstractBaseDao {
private final Logger logger = LoggerFactory.getLogger(getClass());
@Autowired
private AlertMapper alertMapper;
@Autowired
private UserAlertGroupMapper userAlertGroupMapper;
@Override
protected void init() {
alertMapper = ConnectionFactory.getInstance().getMapper(AlertMapper.class);
userAlertGroupMapper = ConnectionFactory.getInstance().getMapper(UserAlertGroupMapper.class);
}
/**
* insert alert
* @param alert alert
* @return add alert result
*/
public int addAlert(Alert alert){
return alertMapper.insert(alert);
}
/**
* update alert
* @param alertStatus alertStatus
* @param log log
* @param id id
* @return update alert result
*/
public int updateAlert(AlertStatus alertStatus,String log,int id){
Alert alert = alertMapper.selectById(id);
alert.setAlertStatus(alertStatus);
alert.setUpdateTime(new Date());
alert.setLog(log);
return alertMapper.updateById(alert);
}
/**
* query user list by alert group id
* @param alerGroupId alerGroupId
* @return user list
*/
public List<User> queryUserByAlertGroupId(int alerGroupId){
return userAlertGroupMapper.listUserByAlertgroupId(alerGroupId);
}
/**
* MasterServer or WorkerServer stopped
* @param alertgroupId alertgroupId
* @param host host
* @param serverType serverType
*/
public void sendServerStopedAlert(int alertgroupId,String host,String serverType){
Alert alert = new Alert();
String content = String.format("[{'type':'%s','host':'%s','event':'server down','warning level':'serious'}]",
serverType, host);
alert.setTitle("Fault tolerance warning");
saveTaskTimeoutAlert(alert, content, alertgroupId, null, null);
}
/**
* process time out alert
* @param processInstance processInstance
* @param processDefinition processDefinition
*/
public void sendProcessTimeoutAlert(ProcessInstance processInstance, ProcessDefinition processDefinition){
int alertgroupId = processInstance.getWarningGroupId();
String receivers = processDefinition.getReceivers();
String receiversCc = processDefinition.getReceiversCc();
Alert alert = new Alert();
String content = String.format("[{'id':'%d','name':'%s','event':'timeout','warnLevel':'middle'}]",
processInstance.getId(), processInstance.getName());
alert.setTitle("Process Timeout Warn");
saveTaskTimeoutAlert(alert, content, alertgroupId, receivers, receiversCc);
}
private void saveTaskTimeoutAlert(Alert alert, String content, int alertgroupId,
String receivers, String receiversCc){
alert.setShowType(ShowType.TABLE);
alert.setContent(content);
alert.setAlertType(AlertType.EMAIL);
alert.setAlertGroupId(alertgroupId);
if (StringUtils.isNotEmpty(receivers)) {
alert.setReceivers(receivers);
}
if (StringUtils.isNotEmpty(receiversCc)) {
alert.setReceiversCc(receiversCc);
}
alert.setCreateTime(new Date());
alert.setUpdateTime(new Date());
alertMapper.insert(alert);
}
/**
* task timeout warn
* @param alertgroupId alertgroupId
* @param receivers receivers
* @param receiversCc receiversCc
* @param processInstanceId processInstanceId
* @param processInstanceName processInstanceName
* @param taskId taskId
* @param taskName taskName
*/
public void sendTaskTimeoutAlert(int alertgroupId,String receivers,String receiversCc, int processInstanceId,
String processInstanceName, int taskId,String taskName){
Alert alert = new Alert();
String content = String.format("[{'process instance id':'%d','task name':'%s','task id':'%d','task name':'%s'," +
"'event':'timeout','warnLevel':'middle'}]", processInstanceId, processInstanceName, taskId, taskName);
alert.setTitle("Task Timeout Warn");
saveTaskTimeoutAlert(alert, content, alertgroupId, receivers, receiversCc);
}
/**
* list the alert information of waiting to be executed
* @return alert list
*/
public List<Alert> listWaitExecutionAlert(){
return alertMapper.listAlertByStatus(AlertStatus.WAIT_EXECUTION);
}
/**
* list user information by alert group id
* @param alertgroupId alertgroupId
* @return user list
*/
public List<User> listUserByAlertgroupId(int alertgroupId){
return userAlertGroupMapper.listUserByAlertgroupId(alertgroupId);
}
/**
* for test
* @return AlertMapper
*/
public AlertMapper getAlertMapper() {
return alertMapper;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,299 |
[Bug][Alert] when master or worker shuts down, alert cannot notify
|
This error log:
`[ERROR] 2020-07-22 11:00:22.209 org.apache.dolphinscheduler.common.utils.JSONUtils:[145] - parse list exception!
com.fasterxml.jackson.databind.JsonMappingException: Unexpected character (''' (code 39)): was expecting double-quote to start field name
at [Source: (String)"[{'type':'WORKER','host':'/dolphinscheduler/nodes/worker/hadoop/192.168.0.3:5678','event':'server down','warning level':'serious'}]"; line: 1, column: 2] (through reference chain: java.util.ArrayList[0])
at com.fasterxml.jackson.databind.JsonMappingException.wrapWithPath(JsonMappingException.java:394)
at com.fasterxml.jackson.databind.JsonMappingException.wrapWithPath(JsonMappingException.java:365)
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:302)
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:245)
at com.fasterxml.jackson.databind.deser.std.CollectionDeserializer.deserialize(CollectionDeserializer.java:27)
at com.fasterxml.jackson.databind.ObjectMapper._readMapAndClose(ObjectMapper.java:4013)
at com.fasterxml.jackson.databind.ObjectMapper.readValue(ObjectMapper.java:3042)
at org.apache.dolphinscheduler.common.utils.JSONUtils.toList(JSONUtils.java:143)`
I think the reason for this is that Jackson can't parse JSON fields with single quotes.
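An alternative to fixing the producer side is to configure Jackson to tolerate single quotes when reading. This is only an assumption about how `JSONUtils` could be hardened, not the merged patch:

```java
// Sketch: Jackson can be told to accept single-quoted field names and values when deserializing.
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.CollectionType;

import java.util.LinkedHashMap;
import java.util.List;

public class SingleQuoteParseSketch {
    public static void main(String[] args) throws Exception {
        String content = "[{'type':'WORKER','host':'192.168.0.3:5678',"
                + "'event':'server down','warning level':'serious'}]";

        ObjectMapper mapper = new ObjectMapper();
        // Allow the legacy single-quoted payloads instead of failing with JsonMappingException.
        mapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);

        CollectionType type = mapper.getTypeFactory()
                .constructCollectionType(List.class, LinkedHashMap.class);
        List<LinkedHashMap> rows = mapper.readValue(content, type);
        System.out.println(rows.get(0).get("type")); // WORKER
    }
}
```

This keeps old single-quoted alert rows readable, although producing standard double-quoted JSON in the first place is the cleaner fix.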
|
https://github.com/apache/dolphinscheduler/issues/3299
|
https://github.com/apache/dolphinscheduler/pull/3552
|
d30bc7bcf3dda45672845130e04212dfec9f0133
|
2f0102580268ff045e6042c3a691f1dee4c49962
| 2020-07-24T06:06:25Z |
java
| 2020-08-19T08:27:39Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/AlertManager.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.DaoFactory;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.LinkedHashMap;
import java.util.List;
/**
* alert manager
*/
public class AlertManager {
/**
* logger of AlertManager
*/
private static final Logger logger = LoggerFactory.getLogger(AlertManager.class);
/**
* alert dao
*/
private AlertDao alertDao = DaoFactory.getDaoInstance(AlertDao.class);
/**
* command type convert chinese
*
* @param commandType command type
* @return command name
*/
private String getCommandCnName(CommandType commandType) {
switch (commandType) {
case RECOVER_TOLERANCE_FAULT_PROCESS:
return "recover tolerance fault process";
case RECOVER_SUSPENDED_PROCESS:
return "recover suspended process";
case START_CURRENT_TASK_PROCESS:
return "start current task process";
case START_FAILURE_TASK_PROCESS:
return "start failure task process";
case START_PROCESS:
return "start process";
case REPEAT_RUNNING:
return "repeat running";
case SCHEDULER:
return "scheduler";
case COMPLEMENT_DATA:
return "complement data";
case PAUSE:
return "pause";
case STOP:
return "stop";
default:
return "unknown type";
}
}
/**
* process instance format
*/
private static final String PROCESS_INSTANCE_FORMAT =
"\"id:%d\"," +
"\"name:%s\"," +
"\"job type: %s\"," +
"\"state: %s\"," +
"\"recovery:%s\"," +
"\"run time: %d\"," +
"\"start time: %s\"," +
"\"end time: %s\"," +
"\"host: %s\"" ;
/**
* get process instance content
* @param processInstance process instance
* @param taskInstances task instance list
* @return process instance format content
*/
public String getContentProcessInstance(ProcessInstance processInstance,
List<TaskInstance> taskInstances){
String res = "";
if(processInstance.getState().typeIsSuccess()){
res = String.format(PROCESS_INSTANCE_FORMAT,
processInstance.getId(),
processInstance.getName(),
getCommandCnName(processInstance.getCommandType()),
processInstance.getState().toString(),
processInstance.getRecovery().toString(),
processInstance.getRunTimes(),
DateUtils.dateToString(processInstance.getStartTime()),
DateUtils.dateToString(processInstance.getEndTime()),
processInstance.getHost()
);
res = "[" + res + "]";
}else if(processInstance.getState().typeIsFailure()){
List<LinkedHashMap> failedTaskList = new ArrayList<>();
for(TaskInstance task : taskInstances){
if(task.getState().typeIsSuccess()){
continue;
}
LinkedHashMap<String, String> failedTaskMap = new LinkedHashMap();
failedTaskMap.put("process instance id", String.valueOf(processInstance.getId()));
failedTaskMap.put("process instance name", processInstance.getName());
failedTaskMap.put("task id", String.valueOf(task.getId()));
failedTaskMap.put("task name", task.getName());
failedTaskMap.put("task type", task.getTaskType());
failedTaskMap.put("task state", task.getState().toString());
failedTaskMap.put("task start time", DateUtils.dateToString(task.getStartTime()));
failedTaskMap.put("task end time", DateUtils.dateToString(task.getEndTime()));
failedTaskMap.put("host", task.getHost());
failedTaskMap.put("log path", task.getLogPath());
failedTaskList.add(failedTaskMap);
}
res = JSONUtils.toJsonString(failedTaskList);
}
return res;
}
/**
* getting worker fault tolerant content
*
* @param processInstance process instance
* @param toleranceTaskList tolerance task list
* @return worker tolerance content
*/
private String getWorkerToleranceContent(ProcessInstance processInstance, List<TaskInstance> toleranceTaskList){
List<LinkedHashMap<String, String>> toleranceTaskInstanceList = new ArrayList<>();
for(TaskInstance taskInstance: toleranceTaskList){
LinkedHashMap<String, String> toleranceWorkerContentMap = new LinkedHashMap();
toleranceWorkerContentMap.put("process name", processInstance.getName());
toleranceWorkerContentMap.put("task name", taskInstance.getName());
toleranceWorkerContentMap.put("host", taskInstance.getHost());
toleranceWorkerContentMap.put("task retry times", String.valueOf(taskInstance.getRetryTimes()));
toleranceTaskInstanceList.add(toleranceWorkerContentMap);
}
return JSONUtils.toJsonString(toleranceTaskInstanceList);
}
/**
* send worker alert fault tolerance
*
* @param processInstance process instance
* @param toleranceTaskList tolerance task list
*/
public void sendAlertWorkerToleranceFault(ProcessInstance processInstance, List<TaskInstance> toleranceTaskList){
try{
Alert alert = new Alert();
alert.setTitle("worker fault tolerance");
alert.setShowType(ShowType.TABLE);
String content = getWorkerToleranceContent(processInstance, toleranceTaskList);
alert.setContent(content);
alert.setAlertType(AlertType.EMAIL);
alert.setCreateTime(new Date());
alert.setAlertGroupId(processInstance.getWarningGroupId() == null ? 1:processInstance.getWarningGroupId());
alert.setReceivers(processInstance.getProcessDefinition().getReceivers());
alert.setReceiversCc(processInstance.getProcessDefinition().getReceiversCc());
alertDao.addAlert(alert);
logger.info("add alert to db , alert : {}", alert.toString());
}catch (Exception e){
logger.error("send alert failed:{} ", e.getMessage());
}
}
/**
* send process instance alert
* @param processInstance process instance
* @param taskInstances task instance list
*/
public void sendAlertProcessInstance(ProcessInstance processInstance,
List<TaskInstance> taskInstances){
boolean sendWarnning = false;
WarningType warningType = processInstance.getWarningType();
switch (warningType){
case ALL:
if(processInstance.getState().typeIsFinished()){
sendWarnning = true;
}
break;
case SUCCESS:
if(processInstance.getState().typeIsSuccess()){
sendWarnning = true;
}
break;
case FAILURE:
if(processInstance.getState().typeIsFailure()){
sendWarnning = true;
}
break;
default:
}
if(!sendWarnning){
return;
}
Alert alert = new Alert();
String cmdName = getCommandCnName(processInstance.getCommandType());
String success = processInstance.getState().typeIsSuccess() ? "success" :"failed";
alert.setTitle(cmdName + " " + success);
ShowType showType = processInstance.getState().typeIsSuccess() ? ShowType.TEXT : ShowType.TABLE;
alert.setShowType(showType);
String content = getContentProcessInstance(processInstance, taskInstances);
alert.setContent(content);
alert.setAlertType(AlertType.EMAIL);
alert.setAlertGroupId(processInstance.getWarningGroupId());
alert.setCreateTime(new Date());
alert.setReceivers(processInstance.getProcessDefinition().getReceivers());
alert.setReceiversCc(processInstance.getProcessDefinition().getReceiversCc());
alertDao.addAlert(alert);
logger.info("add alert to db , alert: {}", alert.toString());
}
/**
* send process timeout alert
*
* @param processInstance process instance
* @param processDefinition process definition
*/
public void sendProcessTimeoutAlert(ProcessInstance processInstance, ProcessDefinition processDefinition) {
alertDao.sendProcessTimeoutAlert(processInstance, processDefinition);
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,077 |
[BUG] repeated task name in one flow
|
First add two different tasks, then rename one of them so that both tasks end up with the same name; you will find that the flow can still be saved with duplicate task names.
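The report boils down to a missing uniqueness check on task names before the flow is saved. A purely illustrative Java sketch of such a guard (the class, method, and types here are assumptions, not DolphinScheduler's actual API):

```java
// Illustrative only: reject a workflow whose task names are not unique.
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DuplicateTaskNameCheck {

    /** Returns true only when every task name in the list is distinct. */
    public static boolean namesAreUnique(List<String> taskNames) {
        Set<String> seen = new HashSet<>();
        for (String name : taskNames) {
            if (!seen.add(name)) {
                return false; // add() returns false on the second occurrence of a name
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(namesAreUnique(Arrays.asList("shell-1", "shell-2"))); // true
        System.out.println(namesAreUnique(Arrays.asList("shell-1", "shell-1"))); // false
    }
}
```

The same check can run in the UI before the DAG is saved, so a renamed node that collides with an existing name is rejected instead of silently persisted.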
|
https://github.com/apache/dolphinscheduler/issues/3077
|
https://github.com/apache/dolphinscheduler/pull/3376
|
26184de905b9e1a83b0e27e8e50e335f12131fdf
|
555880e6e32ab5235f1947fe036f43cef54e50c4
| 2020-06-29T11:37:26Z |
java
| 2020-08-25T03:18:36Z |
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/udp/udp.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="udp-model">
<div class="scrollbar contpi-boxt">
<div class="title">
<span>{{$t('Set the DAG diagram name')}}</span>
</div>
<div>
<x-input
type="text"
v-model="name"
:disabled="router.history.current.name === 'projects-instance-details'"
:placeholder="$t('Please enter name (required)')">
</x-input>
</div>
<template v-if="router.history.current.name !== 'projects-instance-details'">
<div style="padding-top: 12px;">
<x-input
type="textarea"
v-model="description"
:autosize="{minRows:2}"
:placeholder="$t('Please enter description(optional)')"
autocomplete="off">
</x-input>
</div>
</template>
<div class="title" style="padding-top: 6px;">
<span class="text-b">{{$t('select tenant')}}</span>
<form-tenant v-model="tenantId"></form-tenant>
</div>
<div class="title" style="padding-top: 6px;">
<span class="text-b">{{$t('warning of timeout')}}</span>
<span style="padding-left: 6px;">
<x-switch v-model="checkedTimeout"></x-switch>
</span>
</div>
<div class="content" style="padding-bottom: 10px;" v-if="checkedTimeout">
<span>
<x-input v-model="timeout" style="width: 160px;" maxlength="9">
<span slot="append">{{$t('Minute')}}</span>
</x-input>
</span>
</div>
<div class="title" style="padding-top: 6px;">
<span>{{$t('Set global')}}</span>
</div>
<div class="content">
<div>
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="udpList"
:hide="false">
</m-local-params>
</div>
</div>
</div>
<div class="bottom">
<div class="submit">
<template v-if="router.history.current.name === 'projects-instance-details'">
<div class="lint-pt">
<x-checkbox v-model="syncDefine">{{$t('Whether to update the process definition')}}</x-checkbox>
</div>
</template>
<x-button type="text" @click="close()"> {{$t('Cancel')}} </x-button>
<x-button type="primary" shape="circle" @click="ok()">{{$t('Add')}}</x-button>
</div>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mLocalParams from '../formModel/tasks/_source/localParams'
import disabledState from '@/module/mixin/disabledState'
import Affirm from '../jumpAffirm'
import FormTenant from "./_source/selectTenant";
export default {
name: 'udp',
data () {
return {
// dag name
name: '',
// dag description
description: '',
// Global custom parameters
udpList: [],
// Global custom parameters
udpListCache: [],
// Whether to update the process definition
syncDefine: true,
// Timeout alarm
timeout: 0,
tenantId: -1,
// checked Timeout alarm
checkedTimeout: true
}
},
mixins: [disabledState],
props: {
},
methods: {
/**
* udp data
*/
_onLocalParams (a) {
this.udpList = a
},
_verifTimeout () {
const reg = /^[1-9]\d*$/
if (!reg.test(this.timeout)) {
this.$message.warning(`${i18n.$t('Please enter a positive integer greater than 0')}`)
return false
}
return true
},
_accuStore(){
this.store.commit('dag/setGlobalParams', _.cloneDeep(this.udpList))
this.store.commit('dag/setName', _.cloneDeep(this.name))
this.store.commit('dag/setTimeout', _.cloneDeep(this.timeout))
this.store.commit('dag/setTenantId', _.cloneDeep(this.tenantId))
this.store.commit('dag/setDesc', _.cloneDeep(this.description))
this.store.commit('dag/setSyncDefine', this.syncDefine)
},
/**
* submit
*/
ok () {
if (!this.name) {
this.$message.warning(`${i18n.$t('DAG graph name cannot be empty')}`)
return
}
let _verif = () => {
// verification udf
if (!this.$refs.refLocalParams._verifProp()) {
return
}
// verification timeout
if (this.checkedTimeout && !this._verifTimeout()) {
return
}
// Storage global globalParams
this._accuStore()
Affirm.setIsPop(false)
this.$emit('onUdp')
}
// verify that the name exists
this.store.dispatch('dag/verifDAGName', this.name).then(res => {
_verif()
}).catch(e => {
this.$message.error(e.msg || '')
})
},
/**
* Close the popup
*/
close () {
this.$emit('close')
}
},
watch: {
checkedTimeout (val) {
if (!val) {
this.timeout = 0
this.store.commit('dag/setTimeout', _.cloneDeep(this.timeout))
}
}
},
created () {
const dag = _.cloneDeep(this.store.state.dag)
this.udpList = dag.globalParams
this.udpListCache = dag.globalParams
this.name = dag.name
this.description = dag.description
this.syncDefine = dag.syncDefine
this.timeout = dag.timeout || 0
this.checkedTimeout = this.timeout !== 0
this.$nextTick(() => {
if (dag.tenantId === -1) {
this.tenantId = this.store.state.user.userInfo.tenantId
} else {
this.tenantId = dag.tenantId
}
})
},
mounted () {},
components: {FormTenant, mLocalParams }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.udp-model {
width: 624px;
min-height: 420px;
background: #fff;
border-radius: 3px;
padding:20px 0 ;
position: relative;
.contpi-boxt {
max-height: 600px;
overflow-y: scroll;
padding:0 20px;
}
.title {
line-height: 36px;
padding-bottom: 10px;
span {
font-size: 16px;
color: #333;
}
}
.bottom{
position: absolute;
bottom: 0;
left: 0;
width: 100%;
text-align: right;
height: 56px;
line-height: 56px;
border-top: 1px solid #DCDEDC;
background: #fff;
.submit {
padding-right: 20px;
margin-top: -4px;
}
.lint-pt {
position: absolute;
left: 20px;
top: -2px;
>label {
font-weight: normal;
}
}
}
.content {
padding-bottom: 50px;
.user-def-params-model {
.add {
a {
color: #0097e0;
}
}
}
}
}
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,553 |
[Bug][ui] when editing the workflow, the selected connecting line shows no style change, but the unselected lines get thicker (in version 1.3.0, editing a workflow leaves the selected line's style unchanged while the other lines are thickened)
|
When defining a workflow and selecting a line with the arrow selector, the selected line's style does not change, while the other, unselected lines become thicker; and when the mouse moves over one of those thickened unselected lines, it snaps back to its original style. Because of this, when I want to delete a particular line, I cannot tell which line I have actually selected.
|
https://github.com/apache/dolphinscheduler/issues/3553
|
https://github.com/apache/dolphinscheduler/pull/3572
|
879feb79b3aa28a6a296b3955b2796f81a0162df
|
3b714b15754af240a07fcf6c18d5a780140df21f
| 2020-08-19T09:37:58Z |
java
| 2020-08-26T01:40:12Z |
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="clearfix dag-model" >
<div class="toolbar">
<div class="title"><span>{{$t('Toolbar')}}</span></div>
<div class="toolbar-btn">
<div class="bar-box roundedRect jtk-draggable jtk-droppable jtk-endpoint-anchor jtk-connected"
:class="v === dagBarId ? 'active' : ''"
:id="v"
:key="v"
v-for="(item,v) in tasksTypeList"
@mousedown="_getDagId(v)">
<div data-toggle="tooltip" :title="item.desc">
<div class="icos" :class="'icos-' + v" ></div>
</div>
</div>
</div>
</div>
<div class="dag-contect">
<div class="dag-toolbar">
<div class="assist-btn">
<x-button
style="vertical-align: middle;"
data-toggle="tooltip"
:title="$t('View variables')"
data-container="body"
type="primary"
size="xsmall"
:disabled="$route.name !== 'projects-instance-details'"
@click="_toggleView"
icon="ans-icon-code">
</x-button>
<x-button
style="vertical-align: middle;"
data-toggle="tooltip"
:title="$t('Startup parameter')"
data-container="body"
type="primary"
size="xsmall"
:disabled="$route.name !== 'projects-instance-details'"
@click="_toggleParam"
icon="ans-icon-arrow-circle-right">
</x-button>
<span class="name">{{name}}</span>
<span v-if="name" class="copy-name" @click="_copyName" :data-clipboard-text="name"><em class="ans-icon-copy" data-container="body" data-toggle="tooltip" :title="$t('Copy name')" ></em></span>
</div>
<div class="save-btn">
<div class="operation" style="vertical-align: middle;">
<a href="javascript:"
v-for="(item,$index) in toolOperList"
:class="_operationClass(item)"
:id="item.code"
:key="$index"
@click="_ckOperation(item,$event)">
<x-button type="text" data-container="body" :icon="item.icon" v-tooltip.light="item.desc"></x-button>
</a>
</div>
<x-button
type="primary"
v-tooltip.light="$t('Format DAG')"
icon="ans-icon-triangle-solid-right"
size="xsmall"
data-container="body"
v-if="(type === 'instance' || 'definition') && urlParam.id !=undefined"
style="vertical-align: middle;"
@click="dagAutomaticLayout">
</x-button>
<x-button
v-tooltip.light="$t('Refresh DAG status')"
data-container="body"
style="vertical-align: middle;"
icon="ans-icon-refresh"
type="primary"
:loading="isRefresh"
v-if="type === 'instance'"
@click="!isRefresh && _refresh()"
size="xsmall" >
</x-button>
<x-button
v-if="isRtTasks"
style="vertical-align: middle;"
type="primary"
size="xsmall"
icon="ans-icon-play"
@click="_rtNodesDag" >
{{$t('Return_1')}}
</x-button>
<x-button
type="primary"
v-tooltip.light="$t('Close')"
icon="ans-icon-off"
size="xsmall"
data-container="body"
v-if="(type === 'instance' || 'definition') "
style="vertical-align: middle;"
@click="_closeDAG">
{{$t('Close')}}
</x-button>
<x-button
style="vertical-align: middle;"
type="primary"
size="xsmall"
:loading="spinnerLoading"
@click="_saveChart"
icon="ans-icon-save"
>
{{spinnerLoading ? 'Loading...' : $t('Save')}}
</x-button>
<x-button
style="vertical-align: middle;"
type="primary"
size="xsmall"
v-if="this.type !== 'instance' && this.urlParam.id !== null"
:loading="spinnerLoading"
@click="_version"
icon="ans-icon-dependence"
>
{{spinnerLoading ? 'Loading...' : $t('Version Info')}}
</x-button>
</div>
</div>
<div class="scrollbar dag-container">
<div class="jtk-demo" id="jtk-demo">
<div class="jtk-demo-canvas canvas-wide statemachine-demo jtk-surface jtk-surface-nopan jtk-draggable" id="canvas" ></div>
</div>
</div>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import Dag from './dag'
import mUdp from './udp/udp'
import i18n from '@/module/i18n'
import { jsPlumb } from 'jsplumb'
import Clipboard from 'clipboard'
import { allNodesId } from './plugIn/util'
import { toolOper, tasksType } from './config'
import mFormModel from './formModel/formModel'
import mFormLineModel from './formModel/formLineModel'
import { formatDate } from '@/module/filter/filter'
import { findComponentDownward } from '@/module/util/'
import disabledState from '@/module/mixin/disabledState'
import { mapActions, mapState, mapMutations } from 'vuex'
import mVersions from '../../projects/pages/definition/pages/list/_source/versions'
let eventModel
export default {
name: 'dag-chart',
data () {
return {
tasksTypeList: tasksType,
toolOperList: toolOper(this),
dagBarId: null,
toolOperCode: '',
spinnerLoading: false,
urlParam: {
id: this.$route.params.id || null
},
isRtTasks: false,
isRefresh: false,
isLoading: false,
taskId: null,
arg: false,
}
},
mixins: [disabledState],
props: {
type: String,
releaseState: String
},
methods: {
...mapActions('dag', ['saveDAGchart', 'updateInstance', 'updateDefinition', 'getTaskState', 'switchProcessDefinitionVersion', 'getProcessDefinitionVersionsPage', 'deleteProcessDefinitionVersion']),
...mapMutations('dag', ['addTasks', 'cacheTasks', 'resetParams', 'setIsEditDag', 'setName', 'addConnects']),
// DAG automatic layout
dagAutomaticLayout() {
if(this.store.state.dag.isEditDag) {
this.$message.warning(`${i18n.$t('Please save the DAG before formatting')}`)
return false
}
$('#canvas').html('')
// Destroy round robin
Dag.init({
dag: this,
instance: jsPlumb.getInstance({
Endpoint: [
'Dot', { radius: 1, cssClass: 'dot-style' }
],
Connector: 'Bezier',
PaintStyle: { lineWidth: 2, stroke: '#456' }, // Connection style
HoverPaintStyle: {stroke: '#ccc', strokeWidth: 3},
ConnectionOverlays: [
[
'Arrow',
{
location: 1,
id: 'arrow',
length: 12,
foldback: 0.8
}
],
['Label', {
location: 0.5,
id: 'label'
}]
],
Container: 'canvas',
ConnectionsDetachable: true
})
})
if (this.tasks.length) {
Dag.backfill(true)
if (this.type === 'instance') {
this._getTaskState(false).then(res => {})
}
} else {
Dag.create()
}
},
init (args) {
if (this.tasks.length) {
Dag.backfill(args)
// Process instances can view status
if (this.type === 'instance') {
this._getTaskState(false).then(res => {})
// Round robin acquisition status
this.setIntervalP = setInterval(() => {
this._getTaskState(true).then(res => {})
}, 90000)
}
} else {
Dag.create()
}
},
/**
* copy name
*/
_copyName(){
let clipboard = new Clipboard(`.copy-name`)
clipboard.on('success', e => {
this.$message.success(`${i18n.$t('Copy success')}`)
// Free memory
clipboard.destroy()
})
clipboard.on('error', e => {
// Copy is not supported
this.$message.warning(`${i18n.$t('The browser does not support automatic copying')}`)
// Free memory
clipboard.destroy()
})
},
/**
* Get state interface
* @param isReset Whether to manually refresh
*/
_getTaskState (isReset) {
return new Promise((resolve, reject) => {
this.getTaskState(this.urlParam.id).then(res => {
let data = res.list
let state = res.processInstanceState
let taskList = res.taskList
let idArr = allNodesId()
const titleTpl = (item, desc) => {
let $item = _.filter(taskList, v => v.name === item.name)[0]
return `<div style="text-align: left">${i18n.$t('Name')}:${$item.name}</br>${i18n.$t('State')}:${desc}</br>${i18n.$t('type')}:${$item.taskType}</br>${i18n.$t('host')}:${$item.host || '-'}</br>${i18n.$t('Retry Count')}:${$item.retryTimes}</br>${i18n.$t('Submit Time')}:${formatDate($item.submitTime)}</br>${i18n.$t('Start Time')}:${formatDate($item.startTime)}</br>${i18n.$t('End Time')}:${$item.endTime ? formatDate($item.endTime) : '-'}</br></div>`
}
// remove tip state dom
$('.w').find('.state-p').html('')
data.forEach(v1 => {
idArr.forEach(v2 => {
if (v2.name === v1.name) {
let dom = $(`#${v2.id}`)
let state = dom.find('.state-p')
let depState = ''
taskList.forEach(item=>{
if(item.name==v1.name) {
depState = item.state
}
})
dom.attr('data-state-id', v1.stateId)
dom.attr('data-dependent-result', v1.dependentResult || '')
dom.attr('data-dependent-depState', depState)
state.append(`<strong class="${v1.icoUnicode} ${v1.isSpin ? 'as as-spin' : ''}" style="color:${v1.color}" data-toggle="tooltip" data-html="true" data-container="body"></strong>`)
state.find('strong').attr('title', titleTpl(v2, v1.desc))
}
})
})
if (state === 'PAUSE' || state === 'STOP' || state === 'FAILURE' || this.state === 'SUCCESS') {
// Manual refresh does not regain large json
if (isReset) {
findComponentDownward(this.$root, `${this.type}-details`)._reset()
}
}
resolve()
})
})
},
/**
* Get the action bar id
* @param item
*/
_getDagId (v) {
// if (this.isDetails) {
// return
// }
this.dagBarId = v
},
/**
* operating
*/
_ckOperation (item) {
let is = true
let code = ''
if (item.disable) {
return
}
if (this.toolOperCode === item.code) {
this.toolOperCode = ''
code = item.code
is = false
} else {
this.toolOperCode = item.code
code = this.toolOperCode
is = true
}
// event type
Dag.toolbarEvent({
item: item,
code: code,
is: is
})
},
_operationClass (item) {
return this.toolOperCode === item.code ? 'active' : ''
// if (item.disable) {
// return this.toolOperCode === item.code ? 'active' : ''
// } else {
// return 'disable'
// }
},
/**
* Storage interface
*/
_save (sourceType) {
return new Promise((resolve, reject) => {
this.spinnerLoading = true
// Storage store
Dag.saveStore().then(res => {
if(this._verifConditions(res.tasks)) {
if (this.urlParam.id) {
/**
* Edit
* @param saveInstanceEditDAGChart => Process instance editing
* @param saveEditDAGChart => Process definition editing
*/
this[this.type === 'instance' ? 'updateInstance' : 'updateDefinition'](this.urlParam.id).then(res => {
this.$message.success(res.msg)
this.spinnerLoading = false
// Jump process definition
if (this.type === 'instance') {
this.$router.push({ path: `/projects/instance/list/${this.urlParam.id}?_t=${new Date().getTime()}` })
} else {
this.$router.push({ path: `/projects/definition/list/${this.urlParam.id}?_t=${new Date().getTime()}` })
}
resolve()
}).catch(e => {
this.$message.error(e.msg || '')
this.spinnerLoading = false
reject(e)
})
} else {
// New
this.saveDAGchart().then(res => {
this.$message.success(res.msg)
this.spinnerLoading = false
// source @/conf/home/pages/dag/_source/editAffirmModel/index.js
if (sourceType !== 'affirm') {
// Jump process definition
this.$router.push({ name: 'projects-definition-list' })
}
resolve()
}).catch(e => {
this.$message.error(e.msg || '')
this.setName('')
this.spinnerLoading = false
reject(e)
})
}
}
})
})
},
_closeDAG(){
let $name = this.$route.name
if($name && $name.indexOf("definition") != -1){
this.$router.push({ name: 'projects-definition-list'})
}else{
this.$router.push({ name: 'projects-instance-list'})
}
},
_verifConditions (value) {
let tasks = value
let bool = true
tasks.map(v=>{
if(v.type == 'CONDITIONS' && (v.conditionResult.successNode[0] =='' || v.conditionResult.successNode[0] == null || v.conditionResult.failedNode[0] =='' || v.conditionResult.failedNode[0] == null)) {
bool = false
return false
}
})
if(!bool) {
this.$message.warning(`${i18n.$t('Successful branch flow and failed branch flow are required')}`)
this.spinnerLoading = false
return false
}
return true
},
/**
* Global parameter
* @param Promise
*/
_udpTopFloorPop () {
return new Promise((resolve, reject) => {
let modal = this.$modal.dialog({
closable: false,
showMask: true,
escClose: true,
className: 'v-modal-custom',
transitionName: 'opacityp',
render (h) {
return h(mUdp, {
on: {
onUdp () {
modal.remove()
resolve()
},
close () {
modal.remove()
}
}
})
}
})
})
},
/**
* Save chart
*/
_saveChart () {
// Verify node
if (!this.tasks.length) {
this.$message.warning(`${i18n.$t('Failed to create node to save')}`)
return
}
// Global parameters (optional)
this._udpTopFloorPop().then(() => {
return this._save()
})
},
/**
* Return to the previous child node
*/
_rtNodesDag () {
let getIds = this.$route.query.subProcessIds
let idsArr = getIds.split(',')
let ids = idsArr.slice(0, idsArr.length - 1)
let id = idsArr[idsArr.length - 1]
let query = {}
if (id !== idsArr[0]) {
query = { subProcessIds: ids.join(',') }
}
let $name = this.$route.name.split('-')
this.$router.push({ path: `/${$name[0]}/${$name[1]}/list/${id}`, query: query })
},
/**
* Subprocess processing
* @param subProcessId Subprocess ID
*/
_subProcessHandle (subProcessId) {
let subProcessIds = []
let getIds = this.$route.query.subProcessIds
if (getIds) {
let newId = getIds.split(',')
newId.push(this.urlParam.id)
subProcessIds = newId
} else {
subProcessIds.push(this.urlParam.id)
}
let $name = this.$route.name.split('-')
this.$router.push({ path: `/${$name[0]}/${$name[1]}/list/${subProcessId}`, query: { subProcessIds: subProcessIds.join(',') } })
},
/**
* Refresh data
*/
_refresh () {
this.isRefresh = true
this._getTaskState(false).then(res => {
setTimeout(() => {
this.isRefresh = false
this.$message.success(`${i18n.$t('Refresh status succeeded')}`)
}, 2200)
})
},
/**
* View variables
*/
_toggleView () {
findComponentDownward(this.$root, `assist-dag-index`)._toggleView()
},
/**
* Starting parameters
*/
_toggleParam () {
findComponentDownward(this.$root, `starting-params-dag-index`)._toggleParam()
},
/**
* Create a node popup layer
* @param Object id
*/
_createLineLabel({id, sourceId, targetId}) {
// $('#jsPlumb_2_50').text('111')
let self = this
self.$modal.destroy()
const removeNodesEvent = (fromThis) => {
// Manually destroy events inside the component
fromThis.$destroy()
// Close the popup
eventModel.remove()
}
eventModel = this.$drawer({
className: 'dagMask',
render (h) {
return h(mFormLineModel,{
on: {
addLineInfo ({ item, fromThis }) {
self.addConnects(item)
setTimeout(() => {
removeNodesEvent(fromThis)
}, 100)
},
cancel ({fromThis}) {
removeNodesEvent(fromThis)
}
},
props: {
id: id,
sourceId: sourceId,
targetId: targetId
}
})
}
})
},
_createNodes ({ id, type }) {
let self = this
let preNode = []
let rearNode = []
let rearList = []
$('div[data-targetarr*="' + id + '"]').each(function(){
rearNode.push($(this).attr("id"))
})
if (rearNode.length>0) {
rearNode.forEach(v => {
let rearobj = {}
rearobj.value = $(`#${v}`).find('.name-p').text()
rearobj.label = $(`#${v}`).find('.name-p').text()
rearList.push(rearobj)
})
} else {
rearList = []
}
let targetarr = $(`#${id}`).attr('data-targetarr')
if (targetarr) {
let nodearr = targetarr.split(',')
nodearr.forEach(v => {
let nodeobj = {}
nodeobj.value = $(`#${v}`).find('.name-p').text()
nodeobj.label = $(`#${v}`).find('.name-p').text()
preNode.push(nodeobj)
})
} else {
preNode = []
}
if (eventModel) {
// Close the popup
eventModel.remove()
}
const removeNodesEvent = (fromThis) => {
// Manually destroy events inside the component
fromThis.$destroy()
// Close the popup
eventModel.remove()
}
this.taskId = id
type = type || self.dagBarId
eventModel = this.$drawer({
closable: false,
direction: 'right',
escClose: true,
className: 'dagMask',
render: h => h(mFormModel, {
on: {
addTaskInfo ({ item, fromThis }) {
self.addTasks(item)
setTimeout(() => {
removeNodesEvent(fromThis)
}, 100)
},
/**
* Cache the item
* @param item
* @param fromThis
*/
cacheTaskInfo({item, fromThis}) {
self.cacheTasks(item)
},
close ({ item,flag, fromThis }) {
self.addTasks(item)
// Edit status does not allow deletion of nodes
if (flag) {
jsPlumb.remove(id)
}
removeNodesEvent(fromThis)
},
onSubProcess ({ subProcessId, fromThis }) {
removeNodesEvent(fromThis)
self._subProcessHandle(subProcessId)
}
},
props: {
id: id,
taskType: type,
self: self,
preNode: preNode,
rearList: rearList,
instanceId: this.$route.params.id
}
})
})
},
removeEventModelById ($id) {
if(eventModel && this.taskId == $id){
eventModel.remove()
}
},
/**
* query the process definition pagination version
*/
_version (item) {
let self = this
this.getProcessDefinitionVersionsPage({
pageNo: 1,
pageSize: 10,
processDefinitionId: this.urlParam.id
}).then(res => {
let processDefinitionVersions = res.data.lists
let total = res.data.totalCount
let pageSize = res.data.pageSize
let pageNo = res.data.currentPage
if (this.versionsModel) {
this.versionsModel.remove()
}
this.versionsModel = this.$drawer({
direction: 'right',
closable: true,
showMask: true,
escClose: true,
render (h) {
return h(mVersions, {
on: {
/**
* switch version in process definition version list
*
* @param version the version user want to change
* @param processDefinitionId the process definition id
* @param fromThis fromThis
*/
mVersionSwitchProcessDefinitionVersion ({ version, processDefinitionId, fromThis }) {
self.$store.state.dag.isSwitchVersion = true
self.switchProcessDefinitionVersion({
version: version,
processDefinitionId: processDefinitionId
}).then(res => {
self.$message.success($t('Switch Version Successfully'))
setTimeout(() => {
fromThis.$destroy()
self.versionsModel.remove()
}, 0)
self.$router.push({ path: `/projects/definition/list/${processDefinitionId}?_t=${new Date().getTime()}` })
}).catch(e => {
self.$store.state.dag.isSwitchVersion = false
self.$message.error(e.msg || '')
})
},
/**
* Paging event of process definition versions
*
* @param pageNo page number
* @param pageSize page size
* @param processDefinitionId the process definition id of page version
* @param fromThis fromThis
*/
mVersionGetProcessDefinitionVersionsPage ({ pageNo, pageSize, processDefinitionId, fromThis }) {
self.getProcessDefinitionVersionsPage({
pageNo: pageNo,
pageSize: pageSize,
processDefinitionId: processDefinitionId
}).then(res => {
fromThis.processDefinitionVersions = res.data.lists
fromThis.total = res.data.totalCount
fromThis.pageSize = res.data.pageSize
fromThis.pageNo = res.data.currentPage
}).catch(e => {
self.$message.error(e.msg || '')
})
},
/**
* delete one version of process definition
*
* @param version the version need to delete
* @param processDefinitionId the process definition id user want to delete
* @param fromThis fromThis
*/
mVersionDeleteProcessDefinitionVersion ({ version, processDefinitionId, fromThis }) {
self.deleteProcessDefinitionVersion({
version: version,
processDefinitionId: processDefinitionId
}).then(res => {
self.$message.success(res.msg || '')
fromThis.$emit('mVersionGetProcessDefinitionVersionsPage', {
pageNo: 1,
pageSize: 10,
processDefinitionId: processDefinitionId,
fromThis: fromThis
})
}).catch(e => {
self.$message.error(e.msg || '')
})
},
/**
* remove this drawer
*
* @param fromThis
*/
close ({ fromThis }) {
setTimeout(() => {
fromThis.$destroy()
self.versionsModel.remove()
}, 0)
}
},
props: {
processDefinition: {
id: self.urlParam.id,
version: self.$store.state.dag.version
},
processDefinitionVersions: processDefinitionVersions,
total: total,
pageNo: pageNo,
pageSize: pageSize
}
})
}
})
}).catch(e => {
this.$message.error(e.msg || '')
})
}
},
watch: {
'tasks': {
deep: true,
handler (o) {
// Edit state does not allow deletion of node a...
this.setIsEditDag(true)
}
}
},
created () {
// Edit state does not allow deletion of node a...
this.setIsEditDag(false)
if (this.$route.query.subProcessIds) {
this.isRtTasks = true
}
Dag.init({
dag: this,
instance: jsPlumb.getInstance({
Endpoint: [
'Dot', { radius: 1, cssClass: 'dot-style' }
],
Connector: 'Bezier',
PaintStyle: { lineWidth: 2, stroke: '#456' }, // Connection style
HoverPaintStyle: {stroke: '#ccc', strokeWidth: 3},
ConnectionOverlays: [
[
'Arrow',
{
location: 1,
id: 'arrow',
length: 12,
foldback: 0.8
}
],
['Label', {
location: 0.5,
id: 'label'
}]
],
Container: 'canvas',
ConnectionsDetachable: true
})
})
},
mounted () {
this.init(this.arg)
},
beforeDestroy () {
this.resetParams()
// Destroy round robin
clearInterval(this.setIntervalP)
},
destroyed () {
if (eventModel) {
eventModel.remove()
}
},
computed: {
...mapState('dag', ['tasks', 'locations', 'connects', 'isEditDag', 'name'])
},
components: {}
}
</script>
<style lang="scss" rel="stylesheet/scss">
@import "./dag";
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,553 |
[Bug][ui] when editing the workflow, the selected connecting line shows no style change, but the unselected lines get thicker (in version 1.3.0, editing a workflow leaves the selected line's style unchanged while the other lines are thickened)
|
When defining a workflow and selecting a line with the arrow selector, the selected line's style does not change, while the other, unselected lines become thicker; and when the mouse moves over one of those thickened unselected lines, it snaps back to its original style. Because of this, when I want to delete a particular line, I cannot tell which line I have actually selected.
|
https://github.com/apache/dolphinscheduler/issues/3553
|
https://github.com/apache/dolphinscheduler/pull/3572
|
879feb79b3aa28a6a296b3955b2796f81a0162df
|
3b714b15754af240a07fcf6c18d5a780140df21f
| 2020-08-19T09:37:58Z |
java
| 2020-08-26T01:40:12Z |
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/util.js
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import _ from 'lodash'
import i18n from '@/module/i18n'
import store from '@/conf/home/store'
/**
* Node, to array
*/
const rtTargetarrArr = (id) => {
const ids = $(`#${id}`).attr('data-targetarr')
return ids ? ids.split(',') : []
}
/**
* Store node id to targetarr
*/
const saveTargetarr = (valId, domId) => {
const $target = $(`#${domId}`)
const targetStr = $target.attr('data-targetarr') ? $target.attr('data-targetarr') + `,${valId}` : `${valId}`
$target.attr('data-targetarr', targetStr)
}
const rtBantpl = () => {
return `<em class="ans-icon-forbidden" data-toggle="tooltip" data-html="true" data-container="body" data-placement="left" title="${i18n.$t('Prohibition execution')}"></em>`
}
/**
* return node html
*/
const rtTasksTpl = ({ id, name, x, y, targetarr, isAttachment, taskType, runFlag, nodenumber, successNode, failedNode }) => {
let tpl = ''
tpl += `<div class="w jtk-draggable jtk-droppable jtk-endpoint-anchor jtk-connected ${isAttachment ? 'jtk-ep' : ''}" data-targetarr="${targetarr || ''}" data-successNode="${successNode || ''}" data-failedNode="${failedNode || ''}" data-nodenumber="${nodenumber || 0}" data-tasks-type="${taskType}" id="${id}" style="left: ${x}px; top: ${y}px;">`
tpl += '<div>'
tpl += '<div class="state-p"></div>'
tpl += `<div class="icos icos-${taskType}"></div>`
tpl += `<span class="name-p">${name}</span>`
tpl += '</div>'
tpl += '<div class="ep"></div>'
tpl += '<div class="ban-p">'
if (runFlag === 'FORBIDDEN') {
tpl += rtBantpl()
}
tpl += '</div>'
tpl += '</div>'
return tpl
}
/**
* Get all tasks nodes
*/
const tasksAll = () => {
const a = []
$('#canvas .w').each(function (idx, elem) {
const e = $(elem)
a.push({
id: e.attr('id'),
name: e.find('.name-p').text(),
targetarr: e.attr('data-targetarr') || '',
nodenumber: e.attr('data-nodenumber'),
x: parseInt(e.css('left'), 10),
y: parseInt(e.css('top'), 10)
})
})
return a
}
/**
* Determine if name is in the current dag map
* rely dom / backfill
*/
const isNameExDag = (name, rely) => {
if (rely === 'dom') {
return _.findIndex(tasksAll(), v => v.name === name) !== -1
} else {
return _.findIndex(store.state.dag.tasks, v => v.name === name) !== -1
}
}
/**
* Change svg line color
*/
const setSvgColor = (e, color) => {
// Traverse clear all colors
$('.jtk-connector').each((i, o) => {
_.map($(o)[0].childNodes, v => {
$(v).attr('fill', '#2d8cf0').attr('stroke', '#2d8cf0').attr('stroke-width', 2)
})
})
// Add color to the selection
_.map($(e.canvas)[0].childNodes, (v, i) => {
$(v).attr('fill', color).attr('stroke', color)
if ($(v).attr('class')) {
$(v).attr('stroke-width', 2)
}
})
}
/**
* Get all node ids
*/
const allNodesId = () => {
const idArr = []
$('.w').each((i, o) => {
const $obj = $(o)
const $span = $obj.find('.name-p').text()
if ($span) {
idArr.push({
id: $obj.attr('id'),
name: $span
})
}
})
return idArr
}
/**
* compute scale,because it cant get from jquery directly
* @param el element
* @returns {boolean|number}
*/
const computeScale = function (el) {
const matrix = el.css('transform')
if (!matrix || matrix === 'none') {
return false
}
const values = matrix.split('(')[1].split(')')[0].split(',')
return Math.sqrt(values[0] * values[0] + values[1] * values[1])
}
export {
rtTargetarrArr,
saveTargetarr,
rtTasksTpl,
tasksAll,
isNameExDag,
setSvgColor,
allNodesId,
rtBantpl,
computeScale
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,607 |
[Bug][Workflow version] ProcessDefinition version can be deleted and saved while releaseState=ONLINE
|
**Describe the bug**
A ProcessDefinition version can be deleted, and the definition saved, while releaseState=ONLINE.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. set processdefinition online
2. click processdefinition name
3. click save `[bug] should be disabled`
4. click version info, delete/switch version `[bug] should be disabled`
**Which version of Dolphin Scheduler:**
-[dev]
@yangyichao-mango
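A rough sketch of the kind of guard the report implies: refuse version delete/switch (and save) while the definition is online. The enum and message below are stand-ins, not the project's actual types or error codes:

```java
// Sketch only: the enum and strings are placeholders, not DolphinScheduler's real API.
public final class VersionGuardSketch {

    enum ReleaseState { OFFLINE, ONLINE }

    static String deleteVersion(ReleaseState state, int version) {
        if (state == ReleaseState.ONLINE) {
            // mirrors the report: delete/switch/save should be refused while ONLINE
            return "process definition is online, operation not allowed";
        }
        return "deleted version " + version;
    }

    public static void main(String[] args) {
        System.out.println(deleteVersion(ReleaseState.ONLINE, 3));  // refused
        System.out.println(deleteVersion(ReleaseState.OFFLINE, 3)); // deleted version 3
    }
}
```

On the UI side the same condition can disable the Save and Version Info buttons when the `releaseState` prop passed to the DAG page is ONLINE.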
|
https://github.com/apache/dolphinscheduler/issues/3607
|
https://github.com/apache/dolphinscheduler/pull/3609
|
06ab654d647c3fc6142f96a41f0913a53e3e8698
|
5e2a8bea2e228b2198502176bc95f2e94ad88b46
| 2020-08-27T02:16:10Z |
java
| 2020-08-27T03:13:17Z |
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="clearfix dag-model" >
<div class="toolbar">
<div class="title"><span>{{$t('Toolbar')}}</span></div>
<div class="toolbar-btn">
<div class="bar-box roundedRect jtk-draggable jtk-droppable jtk-endpoint-anchor jtk-connected"
:class="v === dagBarId ? 'active' : ''"
:id="v"
:key="v"
v-for="(item,v) in tasksTypeList"
@mousedown="_getDagId(v)">
<div data-toggle="tooltip" :title="item.desc">
<div class="icos" :class="'icos-' + v" ></div>
</div>
</div>
</div>
</div>
<div class="dag-contect">
<div class="dag-toolbar">
<div class="assist-btn">
<x-button
style="vertical-align: middle;"
data-toggle="tooltip"
:title="$t('View variables')"
data-container="body"
type="primary"
size="xsmall"
:disabled="$route.name !== 'projects-instance-details'"
@click="_toggleView"
icon="ans-icon-code">
</x-button>
<x-button
style="vertical-align: middle;"
data-toggle="tooltip"
:title="$t('Startup parameter')"
data-container="body"
type="primary"
size="xsmall"
:disabled="$route.name !== 'projects-instance-details'"
@click="_toggleParam"
icon="ans-icon-arrow-circle-right">
</x-button>
<span class="name">{{name}}</span>
<span v-if="name" class="copy-name" @click="_copyName" :data-clipboard-text="name"><em class="ans-icon-copy" data-container="body" data-toggle="tooltip" :title="$t('Copy name')" ></em></span>
</div>
<div class="save-btn">
<div class="operation" style="vertical-align: middle;">
<a href="javascript:"
v-for="(item,$index) in toolOperList"
:class="_operationClass(item)"
:id="item.code"
:key="$index"
@click="_ckOperation(item,$event)">
<x-button type="text" data-container="body" :icon="item.icon" v-tooltip.light="item.desc"></x-button>
</a>
</div>
<x-button
type="primary"
v-tooltip.light="$t('Format DAG')"
icon="ans-icon-triangle-solid-right"
size="xsmall"
data-container="body"
v-if="(type === 'instance' || 'definition') && urlParam.id !=undefined"
style="vertical-align: middle;"
@click="dagAutomaticLayout">
</x-button>
<x-button
v-tooltip.light="$t('Refresh DAG status')"
data-container="body"
style="vertical-align: middle;"
icon="ans-icon-refresh"
type="primary"
:loading="isRefresh"
v-if="type === 'instance'"
@click="!isRefresh && _refresh()"
size="xsmall" >
</x-button>
<x-button
v-if="isRtTasks"
style="vertical-align: middle;"
type="primary"
size="xsmall"
icon="ans-icon-play"
@click="_rtNodesDag" >
{{$t('Return_1')}}
</x-button>
<x-button
type="primary"
v-tooltip.light="$t('Close')"
icon="ans-icon-off"
size="xsmall"
data-container="body"
v-if="(type === 'instance' || 'definition') "
style="vertical-align: middle;"
@click="_closeDAG">
{{$t('Close')}}
</x-button>
<x-button
style="vertical-align: middle;"
type="primary"
size="xsmall"
:loading="spinnerLoading"
@click="_saveChart"
icon="ans-icon-save"
>
{{spinnerLoading ? 'Loading...' : $t('Save')}}
</x-button>
<x-button
style="vertical-align: middle;"
type="primary"
size="xsmall"
v-if="this.type !== 'instance' && this.urlParam.id !== null"
:loading="spinnerLoading"
@click="_version"
icon="ans-icon-dependence"
>
{{spinnerLoading ? 'Loading...' : $t('Version Info')}}
</x-button>
</div>
</div>
<div class="scrollbar dag-container">
<div class="jtk-demo" id="jtk-demo">
<div class="jtk-demo-canvas canvas-wide statemachine-demo jtk-surface jtk-surface-nopan jtk-draggable" id="canvas" ></div>
</div>
</div>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import Dag from './dag'
import mUdp from './udp/udp'
import i18n from '@/module/i18n'
import { jsPlumb } from 'jsplumb'
import Clipboard from 'clipboard'
import { allNodesId } from './plugIn/util'
import { toolOper, tasksType } from './config'
import mFormModel from './formModel/formModel'
import mFormLineModel from './formModel/formLineModel'
import { formatDate } from '@/module/filter/filter'
import { findComponentDownward } from '@/module/util/'
import disabledState from '@/module/mixin/disabledState'
import { mapActions, mapState, mapMutations } from 'vuex'
import mVersions from '../../projects/pages/definition/pages/list/_source/versions'
let eventModel
export default {
name: 'dag-chart',
data () {
return {
tasksTypeList: tasksType,
toolOperList: toolOper(this),
dagBarId: null,
toolOperCode: '',
spinnerLoading: false,
urlParam: {
id: this.$route.params.id || null
},
isRtTasks: false,
isRefresh: false,
isLoading: false,
taskId: null,
arg: false,
}
},
mixins: [disabledState],
props: {
type: String,
releaseState: String
},
methods: {
...mapActions('dag', ['saveDAGchart', 'updateInstance', 'updateDefinition', 'getTaskState', 'switchProcessDefinitionVersion', 'getProcessDefinitionVersionsPage', 'deleteProcessDefinitionVersion']),
...mapMutations('dag', ['addTasks', 'cacheTasks', 'resetParams', 'setIsEditDag', 'setName', 'addConnects']),
// DAG automatic layout
dagAutomaticLayout() {
if(this.store.state.dag.isEditDag) {
this.$message.warning(`${i18n.$t('Please save the DAG before formatting')}`)
return false
}
$('#canvas').html('')
// Destroy round robin
Dag.init({
dag: this,
instance: jsPlumb.getInstance({
Endpoint: [
'Dot', { radius: 1, cssClass: 'dot-style' }
],
Connector: 'Bezier',
PaintStyle: { lineWidth: 2, stroke: '#456' }, // Connection style
ConnectionOverlays: [
[
'Arrow',
{
location: 1,
id: 'arrow',
length: 12,
foldback: 0.8
}
],
['Label', {
location: 0.5,
id: 'label'
}]
],
Container: 'canvas',
ConnectionsDetachable: true
})
})
if (this.tasks.length) {
Dag.backfill(true)
if (this.type === 'instance') {
this._getTaskState(false).then(res => {})
}
} else {
Dag.create()
}
},
init (args) {
if (this.tasks.length) {
Dag.backfill(args)
// Process instances can view status
if (this.type === 'instance') {
this._getTaskState(false).then(res => {})
// Round robin acquisition status
this.setIntervalP = setInterval(() => {
this._getTaskState(true).then(res => {})
}, 90000)
}
} else {
Dag.create()
}
},
/**
* copy name
*/
_copyName(){
let clipboard = new Clipboard(`.copy-name`)
clipboard.on('success', e => {
this.$message.success(`${i18n.$t('Copy success')}`)
// Free memory
clipboard.destroy()
})
clipboard.on('error', e => {
// Copy is not supported
this.$message.warning(`${i18n.$t('The browser does not support automatic copying')}`)
// Free memory
clipboard.destroy()
})
},
/**
* Get state interface
* @param isReset Whether to manually refresh
*/
_getTaskState (isReset) {
return new Promise((resolve, reject) => {
this.getTaskState(this.urlParam.id).then(res => {
let data = res.list
let state = res.processInstanceState
let taskList = res.taskList
let idArr = allNodesId()
const titleTpl = (item, desc) => {
let $item = _.filter(taskList, v => v.name === item.name)[0]
return `<div style="text-align: left">${i18n.$t('Name')}:${$item.name}</br>${i18n.$t('State')}:${desc}</br>${i18n.$t('type')}:${$item.taskType}</br>${i18n.$t('host')}:${$item.host || '-'}</br>${i18n.$t('Retry Count')}:${$item.retryTimes}</br>${i18n.$t('Submit Time')}:${formatDate($item.submitTime)}</br>${i18n.$t('Start Time')}:${formatDate($item.startTime)}</br>${i18n.$t('End Time')}:${$item.endTime ? formatDate($item.endTime) : '-'}</br></div>`
}
// remove tip state dom
$('.w').find('.state-p').html('')
data.forEach(v1 => {
idArr.forEach(v2 => {
if (v2.name === v1.name) {
let dom = $(`#${v2.id}`)
let state = dom.find('.state-p')
let depState = ''
taskList.forEach(item=>{
if(item.name==v1.name) {
depState = item.state
}
})
dom.attr('data-state-id', v1.stateId)
dom.attr('data-dependent-result', v1.dependentResult || '')
dom.attr('data-dependent-depState', depState)
state.append(`<strong class="${v1.icoUnicode} ${v1.isSpin ? 'as as-spin' : ''}" style="color:${v1.color}" data-toggle="tooltip" data-html="true" data-container="body"></strong>`)
state.find('strong').attr('title', titleTpl(v2, v1.desc))
}
})
})
if (state === 'PAUSE' || state === 'STOP' || state === 'FAILURE' || state === 'SUCCESS') {
// Manual refresh does not regain large json
if (isReset) {
findComponentDownward(this.$root, `${this.type}-details`)._reset()
}
}
resolve()
})
})
},
/**
* Get the action bar id
* @param item
*/
_getDagId (v) {
// if (this.isDetails) {
// return
// }
this.dagBarId = v
},
/**
* operating
*/
_ckOperation (item) {
let is = true
let code = ''
if (item.disable) {
return
}
if (this.toolOperCode === item.code) {
this.toolOperCode = ''
code = item.code
is = false
} else {
this.toolOperCode = item.code
code = this.toolOperCode
is = true
}
// event type
Dag.toolbarEvent({
item: item,
code: code,
is: is
})
},
_operationClass (item) {
return this.toolOperCode === item.code ? 'active' : ''
// if (item.disable) {
// return this.toolOperCode === item.code ? 'active' : ''
// } else {
// return 'disable'
// }
},
/**
* Storage interface
*/
_save (sourceType) {
return new Promise((resolve, reject) => {
this.spinnerLoading = true
// Storage store
Dag.saveStore().then(res => {
if(this._verifConditions(res.tasks)) {
if (this.urlParam.id) {
/**
* Edit
* @param saveInstanceEditDAGChart => Process instance editing
* @param saveEditDAGChart => Process definition editing
*/
this[this.type === 'instance' ? 'updateInstance' : 'updateDefinition'](this.urlParam.id).then(res => {
this.$message.success(res.msg)
this.spinnerLoading = false
// Jump process definition
if (this.type === 'instance') {
this.$router.push({ path: `/projects/instance/list/${this.urlParam.id}?_t=${new Date().getTime()}` })
} else {
this.$router.push({ path: `/projects/definition/list/${this.urlParam.id}?_t=${new Date().getTime()}` })
}
resolve()
}).catch(e => {
this.$message.error(e.msg || '')
this.spinnerLoading = false
reject(e)
})
} else {
// New
this.saveDAGchart().then(res => {
this.$message.success(res.msg)
this.spinnerLoading = false
// source @/conf/home/pages/dag/_source/editAffirmModel/index.js
if (sourceType !== 'affirm') {
// Jump process definition
this.$router.push({ name: 'projects-definition-list' })
}
resolve()
}).catch(e => {
this.$message.error(e.msg || '')
this.setName('')
this.spinnerLoading = false
reject(e)
})
}
}
})
})
},
_closeDAG(){
let $name = this.$route.name
if($name && $name.indexOf("definition") != -1){
this.$router.push({ name: 'projects-definition-list'})
}else{
this.$router.push({ name: 'projects-instance-list'})
}
},
_verifConditions (value) {
let tasks = value
let bool = true
tasks.map(v=>{
if(v.type == 'CONDITIONS' && (v.conditionResult.successNode[0] =='' || v.conditionResult.successNode[0] == null || v.conditionResult.failedNode[0] =='' || v.conditionResult.failedNode[0] == null)) {
bool = false
return false
}
})
if(!bool) {
this.$message.warning(`${i18n.$t('Successful branch flow and failed branch flow are required')}`)
this.spinnerLoading = false
return false
}
return true
},
/**
* Global parameter
* @param Promise
*/
_udpTopFloorPop () {
return new Promise((resolve, reject) => {
let modal = this.$modal.dialog({
closable: false,
showMask: true,
escClose: true,
className: 'v-modal-custom',
transitionName: 'opacityp',
render (h) {
return h(mUdp, {
on: {
onUdp () {
modal.remove()
resolve()
},
close () {
modal.remove()
}
}
})
}
})
})
},
/**
* Save chart
*/
_saveChart () {
// Verify node
if (!this.tasks.length) {
this.$message.warning(`${i18n.$t('Failed to create node to save')}`)
return
}
// Global parameters (optional)
this._udpTopFloorPop().then(() => {
return this._save()
})
},
/**
* Return to the previous child node
*/
_rtNodesDag () {
let getIds = this.$route.query.subProcessIds
let idsArr = getIds.split(',')
let ids = idsArr.slice(0, idsArr.length - 1)
let id = idsArr[idsArr.length - 1]
let query = {}
if (id !== idsArr[0]) {
query = { subProcessIds: ids.join(',') }
}
let $name = this.$route.name.split('-')
this.$router.push({ path: `/${$name[0]}/${$name[1]}/list/${id}`, query: query })
},
/**
* Subprocess processing
* @param subProcessId Subprocess ID
*/
_subProcessHandle (subProcessId) {
let subProcessIds = []
let getIds = this.$route.query.subProcessIds
if (getIds) {
let newId = getIds.split(',')
newId.push(this.urlParam.id)
subProcessIds = newId
} else {
subProcessIds.push(this.urlParam.id)
}
let $name = this.$route.name.split('-')
this.$router.push({ path: `/${$name[0]}/${$name[1]}/list/${subProcessId}`, query: { subProcessIds: subProcessIds.join(',') } })
},
/**
* Refresh data
*/
_refresh () {
this.isRefresh = true
this._getTaskState(false).then(res => {
setTimeout(() => {
this.isRefresh = false
this.$message.success(`${i18n.$t('Refresh status succeeded')}`)
}, 2200)
})
},
/**
* View variables
*/
_toggleView () {
findComponentDownward(this.$root, `assist-dag-index`)._toggleView()
},
/**
* Starting parameters
*/
_toggleParam () {
findComponentDownward(this.$root, `starting-params-dag-index`)._toggleParam()
},
/**
* Create a node popup layer
* @param Object id
*/
_createLineLabel({id, sourceId, targetId}) {
// $('#jsPlumb_2_50').text('111')
let self = this
self.$modal.destroy()
const removeNodesEvent = (fromThis) => {
// Manually destroy events inside the component
fromThis.$destroy()
// Close the popup
eventModel.remove()
}
eventModel = this.$drawer({
className: 'dagMask',
render (h) {
return h(mFormLineModel,{
on: {
addLineInfo ({ item, fromThis }) {
self.addConnects(item)
setTimeout(() => {
removeNodesEvent(fromThis)
}, 100)
},
cancel ({fromThis}) {
removeNodesEvent(fromThis)
}
},
props: {
id: id,
sourceId: sourceId,
targetId: targetId
}
})
}
})
},
_createNodes ({ id, type }) {
let self = this
let preNode = []
let rearNode = []
let rearList = []
$('div[data-targetarr*="' + id + '"]').each(function(){
rearNode.push($(this).attr("id"))
})
if (rearNode.length>0) {
rearNode.forEach(v => {
let rearobj = {}
rearobj.value = $(`#${v}`).find('.name-p').text()
rearobj.label = $(`#${v}`).find('.name-p').text()
rearList.push(rearobj)
})
} else {
rearList = []
}
let targetarr = $(`#${id}`).attr('data-targetarr')
if (targetarr) {
let nodearr = targetarr.split(',')
nodearr.forEach(v => {
let nodeobj = {}
nodeobj.value = $(`#${v}`).find('.name-p').text()
nodeobj.label = $(`#${v}`).find('.name-p').text()
preNode.push(nodeobj)
})
} else {
preNode = []
}
if (eventModel) {
// Close the popup
eventModel.remove()
}
const removeNodesEvent = (fromThis) => {
// Manually destroy events inside the component
fromThis.$destroy()
// Close the popup
eventModel.remove()
}
this.taskId = id
type = type || self.dagBarId
eventModel = this.$drawer({
closable: false,
direction: 'right',
escClose: true,
className: 'dagMask',
render: h => h(mFormModel, {
on: {
addTaskInfo ({ item, fromThis }) {
self.addTasks(item)
setTimeout(() => {
removeNodesEvent(fromThis)
}, 100)
},
/**
* Cache the item
* @param item
* @param fromThis
*/
cacheTaskInfo({item, fromThis}) {
self.cacheTasks(item)
},
close ({ item,flag, fromThis }) {
self.addTasks(item)
// Edit status does not allow deletion of nodes
if (flag) {
jsPlumb.remove(id)
}
removeNodesEvent(fromThis)
},
onSubProcess ({ subProcessId, fromThis }) {
removeNodesEvent(fromThis)
self._subProcessHandle(subProcessId)
}
},
props: {
id: id,
taskType: type,
self: self,
preNode: preNode,
rearList: rearList,
instanceId: this.$route.params.id
}
})
})
},
removeEventModelById ($id) {
if(eventModel && this.taskId == $id){
eventModel.remove()
}
},
/**
* query the process definition pagination version
*/
_version (item) {
let self = this
this.getProcessDefinitionVersionsPage({
pageNo: 1,
pageSize: 10,
processDefinitionId: this.urlParam.id
}).then(res => {
let processDefinitionVersions = res.data.lists
let total = res.data.totalCount
let pageSize = res.data.pageSize
let pageNo = res.data.currentPage
if (this.versionsModel) {
this.versionsModel.remove()
}
this.versionsModel = this.$drawer({
direction: 'right',
closable: true,
showMask: true,
escClose: true,
render (h) {
return h(mVersions, {
on: {
/**
* switch version in process definition version list
*
* @param version the version user want to change
* @param processDefinitionId the process definition id
* @param fromThis fromThis
*/
mVersionSwitchProcessDefinitionVersion ({ version, processDefinitionId, fromThis }) {
self.$store.state.dag.isSwitchVersion = true
self.switchProcessDefinitionVersion({
version: version,
processDefinitionId: processDefinitionId
}).then(res => {
self.$message.success(i18n.$t('Switch Version Successfully'))
setTimeout(() => {
fromThis.$destroy()
self.versionsModel.remove()
}, 0)
self.$router.push({ path: `/projects/definition/list/${processDefinitionId}?_t=${new Date().getTime()}` })
}).catch(e => {
self.$store.state.dag.isSwitchVersion = false
self.$message.error(e.msg || '')
})
},
/**
* Paging event of process definition versions
*
* @param pageNo page number
* @param pageSize page size
* @param processDefinitionId the process definition id of page version
* @param fromThis fromThis
*/
mVersionGetProcessDefinitionVersionsPage ({ pageNo, pageSize, processDefinitionId, fromThis }) {
self.getProcessDefinitionVersionsPage({
pageNo: pageNo,
pageSize: pageSize,
processDefinitionId: processDefinitionId
}).then(res => {
fromThis.processDefinitionVersions = res.data.lists
fromThis.total = res.data.totalCount
fromThis.pageSize = res.data.pageSize
fromThis.pageNo = res.data.currentPage
}).catch(e => {
self.$message.error(e.msg || '')
})
},
/**
* delete one version of process definition
*
* @param version the version need to delete
* @param processDefinitionId the process definition id user want to delete
* @param fromThis fromThis
*/
mVersionDeleteProcessDefinitionVersion ({ version, processDefinitionId, fromThis }) {
self.deleteProcessDefinitionVersion({
version: version,
processDefinitionId: processDefinitionId
}).then(res => {
self.$message.success(res.msg || '')
fromThis.$emit('mVersionGetProcessDefinitionVersionsPage', {
pageNo: 1,
pageSize: 10,
processDefinitionId: processDefinitionId,
fromThis: fromThis
})
}).catch(e => {
self.$message.error(e.msg || '')
})
},
/**
* remove this drawer
*
* @param fromThis
*/
close ({ fromThis }) {
setTimeout(() => {
fromThis.$destroy()
self.versionsModel.remove()
}, 0)
}
},
props: {
processDefinition: {
id: self.urlParam.id,
version: self.$store.state.dag.version
},
processDefinitionVersions: processDefinitionVersions,
total: total,
pageNo: pageNo,
pageSize: pageSize
}
})
}
})
}).catch(e => {
this.$message.error(e.msg || '')
})
}
},
watch: {
'tasks': {
deep: true,
handler (o) {
// Edit state does not allow deletion of nodes
this.setIsEditDag(true)
}
}
},
created () {
// Edit state does not allow deletion of nodes
this.setIsEditDag(false)
if (this.$route.query.subProcessIds) {
this.isRtTasks = true
}
Dag.init({
dag: this,
instance: jsPlumb.getInstance({
Endpoint: [
'Dot', { radius: 1, cssClass: 'dot-style' }
],
Connector: 'Bezier',
PaintStyle: { lineWidth: 2, stroke: '#456' }, // Connection style
ConnectionOverlays: [
[
'Arrow',
{
location: 1,
id: 'arrow',
length: 12,
foldback: 0.8
}
],
['Label', {
location: 0.5,
id: 'label'
}]
],
Container: 'canvas',
ConnectionsDetachable: true
})
})
},
mounted () {
this.init(this.arg)
},
beforeDestroy () {
this.resetParams()
// Destroy round robin
clearInterval(this.setIntervalP)
},
destroyed () {
if (eventModel) {
eventModel.remove()
}
},
computed: {
...mapState('dag', ['tasks', 'locations', 'connects', 'isEditDag', 'name'])
},
components: {}
}
</script>
<style lang="scss" rel="stylesheet/scss">
@import "./dag";
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,607 |
[Bug][Workflow version] ProcessDefinition version can delete and save with releaseState=ONLINE
|
**Describe the bug**
A ProcessDefinition version can be deleted and saved while releaseState=ONLINE.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. set the process definition online
2. click the process definition name
3. click save `[bug] should be disabled`
4. click version info, then delete/switch a version `[bug] should be disabled`
**Which version of Dolphin Scheduler:**
-[dev]
@yangyichao-mango
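
A server-side guard is one way to make this state impossible regardless of what the UI allows — a minimal standalone sketch, assuming a release-state enum and a current-version check; the enum, method, and messages below are illustrative, not the actual DolphinScheduler API:

```java
// Illustrative guard: reject version deletion/switching while the definition is ONLINE.
public class VersionGuardSketch {

    enum ReleaseState { ONLINE, OFFLINE }

    // Throws if the requested version operation should be rejected.
    static void checkVersionOperationAllowed(ReleaseState releaseState, int requestedVersion, int currentVersion) {
        if (releaseState == ReleaseState.ONLINE) {
            throw new IllegalStateException("process definition is ONLINE; take it offline before deleting or switching versions");
        }
        if (requestedVersion == currentVersion) {
            throw new IllegalStateException("cannot delete or switch away from the current version");
        }
    }

    public static void main(String[] args) {
        checkVersionOperationAllowed(ReleaseState.OFFLINE, 2, 3); // allowed, returns normally
        try {
            checkVersionOperationAllowed(ReleaseState.ONLINE, 2, 3);
        } catch (IllegalStateException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```

The same condition can back the UI's disabled state, so the save and version buttons stay greyed out whenever the definition is online.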
|
https://github.com/apache/dolphinscheduler/issues/3607
|
https://github.com/apache/dolphinscheduler/pull/3609
|
06ab654d647c3fc6142f96a41f0913a53e3e8698
|
5e2a8bea2e228b2198502176bc95f2e94ad88b46
| 2020-08-27T02:16:10Z |
java
| 2020-08-27T03:13:17Z |
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/versions.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="container">
<div class="title-box">
<span class="name">{{$t('Version Info')}}</span>
</div>
<div class="table-box" v-if="processDefinitionVersions.length > 0">
<table class="fixed">
<caption><!-- placeHolder --></caption>
<tr>
<th scope="col">
<span>#</span>
</th>
<th scope="col" style="min-width: 40px">
<span>Version</span>
</th>
<th scope="col" style="min-width: 200px;max-width: 300px;">
<span>{{$t('Description')}}</span>
</th>
<th scope="col" style="min-width: 50px">
<span>{{$t('Create Time')}}</span>
</th>
<th scope="col" style="min-width: 300px">
<span>{{$t('Operation')}}</span>
</th>
</tr>
<tr v-for="(item, $index) in processDefinitionVersions" :key="item.id">
<td>
<span>-</span>
</td>
<td>
<span v-if="item.version">
<span v-if="item.version === processDefinition.version" style="color: green"><strong>{{item.version}} {{$t('Current Version')}}</strong></span>
<span v-else>{{item.version}}</span>
</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.description">{{item.description}}</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.createTime">{{item.createTime}}</span>
<span v-else>-</span>
</td>
<td>
<x-poptip
:ref="'poptip-switch-version-' + $index"
placement="top-end"
width="90">
<p>{{$t('Confirm Switch To This Version?')}}</p>
<div style="text-align: right; margin: 0;padding-top: 4px;">
<x-button type="text" size="xsmall" shape="circle" @click="_closeSwitchVersion($index)">{{$t('Cancel')}}</x-button>
<x-button type="primary" size="xsmall" shape="circle" @click="_mVersionSwitchProcessDefinitionVersion(item)">{{$t('Confirm')}}</x-button>
</div>
<template slot="reference">
<x-button
icon="ans-icon-dependence"
type="primary"
shape="circle"
size="xsmall"
:disabled="item.version === processDefinition.version"
data-toggle="tooltip"
:title="$t('Switch To This Version')">
</x-button>
</template>
</x-poptip>
<x-poptip
:ref="'poptip-delete-' + $index"
placement="top-end"
width="90">
<p>{{$t('Delete?')}}</p>
<div style="text-align: right; margin: 0;padding-top: 4px;">
<x-button type="text" size="xsmall" shape="circle" @click="_closeDelete($index)">{{$t('Cancel')}}</x-button>
<x-button type="primary" size="xsmall" shape="circle" @click="_mVersionDeleteProcessDefinitionVersion(item,$index)">{{$t('Confirm')}}</x-button>
</div>
<template slot="reference">
<x-button
icon="ans-icon-trash"
type="error"
shape="circle"
size="xsmall"
:disabled="item.version === processDefinition.version"
data-toggle="tooltip"
:title="$t('delete')">
</x-button>
</template>
</x-poptip>
</td>
</tr>
</table>
</div>
<div v-if="processDefinitionVersions.length === 0">
<m-no-data><!----></m-no-data>
</div>
<div v-if="processDefinitionVersions.length > 0">
<div class="bottom-box">
<x-button type="text" @click="_close()"> {{$t('Cancel')}} </x-button>
<x-page :current="pageNo" :total="total" @on-change="_mVersionGetProcessDefinitionVersionsPage" small><!----></x-page>
</div>
</div>
</div>
</template>
<script>
import mNoData from '@/module/components/noData/noData'
export default {
name: 'versions',
data () {
return {
tableHeaders: [
{
label: 'version',
prop: 'version'
},
{
label: 'createTime',
prop: 'createTime'
}
]
}
},
props: {
processDefinition: Object,
processDefinitionVersions: Array,
total: Number,
pageNo: Number,
pageSize: Number
},
methods: {
/**
* switch version in process definition version list
*/
_mVersionSwitchProcessDefinitionVersion (item) {
this.$emit('mVersionSwitchProcessDefinitionVersion', {
version: item.version,
processDefinitionId: this.processDefinition.id,
fromThis: this
})
},
/**
* delete one version of process definition
*/
_mVersionDeleteProcessDefinitionVersion (item) {
this.$emit('mVersionDeleteProcessDefinitionVersion', {
version: item.version,
processDefinitionId: this.processDefinition.id,
fromThis: this
})
},
/**
* Paging event of process definition versions
*/
_mVersionGetProcessDefinitionVersionsPage (val) {
this.$emit('mVersionGetProcessDefinitionVersionsPage', {
pageNo: val,
pageSize: this.pageSize,
processDefinitionId: this.processDefinition.id,
fromThis: this
})
},
/**
* Close the switch version layer
*/
_closeSwitchVersion (i) {
if (i > 0) {
this.$refs[`poptip-switch-version-${i}`][0].doClose()
}
},
/**
* Close the delete layer
*/
_closeDelete (i) {
if (i > 0) {
this.$refs[`poptip-delete-${i}`][0].doClose()
}
},
/**
* Close and destroy component and component internal events
*/
_close () {
// flag Whether to delete a node this.$destroy()
this.$emit('close', {
fromThis: this
})
}
},
created () {},
mounted () {},
components: { mNoData }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.container {
width: 500px;
position: relative;
.title-box {
height: 61px;
border-bottom: 1px solid #DCDEDC;
position: relative;
.name {
position: absolute;
left: 24px;
top: 18px;
font-size: 16px;
}
}
.bottom-box {
position: absolute;
bottom: 0;
left: 0;
width: 100%;
text-align: right;
height: 60px;
line-height: 60px;
border-top: 1px solid #DCDEDC;
background: #fff;
.ans-page {
display: inline-block;
}
}
.table-box {
overflow-y: scroll;
height: calc(100vh - 61px);
padding-bottom: 60px;
}
}
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,713 |
[Bug][HadoopUtils] catfile method Stream not closed
|
The catFile method does not close the data stream, resulting in too many open files.

|
https://github.com/apache/dolphinscheduler/issues/3713
|
https://github.com/apache/dolphinscheduler/pull/3715
|
4ed36387507c50b1042802143676a04fc51e6bcc
|
7af20ca3afe858f29abdd9ad9cb5013d8fd33d65
| 2020-09-10T07:36:42Z |
java
| 2020-09-12T15:42:40Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.commons.io.IOUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.nio.file.Files;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_UPLOAD_PATH;
/**
* hadoop utils
* single instance
*/
public class HadoopUtils implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
public static final String jobHistoryAddress = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);
private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";
private static final LoadingCache<String, HadoopUtils> cache = CacheBuilder
.newBuilder()
.expireAfterWrite(PropertyUtils.getInt(Constants.KERBEROS_EXPIRE_TIME, 2), TimeUnit.HOURS)
.build(new CacheLoader<String, HadoopUtils>() {
@Override
public HadoopUtils load(String key) throws Exception {
return new HadoopUtils();
}
});
private static volatile boolean yarnEnabled = false;
private Configuration configuration;
private FileSystem fs;
private HadoopUtils() {
init();
initHdfsPath();
}
public static HadoopUtils getInstance() {
return cache.getUnchecked(HADOOP_UTILS_KEY);
}
/**
* init dolphinscheduler root path in hdfs
*/
private void initHdfsPath() {
Path path = new Path(resourceUploadPath);
try {
if (!fs.exists(path)) {
fs.mkdirs(path);
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* init hadoop configuration
*/
private void init() {
try {
configuration = new Configuration();
String resourceStorageType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);
if (resUploadType == ResUploadType.HDFS) {
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) {
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
hdfsUser = "";
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
String defaultFS = configuration.get(Constants.FS_DEFAULTFS);
//first get key from core-site.xml hdfs-site.xml ,if null ,then try to get from properties file
// the default is the local file system
if (defaultFS.startsWith("file")) {
String defaultFSProp = PropertyUtils.getString(Constants.FS_DEFAULTFS);
if (StringUtils.isNotBlank(defaultFSProp)) {
Map<String, String> fsRelatedProps = PropertyUtils.getPrefixedProperties("fs.");
configuration.set(Constants.FS_DEFAULTFS, defaultFSProp);
fsRelatedProps.forEach((key, value) -> configuration.set(key, value));
} else {
logger.error("property:{} can not to be empty, please set!", Constants.FS_DEFAULTFS);
throw new RuntimeException(
String.format("property: %s can not to be empty, please set!", Constants.FS_DEFAULTFS)
);
}
} else {
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", Constants.FS_DEFAULTFS, defaultFS);
}
if (fs == null) {
if (StringUtils.isNotEmpty(hdfsUser)) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
@Override
public Boolean run() throws Exception {
fs = FileSystem.get(configuration);
return true;
}
});
} else {
logger.warn("hdfs.root.user is not set value!");
fs = FileSystem.get(configuration);
}
}
} else if (resUploadType == ResUploadType.S3) {
System.setProperty(Constants.AWS_S3_V4, Constants.STRING_TRUE);
configuration.set(Constants.FS_DEFAULTFS, PropertyUtils.getString(Constants.FS_DEFAULTFS));
configuration.set(Constants.FS_S3A_ENDPOINT, PropertyUtils.getString(Constants.FS_S3A_ENDPOINT));
configuration.set(Constants.FS_S3A_ACCESS_KEY, PropertyUtils.getString(Constants.FS_S3A_ACCESS_KEY));
configuration.set(Constants.FS_S3A_SECRET_KEY, PropertyUtils.getString(Constants.FS_S3A_SECRET_KEY));
fs = FileSystem.get(configuration);
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* @return Configuration
*/
public Configuration getConfiguration() {
return configuration;
}
/**
* get application url
*
* @param applicationId application id
* @return url of application
*/
public String getApplicationUrl(String applicationId) throws Exception {
/**
* if rmHaIds contains xx, it means the resourcemanager is not used;
* otherwise:
*   if rmHaIds is empty, a single resourcemanager is enabled
*   if rmHaIds is not empty, resourcemanager HA is enabled
*/
String appUrl = "";
if (StringUtils.isEmpty(rmHaIds)) {
//single resourcemanager enabled
appUrl = appAddress;
yarnEnabled = true;
} else {
//resourcemanager HA enabled
appUrl = getAppAddress(appAddress, rmHaIds);
yarnEnabled = true;
logger.info("application url : {}", appUrl);
}
if (StringUtils.isBlank(appUrl)) {
throw new Exception("application url is blank");
}
return String.format(appUrl, applicationId);
}
public String getJobHistoryUrl(String applicationId) {
//eg:application_1587475402360_712719 -> job_1587475402360_712719
String jobId = applicationId.replace("application", "job");
return String.format(jobHistoryAddress, jobId);
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @return byte[] byte array
* @throws IOException errors
*/
public byte[] catFile(String hdfsFilePath) throws IOException {
if (StringUtils.isBlank(hdfsFilePath)) {
logger.error("hdfs file path:{} is blank", hdfsFilePath);
return new byte[0];
}
        // open the stream in try-with-resources so it is always closed (avoids "too many open files")
        try (FSDataInputStream fsDataInputStream = fs.open(new Path(hdfsFilePath))) {
            return IOUtils.toByteArray(fsDataInputStream);
        }
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @param skipLineNums skip line numbers
* @param limit read how many lines
* @return content of file
* @throws IOException errors
*/
public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {
if (StringUtils.isBlank(hdfsFilePath)) {
logger.error("hdfs file path:{} is blank", hdfsFilePath);
return Collections.emptyList();
}
try (FSDataInputStream in = fs.open(new Path(hdfsFilePath))) {
BufferedReader br = new BufferedReader(new InputStreamReader(in));
Stream<String> stream = br.lines().skip(skipLineNums).limit(limit);
return stream.collect(Collectors.toList());
}
}
/**
* make the given file and all non-existent parents into
* directories. Has the semantics of Unix 'mkdir -p'.
* Existence of the directory hierarchy is not an error.
*
* @param hdfsPath path to create
* @return mkdir result
* @throws IOException errors
*/
public boolean mkdir(String hdfsPath) throws IOException {
return fs.mkdirs(new Path(hdfsPath));
}
/**
* copy files between FileSystems
*
* @param srcPath source hdfs path
* @param dstPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
* @return if success or not
* @throws IOException errors
*/
public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException {
return FileUtil.copy(fs, new Path(srcPath), fs, new Path(dstPath), deleteSource, overwrite, fs.getConf());
}
/**
* the src file is on the local disk. Add it to FS at
* the given dst name.
*
* @param srcFile local file
* @param dstHdfsPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
* @return if success or not
* @throws IOException errors
*/
public boolean copyLocalToHdfs(String srcFile, String dstHdfsPath, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcFile);
Path dstPath = new Path(dstHdfsPath);
fs.copyFromLocalFile(deleteSource, overwrite, srcPath, dstPath);
return true;
}
/**
* copy hdfs file to local
*
* @param srcHdfsFilePath source hdfs file path
* @param dstFile destination file
* @param deleteSource delete source
* @param overwrite overwrite
* @return result of copy hdfs file to local
* @throws IOException errors
*/
public boolean copyHdfsToLocal(String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcHdfsFilePath);
File dstPath = new File(dstFile);
if (dstPath.exists()) {
if (dstPath.isFile()) {
if (overwrite) {
Files.delete(dstPath.toPath());
}
} else {
logger.error("destination file must be a file");
}
}
if (!dstPath.getParentFile().exists()) {
dstPath.getParentFile().mkdirs();
}
return FileUtil.copy(fs, srcPath, dstPath, deleteSource, fs.getConf());
}
/**
* delete a file
*
* @param hdfsFilePath the path to delete.
* @param recursive if path is a directory and set to
* true, the directory is deleted else throws an exception. In
* case of a file the recursive can be set to either true or false.
* @return true if delete is successful else false.
* @throws IOException errors
*/
public boolean delete(String hdfsFilePath, boolean recursive) throws IOException {
return fs.delete(new Path(hdfsFilePath), recursive);
}
/**
* check if exists
*
* @param hdfsFilePath source file path
* @return result of exists or not
* @throws IOException errors
*/
public boolean exists(String hdfsFilePath) throws IOException {
return fs.exists(new Path(hdfsFilePath));
}
/**
* Gets a list of files in the directory
*
* @param filePath file path
* @return {@link FileStatus} file status
* @throws Exception errors
*/
public FileStatus[] listFileStatus(String filePath) throws Exception {
try {
return fs.listStatus(new Path(filePath));
} catch (IOException e) {
logger.error("Get file list exception", e);
throw new Exception("Get file list exception", e);
}
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*
* @param src path to be renamed
* @param dst new path after rename
* @return true if rename is successful
* @throws IOException on failure
*/
public boolean rename(String src, String dst) throws IOException {
return fs.rename(new Path(src), new Path(dst));
}
/**
* hadoop resourcemanager enabled or not
*
* @return result
*/
public boolean isYarnEnabled() {
return yarnEnabled;
}
/**
* get the state of an application
*
* @param applicationId application id
* @return the return may be null or there may be other parse exceptions
*/
public ExecutionStatus getApplicationStatus(String applicationId) throws Exception {
if (StringUtils.isEmpty(applicationId)) {
return null;
}
String result = Constants.FAILED;
String applicationUrl = getApplicationUrl(applicationId);
logger.info("applicationUrl={}", applicationUrl);
String responseContent;
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) {
responseContent = KerberosHttpClient.get(applicationUrl);
} else {
responseContent = HttpUtils.get(applicationUrl);
}
if (responseContent != null) {
ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
if (!jsonObject.has("app")) {
return ExecutionStatus.FAILURE;
}
result = jsonObject.path("app").path("finalStatus").asText();
} else {
//may be in job history
String jobHistoryUrl = getJobHistoryUrl(applicationId);
logger.info("jobHistoryUrl={}", jobHistoryUrl);
responseContent = HttpUtils.get(jobHistoryUrl);
if (null != responseContent) {
ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
if (!jsonObject.has("job")) {
return ExecutionStatus.FAILURE;
}
result = jsonObject.path("job").path("state").asText();
} else {
return ExecutionStatus.FAILURE;
}
}
switch (result) {
case Constants.ACCEPTED:
return ExecutionStatus.SUBMITTED_SUCCESS;
case Constants.SUCCEEDED:
return ExecutionStatus.SUCCESS;
case Constants.NEW:
case Constants.NEW_SAVING:
case Constants.SUBMITTED:
case Constants.FAILED:
return ExecutionStatus.FAILURE;
case Constants.KILLED:
return ExecutionStatus.KILL;
case Constants.RUNNING:
default:
return ExecutionStatus.RUNNING_EXECUTION;
}
}
/**
* get data hdfs path
*
* @return data hdfs path
*/
public static String getHdfsDataBasePath() {
if ("/".equals(resourceUploadPath)) {
// if basepath is configured to /, the generated url may be //default/resources (with extra leading /)
return "";
} else {
return resourceUploadPath;
}
}
/**
* hdfs resource dir
*
* @param tenantCode tenant code
* @param resourceType resource type
* @return hdfs resource dir
*/
public static String getHdfsDir(ResourceType resourceType, String tenantCode) {
String hdfsDir = "";
if (resourceType.equals(ResourceType.FILE)) {
hdfsDir = getHdfsResDir(tenantCode);
} else if (resourceType.equals(ResourceType.UDF)) {
hdfsDir = getHdfsUdfDir(tenantCode);
}
return hdfsDir;
}
/**
* hdfs resource dir
*
* @param tenantCode tenant code
* @return hdfs resource dir
*/
public static String getHdfsResDir(String tenantCode) {
return String.format("%s/resources", getHdfsTenantDir(tenantCode));
}
/**
* hdfs user dir
*
* @param tenantCode tenant code
* @param userId user id
* @return hdfs resource dir
*/
public static String getHdfsUserDir(String tenantCode, int userId) {
return String.format("%s/home/%d", getHdfsTenantDir(tenantCode), userId);
}
/**
* hdfs udf dir
*
* @param tenantCode tenant code
* @return get udf dir on hdfs
*/
public static String getHdfsUdfDir(String tenantCode) {
return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
}
/**
* get hdfs file name
*
* @param resourceType resource type
* @param tenantCode tenant code
* @param fileName file name
* @return hdfs file name
*/
public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsDir(resourceType, tenantCode), fileName);
}
/**
* get absolute path and name for resource file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for file on hdfs
*/
public static String getHdfsResourceFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
}
/**
* get absolute path and name for udf file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for udf file on hdfs
*/
public static String getHdfsUdfFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
}
/**
* @param tenantCode tenant code
* @return file directory of tenants on hdfs
*/
public static String getHdfsTenantDir(String tenantCode) {
return String.format("%s/%s", getHdfsDataBasePath(), tenantCode);
}
/**
* getAppAddress
*
* @param appAddress app address
* @param rmHa resource manager ha
* @return app address
*/
public static String getAppAddress(String appAddress, String rmHa) {
//get active ResourceManager
String activeRM = YarnHAAdminUtils.getAcitveRMName(rmHa);
String[] split1 = appAddress.split(Constants.DOUBLE_SLASH);
if (split1.length != 2) {
return null;
}
String start = split1[0] + Constants.DOUBLE_SLASH;
String[] split2 = split1[1].split(Constants.COLON);
if (split2.length != 2) {
return null;
}
String end = Constants.COLON + split2[1];
return start + activeRM + end;
}
@Override
public void close() throws IOException {
if (fs != null) {
try {
fs.close();
} catch (IOException e) {
logger.error("Close HadoopUtils instance failed", e);
throw new IOException("Close HadoopUtils instance failed", e);
}
}
}
/**
* yarn ha admin utils
*/
private static final class YarnHAAdminUtils extends RMAdminCLI {
/**
* get active resourcemanager
*
* @param rmIds
* @return
*/
public static String getAcitveRMName(String rmIds) {
String[] rmIdArr = rmIds.split(Constants.COMMA);
int activeResourceManagerPort = PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088);
String yarnUrl = "http://%s:" + activeResourceManagerPort + "/ws/v1/cluster/info";
String state = null;
try {
/**
* send http get request to rm1
*/
state = getRMState(String.format(yarnUrl, rmIdArr[0]));
if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmIdArr[0];
} else if (Constants.HADOOP_RM_STATE_STANDBY.equals(state)) {
state = getRMState(String.format(yarnUrl, rmIdArr[1]));
if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmIdArr[1];
}
} else {
return null;
}
} catch (Exception e) {
state = getRMState(String.format(yarnUrl, rmIdArr[1]));
if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
// rm1 failed and rm2 reported ACTIVE, so return rm2 rather than rm1
return rmIdArr[1];
}
}
return null;
}
/**
* get ResourceManager state
*
* @param url
* @return
*/
public static String getRMState(String url) {
String retStr = HttpUtils.get(url);
if (StringUtils.isEmpty(retStr)) {
return null;
}
//to json
ObjectNode jsonObject = JSONUtils.parseObject(retStr);
//get ResourceManager state
if (!jsonObject.has("clusterInfo")) {
return null;
}
return jsonObject.get("clusterInfo").path("haState").asText();
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,548 |
[Bug][UI] Monitor gauge chart display blank with same pid
|
**Describe the bug**
With correct response data, the monitor chart doesn't display as expected.
**Expected behavior**
Just show the correct chart rather than a blank panel.
The two workers are started by Docker and have the same pid.
Workers on different machines can legitimately share a pid, so the blank UI is a bug.
**Screenshots**


**Which version of Dolphin Scheduler:**
-[1.3.2-preview]
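
The gauge DOM ids are keyed only by `item.id`, which apparently collides when two containerized workers report the same pid, so both charts render into the same element. A collision-free key needs to mix in something host-specific — a minimal sketch of the idea in Java terms; the helper name and sanitization rule are illustrative, not the actual UI fix:

```java
public class GaugeIdSketch {
    // Build a DOM-id-safe key that stays unique across hosts even when pids collide.
    static String gaugeId(String metric, String host, int index) {
        return ("gauge-" + metric + "-" + host + "-" + index).replaceAll("[^A-Za-z0-9_-]", "-");
    }

    public static void main(String[] args) {
        System.out.println(gaugeId("cpu", "192.168.1.10:1234", 0)); // gauge-cpu-192-168-1-10-1234-0
    }
}
```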
|
https://github.com/apache/dolphinscheduler/issues/3548
|
https://github.com/apache/dolphinscheduler/pull/3759
|
1c07892fea86f14a5465231b285120c6de8e5f31
|
2a77ac3a840b2b861b2e0bb02bbb1c7936c3cf90
| 2020-08-19T03:31:36Z |
java
| 2020-09-17T03:52:43Z |
dolphinscheduler-ui/src/js/conf/home/pages/monitor/pages/servers/master.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-list-construction :title="'Master ' + $t('Manage')">
<template slot="content">
<div class="servers-wrapper" v-show="masterList.length">
<div class="row-box" v-for="(item,$index) in masterList" :key="$index">
<div class="row-title">
<div class="left">
<span class="sp">IP: {{item.host}}</span>
<span class="sp">{{$t('Zk registration directory')}}: {{item.zkDirectory}}</span>
</div>
<div class="right">
<span class="sp">{{$t('Create Time')}}: {{item.createTime | formatDate}}</span>
<span class="sp">{{$t('Last heartbeat time')}}: {{item.lastHeartbeatTime | formatDate}}</span>
</div>
</div>
<div class="row-cont">
<div class="col-md-4">
<m-gauge
:value="(item.resInfo.cpuUsage * 100).toFixed(2)"
:name="'cpuUsage'"
:id="'gauge-cpu-' + item.id">
</m-gauge>
</div>
<div class="col-md-4">
<m-gauge
:value="(item.resInfo.memoryUsage * 100).toFixed(2)"
:name="'memoryUsage'"
:id="'gauge-memory-' + item.id">
</m-gauge>
</div>
<div class="col-md-4">
<div class="text-num-model">
<div class="value-p">
<strong :style="{color:color[$index]}">{{item.resInfo.loadAverage > 0? item.resInfo.loadAverage.toFixed(2):0}}</strong>
</div>
<div class="text-1">
loadAverage
</div>
</div>
</div>
</div>
</div>
</div>
<div v-if="!masterList.length">
<m-no-data></m-no-data>
</div>
<m-spin :is-spin="isLoading"></m-spin>
</template>
</m-list-construction>
</template>
<script>
import _ from 'lodash'
import { mapActions } from 'vuex'
import mGauge from './_source/gauge'
import mList from './_source/zookeeperList'
import mSpin from '@/module/components/spin/spin'
import mNoData from '@/module/components/noData/noData'
import themeData from '@/module/echarts/themeData.json'
import mListConstruction from '@/module/components/listConstruction/listConstruction'
export default {
name: 'servers-master',
data () {
return {
isLoading: false,
masterList: [],
color: themeData.color
}
},
props: {},
methods: {
...mapActions('monitor', ['getMasterData'])
},
watch: {},
created () {
},
mounted () {
this.isLoading = true
this.getMasterData().then(res => {
this.masterList = _.map(res, (v, i) => {
return _.assign(v, {
resInfo: JSON.parse(v.resInfo)
})
})
this.isLoading = false
}).catch(() => {
this.isLoading = false
})
},
components: { mList, mListConstruction, mSpin, mNoData, mGauge }
}
</script>
<style lang="scss" rel="stylesheet/scss">
@import "./servers";
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,548 |
[Bug][UI] Monitor gauge chart display blank with same pid
|
**Describe the bug**
With correct response data, the monitor chart doesn't display as expected.
**Expected behavior**
Just show the correct chart rather than a blank panel.
The two workers are started by Docker and have the same pid.
Workers on different machines can legitimately share a pid, so the blank UI is a bug.
**Screenshots**


**Which version of Dolphin Scheduler:**
-[1.3.2-preview]
|
https://github.com/apache/dolphinscheduler/issues/3548
|
https://github.com/apache/dolphinscheduler/pull/3759
|
1c07892fea86f14a5465231b285120c6de8e5f31
|
2a77ac3a840b2b861b2e0bb02bbb1c7936c3cf90
| 2020-08-19T03:31:36Z |
java
| 2020-09-17T03:52:43Z |
dolphinscheduler-ui/src/js/conf/home/pages/monitor/pages/servers/worker.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-list-construction :title="'Worker ' + $t('Manage')">
<template slot="content">
<div class="servers-wrapper" v-show="workerList.length">
<div class="row-box" v-for="(item,$index) in workerList" :key="$index">
<div class="row-title">
<div class="left">
<span class="sp">IP: {{item.host}}</span>
<span>{{$t('Zk registration directory')}}: <a href="javascript:" @click="_showZkDirectories(item)" class="links">{{$t('Directory detail')}}</a></span>
</div>
<div class="right">
<span class="sp">{{$t('Create Time')}}: {{item.createTime | formatDate}}</span>
<span class="sp">{{$t('Last heartbeat time')}}: {{item.lastHeartbeatTime | formatDate}}</span>
</div>
</div>
<div class="row-cont">
<div class="col-md-4">
<m-gauge
:value="(item.resInfo.cpuUsage * 100).toFixed(2)"
:name="'cpuUsage'"
:id="'gauge-cpu-' + item.id">
</m-gauge>
</div>
<div class="col-md-4">
<m-gauge
:value="(item.resInfo.memoryUsage * 100).toFixed(2)"
:name="'memoryUsage'"
:id="'gauge-memory-' + item.id">
</m-gauge>
</div>
<div class="col-md-4">
<div class="text-num-model">
<div class="value-p">
<strong :style="{color:color[$index]}">{{item.resInfo.loadAverage > 0? item.resInfo.loadAverage.toFixed(2):0}}</strong>
</div>
<div class="text-1">
loadAverage
</div>
</div>
</div>
</div>
</div>
</div>
<div v-if="!workerList.length">
<m-no-data></m-no-data>
</div>
<m-spin :is-spin="isLoading"></m-spin>
</template>
</m-list-construction>
</template>
<script>
import _ from 'lodash'
import { mapActions } from 'vuex'
import mGauge from './_source/gauge'
import mList from './_source/zookeeperList'
import mSpin from '@/module/components/spin/spin'
import mNoData from '@/module/components/noData/noData'
import themeData from '@/module/echarts/themeData.json'
import mListConstruction from '@/module/components/listConstruction/listConstruction'
import zookeeperDirectoriesPopup from './_source/zookeeperDirectories'
export default {
name: 'servers-worker',
data () {
return {
isLoading: false,
workerList: [],
color: themeData.color
}
},
props: {},
methods: {
...mapActions('monitor', ['getWorkerData']),
_showZkDirectories (item) {
let zkDirectories = []
item.zkDirectories.forEach(zkDirectory => {
zkDirectories.push({
zkDirectory: zkDirectory
})
})
this.$drawer({
direction: 'right',
render (h) {
return h(zookeeperDirectoriesPopup, {
props: {
zkDirectories: zkDirectories
}
})
}
})
}
},
watch: {},
created () {
},
mounted () {
this.isLoading = true
this.getWorkerData().then(res => {
this.workerList = _.map(res, (v, i) => {
return _.assign(v, {
resInfo: JSON.parse(v.resInfo)
})
})
this.isLoading = false
}).catch(() => {
this.isLoading = false
})
},
components: { mList, mListConstruction, mSpin, mNoData, mGauge, zookeeperDirectoriesPopup }
}
</script>
<style lang="scss" rel="stylesheet/scss">
@import "./servers";
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,238 |
[Bug][Resource Center] Can not create folder in docker with standalone mode
|
Creating a folder in the Resource Center failed.
1. build docker images by default Dockerfile
2. run docker with args ALL
3. open resource center, and create a folder
4. See error
**Which version of Dolphin Scheduler:**
-[1.3.1]
**Additional context**
The error info in the logs is: NullPointer error in createDirectory (ResourceService.java:484).
It seems to be an HDFS error, but according to the docs the local fs is supported.
I then modified the common config file and changed the storage type from NONE to HDFS, but it still failed.
I want to evaluate DolphinScheduler quickly, but it's not very easy.
Please consider making it possible to quickly evaluate the software through Docker.
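
A likely failure mode is that the directory-creation path assumes an initialized storage client and dereferences null when `resource.storage.type` is NONE (or `fs.defaultFS` points nowhere usable). A defensive sketch of that guard — the enum, method, and messages are illustrative, not the actual ResourcesService code:

```java
// Illustrative guard: fail with a clear message instead of an NPE when storage is not configured.
public class CreateDirectorySketch {

    enum StorageType { NONE, HDFS, S3 }

    static void createDirectory(StorageType storageType, String tenantCode, String dirName) {
        if (storageType == StorageType.NONE) {
            throw new IllegalStateException(
                "resource.storage.type is NONE; set it to HDFS or S3 (a local path via file:/// also works) before creating folders");
        }
        // delegate to the configured storage client here, e.g. mkdir(fullPath)
        System.out.printf("would create %s/%s%n", tenantCode, dirName);
    }

    public static void main(String[] args) {
        createDirectory(StorageType.HDFS, "tenant1", "resources/demo");
    }
}
```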
|
https://github.com/apache/dolphinscheduler/issues/3238
|
https://github.com/apache/dolphinscheduler/pull/3772
|
973cbf8d86df6a0895a5c19cfc69c38b89853850
|
b2e78cbf9cea99bbeb5656b071a9db8b025be363
| 2020-07-18T09:25:05Z |
java
| 2020-09-21T03:36:19Z |
docker/docker-swarm/docker-compose.yml
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: "3.4"
services:
dolphinscheduler-postgresql:
image: bitnami/postgresql:latest
container_name: dolphinscheduler-postgresql
ports:
- 5432:5432
environment:
TZ: Asia/Shanghai
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
POSTGRESQL_REPLICATION_PASSWORD: dolphinscheduler@123
volumes:
- dolphinscheduler-postgresql:/bitnami/postgresql
- dolphinscheduler-postgresql-initdb:/docker-entrypoint-initdb.d
networks:
- dolphinscheduler
dolphinscheduler-zookeeper:
image: bitnami/zookeeper:latest
container_name: dolphinscheduler-zookeeper
ports:
- 2181:2181
environment:
TZ: Asia/Shanghai
ALLOW_ANONYMOUS_LOGIN: "yes"
ZOO_4LW_COMMANDS_WHITELIST: srvr,ruok,wchs,cons
volumes:
- dolphinscheduler-zookeeper:/bitnami/zookeeper
networks:
- dolphinscheduler
dolphinscheduler-api:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-api
command: ["api-server"]
ports:
- 12345:12345
environment:
TZ: Asia/Shanghai
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
DOLPHINSCHEDULER_RESOURCE_STORAGE_TYPE: "HDFS"
DOLPHINSCHEDULER_FS_DEFAULTFS: "file:///data/dolphinscheduler"
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "ApiApplicationServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
- dolphinscheduler-resource-storage-local:/dolphinscheduler
networks:
- dolphinscheduler
dolphinscheduler-frontend:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-frontend
command: ["frontend"]
ports:
- 8888:8888
environment:
TZ: Asia/Shanghai
FRONTEND_API_SERVER_HOST: dolphinscheduler-api
FRONTEND_API_SERVER_PORT: 12345
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "8888"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-api
volumes:
- dolphinscheduler-logs:/var/log/nginx
networks:
- dolphinscheduler
dolphinscheduler-alert:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-alert
command: ["alert-server"]
environment:
TZ: Asia/Shanghai
XLS_FILE_PATH: "/tmp/xls"
MAIL_SERVER_HOST: ""
MAIL_SERVER_PORT: ""
MAIL_SENDER: ""
MAIL_USER: ""
MAIL_PASSWD: ""
MAIL_SMTP_STARTTLS_ENABLE: "false"
MAIL_SMTP_SSL_ENABLE: "false"
MAIL_SMTP_SSL_TRUST: ""
ENTERPRISE_WECHAT_ENABLE: "false"
ENTERPRISE_WECHAT_CORP_ID: ""
ENTERPRISE_WECHAT_SECRET: ""
ENTERPRISE_WECHAT_AGENT_ID: ""
ENTERPRISE_WECHAT_USERS: ""
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "AlertServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-master:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-master
command: ["master-server"]
ports:
- 5678:5678
environment:
TZ: Asia/Shanghai
MASTER_EXEC_THREADS: "100"
MASTER_EXEC_TASK_NUM: "20"
MASTER_HEARTBEAT_INTERVAL: "10"
MASTER_TASK_COMMIT_RETRYTIMES: "5"
MASTER_TASK_COMMIT_INTERVAL: "1000"
MASTER_MAX_CPULOAD_AVG: "100"
MASTER_RESERVED_MEMORY: "0.1"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "MasterServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- dolphinscheduler-logs:/opt/dolphinscheduler/logs
networks:
- dolphinscheduler
dolphinscheduler-worker:
image: apache/dolphinscheduler:latest
container_name: dolphinscheduler-worker
command: ["worker-server"]
ports:
- 1234:1234
- 50051:50051
environment:
TZ: Asia/Shanghai
WORKER_EXEC_THREADS: "100"
WORKER_HEARTBEAT_INTERVAL: "10"
WORKER_FETCH_TASK_NUM: "3"
WORKER_MAX_CPULOAD_AVG: "100"
WORKER_RESERVED_MEMORY: "0.1"
WORKER_GROUP: "default"
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
POSTGRESQL_HOST: dolphinscheduler-postgresql
POSTGRESQL_PORT: 5432
POSTGRESQL_USERNAME: root
POSTGRESQL_PASSWORD: root
POSTGRESQL_DATABASE: dolphinscheduler
ZOOKEEPER_QUORUM: dolphinscheduler-zookeeper:2181
healthcheck:
test: ["CMD", "/root/checkpoint.sh", "WorkerServer"]
interval: 30s
timeout: 5s
retries: 3
start_period: 30s
depends_on:
- dolphinscheduler-postgresql
- dolphinscheduler-zookeeper
volumes:
- type: bind
source: ./dolphinscheduler_env.sh
target: /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
- type: volume
source: dolphinscheduler-worker-data
target: /tmp/dolphinscheduler
- type: volume
source: dolphinscheduler-logs
target: /opt/dolphinscheduler/logs
- type: volume
source: dolphinscheduler-resource-storage-local
target: /dolphinscheduler
networks:
- dolphinscheduler
networks:
dolphinscheduler:
driver: bridge
volumes:
dolphinscheduler-postgresql:
dolphinscheduler-postgresql-initdb:
dolphinscheduler-zookeeper:
dolphinscheduler-worker-data:
dolphinscheduler-logs:
dolphinscheduler-resource-storage-local:
configs:
dolphinscheduler-worker-task-env:
file: ./dolphinscheduler_env.sh
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,124 |
[BUG] a error when build a docker image
|
**Describe the bug**
An error is thrown and the build breaks when I use `.\docker\build\hooks\build.bat`.
**Screenshots**

**Which version of Dolphin Scheduler:**
-1.3.1-release
**Requirement or improvement**
- Windows 10 Pro
|
https://github.com/apache/dolphinscheduler/issues/3124
|
https://github.com/apache/dolphinscheduler/pull/3765
|
5c8171e3977b46e09e8d70119f00ecd0d60ecb05
|
09bdd99004ee4f311997a82974273569f5a9d678
| 2020-07-04T04:45:06Z |
java
| 2020-09-22T02:04:26Z |
docker/build/hooks/build.bat
|
:: Licensed to the Apache Software Foundation (ASF) under one or more
:: contributor license agreements. See the NOTICE file distributed with
:: this work for additional information regarding copyright ownership.
:: The ASF licenses this file to You under the Apache License, Version 2.0
:: (the "License"); you may not use this file except in compliance with
:: the License. You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
@echo off
echo "------ dolphinscheduler start - build -------"
set
if not defined VERSION (
echo "set environment variable [VERSION]"
for /f %%l in (%cd%\sql\soft_version) do (set VERSION=%%l)
)
if not defined DOCKER_REPO (
echo "set environment variable [DOCKER_REPO]"
:: do not quote the value: cmd keeps the quotes literally and the resulting image tag becomes invalid
set DOCKER_REPO=dolphinscheduler
)
echo "Version: %VERSION%"
echo "Repo: %DOCKER_REPO%"
echo "Current Directory is %cd%"
:: maven package(Project Directory)
echo "call mvn clean compile package -Prelease"
call mvn clean compile package -Prelease -DskipTests=true
if "%errorlevel%"=="1" goto :mvnFailed
:: move dolphinscheduler-bin.tar.gz file to docker/build directory
echo "move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\docker\build\"
move %cd%\dolphinscheduler-dist\target\apache-dolphinscheduler-incubating-%VERSION%-SNAPSHOT-dolphinscheduler-bin.tar.gz %cd%\docker\build\
:: docker build
echo "docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\docker\build\"
docker build --build-arg VERSION=%VERSION% -t %DOCKER_REPO%:%VERSION% %cd%\docker\build\
if "%errorlevel%"=="1" goto :dockerBuildFailed
echo "------ dolphinscheduler end - build -------"
:mvnFailed
echo "MAVEN PACKAGE FAILED!"
:dockerBuildFailed
echo "DOCKER BUILD FAILED!"
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,621 |
[Bug][master] After batch deleting the executing process instances, the master cannot get the worker feedback results
|
1. Run more than 40 workflows in batches; all workflow instances and task instances are in the executing state
2. Click the batch delete button on the process instance page
3. After a worker task finishes successfully, the master cannot get the result the worker reports back
4. When the workflow is run again, the master does not assign tasks to the workers
## Solution: after restarting the master service, all tasks run normally


**Which version of Dolphin Scheduler:**
-[1.3.2-release]
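
The steps above suggest the delete request was accepted for instances that were still running, leaving the master without the bookkeeping it needs to match later worker callbacks. Below is a minimal, self-contained sketch of the kind of server-side guard that would reject such requests; `InstanceState` and `DeleteGuard` are illustrative names only, not DolphinScheduler APIs, and this is not the change made in the linked PR.

```java
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Set;
import java.util.function.IntFunction;
import java.util.stream.Collectors;

// Hypothetical illustration: reject batch deletion of process instances that are
// still in a non-terminal state, instead of relying on the UI checkbox alone.
public class DeleteGuard {

    enum InstanceState { RUNNING_EXECUTION, READY_STOP, SUCCESS, FAILURE, STOP, PAUSE }

    // Deletion is only considered safe for terminal states.
    private static final Set<InstanceState> DELETABLE =
            EnumSet.of(InstanceState.SUCCESS, InstanceState.FAILURE,
                       InstanceState.STOP, InstanceState.PAUSE);

    /** Returns the ids that must be rejected because their instance is still active. */
    static List<Integer> rejectActive(List<Integer> ids, IntFunction<InstanceState> stateLookup) {
        return ids.stream()
                .filter(id -> !DELETABLE.contains(stateLookup.apply(id)))
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        // Fake state store for the demo: even ids are still running, odd ids finished.
        List<Integer> requested = Arrays.asList(1, 2, 3, 4);
        List<Integer> rejected = rejectActive(requested,
                id -> id % 2 == 0 ? InstanceState.RUNNING_EXECUTION : InstanceState.SUCCESS);
        System.out.println("rejected (still running): " + rejected); // prints [2, 4]
    }
}
```

Rejecting active instances at the API layer keeps the master's in-memory state consistent regardless of what the frontend allows.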
|
https://github.com/apache/dolphinscheduler/issues/3621
|
https://github.com/apache/dolphinscheduler/pull/3848
|
a51dafc3826e142a08ef03d71e21918d80f677d6
|
d946717fcbb775a9c04b2728056b546bf2038ae4
| 2020-08-28T02:08:47Z |
java
| 2020-09-29T07:55:42Z |
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/instance/pages/list/_source/list.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="list-model" style="position: relative;">
<div class="table-box">
<table class="fixed">
<tr>
<th scope="col" style="min-width: 50px">
<x-checkbox @on-change="_topCheckBoxClick" v-model="checkAll"></x-checkbox>
</th>
<th scope="col" style="min-width: 30px">
<span>{{$t('#')}}</span>
</th>
<th scope="col" style="min-width: 200px;max-width: 300px;">
<span>{{$t('Process Name')}}</span>
</th>
<th scope="col" style="min-width: 30px">
<span>{{$t('State')}}</span>
</th>
<th scope="col" style="min-width: 70px">
<span>{{$t('Run Type')}}</span>
</th>
<th scope="col" style="min-width: 130px">
<span>{{$t('Scheduling Time')}}</span>
</th>
<th scope="col" style="min-width: 130px">
<span>{{$t('Start Time')}}</span>
</th>
<th scope="col" style="min-width: 130px">
<span>{{$t('End Time')}}</span>
</th>
<th scope="col" style="min-width: 60px">
<span>{{$t('Duration')}}s</span>
</th>
<th scope="col" style="min-width: 60px">
<span>{{$t('Run Times')}}</span>
</th>
<th scope="col" style="min-width: 55px">
<span>{{$t('fault-tolerant sign')}}</span>
</th>
<th scope="col" style="min-width: 135px">
<span>{{$t('Executor')}}</span>
</th>
<th scope="col" style="min-width: 100px">
<div style="width: 100px">
<span>{{$t('host')}}</span>
</div>
</th>
<th scope="col" style="min-width: 210px">
<span>{{$t('Operation')}}</span>
</th>
</tr>
<tr v-for="(item, $index) in list" :key="item.id">
<td width="50"><x-checkbox v-model="item.isCheck" :disabled="item.state === 'RUNNING_EXEUTION'" @on-change="_arrDelChange"></x-checkbox></td>
<td width="50">
<span>{{parseInt(pageNo === 1 ? ($index + 1) : (($index + 1) + (pageSize * (pageNo - 1))))}}</span>
</td>
<td style="min-width: 200px;max-width: 300px;padding-right: 10px;">
<span class="ellipsis" style="padding-left: 4px;"><router-link :to="{ path: '/projects/instance/list/' + item.id , query:{id: item.processDefinitionId}}" tag="a" class="links" :title="item.name">{{item.name}}</router-link></span>
</td>
<td>
<span v-html="_rtState(item.state)" style="cursor: pointer;"></span>
</td>
<td><span>{{_rtRunningType(item.commandType)}}</span></td>
<td>
<span v-if="item.scheduleTime">{{item.scheduleTime | formatDate}}</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.startTime">{{item.startTime | formatDate}}</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.endTime">{{item.endTime | formatDate}}</span>
<span v-else>-</span>
</td>
<td width="70"><span>{{item.duration || '-'}}</span></td>
<td width="70"><span>{{item.runTimes}}</span></td>
<td><span>{{item.recovery}}</span></td>
<td>
<span v-if="item.executorName">{{item.executorName}}</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.host" style="word-break: break-all">{{item.host}}</span>
<span v-else>-</span>
</td>
<td style="z-index: inherit;">
<div v-show="item.disabled">
<x-button type="info"
shape="circle"
size="xsmall"
data-toggle="tooltip"
:title="$t('Edit')"
@click="_reEdit(item)"
icon="ans-icon-edit"
:disabled="item.state !== 'SUCCESS' && item.state !== 'PAUSE' && item.state !== 'FAILURE' && item.state !== 'STOP'"></x-button>
<x-button type="info"
shape="circle"
size="xsmall"
data-toggle="tooltip"
:title="$t('Rerun')"
@click="_reRun(item,$index)"
icon="ans-icon-refresh"
:disabled="item.state !== 'SUCCESS' && item.state !== 'PAUSE' && item.state !== 'FAILURE' && item.state !== 'STOP'"></x-button>
<x-button type="success"
shape="circle"
size="xsmall"
data-toggle="tooltip"
:title="$t('Recovery Failed')"
@click="_restore(item,$index)"
icon="ans-icon-fail-empty"
:disabled="item.state !== 'FAILURE'"></x-button>
<x-button type="error"
shape="circle"
size="xsmall"
data-toggle="tooltip"
:title="item.state === 'STOP' ? $t('Recovery Suspend') : $t('Stop')"
@click="_stop(item,$index)"
:icon="item.state === 'STOP' ? 'ans-icon-pause-solid' : 'ans-icon-stop'"
:disabled="item.state !== 'RUNNING_EXEUTION' && item.state != 'STOP'"></x-button>
<x-button type="warning"
shape="circle"
size="xsmall"
data-toggle="tooltip"
:title="item.state === 'PAUSE' ? $t('Recovery Suspend') : $t('Pause')"
@click="_suspend(item,$index)"
:icon="item.state === 'PAUSE' ? 'ans-icon-pause-solid' : 'ans-icon-pause'"
:disabled="item.state !== 'RUNNING_EXEUTION' && item.state !== 'PAUSE'"></x-button>
<x-poptip
:ref="'poptip-delete-' + $index"
placement="top-end"
width="90">
<p>{{$t('Delete?')}}</p>
<div style="text-align: right; margin: 0;padding-top: 4px;">
<x-button type="text" size="xsmall" shape="circle" @click="_closeDelete($index)">{{$t('Cancel')}}</x-button>
<x-button type="primary" size="xsmall" shape="circle" @click="_delete(item,$index)">{{$t('Confirm')}}</x-button>
</div>
<template slot="reference">
<x-button
icon="ans-icon-trash"
type="error"
shape="circle"
size="xsmall"
data-toggle="tooltip"
:disabled="item.state !== 'SUCCESS' && item.state !== 'FAILURE' && item.state !== 'STOP' && item.state !== 'PAUSE'"
:title="$t('delete')">
</x-button>
</template>
</x-poptip>
<x-button type="info"
shape="circle"
size="xsmall"
data-toggle="tooltip"
:title="$t('Gantt')"
@click="_gantt(item)"
icon="ans-icon-gantt">
</x-button>
</div>
<div v-show="!item.disabled">
<!--Edit-->
<x-button
type="info"
shape="circle"
size="xsmall"
icon="ans-icon-edit"
disabled="true">
</x-button>
<!--Rerun-->
<x-button
v-show="buttonType === 'run'"
type="info"
shape="circle"
size="xsmall"
disabled="true">
{{item.count}}
</x-button>
<x-button
v-show="buttonType !== 'run'"
type="info"
shape="circle"
size="xsmall"
icon="ans-icon-refresh"
disabled="true">
</x-button>
<!--Recovery Failed-->
<x-button
v-show="buttonType === 'store'"
type="success"
shape="circle"
size="xsmall"
disabled="true">
{{item.count}}
</x-button>
<x-button
v-show="buttonType !== 'store'"
type="success"
shape="circle"
size="xsmall"
icon="ans-icon-fail-empty"
disabled="true">
</x-button>
<!--Stop-->
<!--<x-button-->
<!--type="error"-->
<!--shape="circle"-->
<!--size="xsmall"-->
<!--icon="ans-icon-pause"-->
<!--disabled="true">-->
<!--</x-button>-->
<!--Countdown => Recovery Suspend/Pause-->
<x-button
v-show="(item.state === 'PAUSE' || item.state == 'STOP') && buttonType === 'suspend'"
type="warning"
shape="circle"
size="xsmall"
disabled="true">
{{item.count}}
</x-button>
<!--Recovery Suspend-->
<x-button
v-show="(item.state === 'PAUSE' || item.state == 'STOP') && buttonType !== 'suspend'"
type="warning"
shape="circle"
size="xsmall"
icon="ans-icon-pause-solid"
disabled="true">
</x-button>
<!--Pause-->
<x-button
v-show="item.state !== 'PAUSE'"
type="warning"
shape="circle"
size="xsmall"
icon="ans-icon-stop"
disabled="true">
</x-button>
<!--Stop-->
<x-button
v-show="item.state !== 'STOP'"
type="warning"
shape="circle"
size="xsmall"
icon="ans-icon-pause"
disabled="true">
</x-button>
<!--delete-->
<x-button
type="error"
shape="circle"
size="xsmall"
icon="ans-icon-trash"
:disabled="true">
</x-button>
<!--Gantt-->
<x-button
type="info"
shape="circle"
size="xsmall"
icon="ans-icon-gantt"
disabled="true">
</x-button>
</div>
</td>
</tr>
</table>
</div>
<x-poptip
v-show="strDelete !== ''"
ref="poptipDeleteAll"
placement="bottom-start"
width="90">
<p>{{$t('Delete?')}}</p>
<div style="text-align: right; margin: 0;padding-top: 4px;">
<x-button type="text" size="xsmall" shape="circle" @click="_closeDelete(-1)">{{$t('Cancel')}}</x-button>
<x-button type="primary" size="xsmall" shape="circle" @click="_delete({},-1)">{{$t('Confirm')}}</x-button>
</div>
<template slot="reference">
<x-button size="xsmall" style="position: absolute; bottom: -48px; left: 22px;" >{{$t('Delete')}}</x-button>
</template>
</x-poptip>
</div>
</template>
<script>
import _ from 'lodash'
import { mapActions } from 'vuex'
import { tasksState, runningType } from '@/conf/home/pages/dag/_source/config'
export default {
name: 'list',
data () {
return {
// data
list: [],
// btn type
buttonType: '',
strDelete: '',
checkAll: false
}
},
props: {
processInstanceList: Array,
pageNo: Number,
pageSize: Number
},
methods: {
...mapActions('dag', ['editExecutorsState', 'deleteInstance', 'batchDeleteInstance']),
/**
* Return run type
*/
_rtRunningType (code) {
return _.filter(runningType, v => v.code === code)[0].desc
},
/**
* Return status
*/
_rtState (code) {
let o = tasksState[code]
return `<em class="ansfont ${o.icoUnicode} ${o.isSpin ? 'as as-spin' : ''}" style="color:${o.color}" data-toggle="tooltip" data-container="body" title="${o.desc}"></em>`
},
/**
* Close the delete layer
*/
_closeDelete (i) {
// close batch
if (i < 0) {
this.$refs['poptipDeleteAll'].doClose()
return
}
// close one
this.$refs[`poptip-delete-${i}`][0].doClose()
},
/**
* delete
*/
_delete (item, i) {
// batch remove (two or more selected)
if (i < 0) {
this._batchDelete()
return
}
// remove one
this.deleteInstance({
processInstanceId: item.id
}).then(res => {
this.$refs[`poptip-delete-${i}`][0].doClose()
this._onUpdate()
this.$message.success(res.msg)
}).catch(e => {
this.$refs[`poptip-delete-${i}`][0].doClose()
this.$message.error(e.msg || '')
})
},
/**
* edit
*/
_reEdit (item) {
this.$router.push({ path: `/projects/instance/list/${item.id}` })
},
/**
* Rerun
* @param REPEAT_RUNNING
*/
_reRun (item, index) {
this._countDownFn({
id: item.id,
executeType: 'REPEAT_RUNNING',
index: index,
buttonType: 'run'
})
},
/**
* Resume running
* @param PAUSE => RECOVER_SUSPENDED_PROCESS
* @param FAILURE => START_FAILURE_TASK_PROCESS
*/
_restore (item, index) {
this._countDownFn({
id: item.id,
executeType: 'START_FAILURE_TASK_PROCESS',
index: index,
buttonType: 'store'
})
},
/**
* stop
* @param STOP
*/
_stop (item, index) {
if(item.state == 'STOP') {
this._countDownFn({
id: item.id,
executeType: 'RECOVER_SUSPENDED_PROCESS',
index: index,
buttonType: 'suspend'
})
} else {
this._upExecutorsState({
processInstanceId: item.id,
executeType: 'STOP'
})
}
},
/**
* pause
* @param PAUSE
*/
_suspend (item, index) {
if (item.state === 'PAUSE') {
this._countDownFn({
id: item.id,
executeType: 'RECOVER_SUSPENDED_PROCESS',
index: index,
buttonType: 'suspend'
})
} else {
this._upExecutorsState({
processInstanceId: item.id,
executeType: 'PAUSE'
})
}
},
/**
* operating
*/
_upExecutorsState (o) {
this.editExecutorsState(o).then(res => {
this.$message.success(res.msg)
$('body').find('.tooltip.fade.top.in').remove()
this._onUpdate()
}).catch(e => {
this.$message.error(e.msg || '')
this._onUpdate()
})
},
/**
* Countdown method refresh
*/
_countDownFn (param) {
this.buttonType = param.buttonType
this.editExecutorsState({
processInstanceId: param.id,
executeType: param.executeType
}).then(res => {
this.list[param.index].disabled = false
$('body').find('.tooltip.fade.top.in').remove()
this.$forceUpdate()
this.$message.success(res.msg)
// Countdown
this._countDown(() => {
this._onUpdate()
}, param.index)
}).catch(e => {
this.$message.error(e.msg || '')
this._onUpdate()
})
},
/**
* update
*/
_onUpdate () {
this.$emit('on-update')
},
/**
* list data handle
*/
_listDataHandle (data) {
if (data.length) {
_.map(data, v => {
v.disabled = true
v.count = 9
})
}
return data
},
/**
* Countdown
*/
_countDown (fn, index) {
const TIME_COUNT = 10
let timer
let $count
if (!timer) {
$count = TIME_COUNT
timer = setInterval(() => {
if ($count > 0 && $count <= TIME_COUNT) {
$count--
this.list[index].count = $count
this.$forceUpdate()
} else {
fn()
clearInterval(timer)
timer = null
}
}, 1000)
}
},
_gantt (item) {
this.$router.push({ path: `/projects/instance/gantt/${item.id}` })
},
_topCheckBoxClick (v) {
this.list.forEach((item, i) => {
this.$set(this.list[i], 'isCheck', v)
})
this._arrDelChange()
},
_arrDelChange (v) {
let arr = []
this.list.forEach((item)=>{
if (item.isCheck) {
arr.push(item.id)
}
})
this.strDelete = _.join(arr, ',')
if (v === false) {
this.checkAll = false
}
},
_batchDelete () {
this.$refs['poptipDeleteAll'].doClose()
this.batchDeleteInstance({
processInstanceIds: this.strDelete
}).then(res => {
this._onUpdate()
this.checkAll = false
this.strDelete = ''
this.$message.success(res.msg)
}).catch(e => {
this.checkAll = false
this.strDelete = ''
this.$message.error(e.msg || '')
})
}
},
watch: {
processInstanceList: {
handler (a) {
this.checkAll = false
this.list = []
setTimeout(() => {
this.list = _.cloneDeep(this._listDataHandle(a))
})
},
immediate: true,
deep: true
},
pageNo () {
this.strDelete = ''
}
},
created () {
},
mounted () {
},
components: { }
}
</script>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,617 |
[Bug][master] After subtask fault tolerance, 2 task instances are generated,The process instance status always is executing
|
Sub-process fault tolerance, two commands:
1. Run a sub_process workflow
2. Stop the master
3. Start the master; 2 task instances are generated


The master log is as follows:

The worker log is as follows:

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
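
From the logs and screenshots, the same task appears to be picked up by more than one failover path after the master restarts, which would explain the duplicated task instance. The sketch below only shows the general idempotency idea; `FailoverDeduplicator` is an illustrative name and not the change made in the linked PR.

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical illustration: make the step that marks a task as NEED_FAULT_TOLERANCE
// idempotent, so overlapping failover passes cannot produce a second task instance
// for the same task.
public class FailoverDeduplicator {

    private final Set<Integer> alreadyFailedOver = ConcurrentHashMap.newKeySet();

    /** Returns true only the first time a given task instance id is submitted. */
    public boolean markForFailover(int taskInstanceId) {
        return alreadyFailedOver.add(taskInstanceId);
    }

    public static void main(String[] args) {
        FailoverDeduplicator dedup = new FailoverDeduplicator();
        // Both the worker-failover pass and the master-failover pass see task 42:
        System.out.println(dedup.markForFailover(42)); // true  -> handle fault tolerance once
        System.out.println(dedup.markForFailover(42)); // false -> already handled, skip
    }
}
```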
|
https://github.com/apache/dolphinscheduler/issues/3617
|
https://github.com/apache/dolphinscheduler/pull/3873
|
c4be3b57493fe75f5a5dbb9f258a0430d0363cc6
|
39411ce03b864bc770da220ad6f81df47bd2487b
| 2020-08-27T09:48:06Z |
java
| 2020-10-10T07:05:56Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.zk;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.builder.TaskExecutionContextBuilder;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.zk.AbstractZKClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.Date;
import java.util.List;
import static org.apache.dolphinscheduler.common.Constants.SLEEP_TIME_MILLIS;
/**
* zookeeper master client
*
* single instance
*/
@Component
public class ZKMasterClient extends AbstractZKClient {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(ZKMasterClient.class);
/**
* process service
*/
@Autowired
private ProcessService processService;
public void start() {
InterProcessMutex mutex = null;
try {
// create distributed lock with the root node path of the lock space as /dolphinscheduler/lock/failover/master
String znodeLock = getMasterStartUpLockPath();
mutex = new InterProcessMutex(getZkClient(), znodeLock);
mutex.acquire();
// init system znode
this.initSystemZNode();
while (!checkZKNodeExists(NetUtils.getHost(), ZKNodeType.MASTER)){
ThreadUtils.sleep(SLEEP_TIME_MILLIS);
}
// self tolerant
if (getActiveMasterNum() == 1) {
failoverWorker(null, true);
failoverMaster(null);
}
}catch (Exception e){
logger.error("master start up exception",e);
}finally {
releaseMutex(mutex);
}
}
@Override
public void close(){
super.close();
}
/**
* handle path events that this class cares about
* @param client zkClient
* @param event path event
* @param path zk path
*/
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
//monitor master
if(path.startsWith(getZNodeParentPath(ZKNodeType.MASTER)+Constants.SINGLE_SLASH)){
handleMasterEvent(event,path);
}else if(path.startsWith(getZNodeParentPath(ZKNodeType.WORKER)+Constants.SINGLE_SLASH)){
//monitor worker
handleWorkerEvent(event,path);
}
}
/**
* remove zookeeper node path
*
* @param path zookeeper node path
* @param zkNodeType zookeeper node type
* @param failover is failover
*/
private void removeZKNodePath(String path, ZKNodeType zkNodeType, boolean failover) {
logger.info("{} node deleted : {}", zkNodeType.toString(), path);
InterProcessMutex mutex = null;
try {
String failoverPath = getFailoverLockPath(zkNodeType);
// create a distributed lock
mutex = new InterProcessMutex(getZkClient(), failoverPath);
mutex.acquire();
String serverHost = getHostByEventDataPath(path);
// handle dead server
handleDeadServer(path, zkNodeType, Constants.ADD_ZK_OP);
//failover server
if(failover){
failoverServerWhenDown(serverHost, zkNodeType);
}
}catch (Exception e){
logger.error("{} server failover failed.", zkNodeType.toString());
logger.error("failover exception ",e);
}
finally {
releaseMutex(mutex);
}
}
/**
* failover server when server down
*
* @param serverHost server host
* @param zkNodeType zookeeper node type
* @throws Exception exception
*/
private void failoverServerWhenDown(String serverHost, ZKNodeType zkNodeType) throws Exception {
if(StringUtils.isEmpty(serverHost) || serverHost.startsWith(NetUtils.getHost())){
return ;
}
switch (zkNodeType){
case MASTER:
failoverMaster(serverHost);
break;
case WORKER:
failoverWorker(serverHost, true);
break;
default:
break;
}
}
/**
* get failover lock path
*
* @param zkNodeType zookeeper node type
* @return fail over lock path
*/
private String getFailoverLockPath(ZKNodeType zkNodeType){
switch (zkNodeType){
case MASTER:
return getMasterFailoverLockPath();
case WORKER:
return getWorkerFailoverLockPath();
default:
return "";
}
}
/**
* monitor master
* @param event event
* @param path path
*/
public void handleMasterEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("master node added : {}", path);
break;
case NODE_REMOVED:
removeZKNodePath(path, ZKNodeType.MASTER, true);
break;
default:
break;
}
}
/**
* monitor worker
* @param event event
* @param path path
*/
public void handleWorkerEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("worker node added : {}", path);
break;
case NODE_REMOVED:
logger.info("worker node deleted : {}", path);
removeZKNodePath(path, ZKNodeType.WORKER, true);
break;
default:
break;
}
}
/**
* task needs failover if task start before worker starts
*
* @param taskInstance task instance
* @return true if task instance need fail over
*/
private boolean checkTaskInstanceNeedFailover(TaskInstance taskInstance) throws Exception {
boolean taskNeedFailover = true;
// no host will execute this task instance now, so there is no need to failover the task
if(taskInstance.getHost() == null){
return false;
}
// if the worker node exists in zookeeper, we must check the task starts after the worker
if(checkZKNodeExists(taskInstance.getHost(), ZKNodeType.WORKER)){
//if task start after worker starts, there is no need to failover the task.
if(checkTaskAfterWorkerStart(taskInstance)){
taskNeedFailover = false;
}
}
return taskNeedFailover;
}
/**
* check task start after the worker server starts.
*
* @param taskInstance task instance
* @return true if task instance start time after worker server start date
*/
private boolean checkTaskAfterWorkerStart(TaskInstance taskInstance) {
if(StringUtils.isEmpty(taskInstance.getHost())){
return false;
}
Date workerServerStartDate = null;
List<Server> workerServers = getServersList(ZKNodeType.WORKER);
for(Server workerServer : workerServers){
if(taskInstance.getHost().equals(workerServer.getHost() + Constants.COLON + workerServer.getPort())){
workerServerStartDate = workerServer.getCreateTime();
break;
}
}
if(workerServerStartDate != null){
return taskInstance.getStartTime().after(workerServerStartDate);
}else{
return false;
}
}
/**
* failover worker tasks
*
* 1. kill yarn job if there are yarn jobs in tasks.
* 2. change task state from running to need failover.
* 3. failover all tasks when workerHost is null
* @param workerHost worker host
* @param needCheckWorkerAlive need check worker alive
* @throws Exception exception
*/
private void failoverWorker(String workerHost, boolean needCheckWorkerAlive) throws Exception {
logger.info("start worker[{}] failover ...", workerHost);
List<TaskInstance> needFailoverTaskInstanceList = processService.queryNeedFailoverTaskInstances(workerHost);
for(TaskInstance taskInstance : needFailoverTaskInstanceList){
if(needCheckWorkerAlive){
if(!checkTaskInstanceNeedFailover(taskInstance)){
continue;
}
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
if(processInstance != null){
taskInstance.setProcessInstance(processInstance);
}
TaskExecutionContext taskExecutionContext = TaskExecutionContextBuilder.get()
.buildTaskInstanceRelatedInfo(taskInstance)
.buildProcessInstanceRelatedInfo(processInstance)
.create();
// only kill yarn job if exists , the local thread has exited
ProcessUtils.killYarnJob(taskExecutionContext);
taskInstance.setState(ExecutionStatus.NEED_FAULT_TOLERANCE);
processService.saveTaskInstance(taskInstance);
}
logger.info("end worker[{}] failover ...", workerHost);
}
/**
* failover master tasks
*
* @param masterHost master host
*/
private void failoverMaster(String masterHost) {
logger.info("start master failover ...");
List<ProcessInstance> needFailoverProcessInstanceList = processService.queryNeedFailoverProcessInstances(masterHost);
//updateProcessInstance host is null and insert into command
for(ProcessInstance processInstance : needFailoverProcessInstanceList){
if(Constants.NULL.equals(processInstance.getHost()) ){
continue;
}
processService.processNeedFailoverProcessInstances(processInstance);
}
logger.info("master failover end");
}
public InterProcessMutex blockAcquireMutex() throws Exception {
InterProcessMutex mutex = new InterProcessMutex(getZkClient(), getMasterLockPath());
mutex.acquire();
return mutex;
}
}
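
For reference, the failover methods above always run inside a Curator `InterProcessMutex`. Below is a standalone sketch of that acquire/work/release pattern; it assumes a ZooKeeper reachable at `localhost:2181`, and the lock path is illustrative rather than the project's actual path constant.

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;

// Standalone sketch of the acquire/work/release pattern used around failover:
// only one process at a time may run the guarded block for a given lock path.
public class FailoverLockSketch {

    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "localhost:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();

        InterProcessMutex mutex = new InterProcessMutex(client, "/dolphinscheduler/lock/failover/demo");
        mutex.acquire();                  // blocks until this process owns the lock
        try {
            System.out.println("holding the failover lock, doing exclusive work...");
        } finally {
            mutex.release();              // always release, mirroring releaseMutex(mutex) above
            client.close();
        }
    }
}
```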
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,617 |
[Bug][master] After subtask fault tolerance, 2 task instances are generated,The process instance status always is executing
|
Sub-process fault tolerance, two commands:
1. Run a sub_process workflow
2. Stop the master
3. Start the master; 2 task instances are generated


The master log is as follows:

The worker log is as follows:

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3617
|
https://github.com/apache/dolphinscheduler/pull/3873
|
c4be3b57493fe75f5a5dbb9f258a0430d0363cc6
|
39411ce03b864bc770da220ad6f81df47bd2487b
| 2020-08-27T09:48:06Z |
java
| 2020-10-10T07:05:56Z |
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_EMPTY_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_SUB_PROCESS;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_SUB_PROCESS_DEFINE_ID;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_SUB_PROCESS_PARENT_INSTANCE_ID;
import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS;
import static java.util.stream.Collectors.toSet;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.CycleEnum;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.CycleDependency;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import com.cronutils.model.Cron;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* process relative dao that some mappers in this.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
/**
* handle Command (construct ProcessInstance from Command) , wrapped in transaction
* @param logger logger
* @param host host
* @param validThreadNum validThreadNum
* @param command found command
* @return process instance
*/
@Transactional(rollbackFor = RuntimeException.class)
public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
ProcessInstance processInstance = constructProcessInstance(command, host);
//cannot construct process instance, return null;
if(processInstance == null){
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
if(!checkThreadNum(command, validThreadNum)){
logger.info("there is not enough thread for this command: {}", command);
return setWaitingThreadProcess(command, processInstance);
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
delCommandByid(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
* @param command command
* @param message message
*/
@Transactional(rollbackFor = RuntimeException.class)
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
delCommandByid(command.getId());
}
/**
* set process waiting thread
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITTING_THREAD);
if(command.getCommandType() != CommandType.RECOVER_WAITTING_THREAD){
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionId());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null){
result = commandMapper.insert(command);
}
return result;
}
/**
* find one command from queue list
* @return command
*/
public Command findOneCommand(){
return commandMapper.getOneToRun();
}
/**
* check the input command exists in queue list
* @param command command
* @return create command result
*/
public Boolean verifyIsNeedCreateCommand(Command command){
Boolean isNeedCreate = true;
Map<CommandType,Integer> cmdTypeMap = new HashMap<CommandType,Integer>();
cmdTypeMap.put(CommandType.REPEAT_RUNNING,1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS,1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS,1);
CommandType commandType = command.getCommandType();
if(cmdTypeMap.containsKey(commandType)){
ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
int processInstanceId = cmdParamObj.path(CMDPARAM_RECOVER_PROCESS_ID_STRING).asInt();
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand:commands){
if(cmdTypeMap.containsKey(tmpCommand.getCommandType())){
ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
if(tempObj != null && processInstanceId == tempObj.path(CMDPARAM_RECOVER_PROCESS_ID_STRING).asInt()){
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId){
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
* @param defineId process definition id
* @return task node list of the definition
*/
public List<TaskNode> getTaskNodeListByDefinitionId(Integer defineId){
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.info("process define not exists");
return null;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
if (null == processData) {
logger.error("process data is null");
return new ArrayList<>();
}
return processData.getTasks();
}
/**
* find process instance by id
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId){
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* delete work process instance by id
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId){
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId){
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for(Integer subId : subProcessIdList){
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
removeTaskLogFile(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* remove task log file
* @param processInstanceId processInstanceId
*/
public void removeTaskLogFile(Integer processInstanceId){
LogClientService logClient = new LogClientService();
List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
if (CollectionUtils.isEmpty(taskInstanceList)){
return;
}
for (TaskInstance taskInstance : taskInstanceList){
String taskLogPath = taskInstance.getLogPath();
if (StringUtils.isEmpty(taskInstance.getHost())){
continue;
}
int port = Constants.RPC_PORT;
String ip = "";
try {
ip = Host.of(taskInstance.getHost()).getIp();
}catch (Exception e){
// compatible old version
ip = taskInstance.getHost();
}
// remove task log from loggerserver
logClient.removeTaskLog(ip,port,taskLogPath);
}
}
/**
* calculate sub process number in the process define.
* @param processDefinitionId processDefinitionId
* @return process thread num count
*/
private Integer workProcessThreadNumCount(Integer processDefinitionId){
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinitionId, ids);
return ids.size()+1;
}
/**
* recursive query sub process definition id by parent id.
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids){
ProcessDefinition processDefinition = processDefineMapper.selectById(parentId);
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = processData.getTasks();
if (taskNodeList != null && taskNodeList.size() > 0){
for (TaskNode taskNode : taskNodeList){
String parameter = taskNode.getParams();
ObjectNode parameterJson = JSONUtils.parseObject(parameter);
if (parameterJson.get(CMDPARAM_SUB_PROCESS_DEFINE_ID) != null){
SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(),ids);
}
}
}
}
/**
* create recovery waiting thread command when thread pool is not enough for the process instance.
* sub work process instance need not to create recovery command.
* create recovery waiting thread command and delete origin command at the same time.
* if the recovery command is exists, only update the field update_time
* @param originCommand originCommand
* @param processInstance processInstance
*/
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
// sub process does not need to create a wait command
if(processInstance.getIsSubProcess() == Flag.YES){
if(originCommand != null){
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD, String.valueOf(processInstance.getId()));
// process instance quit by "waiting thread" state
if(originCommand == null){
Command command = new Command(
CommandType.RECOVER_WAITTING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinitionId(),
JSONUtils.toJsonString(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getProcessInstancePriority()
);
saveCommand(command);
return ;
}
// update the command time if the current command is recovered from the waiting thread
if(originCommand.getCommandType() == CommandType.RECOVER_WAITTING_THREAD){
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
}else{
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITTING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam){
Date scheduleTime = command.getScheduleTime();
if(scheduleTime == null){
if(cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)){
scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
}
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam){
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
// schedule time
Date scheduleTime = getScheduleTime(command, cmdParam);
if(scheduleTime != null){
processInstance.setScheduleTime(scheduleTime);
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
processInstance.setConnects(processDefinition.getConnects());
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
//copy process define json to process instance
processInstance.setProcessInstanceJson(processDefinition.getProcessDefinitionJson());
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
/**
* get process tenant
* if there is a tenant id in the definition, use the tenant of the definition.
* if there is no tenant id in the definition, or that tenant does not exist,
* use the definition creator's tenant.
* @param tenantId tenantId
* @param userId userId
* @return tenant
*/
public Tenant getTenantForProcess(int tenantId, int userId){
Tenant tenant = null;
if(tenantId >= 0){
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0){
return null;
}
if(tenant == null){
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* check command parameters is valid
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam){
if(command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType()== TaskDependType.TASK_PRE){
if(cmdParam == null
|| !cmdParam.containsKey(Constants.CMDPARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMDPARAM_START_NODE_NAMES).isEmpty()){
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
* @param command command
* @param host host
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host){
ProcessInstance processInstance = null;
CommandType commandType = command.getCommandType();
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
ProcessDefinition processDefinition = null;
if(command.getProcessDefinitionId() != 0){
processDefinition = processDefineMapper.selectById(command.getProcessDefinitionId());
if(processDefinition == null){
logger.error("cannot find the work process define! define id : {}", command.getProcessDefinitionId());
return null;
}
}
if(cmdParam != null ){
Integer processInstanceId = 0;
// recover from failure or pause tasks
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING)) {
String processId = cmdParam.get(Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING);
processInstanceId = Integer.parseInt(processId);
if (processInstanceId == 0) {
logger.error("command parameter is error, [ ProcessInstanceId ] is 0");
return null;
}
}else if(cmdParam.containsKey(Constants.CMDPARAM_SUB_PROCESS)){
// sub process map
String pId = cmdParam.get(Constants.CMDPARAM_SUB_PROCESS);
processInstanceId = Integer.parseInt(pId);
}else if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD)){
// waiting thread command
String pId = cmdParam.get(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD);
processInstanceId = Integer.parseInt(pId);
}
if(processInstanceId ==0){
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}else{
processInstance = this.findProcessInstanceDetailById(processInstanceId);
}
processDefinition = processDefineMapper.selectById(processInstance.getProcessDefinitionId());
processInstance.setProcessDefinition(processDefinition);
//reset command parameter
if(processInstance.getCommandParam() != null){
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for(Map.Entry<String, String> entry: processCmdParam.entrySet()) {
if(!cmdParam.containsKey(entry.getKey())){
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if(cmdParam.containsKey(Constants.CMDPARAM_SUB_PROCESS)){
processInstance.setCommandParam(command.getCommandParam());
}
}else{
// generate one new process instance
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}
if(!checkCmdParam(command, cmdParam)){
logger.error("command parameter check failed!");
return null;
}
if(command.getScheduleTime() != null){
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
int runTime = processInstance.getRunTimes();
switch (commandType){
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for(Integer taskId : failedList){
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMDPARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime +1 );
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITTING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for(Integer taskId : suspendedNodeList){
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMDPARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
processInstance.setRunTimes(runTime +1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance taskInstance : taskInstanceList){
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_START_NODE_STRING)){
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance taskInstance : validTaskList){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime +1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
     * return the complement data command type if the process starts with complement data
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command){
if(CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()){
return CommandType.COMPLEMENT_DATA;
}else{
return command.getCommandType();
}
}
/**
* initialize complement data parameters
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if(!processInstance.isComplementData()){
return;
}
Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE),
YYYY_MM_DD_HH_MM_SS);
if(Flag.NO == processInstance.getIsSubProcess()) {
processInstance.setScheduleTime(startComplementTime);
}
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
* set sub work process parameters.
     * handle sub work process instance, update relation table and command parameters;
     * set sub work process flag, extend parent work process command parameters
* @param subProcessInstance subProcessInstance
* @return process instance
*/
public ProcessInstance setSubProcessParam(ProcessInstance subProcessInstance){
String cmdParam = subProcessInstance.getCommandParam();
if(StringUtils.isEmpty(cmdParam)){
return subProcessInstance;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if(paramMap.containsKey(CMDPARAM_SUB_PROCESS)
&& CMDPARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMDPARAM_SUB_PROCESS))){
paramMap.remove(CMDPARAM_SUB_PROCESS);
paramMap.put(CMDPARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
        // copy parent instance user defined params to sub process
String parentInstanceId = paramMap.get(CMDPARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if(StringUtils.isNotEmpty(parentInstanceId)){
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if(parentInstance != null){
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
}else{
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if(processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0){
return subProcessInstance;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
return subProcessInstance;
}
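    // Illustrative behaviour of setSubProcessParam above (hypothetical instance): a sub process whose command
    // param still carries CMDPARAM_SUB_PROCESS with the CMDPARAM_EMPTY_SUB_PROCESS placeholder gets its own
    // instance id written into that key, is flagged as a sub process, and inherits any parent global params it
    // does not define itself (see joinGlobalParams below).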
/**
* join parent global params into sub process.
     * only keys that do not already exist in the sub process global params are joined.
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams){
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String,String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for(Property parent : parentPropertyList){
if(!subMap.containsKey(parent.getProp())){
subPropertyList.add(parent);
}
}
return JSONUtils.toJsonString(subPropertyList);
}
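    // Illustrative example of the merge above (hypothetical values): if the parent global params are
    // [{"prop":"dt","value":"2020-08-27"},{"prop":"env","value":"prod"}] and the sub process already defines
    // [{"prop":"dt","value":"2020-08-28"}], only "env" is appended, so the sub process keeps its own dt value
    // and inherits env=prod.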
/**
* initialize task instance
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance){
if(!taskInstance.isSubProcess()){
if(taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure()){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
* submit task to db
* submit sub process to command
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = RuntimeException.class)
public TaskInstance submitTask(TaskInstance taskInstance){
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if(task == null){
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
return task;
}
if(!task.getState().typeIsFinished()){
createSubWorkProcessCommand(processInstance, task);
}
logger.info("end submit task to db successfully:{} state:{} complete, instance id:{} state: {} ",
taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask){
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if(processMap != null){
return processMap;
}else if(parentInstance.getCommandType() == CommandType.REPEAT_RUNNING
|| parentInstance.isComplementData()){
// update current task id to map
// repeat running does not generate new sub process instance
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if(processMap!= null){
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for(TaskInstance task : preTaskList){
if(task.getName().equals(parentTask.getName())){
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if(map!=null){
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
private void createSubWorkProcessCommand(ProcessInstance parentProcessInstance,
TaskInstance task){
if(!task.isSubProcess()){
return;
}
ProcessInstanceMap instanceMap = setProcessInstanceMap(parentProcessInstance, task);
TaskNode taskNode = JSONUtils.parseObject(task.getTaskJson(), TaskNode.class);
Map<String, String> subProcessParam = JSONUtils.toMap(taskNode.getParams());
Integer childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMDPARAM_SUB_PROCESS_DEFINE_ID));
ProcessInstance childInstance = findSubProcessInstance(parentProcessInstance.getId(), task.getId());
CommandType fatherType = parentProcessInstance.getCommandType();
CommandType commandType = fatherType;
if(childInstance == null){
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
// sub process must begin with schedule/complement data
            // if the father command begins with scheduler/complement data
if(fatherHistoryCommand.startsWith(CommandType.SCHEDULER.toString()) ||
fatherHistoryCommand.startsWith(CommandType.COMPLEMENT_DATA.toString())){
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
}
if(childInstance != null){
childInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateProcessInstance(childInstance);
}
// set sub work process command
String processMapStr = JSONUtils.toJsonString(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if(commandType == CommandType.COMPLEMENT_DATA ||
(childInstance != null && childInstance.isComplementData())){
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJsonString(cmdParam);
}
updateSubProcessDefinitionByParent(parentProcessInstance, childDefineId);
Command command = new Command();
command.setWarningType(parentProcessInstance.getWarningType());
command.setWarningGroupId(parentProcessInstance.getWarningGroupId());
command.setFailureStrategy(parentProcessInstance.getFailureStrategy());
command.setProcessDefinitionId(childDefineId);
command.setScheduleTime(parentProcessInstance.getScheduleTime());
command.setExecutorId(parentProcessInstance.getExecutorId());
command.setCommandParam(processMapStr);
command.setCommandType(commandType);
command.setProcessInstancePriority(parentProcessInstance.getProcessInstancePriority());
command.setWorkerGroup(parentProcessInstance.getWorkerGroup());
createCommand(command);
logger.info("sub process command created: {} ", command.toString());
}
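    // Illustrative flow (hypothetical parent instance): for a SUB_PROCESS task whose parent was started by a
    // SCHEDULER or COMPLEMENT_DATA history command, the child command inherits that command type; the parent's
    // warning settings, failure strategy, priority, worker group and (for complement data) the start/end date
    // window are copied into the new command created for the child process definition.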
/**
* update sub process definition
* @param parentProcessInstance parentProcessInstance
* @param childDefinitionId childDefinitionId
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, int childDefinitionId) {
ProcessDefinition fatherDefinition = this.findProcessDefineById(parentProcessInstance.getProcessDefinitionId());
ProcessDefinition childDefinition = this.findProcessDefineById(childDefinitionId);
if(childDefinition != null && fatherDefinition != null){
childDefinition.setReceivers(fatherDefinition.getReceivers());
childDefinition.setReceiversCc(fatherDefinition.getReceiversCc());
processDefineMapper.updateById(childDefinition);
}
}
/**
     * submit task instance to db
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance){
ExecutionStatus processInstanceState = processInstance.getState();
if(taskInstance.getState().typeIsFailure()){
if(taskInstance.isSubProcess()){
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
}else {
if( processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE){
// failure task set invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
                    // create a new task instance
if(taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE){
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
}
taskInstance.setSubmitTime(null);
taskInstance.setStartTime(null);
taskInstance.setEndTime(null);
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
if (taskInstance.getSubmitTime() == null) {
taskInstance.setSubmitTime(new Date());
}
if (taskInstance.getFirstSubmitTime() == null) {
taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
}
boolean saveResult = saveTaskInstance(taskInstance);
if(!saveResult){
return null;
}
return taskInstance;
}
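    // Illustrative retry example (hypothetical instance): a FAILURE task in a RUNNING process (i.e. not
    // READY_STOP/READY_PAUSE) has its old row flagged invalid, its retryTimes incremented (unless the state is
    // NEED_FAULT_TOLERANCE), and is re-inserted as a fresh row (id 0) whose submit state comes from
    // getSubmitTaskState below.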
/**
     * ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskInstanceId}_${workerGroup}
* The tasks with the highest priority are selected by comparing the priorities of the above four levels from high to low.
* @param taskInstance taskInstance
* @return task zk queue str
*/
public String taskZkInfo(TaskInstance taskInstance) {
String taskWorkerGroup = getTaskWorkerGroup(taskInstance);
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
if(processInstance == null){
logger.error("process instance is null. please check the task info, task id: " + taskInstance.getId());
return "";
}
StringBuilder sb = new StringBuilder(100);
sb.append(processInstance.getProcessInstancePriority().ordinal()).append(Constants.UNDERLINE)
.append(taskInstance.getProcessInstanceId()).append(Constants.UNDERLINE)
.append(taskInstance.getTaskInstancePriority().ordinal()).append(Constants.UNDERLINE)
.append(taskInstance.getId()).append(Constants.UNDERLINE)
.append(taskInstance.getWorkerGroup());
return sb.toString();
}
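    // Illustrative example (hypothetical ids, assuming Constants.UNDERLINE is "_"): a process instance with
    // priority ordinal 1 and id 100, plus a task instance with priority ordinal 2, id 200 and worker group
    // "default", produces the queue string "1_100_2_200_default".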
/**
* get submit task instance state by the work process state
     * the task state cannot be modified when it is running/killed/submitted successfully, or when the
     * task instance already exists in the task queue.
* return pause if work process state is ready pause
* return stop if work process state is ready stop
* if all of above are not satisfied, return submit success
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState){
ExecutionStatus state = taskInstance.getState();
if(
// running, delayed or killed
// the task already exists in task queue
// return state
state == ExecutionStatus.RUNNING_EXECUTION
|| state == ExecutionStatus.DELAY_EXECUTION
|| state == ExecutionStatus.KILL
|| checkTaskExistsInTaskQueue(taskInstance)
){
return state;
}
        // return pause/stop if the process instance state is ready pause/stop
// or return submit success
if( processInstanceState == ExecutionStatus.READY_PAUSE){
state = ExecutionStatus.PAUSE;
}else if(processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
}else{
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
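    // Illustrative decisions (hypothetical states): a task already in RUNNING_EXECUTION/DELAY_EXECUTION/KILL
    // keeps its state; a pending task whose process instance is READY_PAUSE is submitted as PAUSE; one in a
    // READY_STOP process, or rejected by checkProcessStrategy, is submitted as KILL; otherwise it becomes
    // SUBMITTED_SUCCESS.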
/**
* check process instance strategy
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance){
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if(failureStrategy == FailureStrategy.CONTINUE){
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for(TaskInstance task : taskInstances){
if(task.getState() == ExecutionStatus.FAILURE){
return false;
}
}
return true;
}
/**
     * check whether the task instance exists in the task queue
     * @param taskInstance taskInstance
     * @return whether the task instance exists in the queue
*/
public boolean checkTaskExistsInTaskQueue(TaskInstance taskInstance){
if(taskInstance.isSubProcess()){
return false;
}
String taskZkInfo = taskZkInfo(taskInstance);
return false;
}
/**
* create a new process instance
* @param processInstance processInstance
*/
public void createProcessInstance(ProcessInstance processInstance){
if (processInstance != null){
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update work process instance to data base
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance){
if (processInstance == null){
logger.error("save error, process instance is null!");
return ;
}
if(processInstance.getId() != 0){
processInstanceMapper.updateById(processInstance);
}else{
createProcessInstance(processInstance);
}
}
/**
* insert or update command
* @param command command
* @return save command result
*/
public int saveCommand(Command command){
if(command.getId() != 0){
return commandMapper.updateById(command);
}else{
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance){
if(taskInstance.getId() != 0){
return updateTaskInstance(taskInstance);
}else{
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance){
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* delete a command by id
* @param id id
*/
public void delCommandByid(int id) {
commandMapper.deleteById(id);
}
/**
* find task instance by id
* @param taskId task id
     * @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId){
return taskInstanceMapper.selectById(taskId);
}
/**
     * package task instance, associate processInstance and processDefine
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId){
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if(taskInstance == null){
return taskInstance;
}
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefineById(taskInstance.getProcessDefinitionId());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
return taskInstance;
}
/**
* get id list by task state
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state){
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process definition id
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId){
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId){
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap){
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap){
Integer count = 0;
if(processInstanceMap !=null){
return processInstanceMapMapper.insert(processInstanceMap);
}
return count;
}
/**
* find work process map by parent process id and parent task id.
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId){
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId){
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId){
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if(processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0){
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if(processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0){
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
* @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance){
return processInstanceMapper.updateById(processInstance);
}
/**
* update the process instance
* @param processInstanceId processInstanceId
* @param processJson processJson
* @param globalParams globalParams
* @param scheduleTime scheduleTime
* @param flag flag
* @param locations locations
* @param connects connects
* @return update process instance result
*/
public int updateProcessInstance(Integer processInstanceId, String processJson,
String globalParams, Date scheduleTime, Flag flag,
String locations, String connects){
ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);
if(processInstance!= null){
processInstance.setProcessInstanceJson(processJson);
processInstance.setGlobalParams(globalParams);
processInstance.setScheduleTime(scheduleTime);
processInstance.setLocations(locations);
processInstance.setConnects(connects);
return processInstanceMapper.updateById(processInstance);
}
return 0;
}
/**
* change task state
* @param state state
* @param endTime endTime
* @param taskInstId taskInstId
* @param varPool varPool
*/
public void changeTaskState(ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId,
String varPool) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
taskInstance.setVarPool(varPool);
saveTaskInstance(taskInstance);
}
/**
* convert integer list to string list
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList){
if(intList == null){
return new ArrayList<>();
}
List<String> result = new ArrayList<String>(intList.size());
for(Integer intVar : intList){
result.add(String.valueOf(intVar));
}
return result;
}
/**
* update pid and app links field by task instance id
* @param taskInstId taskInstId
* @param pid pid
* @param appLinks appLinks
*/
public void updatePidByTaskInstId(int taskInstId, int pid,String appLinks) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setPid(pid);
taskInstance.setAppLink(appLinks);
saveTaskInstance(taskInstance);
}
/**
* query schedule by id
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionId
* @param processDefinitionId processDefinitionId
     * @return schedule list
     * @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionId(int processDefinitionId) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
}
/**
* query need failover process instance
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host){
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
* @param processInstance processInstance
*/
@Transactional(rollbackFor = RuntimeException.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance){
//1 update processInstance host is null
processInstance.setHost(Constants.NULL);
processInstanceMapper.updateById(processInstance);
//2 insert into recover command
Command cmd = new Command();
cmd.setProcessDefinitionId(processInstance.getProcessDefinitionId());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host){
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id){
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId){
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if(taskInstance!= null){
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
     * find udf function list by id array
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByids(int[] ids){
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName,ResourceType resourceType){
return resourceMapper.queryTenantCodeByResourceName(resName, resourceType.ordinal());
}
/**
* find schedule list by process define id.
* @param ids ids
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineId(int[] ids){
return scheduleMapper.selectAllByProcessDefineArray(
ids);
}
/**
* get dependency cycle by work process define id and scheduler fire time
* @param masterId masterId
* @param processDefinitionId processDefinitionId
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency
* @throws Exception if error throws Exception
*/
public CycleDependency getCycleDependency(int masterId, int processDefinitionId, Date scheduledFireTime) throws Exception {
List<CycleDependency> list = getCycleDependencies(masterId,new int[]{processDefinitionId},scheduledFireTime);
return list.size()>0 ? list.get(0) : null;
}
/**
* get dependency cycle list by work process define id list and scheduler fire time
* @param masterId masterId
* @param ids ids
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency list
* @throws Exception if error throws Exception
*/
public List<CycleDependency> getCycleDependencies(int masterId,int[] ids,Date scheduledFireTime) throws Exception {
List<CycleDependency> cycleDependencyList = new ArrayList<CycleDependency>();
if (ids == null || ids.length == 0) {
logger.warn("ids[] is empty!is invalid!");
return cycleDependencyList;
}
if(scheduledFireTime == null){
logger.warn("scheduledFireTime is null!is invalid!");
return cycleDependencyList;
}
String strCrontab = "";
CronExpression depCronExpression;
Cron depCron;
List<Date> list;
List<Schedule> schedules = this.selectAllByProcessDefineId(ids);
// for all scheduling information
for(Schedule depSchedule:schedules){
strCrontab = depSchedule.getCrontab();
depCronExpression = CronUtils.parse2CronExpression(strCrontab);
depCron = CronUtils.parse2Cron(strCrontab);
CycleEnum cycleEnum = CronUtils.getMiniCycle(depCron);
if(cycleEnum == null){
logger.error("{} is not valid",strCrontab);
continue;
}
Calendar calendar = Calendar.getInstance();
switch (cycleEnum){
/*case MINUTE:
calendar.add(Calendar.MINUTE,-61);*/
case HOUR:
calendar.add(Calendar.HOUR,-25);
break;
case DAY:
calendar.add(Calendar.DATE,-32);
break;
case WEEK:
calendar.add(Calendar.DATE,-32);
break;
case MONTH:
calendar.add(Calendar.MONTH,-13);
break;
default:
logger.warn("Dependent process definition's cycleEnum is {},not support!!", cycleEnum.name());
continue;
}
Date start = calendar.getTime();
if(depSchedule.getProcessDefinitionId() == masterId){
list = CronUtils.getSelfFireDateList(start, scheduledFireTime, depCronExpression);
}else {
list = CronUtils.getFireDateList(start, scheduledFireTime, depCronExpression);
}
if(list.size()>=1){
start = list.get(list.size()-1);
CycleDependency dependency = new CycleDependency(depSchedule.getProcessDefinitionId(),start, CronUtils.getExpirationTime(start, cycleEnum), cycleEnum);
cycleDependencyList.add(dependency);
}
}
return cycleDependencyList;
}
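    // Illustrative look-back windows derived from the switch above (hypothetical schedules): an hourly dependent
    // schedule is scanned from 25 hours before the scheduled fire time, a daily or weekly one from 32 days
    // before, and a monthly one from 13 months before; the latest fire date found in that window becomes the
    // CycleDependency start time.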
/**
* find last scheduler process instance in the date interval
* @param definitionId definitionId
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
* @param definitionId process definition id
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
* @param definitionId process definition id
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(int definitionId, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionId,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId){
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if(processInstance == null){
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if(executor != null){
queue = executor.getQueue();
}
return queue;
}
/**
* get task worker group
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if(StringUtils.isNotBlank(workerGroup)){
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if(processInstance != null){
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
/**
* get have perm project list
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId){
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if(createProjects == null){
createProjects = new ArrayList<>();
}
if(authedProjects != null){
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
* get have perm project ids
* @param userId userId
* @return project ids
*/
public List<Integer> getProjectIdListHavePerm(int userId){
List<Integer> projectIdList = new ArrayList<>();
for(Project project : getProjectListHavePerm(userId)){
projectIdList.add(project.getId());
}
return projectIdList;
}
/**
     * list unauthorized entities of the given authorization type
     * @param userId user id
     * @param needChecks the ids or names that need to be checked
     * @param authorizationType authorization type
     * @return unauthorized entity list
*/
public <T> List<T> listUnauthorized(int userId,T[] needChecks,AuthorizationType authorizationType){
List<T> resultList = new ArrayList<T>();
if (Objects.nonNull(needChecks) && needChecks.length > 0) {
Set<T> originResSet = new HashSet<T>(Arrays.asList(needChecks));
switch (authorizationType){
case RESOURCE_FILE_ID:
Set<Integer> authorizedResourceFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
Set<String> authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getFullName()).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case UDF_FILE:
Set<Integer> authorizedUdfFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedUdfFiles);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId,needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
/**
* get user by user id
* @param userId user id
* @return User
*/
public User getUserById(int userId){
return userMapper.selectById(userId);
}
/**
     * get resource by resource id
     * @param resourceId resource id
     * @return Resource
     */
    public Resource getResourceById(int resourceId){
        return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds){
return resourceMapper.listResourceByIds(resIds);
}
/**
* format task app id in task instance
     * @param taskInstance taskInstance
     * @return formatted task app id: ${definitionId}_${processInstanceId}_${taskInstanceId}
*/
public String formatTaskAppId(TaskInstance taskInstance){
ProcessDefinition definition = this.findProcessDefineById(taskInstance.getProcessDefinitionId());
ProcessInstance processInstanceById = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
if(definition == null || processInstanceById == null){
return "";
}
return String.format("%s_%s_%s",
definition.getId(),
processInstanceById.getId(),
taskInstance.getId());
}
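    // Illustrative example (hypothetical ids): definition id 1, process instance id 10 and task instance id 100
    // yield the app id "1_10_100"; an empty string is returned if either lookup fails.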
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,617 |
[Bug][master] After subtask fault tolerance, 2 task instances are generated, and the process instance status is always executing
|
Sub-process fault tolerance, two commands:
1. run sub_process workflow
2. stop master
3. start master, 2 task instances are generated


The master log is as follows

The worker log is as follows

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3617
|
https://github.com/apache/dolphinscheduler/pull/3873
|
c4be3b57493fe75f5a5dbb9f258a0430d0363cc6
|
39411ce03b864bc770da220ad6f81df47bd2487b
| 2020-08-27T09:48:06Z |
java
| 2020-10-10T07:05:56Z |
dolphinscheduler-service/src/test/java/org/apache/dolphinscheduler/service/process/ProcessServiceTest.java
| |
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,617 |
[Bug][master] After subtask fault tolerance, 2 task instances are generated, and the process instance status is always executing
|
Sub-process fault tolerance, two commands:
1. run sub_process workflow
2. stop master
3. start master, 2 task instances are generated


The master log is as follows

The worker log is as follows

**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3617
|
https://github.com/apache/dolphinscheduler/pull/3873
|
c4be3b57493fe75f5a5dbb9f258a0430d0363cc6
|
39411ce03b864bc770da220ad6f81df47bd2487b
| 2020-08-27T09:48:06Z |
java
| 2020-10-10T07:05:56Z |
pom.xml
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.3.2-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>[email protected]</post>
<subscribe>[email protected]</subscribe>
<unsubscribe>[email protected]</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.5.RELEASE</spring.version>
<spring.boot.version>2.1.3.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.2.3</quartz.version>
<jackson.version>2.9.8</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.6</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.7.0</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>20.0</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<jsp-2.1.version>6.1.14</jsp-2.1.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<apache.rat.version>0.13</apache.rat.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<guava-retry.version>2.0.0</guava-retry.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-plugin-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<!-- for api module -->
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>${jsp-2.1.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/template/AlertTemplateFactoryTest.java</include>
<include>**/alert/template/impl/DefaultHTMLTemplateTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/ExcelUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/utils/JSONUtilsTest.java</include>
<!--<include>**/alert/utils/MailUtilsTest.java</include>-->
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
<include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/PasswordAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessDefinitionVersionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/process/ProcessBuilderForWin32Test.java</include>
<include>**/common/utils/process/ProcessEnvironmentForWin32Test.java</include>
<include>**/common/utils/process/ProcessImplForWin32Test.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/FilePluginManagerTest</include>
<include>**/common/plugin/PluginClassLoaderTest</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/JsonSerializerTest.java</include>
<include>**/remote/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/remote/NettyUtilTest.java</include>
<include>**/remote/ResponseFutureTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/HostTest.java</include>
<!--<include>**/server/utils/FlinkArgsUtilsTest.java</include>-->
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<!--<include>**/server/worker/task/datax/DataxTaskTest.java</include>-->
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/zk/CuratorZookeeperClientTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
<!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionVersionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserAlertGroupMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/model/AlertDataTest.java</include>
<include>**/plugin/model/AlertInfoTest.java</include>
<include>**/plugin/utils/PropertyUtilsTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<version>${apache.rat.version}</version>
<configuration>
<excludeSubProjects>false</excludeSubProjects>
<addDefaultLicenseMatchers>false</addDefaultLicenseMatchers>
<licenses>
<license implementation="org.apache.rat.analysis.license.SimplePatternBasedLicense">
<licenseFamilyCategory>AL20</licenseFamilyCategory>
<licenseFamilyName>Apache License, 2.0</licenseFamilyName>
<patterns>
<pattern>Licensed to the Apache Software Foundation (ASF)</pattern>
</patterns>
</license>
</licenses>
<licenseFamilies>
<licenseFamily implementation="org.apache.rat.license.SimpleLicenseFamily">
<familyName>Apache License, 2.0</familyName>
</licenseFamily>
</licenseFamilies>
<excludes>
<exclude>**/node_modules/**</exclude>
<exclude>**/node/**</exclude>
<exclude>**/dist/**</exclude>
<exclude>**/licenses/**</exclude>
<exclude>.github/**</exclude>
<exclude>**/sql/soft_version</exclude>
<exclude>**/common/utils/ScriptRunner.java</exclude>
<exclude>**/*.json</exclude>
<!-- document files -->
<exclude>**/*.md</exclude>
<exclude>**/*.MD</exclude>
<exclude>**/*.txt</exclude>
<exclude>**/docs/**</exclude>
<exclude>**/*.babelrc</exclude>
<exclude>**/*.eslintrc</exclude>
<exclude>**/.mvn/jvm.config</exclude>
<exclude>**/.mvn/wrapper/**</exclude>
</excludes>
<consoleOutput>true</consoleOutput>
</configuration>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-plugin-api</module>
<module>dolphinscheduler-microbench</module>
</modules>
</project>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,573 |
[Bug][dao] potential horizontal unauthorized access
|
## Description
In the file `dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml` there is a statement called `queryLastRunningProcess`, and the SQL on line 12 may cause horizontal unauthorized access, because the `process_definition_id` filter is only applied inside the time-range `<if>` block.
If a caller does not notice this and passes a `startTime` or `endTime` that is `null`, the query can return process instances of other process definitions, i.e. other users' process instances.
```
1. <select id="queryLastRunningProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
2. select *
3. from t_ds_process_instance
4. where 1=1
5. <if test="states !=null and states.length != 0">
6. and state in
7. <foreach collection="states" item="i" index="index" open="(" separator="," close=")">
8. #{i}
9. </foreach>
10. </if>
11. <if test="startTime!=null and endTime != null ">
12. and process_definition_id=#{processDefinitionId}
13. and (schedule_time <![CDATA[ >= ]]> #{startTime} and schedule_time <![CDATA[ <= ]]> #{endTime}
14. or start_time <![CDATA[ >= ]]> #{startTime} and start_time <![CDATA[ <= ]]> #{endTime})
15. </if>
16. order by start_time desc limit 1
17.</select>
```
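A defensive caller-side guard illustrates the point. This is only a sketch (it is not the change made in PR 3880, and the mapper method signature shown here is an assumption), showing how a service layer could refuse to run the query unless both time bounds are present, so that the `process_definition_id` predicate is guaranteed to apply:
```java
import java.util.Date;

import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;

public class LastRunningProcessQuery {

    private final ProcessInstanceMapper mapper;

    public LastRunningProcessQuery(ProcessInstanceMapper mapper) {
        this.mapper = mapper;
    }

    // Guarded lookup: if either bound is null the <if> block in the XML is skipped
    // and the process_definition_id filter silently disappears, so fail fast instead.
    public ProcessInstance findLastRunningProcess(int processDefinitionId,
                                                  Date startTime,
                                                  Date endTime,
                                                  int[] states) {
        if (startTime == null || endTime == null) {
            throw new IllegalArgumentException(
                "startTime and endTime are required to scope the query to one process definition");
        }
        // Assumed signature; the real mapper interface may differ.
        return mapper.queryLastRunningProcess(processDefinitionId, startTime, endTime, states);
    }
}
```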
|
https://github.com/apache/dolphinscheduler/issues/3573
|
https://github.com/apache/dolphinscheduler/pull/3880
|
39411ce03b864bc770da220ad6f81df47bd2487b
|
9de7d3c7725588393b4e90bef28697dda4b20f5d
| 2020-08-22T09:34:59Z |
java
| 2020-10-12T02:23:44Z |
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml
|
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper">
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select inst.*
from t_ds_process_instance inst
where inst.id = #{processId}
</select>
<select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where 1=1
<if test="host != null and host != ''">
and host=#{host}
</if>
and state in
<foreach collection="states" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
order by id asc
</select>
<select id="queryTopNProcessInstance" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where state = #{status}
and start_time between
#{startTime} and #{endTime}
order by end_time-start_time desc
limit #{size}
</select>
<select id="queryByTenantIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where 1=1
<if test="tenantId != -1">
and tenant_id =#{tenantId}
</if>
and state in
<foreach collection="states" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
order by id asc
</select>
<select id="queryByWorkerGroupIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where 1=1
<if test="workerGroupId != -1">
and worker_group_id =#{workerGroupId}
</if>
and state in
<foreach collection="states" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
order by id asc
</select>
<select id="queryProcessInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select instance.*
from t_ds_process_instance instance
join t_ds_process_definition define ON instance.process_definition_id = define.id
where 1=1
and instance.is_sub_process=0
and define.project_id = #{projectId}
<if test="processDefinitionId != 0">
and instance.process_definition_id = #{processDefinitionId}
</if>
<if test="searchVal != null and searchVal != ''">
and instance.name like concat('%', #{searchVal}, '%')
</if>
<if test="startTime != null ">
and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <=]]> #{endTime}
</if>
<if test="states != null and states != ''">
and instance.state in
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
#{i}
</foreach>
</if>
<if test="host != null and host != ''">
and instance.host like concat('%', #{host}, '%')
</if>
<if test="executorId != 0">
and instance.executor_id = #{executorId}
</if>
order by instance.start_time desc
</select>
<update id="setFailoverByHostAndStateArray">
update t_ds_process_instance
set host=null
where host =#{host} and state in
<foreach collection="states" index="index" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</update>
<update id="updateProcessInstanceByState">
update t_ds_process_instance
set state = #{destState}
where state = #{originState}
</update>
<update id="updateProcessInstanceByTenantId">
update t_ds_process_instance
set tenant_id = #{destTenantId}
where tenant_id = #{originTenantId}
</update>
<update id="updateProcessInstanceByWorkerGroupId">
update t_ds_process_instance
set worker_group_id = #{destWorkerGroupId}
where worker_group_id = #{originWorkerGroupId}
</update>
<select id="countInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount">
select t.state, count(0) as count
from t_ds_process_instance t
join t_ds_process_definition d on d.id=t.process_definition_id
join t_ds_project p on p.id=d.project_id
where 1 = 1
and t.is_sub_process = 0
<if test="startTime != null and endTime != null">
and t.start_time <![CDATA[ >= ]]> #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime}
</if>
<if test="projectIds != null and projectIds.length != 0">
and p.id in
<foreach collection="projectIds" index="index" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
</if>
group by t.state
</select>
<select id="queryByProcessDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where process_definition_id=#{processDefinitionId}
order by start_time desc limit #{size}
</select>
<select id="queryLastSchedulerProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where process_definition_id=#{processDefinitionId}
<if test="startTime!=null and endTime != null ">
and schedule_time <![CDATA[ >= ]]> #{startTime} and schedule_time <![CDATA[ <= ]]> #{endTime}
</if>
order by end_time desc limit 1
</select>
<select id="queryLastRunningProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where 1=1
<if test="states !=null and states.length != 0">
and state in
<foreach collection="states" item="i" index="index" open="(" separator="," close=")">
#{i}
</foreach>
</if>
<if test="startTime!=null and endTime != null ">
and process_definition_id=#{processDefinitionId}
and (schedule_time <![CDATA[ >= ]]> #{startTime} and schedule_time <![CDATA[ <= ]]> #{endTime}
or start_time <![CDATA[ >= ]]> #{startTime} and start_time <![CDATA[ <= ]]> #{endTime})
</if>
order by start_time desc limit 1
</select>
<select id="queryLastManualProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where process_definition_id=#{processDefinitionId}
and schedule_time is null
<if test="startTime!=null and endTime != null ">
and start_time <![CDATA[ >= ]]> #{startTime} and start_time <![CDATA[ <= ]]> #{endTime}
</if>
order by end_time desc limit 1
</select>
<select id="queryByProcessDefineIdAndStatus"
resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
select *
from t_ds_process_instance
where process_definition_id=#{processDefinitionId}
and state in
<foreach collection="states" item="i" open="(" close=")" separator=",">
#{i}
</foreach>
order by id asc
</select>
</mapper>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,789 |
[Bug][remote] channel time out
|
**Describe the bug**
Under certain network conditions, when the master submits a task, Netty communication fails and the task information never reaches the worker; after a long wait a timeout exception appears, and the same behavior recurs again a while later.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Manually run a process
2. The process stays in the running state, with every task stuck in the submitted (gray dot) state
3. After a long time the master node throws a timeout exception
4. The worker never receives the message from the master
**Expected behavior**
In the send method, the channel's active state is checked when it is fetched. The suspicion is that a channel reported as active here still cannot deliver data to the worker; after that channel finally errors out, the newly created channel works for a short while, but the problem reappears again later.
**Screenshots**
Screenshots cannot be taken in the company environment.
**Which version of Dolphin Scheduler:**
-[1.3.1]
-[1.3.2]
**Additional context**
Different network environments may behave differently: some users' test clusters never show the exception while their production clusters do. My own production environment has not gone live for testing yet; in the test environment the issue reproduces roughly every half hour.
**Requirement or improvement**
- Hope this issue can be fixed as soon as possible; it seriously affects scheduling.
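One possible mitigation, shown here only as a sketch and not as the actual change from PR 3913, is to stop trusting `isActive()` alone: when a send fails, evict the cached channel and retry once over a freshly created connection, using only the `send` and `closeChannel` methods that `NettyRemotingClient` (reproduced below) already exposes:
```java
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.exceptions.RemotingException;
import org.apache.dolphinscheduler.remote.utils.Host;

public class ResilientSender {

    private final NettyRemotingClient client;

    public ResilientSender(NettyRemotingClient client) {
        this.client = client;
    }

    // Send a command; if the cached channel turns out to be unusable even though it
    // reported itself as active, drop it and retry once over a new connection.
    public void sendWithRetry(Host host, Command command) throws RemotingException {
        try {
            client.send(host, command);
        } catch (RemotingException firstFailure) {
            // Evict the possibly half-dead channel so getChannel() reconnects.
            client.closeChannel(host);
            client.send(host, command);
        }
    }
}
```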
|
https://github.com/apache/dolphinscheduler/issues/3789
|
https://github.com/apache/dolphinscheduler/pull/3913
|
e740dc7645eb7c2659a40dd0958af41969aafa00
|
51d476be69287e7c52c53825ea8ed434ed71dc44
| 2020-09-22T13:57:17Z |
java
| 2020-10-15T02:28:05Z |
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/NettyRemotingClient.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import org.apache.dolphinscheduler.remote.codec.NettyDecoder;
import org.apache.dolphinscheduler.remote.codec.NettyEncoder;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.config.NettyClientConfig;
import org.apache.dolphinscheduler.remote.exceptions.RemotingException;
import org.apache.dolphinscheduler.remote.exceptions.RemotingTimeoutException;
import org.apache.dolphinscheduler.remote.exceptions.RemotingTooMuchRequestException;
import org.apache.dolphinscheduler.remote.future.InvokeCallback;
import org.apache.dolphinscheduler.remote.future.ReleaseSemaphore;
import org.apache.dolphinscheduler.remote.future.ResponseFuture;
import org.apache.dolphinscheduler.remote.handler.NettyClientHandler;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.remote.utils.CallerThreadExecutePolicy;
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetSocketAddress;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* remoting netty client
*/
public class NettyRemotingClient {
private final Logger logger = LoggerFactory.getLogger(NettyRemotingClient.class);
/**
* client bootstrap
*/
private final Bootstrap bootstrap = new Bootstrap();
/**
* encoder
*/
private final NettyEncoder encoder = new NettyEncoder();
/**
* channels
*/
private final ConcurrentHashMap<Host, Channel> channels = new ConcurrentHashMap(128);
/**
* started flag
*/
private final AtomicBoolean isStarted = new AtomicBoolean(false);
/**
* worker group
*/
private final NioEventLoopGroup workerGroup;
/**
* client config
*/
private final NettyClientConfig clientConfig;
/**
* async semaphore
*/
private final Semaphore asyncSemaphore = new Semaphore(200, true);
/**
* callback thread executor
*/
private final ExecutorService callbackExecutor;
/**
* client handler
*/
private final NettyClientHandler clientHandler;
/**
* response future executor
*/
private final ScheduledExecutorService responseFutureExecutor;
/**
* client init
* @param clientConfig client config
*/
public NettyRemotingClient(final NettyClientConfig clientConfig){
this.clientConfig = clientConfig;
this.workerGroup = new NioEventLoopGroup(clientConfig.getWorkerThreads(), new ThreadFactory() {
private AtomicInteger threadIndex = new AtomicInteger(0);
@Override
public Thread newThread(Runnable r) {
return new Thread(r, String.format("NettyClient_%d", this.threadIndex.incrementAndGet()));
}
});
this.callbackExecutor = new ThreadPoolExecutor(5, 10, 1, TimeUnit.MINUTES,
new LinkedBlockingQueue<>(1000), new NamedThreadFactory("CallbackExecutor", 10),
new CallerThreadExecutePolicy());
this.clientHandler = new NettyClientHandler(this, callbackExecutor);
this.responseFutureExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("ResponseFutureExecutor"));
this.start();
}
/**
* start
*/
private void start(){
this.bootstrap
.group(this.workerGroup)
.channel(NioSocketChannel.class)
.option(ChannelOption.SO_KEEPALIVE, clientConfig.isSoKeepalive())
.option(ChannelOption.TCP_NODELAY, clientConfig.isTcpNoDelay())
.option(ChannelOption.SO_SNDBUF, clientConfig.getSendBufferSize())
.option(ChannelOption.SO_RCVBUF, clientConfig.getReceiveBufferSize())
.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast(
new NettyDecoder(),
clientHandler,
encoder);
}
});
this.responseFutureExecutor.scheduleAtFixedRate(new Runnable() {
@Override
public void run() {
ResponseFuture.scanFutureTable();
}
}, 5000, 1000, TimeUnit.MILLISECONDS);
//
isStarted.compareAndSet(false, true);
}
/**
* async send
* @param host host
* @param command command
* @param timeoutMillis timeoutMillis
* @param invokeCallback callback function
* @throws InterruptedException
* @throws RemotingException
*/
public void sendAsync(final Host host, final Command command,
final long timeoutMillis,
final InvokeCallback invokeCallback) throws InterruptedException, RemotingException {
final Channel channel = getChannel(host);
if (channel == null) {
throw new RemotingException("network error");
}
/**
* request unique identification
*/
final long opaque = command.getOpaque();
/**
* control concurrency number
*/
boolean acquired = this.asyncSemaphore.tryAcquire(timeoutMillis, TimeUnit.MILLISECONDS);
if(acquired){
final ReleaseSemaphore releaseSemaphore = new ReleaseSemaphore(this.asyncSemaphore);
/**
* response future
*/
final ResponseFuture responseFuture = new ResponseFuture(opaque,
timeoutMillis,
invokeCallback,
releaseSemaphore);
try {
channel.writeAndFlush(command).addListener(new ChannelFutureListener(){
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if(future.isSuccess()){
responseFuture.setSendOk(true);
return;
} else {
responseFuture.setSendOk(false);
}
responseFuture.setCause(future.cause());
responseFuture.putResponse(null);
try {
responseFuture.executeInvokeCallback();
} catch (Throwable ex){
logger.error("execute callback error", ex);
} finally{
responseFuture.release();
}
}
});
} catch (Throwable ex){
responseFuture.release();
throw new RemotingException(String.format("send command to host: %s failed", host), ex);
}
} else{
String message = String.format("try to acquire async semaphore timeout: %d, waiting thread num: %d, total permits: %d",
timeoutMillis, asyncSemaphore.getQueueLength(), asyncSemaphore.availablePermits());
throw new RemotingTooMuchRequestException(message);
}
}
/**
* sync send
* @param host host
* @param command command
* @param timeoutMillis timeoutMillis
* @return command
* @throws InterruptedException
* @throws RemotingException
*/
public Command sendSync(final Host host, final Command command, final long timeoutMillis) throws InterruptedException, RemotingException {
final Channel channel = getChannel(host);
if (channel == null) {
throw new RemotingException(String.format("connect to : %s fail", host));
}
final long opaque = command.getOpaque();
final ResponseFuture responseFuture = new ResponseFuture(opaque, timeoutMillis, null, null);
channel.writeAndFlush(command).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if(future.isSuccess()){
responseFuture.setSendOk(true);
return;
} else {
responseFuture.setSendOk(false);
}
responseFuture.setCause(future.cause());
responseFuture.putResponse(null);
logger.error("send command {} to host {} failed", command, host);
}
});
/**
* sync wait for result
*/
Command result = responseFuture.waitResponse();
if(result == null){
if(responseFuture.isSendOK()){
throw new RemotingTimeoutException(host.toString(), timeoutMillis, responseFuture.getCause());
} else{
throw new RemotingException(host.toString(), responseFuture.getCause());
}
}
return result;
}
/**
* send task
* @param host host
* @param command command
* @throws RemotingException
*/
public void send(final Host host, final Command command) throws RemotingException {
Channel channel = getChannel(host);
if (channel == null) {
throw new RemotingException(String.format("connect to : %s fail", host));
}
try {
ChannelFuture future = channel.writeAndFlush(command).await();
if (future.isSuccess()) {
logger.debug("send command : {} , to : {} successfully.", command, host.getAddress());
} else {
String msg = String.format("send command : %s , to :%s failed", command, host.getAddress());
logger.error(msg, future.cause());
throw new RemotingException(msg);
}
} catch (Exception e) {
logger.error("Send command {} to address {} encounter error.", command, host.getAddress());
throw new RemotingException(String.format("Send command : %s , to :%s encounter error", command, host.getAddress()), e);
}
}
/**
* register processor
* @param commandType command type
* @param processor processor
*/
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor) {
this.registerProcessor(commandType, processor, null);
}
/**
* register processor
*
* @param commandType command type
* @param processor processor
* @param executor thread executor
*/
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor, final ExecutorService executor) {
this.clientHandler.registerProcessor(commandType, processor, executor);
}
/**
* get channel
* @param host
* @return
*/
public Channel getChannel(Host host) {
Channel channel = channels.get(host);
if(channel != null && channel.isActive()){
return channel;
}
return createChannel(host, true);
}
/**
* create channel
* @param host host
* @param isSync sync flag
* @return channel
*/
public Channel createChannel(Host host, boolean isSync) {
ChannelFuture future;
try {
synchronized (bootstrap){
future = bootstrap.connect(new InetSocketAddress(host.getIp(), host.getPort()));
}
if(isSync){
future.sync();
}
if (future.isSuccess()) {
Channel channel = future.channel();
channels.put(host, channel);
return channel;
}
} catch (Exception ex) {
logger.warn(String.format("connect to %s error", host), ex);
}
return null;
}
/**
* close
*/
public void close() {
if(isStarted.compareAndSet(true, false)){
try {
closeChannels();
if(workerGroup != null){
this.workerGroup.shutdownGracefully();
}
if(callbackExecutor != null){
this.callbackExecutor.shutdownNow();
}
if(this.responseFutureExecutor != null){
this.responseFutureExecutor.shutdownNow();
}
} catch (Exception ex) {
logger.error("netty client close exception", ex);
}
logger.info("netty client closed");
}
}
/**
* close channels
*/
private void closeChannels(){
for (Channel channel : this.channels.values()) {
channel.close();
}
this.channels.clear();
}
/**
* close channel
* @param host host
*/
public void closeChannel(Host host){
Channel channel = this.channels.remove(host);
if(channel != null){
channel.close();
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,789 |
[Bug][remote] channel time out
|
**Describe the bug**
Under certain network conditions, when the master submits a task, Netty communication fails and the task information never reaches the worker; after a long wait a timeout exception appears, and the same behavior recurs again a while later.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Manually run a process
2. The process stays in the running state, with every task stuck in the submitted (gray dot) state
3. After a long time the master node throws a timeout exception
4. The worker never receives the message from the master
**Expected behavior**
In the send method, the channel's active state is checked when it is fetched. The suspicion is that a channel reported as active here still cannot deliver data to the worker; after that channel finally errors out, the newly created channel works for a short while, but the problem reappears again later.
**Screenshots**
Screenshots cannot be taken in the company environment.
**Which version of Dolphin Scheduler:**
-[1.3.1]
-[1.3.2]
**Additional context**
Different network environments may behave differently: some users' test clusters never show the exception while their production clusters do. My own production environment has not gone live for testing yet; in the test environment the issue reproduces roughly every half hour.
**Requirement or improvement**
- Hope this issue can be fixed as soon as possible; it seriously affects scheduling.
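Another commonly used mitigation for silently dead connections, again only an illustrative sketch rather than the project's actual fix, is to add Netty's `IdleStateHandler` to the client pipeline so that a connection with no traffic for a while is closed proactively and recreated on the next send:
```java
import java.util.concurrent.TimeUnit;

import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.handler.timeout.IdleStateHandler;

public class IdleAwareChannelInitializer extends ChannelInitializer<SocketChannel> {

    @Override
    protected void initChannel(SocketChannel ch) {
        ch.pipeline()
          // Fire an IdleStateEvent if nothing is read or written for 60 seconds.
          .addLast(new IdleStateHandler(0, 0, 60, TimeUnit.SECONDS))
          .addLast(new ChannelDuplexHandler() {
              @Override
              public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
                  if (evt instanceof IdleStateEvent) {
                      // Close the idle channel; the client reconnects lazily on the next send.
                      ctx.close();
                  } else {
                      super.userEventTriggered(ctx, evt);
                  }
              }
          });
        // The decoder, encoder and client handler used by NettyRemotingClient would be added here as well.
    }
}
```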
|
https://github.com/apache/dolphinscheduler/issues/3789
|
https://github.com/apache/dolphinscheduler/pull/3913
|
e740dc7645eb7c2659a40dd0958af41969aafa00
|
51d476be69287e7c52c53825ea8ed434ed71dc44
| 2020-09-22T13:57:17Z |
java
| 2020-10-15T02:28:05Z |
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/NettyRemotingServer.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import org.apache.dolphinscheduler.remote.codec.NettyDecoder;
import org.apache.dolphinscheduler.remote.codec.NettyEncoder;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.config.NettyServerConfig;
import org.apache.dolphinscheduler.remote.handler.NettyServerHandler;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.remote.utils.Constants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
* remoting netty server
*/
public class NettyRemotingServer {
private final Logger logger = LoggerFactory.getLogger(NettyRemotingServer.class);
/**
* server bootstrap
*/
private final ServerBootstrap serverBootstrap = new ServerBootstrap();
/**
* encoder
*/
private final NettyEncoder encoder = new NettyEncoder();
/**
* default executor
*/
private final ExecutorService defaultExecutor = Executors.newFixedThreadPool(Constants.CPUS);
/**
* boss group
*/
private final NioEventLoopGroup bossGroup;
/**
* worker group
*/
private final NioEventLoopGroup workGroup;
/**
* server config
*/
private final NettyServerConfig serverConfig;
/**
* server handler
*/
private final NettyServerHandler serverHandler = new NettyServerHandler(this);
/**
* started flag
*/
private final AtomicBoolean isStarted = new AtomicBoolean(false);
/**
* server init
*
* @param serverConfig server config
*/
public NettyRemotingServer(final NettyServerConfig serverConfig){
this.serverConfig = serverConfig;
this.bossGroup = new NioEventLoopGroup(1, new ThreadFactory() {
private AtomicInteger threadIndex = new AtomicInteger(0);
@Override
public Thread newThread(Runnable r) {
return new Thread(r, String.format("NettyServerBossThread_%d", this.threadIndex.incrementAndGet()));
}
});
this.workGroup = new NioEventLoopGroup(serverConfig.getWorkerThread(), new ThreadFactory() {
private AtomicInteger threadIndex = new AtomicInteger(0);
@Override
public Thread newThread(Runnable r) {
return new Thread(r, String.format("NettyServerWorkerThread_%d", this.threadIndex.incrementAndGet()));
}
});
}
/**
* server start
*/
public void start(){
if(this.isStarted.get()){
return;
}
this.serverBootstrap
.group(this.bossGroup, this.workGroup)
.channel(NioServerSocketChannel.class)
.option(ChannelOption.SO_REUSEADDR, true)
.option(ChannelOption.SO_BACKLOG, serverConfig.getSoBacklog())
.childOption(ChannelOption.SO_KEEPALIVE, serverConfig.isSoKeepalive())
.childOption(ChannelOption.TCP_NODELAY, serverConfig.isTcpNoDelay())
.childOption(ChannelOption.SO_SNDBUF, serverConfig.getSendBufferSize())
.childOption(ChannelOption.SO_RCVBUF, serverConfig.getReceiveBufferSize())
.childHandler(new ChannelInitializer<NioSocketChannel>() {
@Override
protected void initChannel(NioSocketChannel ch) throws Exception {
initNettyChannel(ch);
}
});
ChannelFuture future;
try {
future = serverBootstrap.bind(serverConfig.getListenPort()).sync();
} catch (Exception e) {
logger.error("NettyRemotingServer bind fail {}, exit",e.getMessage(), e);
throw new RuntimeException(String.format("NettyRemotingServer bind %s fail", serverConfig.getListenPort()));
}
if (future.isSuccess()) {
logger.info("NettyRemotingServer bind success at port : {}", serverConfig.getListenPort());
} else if (future.cause() != null) {
throw new RuntimeException(String.format("NettyRemotingServer bind %s fail", serverConfig.getListenPort()), future.cause());
} else {
throw new RuntimeException(String.format("NettyRemotingServer bind %s fail", serverConfig.getListenPort()));
}
//
isStarted.compareAndSet(false, true);
}
/**
* init netty channel
* @param ch socket channel
* @throws Exception
*/
private void initNettyChannel(NioSocketChannel ch) throws Exception{
ChannelPipeline pipeline = ch.pipeline();
pipeline.addLast("encoder", encoder);
pipeline.addLast("decoder", new NettyDecoder());
pipeline.addLast("handler", serverHandler);
}
/**
* register processor
* @param commandType command type
* @param processor processor
*/
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor) {
this.registerProcessor(commandType, processor, null);
}
/**
* register processor
*
* @param commandType command type
* @param processor processor
* @param executor thread executor
*/
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor, final ExecutorService executor) {
this.serverHandler.registerProcessor(commandType, processor, executor);
}
/**
* get default thread executor
* @return thread executor
*/
public ExecutorService getDefaultExecutor() {
return defaultExecutor;
}
public void close() {
if(isStarted.compareAndSet(true, false)){
try {
if(bossGroup != null){
this.bossGroup.shutdownGracefully();
}
if(workGroup != null){
this.workGroup.shutdownGracefully();
}
if(defaultExecutor != null){
defaultExecutor.shutdown();
}
} catch (Exception ex) {
logger.error("netty server close exception", ex);
}
logger.info("netty server closed");
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,789 |
[Bug][remote] channel time out
|
**Describe the bug**
Under certain network conditions, when the master submits a task, Netty communication fails and the task information never reaches the worker; after a long wait a timeout exception appears, and the same behavior recurs again a while later.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Manually run a process
2. The process stays in the running state, with every task stuck in the submitted (gray dot) state
3. After a long time the master node throws a timeout exception
4. The worker never receives the message from the master
**Expected behavior**
In the send method, the channel's active state is checked when it is fetched. The suspicion is that a channel reported as active here still cannot deliver data to the worker; after that channel finally errors out, the newly created channel works for a short while, but the problem reappears again later.
**Screenshots**
Screenshots cannot be taken in the company environment.
**Which version of Dolphin Scheduler:**
-[1.3.1]
-[1.3.2]
**Additional context**
Different network environments may behave differently: some users' test clusters never show the exception while their production clusters do. My own production environment has not gone live for testing yet; in the test environment the issue reproduces roughly every half hour.
**Requirement or improvement**
- Hope this issue can be fixed as soon as possible; it seriously affects scheduling.
|
https://github.com/apache/dolphinscheduler/issues/3789
|
https://github.com/apache/dolphinscheduler/pull/3913
|
e740dc7645eb7c2659a40dd0958af41969aafa00
|
51d476be69287e7c52c53825ea8ed434ed71dc44
| 2020-09-22T13:57:17Z |
java
| 2020-10-15T02:28:05Z |
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/command/CommandType.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
public enum CommandType {
/**
* remove task log request,
*/
REMOVE_TAK_LOG_REQUEST,
/**
* remove task log response
*/
REMOVE_TAK_LOG_RESPONSE,
/**
* roll view log request
*/
ROLL_VIEW_LOG_REQUEST,
/**
* roll view log response
*/
ROLL_VIEW_LOG_RESPONSE,
/**
* view whole log request
*/
VIEW_WHOLE_LOG_REQUEST,
/**
* view whole log response
*/
VIEW_WHOLE_LOG_RESPONSE,
/**
* get log bytes request
*/
GET_LOG_BYTES_REQUEST,
/**
* get log bytes response
*/
GET_LOG_BYTES_RESPONSE,
WORKER_REQUEST,
MASTER_RESPONSE,
/**
* execute task request
*/
TASK_EXECUTE_REQUEST,
/**
* execute task ack
*/
TASK_EXECUTE_ACK,
/**
* execute task response
*/
TASK_EXECUTE_RESPONSE,
/**
* db task ack
*/
DB_TASK_ACK,
/**
* db task response
*/
DB_TASK_RESPONSE,
/**
* kill task
*/
TASK_KILL_REQUEST,
/**
* kill task response
*/
TASK_KILL_RESPONSE,
/**
* ping
*/
PING,
/**
* pong
*/
PONG;
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,789 |
[Bug][remote] channel time out
|
**Describe the bug**
Under certain network conditions, when the master submits a task, Netty communication fails and the task information never reaches the worker; after a long wait a timeout exception appears, and the same behavior recurs again a while later.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Manually run a process
2. The process stays in the running state, with every task stuck in the submitted (gray dot) state
3. After a long time the master node throws a timeout exception
4. The worker never receives the message from the master
**Expected behavior**
In the send method, the channel's active state is checked when it is fetched. The suspicion is that a channel reported as active here still cannot deliver data to the worker; after that channel finally errors out, the newly created channel works for a short while, but the problem reappears again later.
**Screenshots**
Screenshots cannot be taken in the company environment.
**Which version of Dolphin Scheduler:**
-[1.3.1]
-[1.3.2]
**Additional context**
Different network environments may behave differently: some users' test clusters never show the exception while their production clusters do. My own production environment has not gone live for testing yet; in the test environment the issue reproduces roughly every half hour.
**Requirement or improvement**
- Hope this issue can be fixed as soon as possible; it seriously affects scheduling.
|
https://github.com/apache/dolphinscheduler/issues/3789
|
https://github.com/apache/dolphinscheduler/pull/3913
|
e740dc7645eb7c2659a40dd0958af41969aafa00
|
51d476be69287e7c52c53825ea8ed434ed71dc44
| 2020-09-22T13:57:17Z |
java
| 2020-10-15T02:28:05Z |
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/config/NettyClientConfig.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.config;
import org.apache.dolphinscheduler.remote.utils.Constants;
/**
* netty client config
*/
public class NettyClientConfig {
/**
* worker threads, defaults to the number of machine CPUs
*/
private int workerThreads = Constants.CPUS;
/**
* whether to enable TCP_NODELAY
*/
private boolean tcpNoDelay = true;
/**
* whether keep alive
*/
private boolean soKeepalive = true;
/**
* send buffer size
*/
private int sendBufferSize = 65535;
/**
* receive buffer size
*/
private int receiveBufferSize = 65535;
public int getWorkerThreads() {
return workerThreads;
}
public void setWorkerThreads(int workerThreads) {
this.workerThreads = workerThreads;
}
public boolean isTcpNoDelay() {
return tcpNoDelay;
}
public void setTcpNoDelay(boolean tcpNoDelay) {
this.tcpNoDelay = tcpNoDelay;
}
public boolean isSoKeepalive() {
return soKeepalive;
}
public void setSoKeepalive(boolean soKeepalive) {
this.soKeepalive = soKeepalive;
}
public int getSendBufferSize() {
return sendBufferSize;
}
public void setSendBufferSize(int sendBufferSize) {
this.sendBufferSize = sendBufferSize;
}
public int getReceiveBufferSize() {
return receiveBufferSize;
}
public void setReceiveBufferSize(int receiveBufferSize) {
this.receiveBufferSize = receiveBufferSize;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,789 |
[Bug][remote] channel time out
|
**Describe the bug**
Under certain network conditions, when the master submits a task, Netty communication fails and the task information never reaches the worker; after a long wait a timeout exception appears, and the same behavior recurs again a while later.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Manually run a process
2. The process stays in the running state, with every task stuck in the submitted (gray dot) state
3. After a long time the master node throws a timeout exception
4. The worker never receives the message from the master
**Expected behavior**
In the send method, the channel's active state is checked when it is fetched. The suspicion is that a channel reported as active here still cannot deliver data to the worker; after that channel finally errors out, the newly created channel works for a short while, but the problem reappears again later.
**Screenshots**
Screenshots cannot be taken in the company environment.
**Which version of Dolphin Scheduler:**
-[1.3.1]
-[1.3.2]
**Additional context**
Different network environments may behave differently: some users' test clusters never show the exception while their production clusters do. My own production environment has not gone live for testing yet; in the test environment the issue reproduces roughly every half hour.
**Requirement or improvement**
- Hope this issue can be fixed as soon as possible; it seriously affects scheduling.
|
https://github.com/apache/dolphinscheduler/issues/3789
|
https://github.com/apache/dolphinscheduler/pull/3913
|
e740dc7645eb7c2659a40dd0958af41969aafa00
|
51d476be69287e7c52c53825ea8ed434ed71dc44
| 2020-09-22T13:57:17Z |
java
| 2020-10-15T02:28:05Z |
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/handler/NettyClientHandler.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.handler;
import io.netty.channel.*;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.future.ResponseFuture;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.remote.utils.ChannelUtils;
import org.apache.dolphinscheduler.remote.utils.Constants;
import org.apache.dolphinscheduler.remote.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
/**
* netty client request handler
*/
@ChannelHandler.Sharable
public class NettyClientHandler extends ChannelInboundHandlerAdapter {
private final Logger logger = LoggerFactory.getLogger(NettyClientHandler.class);
/**
* netty client
*/
private final NettyRemotingClient nettyRemotingClient;
/**
* callback thread executor
*/
private final ExecutorService callbackExecutor;
/**
* processors
*/
private final ConcurrentHashMap<CommandType, Pair<NettyRequestProcessor, ExecutorService>> processors;
/**
* default executor
*/
private final ExecutorService defaultExecutor = Executors.newFixedThreadPool(Constants.CPUS);
public NettyClientHandler(NettyRemotingClient nettyRemotingClient, ExecutorService callbackExecutor){
this.nettyRemotingClient = nettyRemotingClient;
this.callbackExecutor = callbackExecutor;
this.processors = new ConcurrentHashMap();
}
/**
* When the current channel is not active,
* the current channel has reached the end of its life cycle
*
* @param ctx channel handler context
* @throws Exception
*/
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
nettyRemotingClient.closeChannel(ChannelUtils.toAddress(ctx.channel()));
ctx.channel().close();
}
/**
* The current channel reads data from the remote
*
* @param ctx channel handler context
* @param msg message
* @throws Exception
*/
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
processReceived(ctx.channel(), (Command)msg);
}
/**
* register processor
*
* @param commandType command type
* @param processor processor
*/
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor) {
this.registerProcessor(commandType, processor, null);
}
/**
* register processor
*
* @param commandType command type
* @param processor processor
* @param executor thread executor
*/
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor, final ExecutorService executor) {
ExecutorService executorRef = executor;
if(executorRef == null){
executorRef = defaultExecutor;
}
this.processors.putIfAbsent(commandType, new Pair<>(processor, executorRef));
}
/**
* process received logic
*
* @param command command
*/
private void processReceived(final Channel channel, final Command command) {
ResponseFuture future = ResponseFuture.getFuture(command.getOpaque());
if(future != null){
future.setResponseCommand(command);
future.release();
if(future.getInvokeCallback() != null){
this.callbackExecutor.submit(new Runnable() {
@Override
public void run() {
future.executeInvokeCallback();
}
});
} else{
future.putResponse(command);
}
} else{
processByCommandType(channel, command);
}
}
public void processByCommandType(final Channel channel, final Command command) {
final Pair<NettyRequestProcessor, ExecutorService> pair = processors.get(command.getType());
if (pair != null) {
Runnable run = () -> {
try {
pair.getLeft().process(channel, command);
} catch (Throwable e) {
logger.error(String.format("process command %s exception", command), e);
}
};
try {
pair.getRight().submit(run);
} catch (RejectedExecutionException e) {
logger.warn("thread pool is full, discard command {} from {}", command, ChannelUtils.getRemoteAddress(channel));
}
} else {
logger.warn("receive response {}, but not matched any request ", command);
}
}
/**
* caught exception
* @param ctx channel handler context
* @param cause cause
* @throws Exception
*/
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
logger.error("exceptionCaught : {}", cause);
nettyRemotingClient.closeChannel(ChannelUtils.toAddress(ctx.channel()));
ctx.channel().close();
}
}
|
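For context on the client handler above: processReceived correlates each incoming Command with a pending request through command.getOpaque(), completing the matching ResponseFuture (running its callback on callbackExecutor when one is registered), and only falls back to processByCommandType when nothing matches. A minimal, self-contained sketch of that correlation pattern, using plain JDK types rather than the project's ResponseFuture API, could look like this:

```java
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

// Illustration only: request/response correlation keyed by an opaque id.
public class OpaqueCorrelationSketch {

    // pending requests, analogous to the table behind ResponseFuture.getFuture(opaque)
    private final Map<Long, CompletableFuture<String>> pending = new ConcurrentHashMap<>();

    // sender side: register a future before writing the request to the channel
    public CompletableFuture<String> register(long opaque) {
        CompletableFuture<String> future = new CompletableFuture<>();
        pending.put(opaque, future);
        return future;
    }

    // receiver side: complete the matching future, or fall back to a processor
    public void onReceive(long opaque, String payload) {
        CompletableFuture<String> future = pending.remove(opaque);
        if (future != null) {
            future.complete(payload); // wakes up the synchronous caller / callbacks
        } else {
            System.out.println("unsolicited message, dispatch to a processor: " + payload);
        }
    }

    public static void main(String[] args) throws Exception {
        OpaqueCorrelationSketch sketch = new OpaqueCorrelationSketch();
        CompletableFuture<String> reply = sketch.register(1L);
        sketch.onReceive(1L, "pong");
        System.out.println(reply.get()); // prints "pong"
    }
}
```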
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,789 |
[Bug][remote] channel time out
|
**Describe the bug**
Under certain network conditions, when the master submits a task it cannot communicate over Netty and the task information never reaches the worker. After a long wait a timeout exception appears, and after a while the same thing happens again.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Manually run a workflow
2. The workflow stays in the running state and every task remains in the submitted (gray dot) state
3. After a long time the master node throws a timeout exception
4. The worker never receives the message from the master
**Expected behavior**
In the send method, the channel's active state is checked when the channel is obtained. The suspicion is that a channel reported as active here may still be unable to send data to the worker; once that channel fails and a new one is created, the new channel works briefly, but after a while the problem reappears.
**Screenshots**
Screenshots cannot be taken in the company environment
**Which version of Dolphin Scheduler:**
-[1.3.1]
-[1.3.2]
**Additional context**
Results may differ across network environments: some users' test clusters show no exception while production does. My own production environment has not been tested yet; in the test environment the problem reproduces roughly every half hour.
**Requirement or improvement**
- Please fix this issue as soon as possible; it seriously affects scheduling
|
https://github.com/apache/dolphinscheduler/issues/3789
|
https://github.com/apache/dolphinscheduler/pull/3913
|
e740dc7645eb7c2659a40dd0958af41969aafa00
|
51d476be69287e7c52c53825ea8ed434ed71dc44
| 2020-09-22T13:57:17Z |
java
| 2020-10-15T02:28:05Z |
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/handler/NettyServerHandler.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.handler;
import io.netty.channel.*;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.command.Command;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
import org.apache.dolphinscheduler.remote.utils.ChannelUtils;
import org.apache.dolphinscheduler.remote.utils.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
/**
* netty server request handler
*/
@ChannelHandler.Sharable
public class NettyServerHandler extends ChannelInboundHandlerAdapter {
private final Logger logger = LoggerFactory.getLogger(NettyServerHandler.class);
/**
* netty remote server
*/
private final NettyRemotingServer nettyRemotingServer;
/**
* server processors
*/
private final ConcurrentHashMap<CommandType, Pair<NettyRequestProcessor, ExecutorService>> processors = new ConcurrentHashMap<>();
public NettyServerHandler(NettyRemotingServer nettyRemotingServer){
this.nettyRemotingServer = nettyRemotingServer;
}
/**
* When the current channel becomes inactive,
* it has reached the end of its life cycle
* @param ctx channel handler context
* @throws Exception
*/
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
ctx.channel().close();
}
/**
* The current channel reads data from the remote end
*
* @param ctx channel handler context
* @param msg message
* @throws Exception
*/
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
processReceived(ctx.channel(), (Command)msg);
}
/**
* register processor
*
* @param commandType command type
* @param processor processor
*/
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor) {
this.registerProcessor(commandType, processor, null);
}
/**
* register processor
*
* @param commandType command type
* @param processor processor
* @param executor thread executor
*/
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor, final ExecutorService executor) {
ExecutorService executorRef = executor;
if(executorRef == null){
executorRef = nettyRemotingServer.getDefaultExecutor();
}
this.processors.putIfAbsent(commandType, new Pair<>(processor, executorRef));
}
/**
* process received logic
* @param channel channel
* @param msg message
*/
private void processReceived(final Channel channel, final Command msg) {
final CommandType commandType = msg.getType();
final Pair<NettyRequestProcessor, ExecutorService> pair = processors.get(commandType);
if (pair != null) {
Runnable r = new Runnable() {
@Override
public void run() {
try {
pair.getLeft().process(channel, msg);
} catch (Throwable ex) {
logger.error("process msg {} error", msg, ex);
}
}
};
try {
pair.getRight().submit(r);
} catch (RejectedExecutionException e) {
logger.warn("thread pool is full, discard msg {} from {}", msg, ChannelUtils.getRemoteAddress(channel));
}
} else {
logger.warn("commandType {} not support", commandType);
}
}
/**
* caught exception
*
* @param ctx channel handler context
* @param cause cause
* @throws Exception
*/
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
logger.error("exceptionCaught : {}",cause.getMessage(), cause);
ctx.channel().close();
}
/**
* channel writability changed
*
* @param ctx channel handler context
* @throws Exception
*/
@Override
public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception {
Channel ch = ctx.channel();
ChannelConfig config = ch.config();
if (!ch.isWritable()) {
if (logger.isWarnEnabled()) {
logger.warn("{} is not writable, over high water level : {}",
ch, config.getWriteBufferHighWaterMark());
}
config.setAutoRead(false);
} else {
if (logger.isWarnEnabled()) {
logger.warn("{} is writable, to low water : {}",
ch, config.getWriteBufferLowWaterMark());
}
config.setAutoRead(true);
}
}
}
|
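Regarding the suspicion in the report above (a channel can report isActive() yet be unable to deliver data): one common, generic mitigation is an idle-state heartbeat that closes connections which stop seeing traffic, so callers are forced to reconnect. This is only an illustrative sketch of that technique, not the fix applied in the linked PR:

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;

// Illustration only: close a connection that has gone quiet, even if isActive() is still true.
public class CloseOnIdleHandler extends ChannelInboundHandlerAdapter {

    @Override
    public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        if (evt instanceof IdleStateEvent
                && ((IdleStateEvent) evt).state() == IdleState.ALL_IDLE) {
            // nothing was read or written within the configured window:
            // treat the channel as stale and close it so callers reconnect
            ctx.close();
        } else {
            super.userEventTriggered(ctx, evt);
        }
    }
}
```

It would be paired with something like `pipeline.addLast(new IdleStateHandler(0, 0, 60))` when the channel is initialized, so the ALL_IDLE event fires after 60 seconds without traffic (the threshold here is an arbitrary example value).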
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,789 |
[Bug][remote] channel time out
|
**Describe the bug**
Under certain network conditions, when the master submits a task it cannot communicate over Netty and the task information never reaches the worker. After a long wait a timeout exception appears, and after a while the same thing happens again.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Manually run a workflow
2. The workflow stays in the running state and every task remains in the submitted (gray dot) state
3. After a long time the master node throws a timeout exception
4. The worker never receives the message from the master
**Expected behavior**
In the send method, the channel's active state is checked when the channel is obtained. The suspicion is that a channel reported as active here may still be unable to send data to the worker; once that channel fails and a new one is created, the new channel works briefly, but after a while the problem reappears.
**Screenshots**
Screenshots cannot be taken in the company environment
**Which version of Dolphin Scheduler:**
-[1.3.1]
-[1.3.2]
**Additional context**
Results may differ across network environments: some users' test clusters show no exception while production does. My own production environment has not been tested yet; in the test environment the problem reproduces roughly every half hour.
**Requirement or improvement**
- Please fix this issue as soon as possible; it seriously affects scheduling
|
https://github.com/apache/dolphinscheduler/issues/3789
|
https://github.com/apache/dolphinscheduler/pull/3913
|
e740dc7645eb7c2659a40dd0958af41969aafa00
|
51d476be69287e7c52c53825ea8ed434ed71dc44
| 2020-09-22T13:57:17Z |
java
| 2020-10-15T02:28:05Z |
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/Constants.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.utils;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
/**
* constant
*/
public class Constants {
public static final String COMMA = ",";
public static final String SLASH = "/";
/**
* charset
*/
public static final Charset UTF8 = StandardCharsets.UTF_8;
/**
* cpus
*/
public static final int CPUS = Runtime.getRuntime().availableProcessors();
public static final String LOCAL_ADDRESS = IPUtils.getFirstNoLoopbackIP4Address();
}
|
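Constants.LOCAL_ADDRESS above relies on IPUtils.getFirstNoLoopbackIP4Address(). For context, a minimal sketch of what such a lookup typically does with the standard JDK APIs (this illustrates the general technique and is not the project's IPUtils implementation):

```java
import java.net.Inet4Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.util.Enumeration;

// Illustration only: pick the first non-loopback IPv4 address of any "up" interface.
public class FirstNonLoopbackIpv4Sketch {

    public static String find() throws Exception {
        Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
        while (interfaces.hasMoreElements()) {
            NetworkInterface nic = interfaces.nextElement();
            if (!nic.isUp() || nic.isLoopback()) {
                continue; // skip interfaces that cannot carry traffic
            }
            Enumeration<InetAddress> addresses = nic.getInetAddresses();
            while (addresses.hasMoreElements()) {
                InetAddress address = addresses.nextElement();
                if (address instanceof Inet4Address && !address.isLoopbackAddress()) {
                    return address.getHostAddress();
                }
            }
        }
        return "127.0.0.1"; // fallback when no suitable interface is found
    }

    public static void main(String[] args) throws Exception {
        System.out.println(find());
    }
}
```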
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,789 |
[Bug][remote] channel time out
|
**Describe the bug**
Under certain network conditions, when the master submits a task it cannot communicate over Netty and the task information never reaches the worker. After a long wait a timeout exception appears, and after a while the same thing happens again.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Manually run a workflow
2. The workflow stays in the running state and every task remains in the submitted (gray dot) state
3. After a long time the master node throws a timeout exception
4. The worker never receives the message from the master
**Expected behavior**
In the send method, the channel's active state is checked when the channel is obtained. The suspicion is that a channel reported as active here may still be unable to send data to the worker; once that channel fails and a new one is created, the new channel works briefly, but after a while the problem reappears.
**Screenshots**
Screenshots cannot be taken in the company environment
**Which version of Dolphin Scheduler:**
-[1.3.1]
-[1.3.2]
**Additional context**
Results may differ across network environments: some users' test clusters show no exception while production does. My own production environment has not been tested yet; in the test environment the problem reproduces roughly every half hour.
**Requirement or improvement**
- Please fix this issue as soon as possible; it seriously affects scheduling
|
https://github.com/apache/dolphinscheduler/issues/3789
|
https://github.com/apache/dolphinscheduler/pull/3913
|
e740dc7645eb7c2659a40dd0958af41969aafa00
|
51d476be69287e7c52c53825ea8ed434ed71dc44
| 2020-09-22T13:57:17Z |
java
| 2020-10-15T02:28:05Z |
dolphinscheduler-remote/src/main/java/org/apache/dolphinscheduler/remote/utils/NettyUtils.java
| |
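The NettyServerHandler.channelWritabilityChanged method shown earlier toggles autoRead against the channel's write-buffer high/low water marks. For reference, a minimal sketch of how those water marks are usually configured on a Netty server bootstrap (the byte thresholds are arbitrary example values, not the project's settings):

```java
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.WriteBufferWaterMark;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

// Illustration only: configure the write-buffer water marks that drive channelWritabilityChanged.
public class WaterMarkBootstrapSketch {

    public static ServerBootstrap configure() {
        return new ServerBootstrap()
                .group(new NioEventLoopGroup(1), new NioEventLoopGroup())
                .channel(NioServerSocketChannel.class)
                // below 32 KiB the channel becomes writable again; above 64 KiB it stops being writable
                .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK,
                        new WriteBufferWaterMark(32 * 1024, 64 * 1024))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        // handlers such as NettyServerHandler would be added here
                    }
                });
    }
}
```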
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,836 |
[Bug][API] verifyProcessDefinitionName error message
|

|
https://github.com/apache/dolphinscheduler/issues/3836
|
https://github.com/apache/dolphinscheduler/pull/3908
|
d32300ba5b33ec17092ae3ba7dd6502f0f709554
|
13030502fd27863827ce9a2e3ec905c5a359170b
| 2020-09-28T03:21:57Z |
java
| 2020-10-15T06:09:28Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service.impl;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_SUB_PROCESS_DEFINE_ID;
import org.apache.dolphinscheduler.api.dto.ProcessMeta;
import org.apache.dolphinscheduler.api.dto.treeview.Instance;
import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.BaseService;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionService;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionVersionService;
import org.apache.dolphinscheduler.api.service.ProcessInstanceService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.SchedulerService;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.FileUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.exportprocess.ProcessAddTaskParam;
import org.apache.dolphinscheduler.api.utils.exportprocess.TaskNodeParamFactory;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StreamUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionVersion;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.service.permission.PermissionCheck;
import org.apache.dolphinscheduler.service.process.ProcessService;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
* process definition service impl
*/
@Service
public class ProcessDefinitionServiceImpl extends BaseService implements
ProcessDefinitionService {
private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class);
private static final String PROCESSDEFINITIONID = "processDefinitionId";
private static final String RELEASESTATE = "releaseState";
private static final String TASKS = "tasks";
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProjectService projectService;
@Autowired
private ProcessDefinitionVersionService processDefinitionVersionService;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessInstanceService processInstanceService;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private ProcessService processService;
/**
* create process definition
*
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param processDefinitionJson process definition json
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @return create result code
* @throws JsonProcessingException JsonProcessingException
*/
public Map<String, Object> createProcessDefinition(User loginUser,
String projectName,
String name,
String processDefinitionJson,
String desc,
String locations,
String connects) throws JsonProcessingException {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
// check project auth
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefine = new ProcessDefinition();
Date now = new Date();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
Map<String, Object> checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
if (checkProcessJson.get(Constants.STATUS) != Status.SUCCESS) {
return checkProcessJson;
}
processDefine.setName(name);
processDefine.setReleaseState(ReleaseState.OFFLINE);
processDefine.setProjectId(project.getId());
processDefine.setUserId(loginUser.getId());
processDefine.setProcessDefinitionJson(processDefinitionJson);
processDefine.setDescription(desc);
processDefine.setLocations(locations);
processDefine.setConnects(connects);
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
processDefine.setModifyBy(loginUser.getUserName());
processDefine.setResourceIds(getResourceIds(processData));
//custom global params
List<Property> globalParamsList = processData.getGlobalParams();
if (CollectionUtils.isNotEmpty(globalParamsList)) {
Set<Property> globalParamsSet = new HashSet<>(globalParamsList);
globalParamsList = new ArrayList<>(globalParamsSet);
processDefine.setGlobalParamList(globalParamsList);
}
processDefine.setCreateTime(now);
processDefine.setUpdateTime(now);
processDefine.setFlag(Flag.YES);
// save the new process definition
processDefineMapper.insert(processDefine);
// add process definition version
long version = processDefinitionVersionService.addProcessDefinitionVersion(processDefine);
processDefine.setVersion(version);
processDefineMapper.updateVersionByProcessDefinitionId(processDefine.getId(), version);
// return processDefinition object with ID
result.put(Constants.DATA_LIST, processDefineMapper.selectById(processDefine.getId()));
putMsg(result, Status.SUCCESS);
result.put("processDefinitionId", processDefine.getId());
return result;
}
/**
* get resource ids
*
* @param processData process data
* @return resource ids
*/
private String getResourceIds(ProcessData processData) {
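// walk every task's parameters, collect the distinct resource-file ids, and join them with commas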
return Optional.ofNullable(processData.getTasks())
.orElse(Collections.emptyList())
.stream()
.map(taskNode -> TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams()))
.filter(Objects::nonNull)
.flatMap(parameters -> parameters.getResourceFilesList().stream())
.map(ResourceInfo::getId)
.distinct()
.map(Objects::toString)
.collect(Collectors.joining(","));
}
/**
* query process definition list
*
* @param loginUser login user
* @param projectName project name
* @return definition list
*/
public Map<String, Object> queryProcessDefinitionList(User loginUser, String projectName) {
HashMap<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
List<ProcessDefinition> resourceList = processDefineMapper.queryAllDefinitionList(project.getId());
result.put(Constants.DATA_LIST, resourceList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query process definition list paging
*
* @param loginUser login user
* @param projectName project name
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @param userId user id
* @return process definition page
*/
public Map<String, Object> queryProcessDefinitionListPaging(User loginUser, String projectName, String searchVal, Integer pageNo, Integer pageSize, Integer userId) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
Page<ProcessDefinition> page = new Page<>(pageNo, pageSize);
IPage<ProcessDefinition> processDefinitionIPage = processDefineMapper.queryDefineListPaging(
page, searchVal, userId, project.getId(), isAdmin(loginUser));
PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize);
pageInfo.setTotalCount((int) processDefinitionIPage.getTotal());
pageInfo.setLists(processDefinitionIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query detail of process definition
*
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @return process definition detail
*/
public Map<String, Object> queryProcessDefinitionById(User loginUser, String projectName, Integer processId) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processId);
} else {
result.put(Constants.DATA_LIST, processDefinition);
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* update process definition
*
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param id process definition id
* @param processDefinitionJson process definition json
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @return update result code
*/
public Map<String, Object> updateProcessDefinition(User loginUser, String projectName, int id, String name,
String processDefinitionJson, String desc,
String locations, String connects) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
Map<String, Object> checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
if ((checkProcessJson.get(Constants.STATUS) != Status.SUCCESS)) {
return checkProcessJson;
}
ProcessDefinition processDefine = processService.findProcessDefineById(id);
if (processDefine == null) {
// check process definition exists
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, id);
return result;
} else if (processDefine.getReleaseState() == ReleaseState.ONLINE) {
// online can not permit edit
putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefine.getName());
return result;
} else {
putMsg(result, Status.SUCCESS);
}
Date now = new Date();
processDefine.setId(id);
processDefine.setName(name);
processDefine.setReleaseState(ReleaseState.OFFLINE);
processDefine.setProjectId(project.getId());
processDefine.setProcessDefinitionJson(processDefinitionJson);
processDefine.setDescription(desc);
processDefine.setLocations(locations);
processDefine.setConnects(connects);
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
processDefine.setModifyBy(loginUser.getUserName());
processDefine.setResourceIds(getResourceIds(processData));
//custom global params
List<Property> globalParamsList = new ArrayList<>();
if (CollectionUtils.isNotEmpty(processData.getGlobalParams())) {
Set<Property> userDefParamsSet = new HashSet<>(processData.getGlobalParams());
globalParamsList = new ArrayList<>(userDefParamsSet);
}
processDefine.setGlobalParamList(globalParamsList);
processDefine.setUpdateTime(now);
processDefine.setFlag(Flag.YES);
// add process definition version
long version = processDefinitionVersionService.addProcessDefinitionVersion(processDefine);
processDefine.setVersion(version);
if (processDefineMapper.updateById(processDefine) > 0) {
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, processDefineMapper.queryByDefineId(id));
} else {
putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
}
return result;
}
/**
* verify process definition name unique
*
* @param loginUser login user
* @param projectName project name
* @param name name
* @return true if process definition name not exists, otherwise false
*/
public Map<String, Object> verifyProcessDefinitionName(User loginUser, String projectName, String name) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.verifyByDefineName(project.getId(), name);
if (processDefinition == null) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.PROCESS_INSTANCE_EXIST, name);
}
return result;
}
/**
* delete process definition by id
*
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process definition id
* @return delete result code
*/
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> deleteProcessDefinitionById(User loginUser, String projectName, Integer processDefinitionId) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(processDefinitionId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionId);
return result;
}
// Determine if the login user is the owner of the process definition
if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
// check process definition is already online
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, processDefinitionId);
return result;
}
// check process instances is already running
List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineIdAndStatus(processDefinitionId, Constants.NOT_TERMINATED_STATES);
if (CollectionUtils.isNotEmpty(processInstances)) {
putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_ID_FAIL, processInstances.size());
return result;
}
// get the timing according to the process definition
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
if (!schedules.isEmpty() && schedules.size() > 1) {
logger.warn("scheduler num is {},Greater than 1", schedules.size());
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
return result;
} else if (schedules.size() == 1) {
Schedule schedule = schedules.get(0);
if (schedule.getReleaseState() == ReleaseState.OFFLINE) {
scheduleMapper.deleteById(schedule.getId());
} else if (schedule.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
return result;
}
}
int delete = processDefineMapper.deleteById(processDefinitionId);
if (delete > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
}
return result;
}
/**
* release process definition: online / offline
*
* @param loginUser login user
* @param projectName project name
* @param id process definition id
* @param releaseState release state
* @return release result code
*/
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> releaseProcessDefinition(User loginUser, String projectName, int id, int releaseState) {
HashMap<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ReleaseState state = ReleaseState.getEnum(releaseState);
// check state
if (null == state) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
return result;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(id);
switch (state) {
case ONLINE:
// To check resources whether they are already cancel authorized or deleted
String resourceIds = processDefinition.getResourceIds();
if (StringUtils.isNotBlank(resourceIds)) {
Integer[] resourceIdArray = Arrays.stream(resourceIds.split(",")).map(Integer::parseInt).toArray(Integer[]::new);
PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger);
try {
permissionCheck.checkPermission();
} catch (Exception e) {
logger.error(e.getMessage(), e);
putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE);
return result;
}
}
processDefinition.setReleaseState(state);
processDefineMapper.updateById(processDefinition);
break;
case OFFLINE:
processDefinition.setReleaseState(state);
processDefineMapper.updateById(processDefinition);
List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray(
new int[]{processDefinition.getId()}
);
for (Schedule schedule : scheduleList) {
logger.info("set schedule offline, project id: {}, schedule id: {}, process definition id: {}", project.getId(), schedule.getId(), id);
// set status
schedule.setReleaseState(ReleaseState.OFFLINE);
scheduleMapper.updateById(schedule);
SchedulerService.deleteSchedule(project.getId(), schedule.getId());
}
break;
default:
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* batch export process definition by ids
*/
public void batchExportProcessDefinitionByIds(User loginUser, String projectName, String processDefinitionIds, HttpServletResponse response) {
if (StringUtils.isEmpty(processDefinitionIds)) {
return;
}
//export project info
Project project = projectMapper.queryByName(projectName);
//check user access for project
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return;
}
List<ProcessMeta> processDefinitionList =
getProcessDefinitionList(processDefinitionIds);
if (CollectionUtils.isNotEmpty(processDefinitionList)) {
downloadProcessDefinitionFile(response, processDefinitionList);
}
}
/**
* get process definition list by ids
*/
private List<ProcessMeta> getProcessDefinitionList(String processDefinitionIds) {
List<ProcessMeta> processDefinitionList = new ArrayList<>();
String[] processDefinitionIdArray = processDefinitionIds.split(",");
for (String strProcessDefinitionId : processDefinitionIdArray) {
//get workflow info
int processDefinitionId = Integer.parseInt(strProcessDefinitionId);
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(processDefinitionId);
if (null != processDefinition) {
processDefinitionList.add(exportProcessMetaData(processDefinitionId, processDefinition));
}
}
return processDefinitionList;
}
/**
* download the process definition file
*/
private void downloadProcessDefinitionFile(HttpServletResponse response, List<ProcessMeta> processDefinitionList) {
response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
BufferedOutputStream buff = null;
ServletOutputStream out = null;
try {
out = response.getOutputStream();
buff = new BufferedOutputStream(out);
buff.write(JSONUtils.toJsonString(processDefinitionList).getBytes(StandardCharsets.UTF_8));
buff.flush();
buff.close();
} catch (IOException e) {
logger.warn("export process fail", e);
} finally {
if (null != buff) {
try {
buff.close();
} catch (Exception e) {
logger.warn("export process buffer not close", e);
}
}
if (null != out) {
try {
out.close();
} catch (Exception e) {
logger.warn("export process output stream not close", e);
}
}
}
}
/**
* get export process metadata string
*
* @param processDefinitionId process definition id
* @param processDefinition process definition
* @return export process metadata string
*/
public ProcessMeta exportProcessMetaData(Integer processDefinitionId, ProcessDefinition processDefinition) {
//correct task param which has data source or dependent param
String correctProcessDefinitionJson = addExportTaskNodeSpecialParam(processDefinition.getProcessDefinitionJson());
processDefinition.setProcessDefinitionJson(correctProcessDefinitionJson);
//export process metadata
ProcessMeta exportProcessMeta = new ProcessMeta();
exportProcessMeta.setProjectName(processDefinition.getProjectName());
exportProcessMeta.setProcessDefinitionName(processDefinition.getName());
exportProcessMeta.setProcessDefinitionJson(processDefinition.getProcessDefinitionJson());
exportProcessMeta.setProcessDefinitionDescription(processDefinition.getDescription());
exportProcessMeta.setProcessDefinitionLocations(processDefinition.getLocations());
exportProcessMeta.setProcessDefinitionConnects(processDefinition.getConnects());
//schedule info
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
if (!schedules.isEmpty()) {
Schedule schedule = schedules.get(0);
exportProcessMeta.setScheduleWarningType(schedule.getWarningType().toString());
exportProcessMeta.setScheduleWarningGroupId(schedule.getWarningGroupId());
exportProcessMeta.setScheduleStartTime(DateUtils.dateToString(schedule.getStartTime()));
exportProcessMeta.setScheduleEndTime(DateUtils.dateToString(schedule.getEndTime()));
exportProcessMeta.setScheduleCrontab(schedule.getCrontab());
exportProcessMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy()));
exportProcessMeta.setScheduleReleaseState(String.valueOf(ReleaseState.OFFLINE));
exportProcessMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority()));
exportProcessMeta.setScheduleWorkerGroupName(schedule.getWorkerGroup());
}
//create workflow json file
return exportProcessMeta;
}
/**
* correct task param which has datasource or dependent
*
* @param processDefinitionJson processDefinitionJson
* @return correct processDefinitionJson
*/
private String addExportTaskNodeSpecialParam(String processDefinitionJson) {
ObjectNode jsonObject = JSONUtils.parseObject(processDefinitionJson);
ArrayNode jsonArray = (ArrayNode) jsonObject.path(TASKS);
for (int i = 0; i < jsonArray.size(); i++) {
JsonNode taskNode = jsonArray.path(i);
if (StringUtils.isNotEmpty(taskNode.path("type").asText())) {
String taskType = taskNode.path("type").asText();
ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskType);
if (null != addTaskParam) {
addTaskParam.addExportSpecialParam(taskNode);
}
}
}
jsonObject.set(TASKS, jsonArray);
return jsonObject.toString();
}
/**
* check task if has sub process
*
* @param taskType task type
* @return if task has sub process return true else false
*/
private boolean checkTaskHasSubProcess(String taskType) {
return taskType.equals(TaskType.SUB_PROCESS.name());
}
/**
* import process definition
*
* @param loginUser login user
* @param file process metadata json file
* @param currentProjectName current project name
* @return import process
*/
@Transactional(rollbackFor = RuntimeException.class)
public Map<String, Object> importProcessDefinition(User loginUser, MultipartFile file, String currentProjectName) {
Map<String, Object> result = new HashMap<>();
String processMetaJson = FileUtils.file2String(file);
List<ProcessMeta> processMetaList = JSONUtils.toList(processMetaJson, ProcessMeta.class);
//check file content
if (CollectionUtils.isEmpty(processMetaList)) {
putMsg(result, Status.DATA_IS_NULL, "fileContent");
return result;
}
for (ProcessMeta processMeta : processMetaList) {
if (!checkAndImportProcessDefinition(loginUser, currentProjectName, result, processMeta)) {
return result;
}
}
return result;
}
/**
* check and import process definition
*/
private boolean checkAndImportProcessDefinition(User loginUser, String currentProjectName, Map<String, Object> result, ProcessMeta processMeta) {
if (!checkImportanceParams(processMeta, result)) {
return false;
}
//deal with process name
String processDefinitionName = processMeta.getProcessDefinitionName();
//use currentProjectName to query
Project targetProject = projectMapper.queryByName(currentProjectName);
if (null != targetProject) {
processDefinitionName = recursionProcessDefinitionName(targetProject.getId(),
processDefinitionName, 1);
}
//unique check
Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, currentProjectName, processDefinitionName);
Status status = (Status) checkResult.get(Constants.STATUS);
if (Status.SUCCESS.equals(status)) {
putMsg(result, Status.SUCCESS);
} else {
result.putAll(checkResult);
return false;
}
// get create process result
Map<String, Object> createProcessResult =
getCreateProcessResult(loginUser,
currentProjectName,
result,
processMeta,
processDefinitionName,
addImportTaskNodeParam(loginUser, processMeta.getProcessDefinitionJson(), targetProject));
if (createProcessResult == null) {
return false;
}
//create process definition
Integer processDefinitionId =
Objects.isNull(createProcessResult.get(PROCESSDEFINITIONID))
? null : Integer.parseInt(createProcessResult.get(PROCESSDEFINITIONID).toString());
//scheduler param
return getImportProcessScheduleResult(loginUser,
currentProjectName,
result,
processMeta,
processDefinitionName,
processDefinitionId);
}
/**
* get create process result
*/
private Map<String, Object> getCreateProcessResult(User loginUser,
String currentProjectName,
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
String importProcessParam) {
Map<String, Object> createProcessResult = null;
try {
createProcessResult = createProcessDefinition(loginUser
, currentProjectName,
processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp(),
importProcessParam,
processMeta.getProcessDefinitionDescription(),
processMeta.getProcessDefinitionLocations(),
processMeta.getProcessDefinitionConnects());
putMsg(result, Status.SUCCESS);
} catch (JsonProcessingException e) {
logger.error("import process meta json data: {}", e.getMessage(), e);
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
}
return createProcessResult;
}
/**
* get import process schedule result
*/
private boolean getImportProcessScheduleResult(User loginUser,
String currentProjectName,
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
Integer processDefinitionId) {
if (null != processMeta.getScheduleCrontab() && null != processDefinitionId) {
int scheduleInsert = importProcessSchedule(loginUser,
currentProjectName,
processMeta,
processDefinitionName,
processDefinitionId);
if (0 == scheduleInsert) {
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
return false;
}
}
return true;
}
/**
* check importance params
*/
private boolean checkImportanceParams(ProcessMeta processMeta, Map<String, Object> result) {
if (StringUtils.isEmpty(processMeta.getProjectName())) {
putMsg(result, Status.DATA_IS_NULL, "projectName");
return false;
}
if (StringUtils.isEmpty(processMeta.getProcessDefinitionName())) {
putMsg(result, Status.DATA_IS_NULL, "processDefinitionName");
return false;
}
if (StringUtils.isEmpty(processMeta.getProcessDefinitionJson())) {
putMsg(result, Status.DATA_IS_NULL, "processDefinitionJson");
return false;
}
return true;
}
/**
* import process add special task param
*
* @param loginUser login user
* @param processDefinitionJson process definition json
* @param targetProject target project
* @return import process param
*/
private String addImportTaskNodeParam(User loginUser, String processDefinitionJson, Project targetProject) {
ObjectNode jsonObject = JSONUtils.parseObject(processDefinitionJson);
ArrayNode jsonArray = (ArrayNode) jsonObject.get(TASKS);
//add sql and dependent param
for (int i = 0; i < jsonArray.size(); i++) {
JsonNode taskNode = jsonArray.path(i);
String taskType = taskNode.path("type").asText();
ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskType);
if (null != addTaskParam) {
addTaskParam.addImportSpecialParam(taskNode);
}
}
//recursive sub-process parameter correction map key for old process id value for new process id
Map<Integer, Integer> subProcessIdMap = new HashMap<>();
List<Object> subProcessList = StreamUtils.asStream(jsonArray.elements())
.filter(elem -> checkTaskHasSubProcess(JSONUtils.parseObject(elem.toString()).path("type").asText()))
.collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(subProcessList)) {
importSubProcess(loginUser, targetProject, jsonArray, subProcessIdMap);
}
jsonObject.set(TASKS, jsonArray);
return jsonObject.toString();
}
/**
* import process schedule
*
* @param loginUser login user
* @param currentProjectName current project name
* @param processMeta process meta data
* @param processDefinitionName process definition name
* @param processDefinitionId process definition id
* @return insert schedule flag
*/
public int importProcessSchedule(User loginUser, String currentProjectName, ProcessMeta processMeta,
String processDefinitionName, Integer processDefinitionId) {
Date now = new Date();
Schedule scheduleObj = new Schedule();
scheduleObj.setProjectName(currentProjectName);
scheduleObj.setProcessDefinitionId(processDefinitionId);
scheduleObj.setProcessDefinitionName(processDefinitionName);
scheduleObj.setCreateTime(now);
scheduleObj.setUpdateTime(now);
scheduleObj.setUserId(loginUser.getId());
scheduleObj.setUserName(loginUser.getUserName());
scheduleObj.setCrontab(processMeta.getScheduleCrontab());
if (null != processMeta.getScheduleStartTime()) {
scheduleObj.setStartTime(DateUtils.stringToDate(processMeta.getScheduleStartTime()));
}
if (null != processMeta.getScheduleEndTime()) {
scheduleObj.setEndTime(DateUtils.stringToDate(processMeta.getScheduleEndTime()));
}
if (null != processMeta.getScheduleWarningType()) {
scheduleObj.setWarningType(WarningType.valueOf(processMeta.getScheduleWarningType()));
}
if (null != processMeta.getScheduleWarningGroupId()) {
scheduleObj.setWarningGroupId(processMeta.getScheduleWarningGroupId());
}
if (null != processMeta.getScheduleFailureStrategy()) {
scheduleObj.setFailureStrategy(FailureStrategy.valueOf(processMeta.getScheduleFailureStrategy()));
}
if (null != processMeta.getScheduleReleaseState()) {
scheduleObj.setReleaseState(ReleaseState.valueOf(processMeta.getScheduleReleaseState()));
}
if (null != processMeta.getScheduleProcessInstancePriority()) {
scheduleObj.setProcessInstancePriority(Priority.valueOf(processMeta.getScheduleProcessInstancePriority()));
}
if (null != processMeta.getScheduleWorkerGroupName()) {
scheduleObj.setWorkerGroup(processMeta.getScheduleWorkerGroupName());
}
return scheduleMapper.insert(scheduleObj);
}
/**
* check whether the imported process contains sub-processes,
* and recursively create any that are missing in the target project
*
* @param loginUser login user
* @param targetProject target project
* @param jsonArray process task array
* @param subProcessIdMap correct sub process id map
*/
private void importSubProcess(User loginUser, Project targetProject, ArrayNode jsonArray, Map<Integer, Integer> subProcessIdMap) {
for (int i = 0; i < jsonArray.size(); i++) {
ObjectNode taskNode = (ObjectNode) jsonArray.path(i);
String taskType = taskNode.path("type").asText();
if (!checkTaskHasSubProcess(taskType)) {
continue;
}
//get sub process info
ObjectNode subParams = (ObjectNode) taskNode.path("params");
Integer subProcessId = subParams.path(PROCESSDEFINITIONID).asInt();
ProcessDefinition subProcess = processDefineMapper.queryByDefineId(subProcessId);
//check is sub process exist in db
if (null == subProcess) {
continue;
}
String subProcessJson = subProcess.getProcessDefinitionJson();
//check current project has sub process
ProcessDefinition currentProjectSubProcess = processDefineMapper.queryByDefineName(targetProject.getId(), subProcess.getName());
if (null == currentProjectSubProcess) {
ArrayNode subJsonArray = (ArrayNode) JSONUtils.parseObject(subProcess.getProcessDefinitionJson()).get(TASKS);
List<Object> subProcessList = StreamUtils.asStream(subJsonArray.elements())
.filter(item -> checkTaskHasSubProcess(JSONUtils.parseObject(item.toString()).path("type").asText()))
.collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(subProcessList)) {
importSubProcess(loginUser, targetProject, subJsonArray, subProcessIdMap);
//sub process processId correct
if (!subProcessIdMap.isEmpty()) {
for (Map.Entry<Integer, Integer> entry : subProcessIdMap.entrySet()) {
String oldSubProcessId = "\"processDefinitionId\":" + entry.getKey();
String newSubProcessId = "\"processDefinitionId\":" + entry.getValue();
subProcessJson = subProcessJson.replaceAll(oldSubProcessId, newSubProcessId);
}
subProcessIdMap.clear();
}
}
//if sub-process recursion
Date now = new Date();
//create sub process in target project
ProcessDefinition processDefine = new ProcessDefinition();
processDefine.setName(subProcess.getName());
processDefine.setVersion(subProcess.getVersion());
processDefine.setReleaseState(subProcess.getReleaseState());
processDefine.setProjectId(targetProject.getId());
processDefine.setUserId(loginUser.getId());
processDefine.setProcessDefinitionJson(subProcessJson);
processDefine.setDescription(subProcess.getDescription());
processDefine.setLocations(subProcess.getLocations());
processDefine.setConnects(subProcess.getConnects());
processDefine.setTimeout(subProcess.getTimeout());
processDefine.setTenantId(subProcess.getTenantId());
processDefine.setGlobalParams(subProcess.getGlobalParams());
processDefine.setCreateTime(now);
processDefine.setUpdateTime(now);
processDefine.setFlag(subProcess.getFlag());
processDefine.setReceivers(subProcess.getReceivers());
processDefine.setReceiversCc(subProcess.getReceiversCc());
processDefineMapper.insert(processDefine);
logger.info("create sub process, project: {}, process name: {}", targetProject.getName(), processDefine.getName());
//modify task node
ProcessDefinition newSubProcessDefine = processDefineMapper.queryByDefineName(processDefine.getProjectId(), processDefine.getName());
if (null != newSubProcessDefine) {
subProcessIdMap.put(subProcessId, newSubProcessDefine.getId());
subParams.put(PROCESSDEFINITIONID, newSubProcessDefine.getId());
taskNode.set("params", subParams);
}
}
}
}
/**
* check the process definition node meets the specifications
*
* @param processData process data
* @param processDefinitionJson process definition json
* @return check result code
*/
public Map<String, Object> checkProcessNodeList(ProcessData processData, String processDefinitionJson) {
Map<String, Object> result = new HashMap<>();
try {
if (processData == null) {
logger.error("process data is null");
putMsg(result, Status.DATA_IS_NOT_VALID, processDefinitionJson);
return result;
}
// Check whether the task node is normal
List<TaskNode> taskNodes = processData.getTasks();
if (taskNodes == null) {
logger.error("process node info is empty");
putMsg(result, Status.DATA_IS_NULL, processDefinitionJson);
return result;
}
// check has cycle
if (graphHasCycle(taskNodes)) {
logger.error("process DAG has cycle");
putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
return result;
}
// check whether the process definition json is normal
for (TaskNode taskNode : taskNodes) {
if (!CheckUtils.checkTaskNodeParameters(taskNode.getParams(), taskNode.getType())) {
logger.error("task node {} parameter invalid", taskNode.getName());
putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName());
return result;
}
// check extra params
CheckUtils.checkOtherParams(taskNode.getExtras());
}
putMsg(result, Status.SUCCESS);
} catch (Exception e) {
result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
result.put(Constants.MSG, e.getMessage());
}
return result;
}
/**
* get task node details based on process definition
*
* @param defineId define id
* @return task node list
*/
public Map<String, Object> getTaskNodeListByDefinitionId(Integer defineId) {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.info("process define not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, defineId);
return result;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
if (null == processData) {
logger.error("process data is null");
putMsg(result, Status.DATA_IS_NOT_VALID, processDefinitionJson);
return result;
}
List<TaskNode> taskNodeList = (processData.getTasks() == null) ? new ArrayList<>() : processData.getTasks();
result.put(Constants.DATA_LIST, taskNodeList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* get task node details based on process definition
*
* @param defineIdList define id list
* @return task node list
*/
public Map<String, Object> getTaskNodeListByDefinitionIdList(String defineIdList) {
Map<String, Object> result = new HashMap<>();
Map<Integer, List<TaskNode>> taskNodeMap = new HashMap<>();
String[] idList = defineIdList.split(",");
List<Integer> idIntList = new ArrayList<>();
for (String definitionId : idList) {
idIntList.add(Integer.parseInt(definitionId));
}
Integer[] idArray = idIntList.toArray(new Integer[0]);
List<ProcessDefinition> processDefinitionList = processDefineMapper.queryDefinitionListByIdList(idArray);
if (CollectionUtils.isEmpty(processDefinitionList)) {
logger.info("process definition not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, defineIdList);
return result;
}
for (ProcessDefinition processDefinition : processDefinitionList) {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = (processData.getTasks() == null) ? new ArrayList<>() : processData.getTasks();
taskNodeMap.put(processDefinition.getId(), taskNodeList);
}
result.put(Constants.DATA_LIST, taskNodeMap);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query process definition all by project id
*
* @param projectId project id
* @return process definitions in the project
*/
public Map<String, Object> queryProcessDefinitionAllByProjectId(Integer projectId) {
HashMap<String, Object> result = new HashMap<>();
List<ProcessDefinition> resourceList = processDefineMapper.queryAllDefinitionList(projectId);
result.put(Constants.DATA_LIST, resourceList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* Encapsulates the TreeView structure
*
* @param processId process definition id
* @param limit limit
* @return tree view json data
* @throws Exception exception
*/
public Map<String, Object> viewTree(Integer processId, Integer limit) throws Exception {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (null == processDefinition) {
logger.info("process define not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinition);
return result;
}
DAG<String, TaskNode, TaskNodeRelation> dag = genDagGraph(processDefinition);
/**
* nodes that is running
*/
Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>();
/**
* nodes that are waiting to run
*/
Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>();
/**
* List of process instances
*/
List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineId(processId, limit);
for (ProcessInstance processInstance : processInstanceList) {
processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(), processInstance.getEndTime()));
}
if (limit > processInstanceList.size()) {
limit = processInstanceList.size();
}
TreeViewDto parentTreeViewDto = new TreeViewDto();
parentTreeViewDto.setName("DAG");
parentTreeViewDto.setType("");
// Specify the process definition, because it is a TreeView for a process definition
for (int i = limit - 1; i >= 0; i--) {
ProcessInstance processInstance = processInstanceList.get(i);
Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime();
parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), "", processInstance.getState().toString()
, processInstance.getStartTime(), endTime, processInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime())));
}
List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>();
parentTreeViewDtoList.add(parentTreeViewDto);
// Here is the encapsulation task instance
for (String startNode : dag.getBeginNode()) {
runningNodeMap.put(startNode, parentTreeViewDtoList);
}
while (Stopper.isRunning()) {
Set<String> postNodeList = null;
Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<String, List<TreeViewDto>> en = iter.next();
String nodeName = en.getKey();
parentTreeViewDtoList = en.getValue();
TreeViewDto treeViewDto = new TreeViewDto();
treeViewDto.setName(nodeName);
TaskNode taskNode = dag.getNode(nodeName);
treeViewDto.setType(taskNode.getType());
//set treeViewDto instances
for (int i = limit - 1; i >= 0; i--) {
ProcessInstance processInstance = processInstanceList.get(i);
TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName);
if (taskInstance == null) {
treeViewDto.getInstances().add(new Instance(-1, "not running", "null"));
} else {
Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
int subProcessId = 0;
/**
* if the task is a sub process, return the sub process definition id; otherwise sub id = 0
*/
if (taskInstance.getTaskType().equals(TaskType.SUB_PROCESS.name())) {
String taskJson = taskInstance.getTaskJson();
taskNode = JSONUtils.parseObject(taskJson, TaskNode.class);
subProcessId = Integer.parseInt(JSONUtils.parseObject(
taskNode.getParams()).path(CMDPARAM_SUB_PROCESS_DEFINE_ID).asText());
}
treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskType(), taskInstance.getState().toString()
, taskInstance.getStartTime(), taskInstance.getEndTime(), taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId));
}
}
for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) {
pTreeViewDto.getChildren().add(treeViewDto);
}
postNodeList = dag.getSubsequentNodes(nodeName);
if (CollectionUtils.isNotEmpty(postNodeList)) {
for (String nextNodeName : postNodeList) {
List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName);
if (CollectionUtils.isEmpty(treeViewDtoList)) {
treeViewDtoList = new ArrayList<>();
}
treeViewDtoList.add(treeViewDto);
waitingRunningNodeMap.put(nextNodeName, treeViewDtoList);
}
}
runningNodeMap.remove(nodeName);
}
if (waitingRunningNodeMap.isEmpty()) {
break;
} else {
runningNodeMap.putAll(waitingRunningNodeMap);
waitingRunningNodeMap.clear();
}
}
result.put(Constants.DATA_LIST, parentTreeViewDto);
result.put(Constants.STATUS, Status.SUCCESS);
result.put(Constants.MSG, Status.SUCCESS.getMsg());
return result;
}
/**
* Generate the DAG graph from the process definition
*
* @param processDefinition process definition
* @return dag graph
*/
private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//check process data
if (null != processData) {
List<TaskNode> taskNodeList = processData.getTasks();
processDefinition.setGlobalParamList(processData.getGlobalParams());
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList);
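// getProcessDag is expected to derive the edge list from each task's preTasks
// (inferred from how the result is consumed by buildDagGraph below)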
// Generate concrete Dag to be executed
return DagHelper.buildDagGraph(processDag);
}
return new DAG<>();
}
/**
* whether the graph has a cycle
*
* @param taskNodeResponseList task node response list
* @return if graph has cycle flag
*/
private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) {
DAG<String, TaskNode, String> graph = new DAG<>();
// Fill the vertices
for (TaskNode taskNodeResponse : taskNodeResponseList) {
graph.addNode(taskNodeResponse.getName(), taskNodeResponse);
}
// Fill edge relations
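// DAG.addEdge is expected to reject an edge that would close a cycle and return false
// in that case (assumption based on how the return value is used below), so a false
// return lets the method report the cycle immediately instead of relying only on the
// final hasCycle() check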
for (TaskNode taskNodeResponse : taskNodeResponseList) {
List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class);
if (CollectionUtils.isNotEmpty(preTasks)) {
for (String preTask : preTasks) {
if (!graph.addEdge(preTask, taskNodeResponse.getName())) {
return true;
}
}
}
}
return graph.hasCycle();
}
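/**
* derive a unique process definition name by appending a numeric suffix,
* e.g. "my_process" -> "my_process(1)" -> "my_process(2)" until no definition with
* that name exists in the project (the example names are illustrative only).
* note: the substring(0, length - 3) step assumes the previous suffix is a single
* digit such as "(1)"; a two-digit suffix would not be stripped cleanly.
*
* @param projectId project id
* @param processDefinitionName candidate process definition name
* @param num suffix number to try next
* @return a process definition name that does not exist yet in the project
*/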
private String recursionProcessDefinitionName(Integer projectId, String processDefinitionName, int num) {
ProcessDefinition processDefinition = processDefineMapper.queryByDefineName(projectId, processDefinitionName);
if (processDefinition != null) {
if (num > 1) {
String str = processDefinitionName.substring(0, processDefinitionName.length() - 3);
processDefinitionName = str + "(" + num + ")";
} else {
processDefinitionName = processDefinition.getName() + "(" + num + ")";
}
} else {
return processDefinitionName;
}
return recursionProcessDefinitionName(projectId, processDefinitionName, num + 1);
}
private Map<String, Object> copyProcessDefinition(User loginUser,
Integer processId,
Project targetProject) throws JsonProcessingException {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
return result;
} else {
return createProcessDefinition(
loginUser,
targetProject.getName(),
processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp(),
processDefinition.getProcessDefinitionJson(),
processDefinition.getDescription(),
processDefinition.getLocations(),
processDefinition.getConnects());
}
}
/**
* batch copy process definition
*
* @param loginUser loginUser
* @param projectName projectName
* @param processDefinitionIds processDefinitionIds
* @param targetProjectId targetProjectId
*/
@Override
public Map<String, Object> batchCopyProcessDefinition(User loginUser,
String projectName,
String processDefinitionIds,
int targetProjectId) {
Map<String, Object> result = new HashMap<>();
List<String> failedProcessList = new ArrayList<>();
if (StringUtils.isEmpty(processDefinitionIds)) {
putMsg(result, Status.PROCESS_DEFINITION_IDS_IS_EMPTY, processDefinitionIds);
return result;
}
//check src project auth
Map<String, Object> checkResult = checkProjectAndAuth(loginUser, projectName);
if (checkResult != null) {
return checkResult;
}
Project targetProject = projectMapper.queryDetailById(targetProjectId);
if (targetProject == null) {
putMsg(result, Status.PROJECT_NOT_FOUNT, targetProjectId);
return result;
}
if (!(targetProject.getName()).equals(projectName)) {
Map<String, Object> checkTargetProjectResult = checkProjectAndAuth(loginUser, targetProject.getName());
if (checkTargetProjectResult != null) {
return checkTargetProjectResult;
}
}
String[] processDefinitionIdList = processDefinitionIds.split(Constants.COMMA);
doBatchCopyProcessDefinition(loginUser, targetProject, failedProcessList, processDefinitionIdList);
checkBatchOperateResult(projectName, targetProject.getName(), result, failedProcessList, true);
return result;
}
/**
* batch move process definition
*
* @param loginUser loginUser
* @param projectName projectName
* @param processDefinitionIds processDefinitionIds
* @param targetProjectId targetProjectId
*/
@Override
public Map<String, Object> batchMoveProcessDefinition(User loginUser,
String projectName,
String processDefinitionIds,
int targetProjectId) {
Map<String, Object> result = new HashMap<>();
List<String> failedProcessList = new ArrayList<>();
//check src project auth
Map<String, Object> checkResult = checkProjectAndAuth(loginUser, projectName);
if (checkResult != null) {
return checkResult;
}
if (StringUtils.isEmpty(processDefinitionIds)) {
putMsg(result, Status.PROCESS_DEFINITION_IDS_IS_EMPTY, processDefinitionIds);
return result;
}
Project targetProject = projectMapper.queryDetailById(targetProjectId);
if (targetProject == null) {
putMsg(result, Status.PROJECT_NOT_FOUNT, targetProjectId);
return result;
}
if (!(targetProject.getName()).equals(projectName)) {
Map<String, Object> checkTargetProjectResult = checkProjectAndAuth(loginUser, targetProject.getName());
if (checkTargetProjectResult != null) {
return checkTargetProjectResult;
}
}
String[] processDefinitionIdList = processDefinitionIds.split(Constants.COMMA);
doBatchMoveProcessDefinition(targetProject, failedProcessList, processDefinitionIdList);
checkBatchOperateResult(projectName, targetProject.getName(), result, failedProcessList, false);
return result;
}
/**
* switch the process definition to the specified version
*
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process definition id
* @param version the version user want to switch
* @return switch process definition version result code
*/
@Override
public Map<String, Object> switchProcessDefinitionVersion(User loginUser, String projectName
, int processDefinitionId, long version) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
// check project auth
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(processDefinitionId);
if (Objects.isNull(processDefinition)) {
putMsg(result
, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR
, processDefinitionId);
return result;
}
ProcessDefinitionVersion processDefinitionVersion = processDefinitionVersionService
.queryByProcessDefinitionIdAndVersion(processDefinitionId, version);
if (Objects.isNull(processDefinitionVersion)) {
putMsg(result
, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR
, processDefinitionId
, version);
return result;
}
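// overwrite the current definition with the snapshot kept for the selected version;
// only the fields stored in ProcessDefinitionVersion are restored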
processDefinition.setVersion(processDefinitionVersion.getVersion());
processDefinition.setProcessDefinitionJson(processDefinitionVersion.getProcessDefinitionJson());
processDefinition.setDescription(processDefinitionVersion.getDescription());
processDefinition.setLocations(processDefinitionVersion.getLocations());
processDefinition.setConnects(processDefinitionVersion.getConnects());
processDefinition.setTimeout(processDefinitionVersion.getTimeout());
processDefinition.setGlobalParams(processDefinitionVersion.getGlobalParams());
processDefinition.setUpdateTime(new Date());
processDefinition.setReceivers(processDefinitionVersion.getReceivers());
processDefinition.setReceiversCc(processDefinitionVersion.getReceiversCc());
processDefinition.setResourceIds(processDefinitionVersion.getResourceIds());
if (processDefineMapper.updateById(processDefinition) > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
}
return result;
}
/**
* do batch move process definition
*
* @param targetProject targetProject
* @param failedProcessList failedProcessList
* @param processDefinitionIdList processDefinitionIdList
*/
private void doBatchMoveProcessDefinition(Project targetProject, List<String> failedProcessList, String[] processDefinitionIdList) {
for (String processDefinitionId : processDefinitionIdList) {
try {
Map<String, Object> moveProcessDefinitionResult =
moveProcessDefinition(Integer.valueOf(processDefinitionId), targetProject);
if (!Status.SUCCESS.equals(moveProcessDefinitionResult.get(Constants.STATUS))) {
setFailedProcessList(failedProcessList, processDefinitionId);
logger.error((String) moveProcessDefinitionResult.get(Constants.MSG));
}
} catch (Exception e) {
setFailedProcessList(failedProcessList, processDefinitionId);
}
}
}
/**
* do batch copy process definition
*
* @param loginUser loginUser
* @param targetProject targetProject
* @param failedProcessList failedProcessList
* @param processDefinitionIdList processDefinitionIdList
*/
private void doBatchCopyProcessDefinition(User loginUser, Project targetProject, List<String> failedProcessList, String[] processDefinitionIdList) {
for (String processDefinitionId : processDefinitionIdList) {
try {
Map<String, Object> copyProcessDefinitionResult =
copyProcessDefinition(loginUser, Integer.valueOf(processDefinitionId), targetProject);
if (!Status.SUCCESS.equals(copyProcessDefinitionResult.get(Constants.STATUS))) {
setFailedProcessList(failedProcessList, processDefinitionId);
logger.error((String) copyProcessDefinitionResult.get(Constants.MSG));
}
} catch (Exception e) {
setFailedProcessList(failedProcessList, processDefinitionId);
}
}
}
/**
* set failed processList
*
* @param failedProcessList failedProcessList
* @param processDefinitionId processDefinitionId
*/
private void setFailedProcessList(List<String> failedProcessList, String processDefinitionId) {
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(Integer.valueOf(processDefinitionId));
if (processDefinition != null) {
failedProcessList.add(processDefinitionId + "[" + processDefinition.getName() + "]");
} else {
failedProcessList.add(processDefinitionId + "[null]");
}
}
/**
* check project and auth
*
* @param loginUser loginUser
* @param projectName projectName
* @return the error result map when the check fails, or null when the user has permission
*/
private Map<String, Object> checkProjectAndAuth(User loginUser, String projectName) {
Project project = projectMapper.queryByName(projectName);
//check user access for project
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
return null;
}
/**
* move process definition
*
* @param processId processId
* @param targetProject targetProject
* @return move result code
*/
private Map<String, Object> moveProcessDefinition(Integer processId,
Project targetProject) {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
return result;
}
processDefinition.setProjectId(targetProject.getId());
processDefinition.setUpdateTime(new Date());
if (processDefineMapper.updateById(processDefinition) > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
}
return result;
}
/**
* check batch operate result
*
* @param srcProjectName srcProjectName
* @param targetProjectName targetProjectName
* @param result result
* @param failedProcessList failedProcessList
* @param isCopy isCopy
*/
private void checkBatchOperateResult(String srcProjectName, String targetProjectName,
Map<String, Object> result, List<String> failedProcessList, boolean isCopy) {
if (!failedProcessList.isEmpty()) {
if (isCopy) {
putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectName, targetProjectName, String.join(",", failedProcessList));
} else {
putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectName, targetProjectName, String.join(",", failedProcessList));
}
} else {
putMsg(result, Status.SUCCESS);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,836 |
[Bug][API] verifyProcessDefinitionName error message
|

|
https://github.com/apache/dolphinscheduler/issues/3836
|
https://github.com/apache/dolphinscheduler/pull/3908
|
d32300ba5b33ec17092ae3ba7dd6502f0f709554
|
13030502fd27863827ce9a2e3ec905c5a359170b
| 2020-09-28T03:21:57Z |
java
| 2020-10-15T06:09:28Z |
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionControllerTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionVersionService;
import org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionVersion;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.User;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.http.HttpServletResponse;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.mock.web.MockHttpServletResponse;
/**
* process definition controller test
*/
@RunWith(MockitoJUnitRunner.Silent.class)
public class ProcessDefinitionControllerTest {
private static Logger logger = LoggerFactory.getLogger(ProcessDefinitionControllerTest.class);
@InjectMocks
private ProcessDefinitionController processDefinitionController;
@Mock
private ProcessDefinitionServiceImpl processDefinitionService;
@Mock
private ProcessDefinitionVersionService processDefinitionVersionService;
protected User user;
@Before
public void before() {
User loginUser = new User();
loginUser.setId(1);
loginUser.setUserType(UserType.GENERAL_USER);
loginUser.setUserName("admin");
user = loginUser;
}
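// the tests below stub the service layer with Mockito and only assert that the
// controller translates the service result map into a Result carrying the same status code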
@Test
public void testCreateProcessDefinition() throws Exception {
String json = "{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-36196\",\"name\""
+ ":\"ssh_test1\",\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"aa=\\\"1234\\\"\\"
+ "necho ${aa}\"},\"desc\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\""
+ ",\"retryInterval\":\"1\",\"timeout\":{\"strategy\":\"\",\"interval\":null,\"enable\":false},"
+ "\"taskInstancePriority\":\"MEDIUM\",\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":-1,\"timeout\":0}";
String locations = "{\"tasks-36196\":{\"name\":\"ssh_test1\",\"targetarr\":\"\",\"x\":141,\"y\":70}}";
String projectName = "test";
String name = "dag_test";
String description = "desc test";
String connects = "[]";
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
result.put("processDefinitionId", 1);
Mockito.when(processDefinitionService.createProcessDefinition(user, projectName, name, json,
description, locations, connects)).thenReturn(result);
Result response = processDefinitionController.createProcessDefinition(user, projectName, name, json,
locations, connects, description);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
private void putMsg(Map<String, Object> result, Status status, Object... statusParams) {
result.put(Constants.STATUS, status);
if (statusParams != null && statusParams.length > 0) {
result.put(Constants.MSG, MessageFormat.format(status.getMsg(), statusParams));
} else {
result.put(Constants.MSG, status.getMsg());
}
}
@Test
public void testVerifyProcessDefinitionName() throws Exception {
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.PROCESS_INSTANCE_EXIST);
String projectName = "test";
String name = "dag_test";
Mockito.when(processDefinitionService.verifyProcessDefinitionName(user, projectName, name)).thenReturn(result);
Result response = processDefinitionController.verifyProcessDefinitionName(user, projectName, name);
Assert.assertEquals(Status.PROCESS_INSTANCE_EXIST.getCode(), response.getCode().intValue());
}
@Test
public void updateProcessDefinition() throws Exception {
String json = "{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-36196\",\"name\":\"ssh_test1\""
+ ",\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"aa=\\\"1234\\\"\\necho ${aa}\"}"
+ ",\"desc\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\",\"retryInterval\""
+ ":\"1\",\"timeout\":{\"strategy\":\"\",\"interval\":null,\"enable\":false},\"taskInstancePriority\""
+ ":\"MEDIUM\",\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":-1,\"timeout\":0}";
String locations = "{\"tasks-36196\":{\"name\":\"ssh_test1\",\"targetarr\":\"\",\"x\":141,\"y\":70}}";
String projectName = "test";
String name = "dag_test";
String description = "desc test";
String connects = "[]";
int id = 1;
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
result.put("processDefinitionId", 1);
Mockito.when(processDefinitionService.updateProcessDefinition(user, projectName, id, name, json,
description, locations, connects)).thenReturn(result);
Result response = processDefinitionController.updateProcessDefinition(user, projectName, name, id, json,
locations, connects, description);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testReleaseProcessDefinition() throws Exception {
String projectName = "test";
int id = 1;
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.releaseProcessDefinition(user, projectName, id, ReleaseState.OFFLINE.ordinal())).thenReturn(result);
Result response = processDefinitionController.releaseProcessDefinition(user, projectName, id, ReleaseState.OFFLINE.ordinal());
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testQueryProcessDefinitionById() throws Exception {
String json = "{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-36196\",\"name\":\"ssh_test1"
+ "\",\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"aa=\\\"1234\\\"\\necho ${aa}"
+ "\"},\"desc\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\",\"retryInterval\""
+ ":\"1\",\"timeout\":{\"strategy\":\"\",\"interval\":null,\"enable\":false},\"taskInstancePriority\":"
+ "\"MEDIUM\",\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":-1,\"timeout\":0}";
String locations = "{\"tasks-36196\":{\"name\":\"ssh_test1\",\"targetarr\":\"\",\"x\":141,\"y\":70}}";
String projectName = "test";
String name = "dag_test";
String description = "desc test";
String connects = "[]";
int id = 1;
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setProjectName(projectName);
processDefinition.setConnects(connects);
processDefinition.setDescription(description);
processDefinition.setId(id);
processDefinition.setLocations(locations);
processDefinition.setName(name);
processDefinition.setProcessDefinitionJson(json);
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, processDefinition);
Mockito.when(processDefinitionService.queryProcessDefinitionById(user, projectName, id)).thenReturn(result);
Result response = processDefinitionController.queryProcessDefinitionById(user, projectName, id);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testBatchCopyProcessDefinition() throws Exception {
String projectName = "test";
int targetProjectId = 2;
String id = "1";
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.batchCopyProcessDefinition(user, projectName, id, targetProjectId)).thenReturn(result);
Result response = processDefinitionController.copyProcessDefinition(user, projectName, id, targetProjectId);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testBatchMoveProcessDefinition() throws Exception {
String projectName = "test";
int targetProjectId = 2;
String id = "1";
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.batchMoveProcessDefinition(user, projectName, id, targetProjectId)).thenReturn(result);
Result response = processDefinitionController.moveProcessDefinition(user, projectName, id, targetProjectId);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testQueryProcessDefinitionList() throws Exception {
String projectName = "test";
List<ProcessDefinition> resourceList = getDefinitionList();
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, resourceList);
Mockito.when(processDefinitionService.queryProcessDefinitionList(user, projectName)).thenReturn(result);
Result response = processDefinitionController.queryProcessDefinitionList(user, projectName);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
public List<ProcessDefinition> getDefinitionList() {
List<ProcessDefinition> resourceList = new ArrayList<>();
String json = "{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-36196\",\"name\":\"ssh_test1"
+ "\",\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"aa=\\\"1234\\\"\\necho ${aa}"
+ "\"},\"desc\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\",\"retryInterval"
+ "\":\"1\",\"timeout\":{\"strategy\":\"\",\"interval\":null,\"enable\":false},\"taskInstancePriority\""
+ ":\"MEDIUM\",\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":-1,\"timeout\":0}";
String locations = "{\"tasks-36196\":{\"name\":\"ssh_test1\",\"targetarr\":\"\",\"x\":141,\"y\":70}}";
String projectName = "test";
String name = "dag_test";
String description = "desc test";
String connects = "[]";
int id = 1;
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setProjectName(projectName);
processDefinition.setConnects(connects);
processDefinition.setDescription(description);
processDefinition.setId(id);
processDefinition.setLocations(locations);
processDefinition.setName(name);
processDefinition.setProcessDefinitionJson(json);
String name2 = "dag_test";
int id2 = 2;
ProcessDefinition processDefinition2 = new ProcessDefinition();
processDefinition2.setProjectName(projectName);
processDefinition2.setConnects(connects);
processDefinition2.setDescription(description);
processDefinition2.setId(id2);
processDefinition2.setLocations(locations);
processDefinition2.setName(name2);
processDefinition2.setProcessDefinitionJson(json);
resourceList.add(processDefinition);
resourceList.add(processDefinition2);
return resourceList;
}
@Test
public void testDeleteProcessDefinitionById() throws Exception {
String projectName = "test";
int id = 1;
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.deleteProcessDefinitionById(user, projectName, id)).thenReturn(result);
Result response = processDefinitionController.deleteProcessDefinitionById(user, projectName, id);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testGetNodeListByDefinitionId() throws Exception {
String projectName = "test";
int id = 1;
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.getTaskNodeListByDefinitionId(id)).thenReturn(result);
Result response = processDefinitionController.getNodeListByDefinitionId(user, projectName, id);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testGetNodeListByDefinitionIdList() throws Exception {
String projectName = "test";
String idList = "1,2,3";
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.getTaskNodeListByDefinitionIdList(idList)).thenReturn(result);
Result response = processDefinitionController.getNodeListByDefinitionIdList(user, projectName, idList);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testQueryProcessDefinitionAllByProjectId() throws Exception {
int projectId = 1;
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.queryProcessDefinitionAllByProjectId(projectId)).thenReturn(result);
Result response = processDefinitionController.queryProcessDefinitionAllByProjectId(user, projectId);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testViewTree() throws Exception {
String projectName = "test";
int processId = 1;
int limit = 2;
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
Mockito.when(processDefinitionService.viewTree(processId, limit)).thenReturn(result);
Result response = processDefinitionController.viewTree(user, projectName, processId, limit);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testQueryProcessDefinitionListPaging() throws Exception {
String projectName = "test";
int pageNo = 1;
int pageSize = 10;
String searchVal = "";
int userId = 1;
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
result.put(Constants.DATA_LIST, new PageInfo<Resource>(1, 10));
Mockito.when(processDefinitionService.queryProcessDefinitionListPaging(user, projectName, searchVal, pageNo, pageSize, userId)).thenReturn(result);
Result response = processDefinitionController.queryProcessDefinitionListPaging(user, projectName, pageNo, searchVal, userId, pageSize);
Assert.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue());
}
@Test
public void testBatchExportProcessDefinitionByIds() throws Exception {
String processDefinitionIds = "1,2";
String projectName = "test";
HttpServletResponse response = new MockHttpServletResponse();
Mockito.doNothing().when(this.processDefinitionService).batchExportProcessDefinitionByIds(user, projectName, processDefinitionIds, response);
processDefinitionController.batchExportProcessDefinitionByIds(user, projectName, processDefinitionIds, response);
}
@Test
public void testQueryProcessDefinitionVersions() {
String projectName = "test";
Map<String, Object> resultMap = new HashMap<>();
putMsg(resultMap, Status.SUCCESS);
resultMap.put(Constants.DATA_LIST, new PageInfo<ProcessDefinitionVersion>(1, 10));
Mockito.when(processDefinitionVersionService.queryProcessDefinitionVersions(
user
, projectName
, 1
, 10
, 1))
.thenReturn(resultMap);
Result result = processDefinitionController.queryProcessDefinitionVersions(
user
, projectName
, 1
, 10
, 1);
Assert.assertEquals(Status.SUCCESS.getCode(), (int) result.getCode());
}
@Test
public void testSwitchProcessDefinitionVersion() {
String projectName = "test";
Map<String, Object> resultMap = new HashMap<>();
putMsg(resultMap, Status.SUCCESS);
Mockito.when(processDefinitionService.switchProcessDefinitionVersion(
user
, projectName
, 1
, 10))
.thenReturn(resultMap);
Result result = processDefinitionController.switchProcessDefinitionVersion(
user
, projectName
, 1
, 10);
Assert.assertEquals(Status.SUCCESS.getCode(), (int) result.getCode());
}
@Test
public void testDeleteProcessDefinitionVersion() {
String projectName = "test";
Map<String, Object> resultMap = new HashMap<>();
putMsg(resultMap, Status.SUCCESS);
Mockito.when(processDefinitionVersionService.deleteByProcessDefinitionIdAndVersion(
user
, projectName
, 1
, 10))
.thenReturn(resultMap);
Result result = processDefinitionController.deleteProcessDefinitionVersion(
user
, projectName
, 1
, 10);
Assert.assertEquals(Status.SUCCESS.getCode(), (int) result.getCode());
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,836 |
[Bug][API] verifyProcessDefinitionName error message
|

|
https://github.com/apache/dolphinscheduler/issues/3836
|
https://github.com/apache/dolphinscheduler/pull/3908
|
d32300ba5b33ec17092ae3ba7dd6502f0f709554
|
13030502fd27863827ce9a2e3ec905c5a359170b
| 2020-09-28T03:21:57Z |
java
| 2020-10-15T06:09:28Z |
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import static org.assertj.core.api.Assertions.assertThat;
import org.apache.dolphinscheduler.api.dto.ProcessMeta;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl;
import org.apache.dolphinscheduler.api.service.impl.ProjectServiceImpl;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.shell.ShellParameters;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.http.entity.ContentType;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.Method;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.mock.web.MockMultipartFile;
import org.springframework.util.ReflectionUtils;
import org.springframework.web.multipart.MultipartFile;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
@RunWith(MockitoJUnitRunner.class)
public class ProcessDefinitionServiceTest {
@InjectMocks
private ProcessDefinitionServiceImpl processDefinitionService;
@Mock
private ProcessDefinitionMapper processDefineMapper;
@Mock
private ProjectMapper projectMapper;
@Mock
private ProjectServiceImpl projectService;
@Mock
private ScheduleMapper scheduleMapper;
@Mock
private ProcessService processService;
@Mock
private ProcessInstanceService processInstanceService;
@Mock
private TaskInstanceMapper taskInstanceMapper;
@Mock
private ProcessDefinitionVersionService processDefinitionVersionService;
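// process definition JSON with a single shell task, used as the default fixture in these tests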
private static final String SHELL_JSON = "{\n"
+ " \"globalParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"tasks\": [\n"
+ " {\n"
+ " \"type\": \"SHELL\",\n"
+ " \"id\": \"tasks-9527\",\n"
+ " \"name\": \"shell-1\",\n"
+ " \"params\": {\n"
+ " \"resourceList\": [\n"
+ " \n"
+ " ],\n"
+ " \"localParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"rawScript\": \"#!/bin/bash\\necho \\\"shell-1\\\"\"\n"
+ " },\n"
+ " \"description\": \"\",\n"
+ " \"runFlag\": \"NORMAL\",\n"
+ " \"dependence\": {\n"
+ " \n"
+ " },\n"
+ " \"maxRetryTimes\": \"0\",\n"
+ " \"retryInterval\": \"1\",\n"
+ " \"timeout\": {\n"
+ " \"strategy\": \"\",\n"
+ " \"interval\": 1,\n"
+ " \"enable\": false\n"
+ " },\n"
+ " \"taskInstancePriority\": \"MEDIUM\",\n"
+ " \"workerGroupId\": -1,\n"
+ " \"preTasks\": [\n"
+ " \n"
+ " ]\n"
+ " }\n"
+ " ],\n"
+ " \"tenantId\": 1,\n"
+ " \"timeout\": 0\n"
+ "}";
private static final String CYCLE_SHELL_JSON = "{\n"
+ " \"globalParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"tasks\": [\n"
+ " {\n"
+ " \"type\": \"SHELL\",\n"
+ " \"id\": \"tasks-9527\",\n"
+ " \"name\": \"shell-1\",\n"
+ " \"params\": {\n"
+ " \"resourceList\": [\n"
+ " \n"
+ " ],\n"
+ " \"localParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"rawScript\": \"#!/bin/bash\\necho \\\"shell-1\\\"\"\n"
+ " },\n"
+ " \"description\": \"\",\n"
+ " \"runFlag\": \"NORMAL\",\n"
+ " \"dependence\": {\n"
+ " \n"
+ " },\n"
+ " \"maxRetryTimes\": \"0\",\n"
+ " \"retryInterval\": \"1\",\n"
+ " \"timeout\": {\n"
+ " \"strategy\": \"\",\n"
+ " \"interval\": 1,\n"
+ " \"enable\": false\n"
+ " },\n"
+ " \"taskInstancePriority\": \"MEDIUM\",\n"
+ " \"workerGroupId\": -1,\n"
+ " \"preTasks\": [\n"
+ " \"tasks-9529\"\n"
+ " ]\n"
+ " },\n"
+ " {\n"
+ " \"type\": \"SHELL\",\n"
+ " \"id\": \"tasks-9528\",\n"
+ " \"name\": \"shell-1\",\n"
+ " \"params\": {\n"
+ " \"resourceList\": [\n"
+ " \n"
+ " ],\n"
+ " \"localParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"rawScript\": \"#!/bin/bash\\necho \\\"shell-1\\\"\"\n"
+ " },\n"
+ " \"description\": \"\",\n"
+ " \"runFlag\": \"NORMAL\",\n"
+ " \"dependence\": {\n"
+ " \n"
+ " },\n"
+ " \"maxRetryTimes\": \"0\",\n"
+ " \"retryInterval\": \"1\",\n"
+ " \"timeout\": {\n"
+ " \"strategy\": \"\",\n"
+ " \"interval\": 1,\n"
+ " \"enable\": false\n"
+ " },\n"
+ " \"taskInstancePriority\": \"MEDIUM\",\n"
+ " \"workerGroupId\": -1,\n"
+ " \"preTasks\": [\n"
+ " \"tasks-9527\"\n"
+ " ]\n"
+ " },\n"
+ " {\n"
+ " \"type\": \"SHELL\",\n"
+ " \"id\": \"tasks-9529\",\n"
+ " \"name\": \"shell-1\",\n"
+ " \"params\": {\n"
+ " \"resourceList\": [\n"
+ " \n"
+ " ],\n"
+ " \"localParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"rawScript\": \"#!/bin/bash\\necho \\\"shell-1\\\"\"\n"
+ " },\n"
+ " \"description\": \"\",\n"
+ " \"runFlag\": \"NORMAL\",\n"
+ " \"dependence\": {\n"
+ " \n"
+ " },\n"
+ " \"maxRetryTimes\": \"0\",\n"
+ " \"retryInterval\": \"1\",\n"
+ " \"timeout\": {\n"
+ " \"strategy\": \"\",\n"
+ " \"interval\": 1,\n"
+ " \"enable\": false\n"
+ " },\n"
+ " \"taskInstancePriority\": \"MEDIUM\",\n"
+ " \"workerGroupId\": -1,\n"
+ " \"preTasks\": [\n"
+ " \"tasks-9528\"\n"
+ " ]\n"
+ " }\n"
+ " ],\n"
+ " \"tenantId\": 1,\n"
+ " \"timeout\": 0\n"
+ "}";
@Test
public void testQueryProcessDefinitionList() {
String projectName = "project_test1";
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Project project = getProject(projectName);
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
//project not found
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Map<String, Object> map = processDefinitionService.queryProcessDefinitionList(loginUser, "project_test1");
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));
//project check auth success
putMsg(result, Status.SUCCESS, projectName);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
List<ProcessDefinition> resourceList = new ArrayList<>();
resourceList.add(getProcessDefinition());
Mockito.when(processDefineMapper.queryAllDefinitionList(project.getId())).thenReturn(resourceList);
Map<String, Object> checkSuccessRes = processDefinitionService.queryProcessDefinitionList(loginUser, "project_test1");
Assert.assertEquals(Status.SUCCESS, checkSuccessRes.get(Constants.STATUS));
}
@Test
@SuppressWarnings("unchecked")
public void testQueryProcessDefinitionListPaging() {
String projectName = "project_test1";
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Project project = getProject(projectName);
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
//project not found
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Map<String, Object> map = processDefinitionService.queryProcessDefinitionListPaging(loginUser, "project_test1", "", 1, 5, 0);
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));
putMsg(result, Status.SUCCESS, projectName);
loginUser.setId(1);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Page<ProcessDefinition> page = new Page<>(1, 10);
page.setTotal(30);
Mockito.when(processDefineMapper.queryDefineListPaging(
Mockito.any(IPage.class)
, Mockito.eq("")
, Mockito.eq(loginUser.getId())
, Mockito.eq(project.getId())
, Mockito.anyBoolean())).thenReturn(page);
Map<String, Object> map1 = processDefinitionService.queryProcessDefinitionListPaging(
loginUser, projectName, "", 1, 10, loginUser.getId());
Assert.assertEquals(Status.SUCCESS, map1.get(Constants.STATUS));
}
@Test
public void testQueryProcessDefinitionById() {
String projectName = "project_test1";
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Project project = getProject(projectName);
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
//project check auth fail
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Map<String, Object> map = processDefinitionService.queryProcessDefinitionById(loginUser,
"project_test1", 1);
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));
//project check auth success, instance not exist
putMsg(result, Status.SUCCESS, projectName);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Mockito.when(processDefineMapper.selectById(1)).thenReturn(null);
Map<String, Object> instanceNotexitRes = processDefinitionService.queryProcessDefinitionById(loginUser,
"project_test1", 1);
Assert.assertEquals(Status.PROCESS_INSTANCE_NOT_EXIST, instanceNotexitRes.get(Constants.STATUS));
//instance exists
Mockito.when(processDefineMapper.selectById(46)).thenReturn(getProcessDefinition());
Map<String, Object> successRes = processDefinitionService.queryProcessDefinitionById(loginUser,
"project_test1", 46);
Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
}
@Test
public void testBatchCopyProcessDefinition() {
String projectName = "project_test1";
Project project = getProject(projectName);
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
// copy process definition ids empty test
Map<String, Object> map = processDefinitionService.batchCopyProcessDefinition(loginUser, projectName, StringUtils.EMPTY, 0);
Assert.assertEquals(Status.PROCESS_DEFINITION_IDS_IS_EMPTY, map.get(Constants.STATUS));
Map<String, Object> result = new HashMap<>();
// project check auth fail
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Map<String, Object> map1 = processDefinitionService.batchCopyProcessDefinition(
loginUser, projectName, String.valueOf(project.getId()), 0);
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map1.get(Constants.STATUS));
// project check auth success, target project is null
putMsg(result, Status.SUCCESS, projectName);
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Mockito.when(projectMapper.queryDetailById(0)).thenReturn(null);
Map<String, Object> map2 = processDefinitionService.batchCopyProcessDefinition(
loginUser, projectName, String.valueOf(project.getId()), 0);
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map2.get(Constants.STATUS));
// project check auth success, target project name not equal project name, check auth target project fail
Project project1 = getProject(projectName);
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(project1);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
putMsg(result, Status.SUCCESS, projectName);
String projectName2 = "project_test2";
Project project2 = getProject(projectName2);
Mockito.when(projectMapper.queryByName(projectName2)).thenReturn(project2);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project2, projectName2)).thenReturn(result);
Mockito.when(projectMapper.queryDetailById(1)).thenReturn(project2);
// instance exists
ProcessDefinition definition = getProcessDefinition();
definition.setLocations("{\"tasks-36196\":{\"name\":\"ssh_test1\",\"targetarr\":\"\",\"x\":141,\"y\":70}}");
definition.setProcessDefinitionJson("{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-36196\","
+ "\"name\":\"ssh_test1\",\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"aa=\\\"1234"
+ "\\\"\\necho ${aa}\"},\"desc\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\","
+ "\"retryInterval\":\"1\",\"timeout\":{\"strategy\":\"\",\"interval\":null,\"enable\":false},"
+ "\"taskInstancePriority\":\"MEDIUM\",\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":-1,\"timeout\":0}");
definition.setConnects("[]");
Mockito.when(processDefineMapper.selectById(46)).thenReturn(definition);
Map<String, Object> map3 = processDefinitionService.batchCopyProcessDefinition(
loginUser, projectName, "46", 1);
Assert.assertEquals(Status.SUCCESS, map3.get(Constants.STATUS));
}
@Test
public void testBatchMoveProcessDefinition() {
String projectName = "project_test1";
Project project1 = getProject(projectName);
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(project1);
String projectName2 = "project_test2";
Project project2 = getProject(projectName2);
Mockito.when(projectMapper.queryByName(projectName2)).thenReturn(project2);
int targetProjectId = 2;
Mockito.when(projectMapper.queryDetailById(targetProjectId)).thenReturn(getProjectById(targetProjectId));
Project project = getProject(projectName);
Project targetProject = getProjectById(targetProjectId);
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS, projectName);
Map<String, Object> result2 = new HashMap<>();
putMsg(result2, Status.SUCCESS, targetProject.getName());
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project2, projectName2)).thenReturn(result);
ProcessDefinition definition = getProcessDefinition();
definition.setLocations("{\"tasks-36196\":{\"name\":\"ssh_test1\",\"targetarr\":\"\",\"x\":141,\"y\":70}}");
definition.setProcessDefinitionJson("{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-36196\""
+ ",\"name\":\"ssh_test1\",\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"aa=\\\"1234"
+ "\\\"\\necho ${aa}\"},\"desc\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\","
+ "\"retryInterval\":\"1\",\"timeout\":{\"strategy\":\"\",\"interval\":null,\"enable\":false},"
+ "\"taskInstancePriority\":\"MEDIUM\",\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":-1,\"timeout\":0}");
definition.setConnects("[]");
// check target project result == null
Mockito.when(processDefineMapper.updateById(definition)).thenReturn(46);
Mockito.when(processDefineMapper.selectById(46)).thenReturn(definition);
putMsg(result, Status.SUCCESS);
Map<String, Object> successRes = processDefinitionService.batchMoveProcessDefinition(
loginUser, "project_test1", "46", 2);
Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
}
@Test
public void deleteProcessDefinitionByIdTest() {
String projectName = "project_test1";
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Project project = getProject(projectName);
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
//project check auth fail
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Map<String, Object> map = processDefinitionService.deleteProcessDefinitionById(loginUser, "project_test1", 6);
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));
//project check auth success, instance not exist
putMsg(result, Status.SUCCESS, projectName);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Mockito.when(processDefineMapper.selectById(1)).thenReturn(null);
Map<String, Object> instanceNotexitRes = processDefinitionService.deleteProcessDefinitionById(loginUser,
"project_test1", 1);
Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, instanceNotexitRes.get(Constants.STATUS));
ProcessDefinition processDefinition = getProcessDefinition();
//user no auth
loginUser.setUserType(UserType.GENERAL_USER);
Mockito.when(processDefineMapper.selectById(46)).thenReturn(processDefinition);
Map<String, Object> userNoAuthRes = processDefinitionService.deleteProcessDefinitionById(loginUser,
"project_test1", 46);
Assert.assertEquals(Status.USER_NO_OPERATION_PERM, userNoAuthRes.get(Constants.STATUS));
//process definition online
loginUser.setUserType(UserType.ADMIN_USER);
processDefinition.setReleaseState(ReleaseState.ONLINE);
Mockito.when(processDefineMapper.selectById(46)).thenReturn(processDefinition);
Map<String, Object> dfOnlineRes = processDefinitionService.deleteProcessDefinitionById(loginUser,
"project_test1", 46);
Assert.assertEquals(Status.PROCESS_DEFINE_STATE_ONLINE, dfOnlineRes.get(Constants.STATUS));
//scheduler list elements > 1
processDefinition.setReleaseState(ReleaseState.OFFLINE);
Mockito.when(processDefineMapper.selectById(46)).thenReturn(processDefinition);
List<Schedule> schedules = new ArrayList<>();
schedules.add(getSchedule());
schedules.add(getSchedule());
Mockito.when(scheduleMapper.queryByProcessDefinitionId(46)).thenReturn(schedules);
Map<String, Object> schedulerGreaterThanOneRes = processDefinitionService.deleteProcessDefinitionById(loginUser,
"project_test1", 46);
Assert.assertEquals(Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR, schedulerGreaterThanOneRes.get(Constants.STATUS));
//scheduler online
schedules.clear();
Schedule schedule = getSchedule();
schedule.setReleaseState(ReleaseState.ONLINE);
schedules.add(schedule);
Mockito.when(scheduleMapper.queryByProcessDefinitionId(46)).thenReturn(schedules);
Map<String, Object> schedulerOnlineRes = processDefinitionService.deleteProcessDefinitionById(loginUser,
"project_test1", 46);
Assert.assertEquals(Status.SCHEDULE_CRON_STATE_ONLINE, schedulerOnlineRes.get(Constants.STATUS));
//delete fail
schedules.clear();
schedule.setReleaseState(ReleaseState.OFFLINE);
schedules.add(schedule);
Mockito.when(scheduleMapper.queryByProcessDefinitionId(46)).thenReturn(schedules);
Mockito.when(processDefineMapper.deleteById(46)).thenReturn(0);
Map<String, Object> deleteFail = processDefinitionService.deleteProcessDefinitionById(loginUser,
"project_test1", 46);
Assert.assertEquals(Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR, deleteFail.get(Constants.STATUS));
//delete success
Mockito.when(processDefineMapper.deleteById(46)).thenReturn(1);
Map<String, Object> deleteSuccess = processDefinitionService.deleteProcessDefinitionById(loginUser,
"project_test1", 46);
Assert.assertEquals(Status.SUCCESS, deleteSuccess.get(Constants.STATUS));
}
@Test
public void testReleaseProcessDefinition() {
String projectName = "project_test1";
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Project project = getProject(projectName);
User loginUser = new User();
loginUser.setId(1);
loginUser.setUserType(UserType.GENERAL_USER);
//project check auth fail
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Map<String, Object> map = processDefinitionService.releaseProcessDefinition(loginUser, "project_test1",
6, ReleaseState.OFFLINE.getCode());
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));
// project check auth success, process definition online
putMsg(result, Status.SUCCESS, projectName);
Mockito.when(processDefineMapper.selectById(46)).thenReturn(getProcessDefinition());
Map<String, Object> onlineRes = processDefinitionService.releaseProcessDefinition(
loginUser, "project_test1", 46, ReleaseState.ONLINE.getCode());
Assert.assertEquals(Status.SUCCESS, onlineRes.get(Constants.STATUS));
// project check auth success, process definition online with resource ids
ProcessDefinition processDefinition1 = getProcessDefinition();
processDefinition1.setResourceIds("1,2");
Mockito.when(processDefineMapper.selectById(46)).thenReturn(processDefinition1);
Mockito.when(processService.getUserById(1)).thenReturn(loginUser);
Map<String, Object> onlineWithResourceRes = processDefinitionService.releaseProcessDefinition(
loginUser, "project_test1", 46, ReleaseState.ONLINE.getCode());
Assert.assertEquals(Status.SUCCESS, onlineWithResourceRes.get(Constants.STATUS));
// release error code
Map<String, Object> failRes = processDefinitionService.releaseProcessDefinition(
loginUser, "project_test1", 46, 2);
Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, failRes.get(Constants.STATUS));
//FIXME: the offline path exits with code 1 when an exception occurs, so the case below is left commented out
//process definition offline
// List<Schedule> schedules = new ArrayList<>();
// Schedule schedule = getSchedule();
// schedules.add(schedule);
// Mockito.when(scheduleMapper.selectAllByProcessDefineArray(new int[]{46})).thenReturn(schedules);
// Mockito.when(scheduleMapper.updateById(schedule)).thenReturn(1);
// Map<String, Object> offlineRes = processDefinitionService.releaseProcessDefinition(loginUser, "project_test1",
// 46, ReleaseState.OFFLINE.getCode());
// Assert.assertEquals(Status.SUCCESS, offlineRes.get(Constants.STATUS));
}
@Test
public void testVerifyProcessDefinitionName() {
String projectName = "project_test1";
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Project project = getProject(projectName);
User loginUser = new User();
loginUser.setId(-1);
loginUser.setUserType(UserType.GENERAL_USER);
//project check auth fail
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.PROJECT_NOT_FOUNT, projectName);
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Map<String, Object> map = processDefinitionService.verifyProcessDefinitionName(loginUser,
"project_test1", "test_pdf");
Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));
//project check auth success, process not exist
putMsg(result, Status.SUCCESS, projectName);
Mockito.when(processDefineMapper.verifyByDefineName(project.getId(), "test_pdf")).thenReturn(null);
Map<String, Object> processNotExistRes = processDefinitionService.verifyProcessDefinitionName(loginUser,
"project_test1", "test_pdf");
Assert.assertEquals(Status.SUCCESS, processNotExistRes.get(Constants.STATUS));
//process exist
Mockito.when(processDefineMapper.verifyByDefineName(project.getId(), "test_pdf")).thenReturn(getProcessDefinition());
Map<String, Object> processExistRes = processDefinitionService.verifyProcessDefinitionName(loginUser,
"project_test1", "test_pdf");
Assert.assertEquals(Status.PROCESS_INSTANCE_EXIST, processExistRes.get(Constants.STATUS));
}
@Test
public void testCheckProcessNodeList() {
Map<String, Object> dataNotValidRes = processDefinitionService.checkProcessNodeList(null, "");
Assert.assertEquals(Status.DATA_IS_NOT_VALID, dataNotValidRes.get(Constants.STATUS));
// task not empty
String processDefinitionJson = SHELL_JSON;
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
Assert.assertNotNull(processData);
        Map<String, Object> taskNotEmptyRes = processDefinitionService.checkProcessNodeList(processData, processDefinitionJson);
        Assert.assertEquals(Status.SUCCESS, taskNotEmptyRes.get(Constants.STATUS));
// task empty
processData.setTasks(null);
        Map<String, Object> taskEmptyRes = processDefinitionService.checkProcessNodeList(processData, processDefinitionJson);
        Assert.assertEquals(Status.DATA_IS_NULL, taskEmptyRes.get(Constants.STATUS));
// task cycle
String processDefinitionJsonCycle = CYCLE_SHELL_JSON;
ProcessData processDataCycle = JSONUtils.parseObject(processDefinitionJsonCycle, ProcessData.class);
Map<String, Object> taskCycleRes = processDefinitionService.checkProcessNodeList(processDataCycle, processDefinitionJsonCycle);
Assert.assertEquals(Status.PROCESS_NODE_HAS_CYCLE, taskCycleRes.get(Constants.STATUS));
//json abnormal
String abnormalJson = processDefinitionJson.replaceAll("SHELL", "");
processData = JSONUtils.parseObject(abnormalJson, ProcessData.class);
Map<String, Object> abnormalTaskRes = processDefinitionService.checkProcessNodeList(processData, abnormalJson);
Assert.assertEquals(Status.PROCESS_NODE_S_PARAMETER_INVALID, abnormalTaskRes.get(Constants.STATUS));
}
@Test
public void testGetTaskNodeListByDefinitionId() {
//process definition not exist
Mockito.when(processDefineMapper.selectById(46)).thenReturn(null);
Map<String, Object> processDefinitionNullRes = processDefinitionService.getTaskNodeListByDefinitionId(46);
Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionNullRes.get(Constants.STATUS));
//process data null
ProcessDefinition processDefinition = getProcessDefinition();
Mockito.when(processDefineMapper.selectById(46)).thenReturn(processDefinition);
        Map<String, Object> dataNotValidRes = processDefinitionService.getTaskNodeListByDefinitionId(46);
        Assert.assertEquals(Status.DATA_IS_NOT_VALID, dataNotValidRes.get(Constants.STATUS));
//success
processDefinition.setProcessDefinitionJson(SHELL_JSON);
Mockito.when(processDefineMapper.selectById(46)).thenReturn(processDefinition);
        Map<String, Object> successRes = processDefinitionService.getTaskNodeListByDefinitionId(46);
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
}
@Test
public void testGetTaskNodeListByDefinitionIdList() {
//process definition not exist
String defineIdList = "46";
Integer[] idArray = {46};
Mockito.when(processDefineMapper.queryDefinitionListByIdList(idArray)).thenReturn(null);
Map<String, Object> processNotExistRes = processDefinitionService.getTaskNodeListByDefinitionIdList(defineIdList);
Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, processNotExistRes.get(Constants.STATUS));
//process definition exist
ProcessDefinition processDefinition = getProcessDefinition();
processDefinition.setProcessDefinitionJson(SHELL_JSON);
List<ProcessDefinition> processDefinitionList = new ArrayList<>();
processDefinitionList.add(processDefinition);
Mockito.when(processDefineMapper.queryDefinitionListByIdList(idArray)).thenReturn(processDefinitionList);
Map<String, Object> successRes = processDefinitionService.getTaskNodeListByDefinitionIdList(defineIdList);
Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
}
@Test
public void testQueryProcessDefinitionAllByProjectId() {
int projectId = 1;
ProcessDefinition processDefinition = getProcessDefinition();
processDefinition.setProcessDefinitionJson(SHELL_JSON);
List<ProcessDefinition> processDefinitionList = new ArrayList<>();
processDefinitionList.add(processDefinition);
Mockito.when(processDefineMapper.queryAllDefinitionList(projectId)).thenReturn(processDefinitionList);
Map<String, Object> successRes = processDefinitionService.queryProcessDefinitionAllByProjectId(projectId);
Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
}
@Test
public void testViewTree() throws Exception {
//process definition not exist
ProcessDefinition processDefinition = getProcessDefinition();
processDefinition.setProcessDefinitionJson(SHELL_JSON);
Mockito.when(processDefineMapper.selectById(46)).thenReturn(null);
Map<String, Object> processDefinitionNullRes = processDefinitionService.viewTree(46, 10);
Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionNullRes.get(Constants.STATUS));
List<ProcessInstance> processInstanceList = new ArrayList<>();
ProcessInstance processInstance = new ProcessInstance();
processInstance.setId(1);
processInstance.setName("test_instance");
processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
processInstance.setHost("192.168.xx.xx");
processInstance.setStartTime(new Date());
processInstance.setEndTime(new Date());
processInstanceList.add(processInstance);
TaskInstance taskInstance = new TaskInstance();
taskInstance.setStartTime(new Date());
taskInstance.setEndTime(new Date());
taskInstance.setTaskType("SHELL");
taskInstance.setId(1);
taskInstance.setName("test_task_instance");
taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
taskInstance.setHost("192.168.xx.xx");
//task instance not exist
Mockito.when(processDefineMapper.selectById(46)).thenReturn(processDefinition);
Mockito.when(processInstanceService.queryByProcessDefineId(46, 10)).thenReturn(processInstanceList);
Mockito.when(taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), "shell-1")).thenReturn(null);
Map<String, Object> taskNullRes = processDefinitionService.viewTree(46, 10);
Assert.assertEquals(Status.SUCCESS, taskNullRes.get(Constants.STATUS));
//task instance exist
Mockito.when(taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), "shell-1")).thenReturn(taskInstance);
        Map<String, Object> taskNotNullRes = processDefinitionService.viewTree(46, 10);
        Assert.assertEquals(Status.SUCCESS, taskNotNullRes.get(Constants.STATUS));
}
@Test
public void testImportProcessDefinitionById() throws IOException {
String processJson = "[\n"
+ " {\n"
+ " \"projectName\": \"testProject\",\n"
+ " \"processDefinitionName\": \"shell-4\",\n"
+ " \"processDefinitionJson\": \"{\\\"tenantId\\\":1"
+ ",\\\"globalParams\\\":[],\\\"tasks\\\":[{\\\"workerGroupId\\\":\\\"3\\\",\\\"description\\\""
+ ":\\\"\\\",\\\"runFlag\\\":\\\"NORMAL\\\",\\\"type\\\":\\\"SHELL\\\",\\\"params\\\":{\\\"rawScript\\\""
+ ":\\\"#!/bin/bash\\\\necho \\\\\\\"shell-4\\\\\\\"\\\",\\\"localParams\\\":[],\\\"resourceList\\\":[]}"
+ ",\\\"timeout\\\":{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"},\\\"maxRetryTimes\\\":\\\"0\\\""
+ ",\\\"taskInstancePriority\\\":\\\"MEDIUM\\\",\\\"name\\\":\\\"shell-4\\\",\\\"dependence\\\":{}"
+ ",\\\"retryInterval\\\":\\\"1\\\",\\\"preTasks\\\":[],\\\"id\\\":\\\"tasks-84090\\\"}"
+ ",{\\\"taskInstancePriority\\\":\\\"MEDIUM\\\",\\\"name\\\":\\\"shell-5\\\",\\\"workerGroupId\\\""
+ ":\\\"3\\\",\\\"description\\\":\\\"\\\",\\\"dependence\\\":{},\\\"preTasks\\\":[\\\"shell-4\\\"]"
+ ",\\\"id\\\":\\\"tasks-87364\\\",\\\"runFlag\\\":\\\"NORMAL\\\",\\\"type\\\":\\\"SUB_PROCESS\\\""
+ ",\\\"params\\\":{\\\"processDefinitionId\\\":46},\\\"timeout\\\":{\\\"enable\\\":false"
+ ",\\\"strategy\\\":\\\"\\\"}}],\\\"timeout\\\":0}\",\n"
+ " \"processDefinitionDescription\": \"\",\n"
+ " \"processDefinitionLocations\": \"{\\\"tasks-84090\\\":{\\\"name\\\":\\\"shell-4\\\""
+ ",\\\"targetarr\\\":\\\"\\\",\\\"x\\\":128,\\\"y\\\":114},\\\"tasks-87364\\\":{\\\"name\\\""
+ ":\\\"shell-5\\\",\\\"targetarr\\\":\\\"tasks-84090\\\",\\\"x\\\":266,\\\"y\\\":115}}\",\n"
+ " \"processDefinitionConnects\": \"[{\\\"endPointSourceId\\\":\\\"tasks-84090\\\""
+ ",\\\"endPointTargetId\\\":\\\"tasks-87364\\\"}]\"\n"
+ " }\n"
+ "]";
String subProcessJson = "{\n"
+ " \"globalParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"tasks\": [\n"
+ " {\n"
+ " \"type\": \"SHELL\",\n"
+ " \"id\": \"tasks-52423\",\n"
+ " \"name\": \"shell-5\",\n"
+ " \"params\": {\n"
+ " \"resourceList\": [\n"
+ " \n"
+ " ],\n"
+ " \"localParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"rawScript\": \"echo \\\"shell-5\\\"\"\n"
+ " },\n"
+ " \"description\": \"\",\n"
+ " \"runFlag\": \"NORMAL\",\n"
+ " \"dependence\": {\n"
+ " \n"
+ " },\n"
+ " \"maxRetryTimes\": \"0\",\n"
+ " \"retryInterval\": \"1\",\n"
+ " \"timeout\": {\n"
+ " \"strategy\": \"\",\n"
+ " \"interval\": null,\n"
+ " \"enable\": false\n"
+ " },\n"
+ " \"taskInstancePriority\": \"MEDIUM\",\n"
+ " \"workerGroupId\": \"3\",\n"
+ " \"preTasks\": [\n"
+ " \n"
+ " ]\n"
+ " }\n"
+ " ],\n"
+ " \"tenantId\": 1,\n"
+ " \"timeout\": 0\n"
+ "}";
FileUtils.writeStringToFile(new File("/tmp/task.json"), processJson);
File file = new File("/tmp/task.json");
FileInputStream fileInputStream = new FileInputStream("/tmp/task.json");
MultipartFile multipartFile = new MockMultipartFile(file.getName(), file.getName(),
ContentType.APPLICATION_OCTET_STREAM.toString(), fileInputStream);
User loginUser = new User();
loginUser.setId(1);
loginUser.setUserType(UserType.ADMIN_USER);
String currentProjectName = "testProject";
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS, currentProjectName);
ProcessDefinition shellDefinition2 = new ProcessDefinition();
shellDefinition2.setId(46);
shellDefinition2.setName("shell-5");
shellDefinition2.setProjectId(2);
shellDefinition2.setProcessDefinitionJson(subProcessJson);
Mockito.when(projectMapper.queryByName(currentProjectName)).thenReturn(getProject(currentProjectName));
Mockito.when(projectService.checkProjectAndAuth(loginUser, getProject(currentProjectName), currentProjectName)).thenReturn(result);
Mockito.when(processDefineMapper.queryByDefineId(46)).thenReturn(shellDefinition2);
Map<String, Object> importProcessResult = processDefinitionService.importProcessDefinition(loginUser, multipartFile, currentProjectName);
Assert.assertEquals(Status.SUCCESS, importProcessResult.get(Constants.STATUS));
boolean delete = file.delete();
Assert.assertTrue(delete);
}
@Test
public void testUpdateProcessDefinition() {
User loginUser = new User();
loginUser.setId(1);
loginUser.setUserType(UserType.ADMIN_USER);
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.SUCCESS);
String projectName = "project_test1";
Project project = getProject(projectName);
ProcessDefinition processDefinition = getProcessDefinition();
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
Mockito.when(processService.findProcessDefineById(1)).thenReturn(processDefinition);
Mockito.when(processDefinitionVersionService.addProcessDefinitionVersion(processDefinition)).thenReturn(1L);
String sqlDependentJson = "{\n"
+ " \"globalParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"tasks\": [\n"
+ " {\n"
+ " \"type\": \"SQL\",\n"
+ " \"id\": \"tasks-27297\",\n"
+ " \"name\": \"sql\",\n"
+ " \"params\": {\n"
+ " \"type\": \"MYSQL\",\n"
+ " \"datasource\": 1,\n"
+ " \"sql\": \"select * from test\",\n"
+ " \"udfs\": \"\",\n"
+ " \"sqlType\": \"1\",\n"
+ " \"title\": \"\",\n"
+ " \"receivers\": \"\",\n"
+ " \"receiversCc\": \"\",\n"
+ " \"showType\": \"TABLE\",\n"
+ " \"localParams\": [\n"
+ " \n"
+ " ],\n"
+ " \"connParams\": \"\",\n"
+ " \"preStatements\": [\n"
+ " \n"
+ " ],\n"
+ " \"postStatements\": [\n"
+ " \n"
+ " ]\n"
+ " },\n"
+ " \"description\": \"\",\n"
+ " \"runFlag\": \"NORMAL\",\n"
+ " \"dependence\": {\n"
+ " \n"
+ " },\n"
+ " \"maxRetryTimes\": \"0\",\n"
+ " \"retryInterval\": \"1\",\n"
+ " \"timeout\": {\n"
+ " \"strategy\": \"\",\n"
+ " \"enable\": false\n"
+ " },\n"
+ " \"taskInstancePriority\": \"MEDIUM\",\n"
+ " \"workerGroupId\": -1,\n"
+ " \"preTasks\": [\n"
+ " \"dependent\"\n"
+ " ]\n"
+ " },\n"
+ " {\n"
+ " \"type\": \"DEPENDENT\",\n"
+ " \"id\": \"tasks-33787\",\n"
+ " \"name\": \"dependent\",\n"
+ " \"params\": {\n"
+ " \n"
+ " },\n"
+ " \"description\": \"\",\n"
+ " \"runFlag\": \"NORMAL\",\n"
+ " \"dependence\": {\n"
+ " \"relation\": \"AND\",\n"
+ " \"dependTaskList\": [\n"
+ " {\n"
+ " \"relation\": \"AND\",\n"
+ " \"dependItemList\": [\n"
+ " {\n"
+ " \"projectId\": 2,\n"
+ " \"definitionId\": 46,\n"
+ " \"depTasks\": \"ALL\",\n"
+ " \"cycle\": \"day\",\n"
+ " \"dateValue\": \"today\"\n"
+ " }\n"
+ " ]\n"
+ " }\n"
+ " ]\n"
+ " },\n"
+ " \"maxRetryTimes\": \"0\",\n"
+ " \"retryInterval\": \"1\",\n"
+ " \"timeout\": {\n"
+ " \"strategy\": \"\",\n"
+ " \"enable\": false\n"
+ " },\n"
+ " \"taskInstancePriority\": \"MEDIUM\",\n"
+ " \"workerGroupId\": -1,\n"
+ " \"preTasks\": [\n"
+ " \n"
+ " ]\n"
+ " }\n"
+ " ],\n"
+ " \"tenantId\": 1,\n"
+ " \"timeout\": 0\n"
+ "}";
Map<String, Object> updateResult = processDefinitionService.updateProcessDefinition(loginUser, projectName, 1, "test",
sqlDependentJson, "", "", "");
Assert.assertEquals(Status.UPDATE_PROCESS_DEFINITION_ERROR, updateResult.get(Constants.STATUS));
}
@Test
public void testBatchExportProcessDefinitionByIds() {
processDefinitionService.batchExportProcessDefinitionByIds(
null, null, null, null);
User loginUser = new User();
loginUser.setId(1);
loginUser.setUserType(UserType.ADMIN_USER);
String projectName = "project_test1";
Project project = getProject(projectName);
Map<String, Object> result = new HashMap<>();
putMsg(result, Status.PROJECT_NOT_FOUNT);
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectName)).thenReturn(result);
processDefinitionService.batchExportProcessDefinitionByIds(
loginUser, projectName, "1", null);
}
@Test
public void testGetResourceIds() throws Exception {
// set up
Method testMethod = ReflectionUtils.findMethod(ProcessDefinitionServiceImpl.class, "getResourceIds", ProcessData.class);
assertThat(testMethod).isNotNull();
testMethod.setAccessible(true);
// when processData has empty task, then return empty string
ProcessData input1 = new ProcessData();
input1.setTasks(Collections.emptyList());
String output1 = (String) testMethod.invoke(processDefinitionService, input1);
assertThat(output1).isEmpty();
// when task is null, then return empty string
ProcessData input2 = new ProcessData();
input2.setTasks(null);
String output2 = (String) testMethod.invoke(processDefinitionService, input2);
assertThat(output2).isEmpty();
// when task type is incorrect mapping, then return empty string
ProcessData input3 = new ProcessData();
TaskNode taskNode3 = new TaskNode();
taskNode3.setType("notExistType");
input3.setTasks(Collections.singletonList(taskNode3));
String output3 = (String) testMethod.invoke(processDefinitionService, input3);
assertThat(output3).isEmpty();
// when task parameter list is null, then return empty string
ProcessData input4 = new ProcessData();
TaskNode taskNode4 = new TaskNode();
taskNode4.setType("SHELL");
taskNode4.setParams(null);
input4.setTasks(Collections.singletonList(taskNode4));
String output4 = (String) testMethod.invoke(processDefinitionService, input4);
assertThat(output4).isEmpty();
// when resource id list is 0 1, then return 0,1
ProcessData input5 = new ProcessData();
TaskNode taskNode5 = new TaskNode();
taskNode5.setType("SHELL");
ShellParameters shellParameters5 = new ShellParameters();
ResourceInfo resourceInfo5A = new ResourceInfo();
resourceInfo5A.setId(0);
ResourceInfo resourceInfo5B = new ResourceInfo();
resourceInfo5B.setId(1);
shellParameters5.setResourceList(Arrays.asList(resourceInfo5A, resourceInfo5B));
taskNode5.setParams(JSONUtils.toJsonString(shellParameters5));
input5.setTasks(Collections.singletonList(taskNode5));
String output5 = (String) testMethod.invoke(processDefinitionService, input5);
assertThat(output5.split(",")).hasSize(2)
.containsExactlyInAnyOrder("0", "1");
// when resource id list is 0 1 1 2, then return 0,1,2
ProcessData input6 = new ProcessData();
TaskNode taskNode6 = new TaskNode();
taskNode6.setType("SHELL");
ShellParameters shellParameters6 = new ShellParameters();
ResourceInfo resourceInfo6A = new ResourceInfo();
resourceInfo6A.setId(0);
ResourceInfo resourceInfo6B = new ResourceInfo();
resourceInfo6B.setId(1);
ResourceInfo resourceInfo6C = new ResourceInfo();
resourceInfo6C.setId(1);
ResourceInfo resourceInfo6D = new ResourceInfo();
resourceInfo6D.setId(2);
shellParameters6.setResourceList(Arrays.asList(resourceInfo6A, resourceInfo6B, resourceInfo6C, resourceInfo6D));
taskNode6.setParams(JSONUtils.toJsonString(shellParameters6));
input6.setTasks(Collections.singletonList(taskNode6));
String output6 = (String) testMethod.invoke(processDefinitionService, input6);
assertThat(output6.split(",")).hasSize(3)
.containsExactlyInAnyOrder("0", "1", "2");
}
/**
* get mock datasource
*
* @return DataSource
*/
private DataSource getDataSource() {
DataSource dataSource = new DataSource();
dataSource.setId(2);
dataSource.setName("test");
return dataSource;
}
/**
* get mock processDefinition
*
* @return ProcessDefinition
*/
private ProcessDefinition getProcessDefinition() {
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setId(46);
processDefinition.setName("test_pdf");
processDefinition.setProjectId(2);
processDefinition.setTenantId(1);
processDefinition.setDescription("");
return processDefinition;
}
/**
* get mock Project
*
* @param projectName projectName
* @return Project
*/
private Project getProject(String projectName) {
Project project = new Project();
project.setId(1);
project.setName(projectName);
project.setUserId(1);
return project;
}
/**
* get mock Project
*
* @param projectId projectId
* @return Project
*/
private Project getProjectById(int projectId) {
Project project = new Project();
project.setId(projectId);
project.setName("project_test2");
project.setUserId(1);
return project;
}
/**
* get mock schedule
*
* @return schedule
*/
private Schedule getSchedule() {
Date date = new Date();
Schedule schedule = new Schedule();
schedule.setId(46);
schedule.setProcessDefinitionId(1);
schedule.setStartTime(date);
schedule.setEndTime(date);
schedule.setCrontab("0 0 5 * * ? *");
schedule.setFailureStrategy(FailureStrategy.END);
schedule.setUserId(1);
schedule.setReleaseState(ReleaseState.OFFLINE);
schedule.setProcessInstancePriority(Priority.MEDIUM);
schedule.setWarningType(WarningType.NONE);
schedule.setWarningGroupId(1);
schedule.setWorkerGroup(Constants.DEFAULT_WORKER_GROUP);
return schedule;
}
/**
* get mock processMeta
*
* @return processMeta
*/
private ProcessMeta getProcessMeta() {
ProcessMeta processMeta = new ProcessMeta();
Schedule schedule = getSchedule();
processMeta.setScheduleCrontab(schedule.getCrontab());
processMeta.setScheduleStartTime(DateUtils.dateToString(schedule.getStartTime()));
processMeta.setScheduleEndTime(DateUtils.dateToString(schedule.getEndTime()));
processMeta.setScheduleWarningType(String.valueOf(schedule.getWarningType()));
processMeta.setScheduleWarningGroupId(schedule.getWarningGroupId());
processMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy()));
processMeta.setScheduleReleaseState(String.valueOf(schedule.getReleaseState()));
processMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority()));
processMeta.setScheduleWorkerGroupName("workgroup1");
return processMeta;
}
private List<Schedule> getSchedulerList() {
List<Schedule> scheduleList = new ArrayList<>();
scheduleList.add(getSchedule());
return scheduleList;
}
private void putMsg(Map<String, Object> result, Status status, Object... statusParams) {
result.put(Constants.STATUS, status);
if (statusParams != null && statusParams.length > 0) {
result.put(Constants.MSG, MessageFormat.format(status.getMsg(), statusParams));
} else {
result.put(Constants.MSG, status.getMsg());
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,823 |
[Bug][ui] DAG formatting leads to the loss of pre-task
|
**Describe the bug**
DAG formatting leads to the loss of pre-task
## before

## after

**To Reproduce**
Steps to reproduce the behavior, for example:
1. Go to 'process definition'
2. Edit 'process definition'
3. Click on 'Format DAG'
4. Edit 'node settings'
5. See 'Pre tasks'
**Which version of Dolphin Scheduler:**
-[dev]
|
https://github.com/apache/dolphinscheduler/issues/3823
|
https://github.com/apache/dolphinscheduler/pull/3824
|
62fb2fd19b769c2fa4fa68a32675891104d3608c
|
60bf3f851102f1e259a6fa65ee642a0e669b188b
| 2020-09-25T09:45:19Z |
java
| 2020-10-16T02:15:02Z |
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.js
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import Vue from 'vue'
import _ from 'lodash'
import i18n from '@/module/i18n'
import { jsPlumb } from 'jsplumb'
import JSP from './plugIn/jsPlumbHandle'
import DownChart from './plugIn/downChart'
import store from '@/conf/home/store'
import dagre from 'dagre'
let v = new Vue()
/**
* Prototype method
*/
const Dag = function () {
this.dag = {}
this.instance = {}
}
/**
* init
* @dag dag vue instance
*/
Dag.prototype.init = function ({ dag, instance }) {
this.dag = dag
this.instance = instance
}
/**
* set init config
*/
Dag.prototype.setConfig = function (o) {
JSP.setConfig(o)
}
/**
* create dag
*/
Dag.prototype.create = function () {
const self = this
jsPlumb.ready(() => {
JSP.init({
dag: this.dag,
instance: this.instance,
options: {
onRemoveNodes ($id) {
self.dag.removeEventModelById($id)
}
}
})
// init event
JSP.handleEvent()
// init draggable
JSP.draggable()
})
}
/**
* Action event on the right side of the toolbar
*/
Dag.prototype.toolbarEvent = function ({ item, code, is }) {
let self = this
switch (code) {
case 'pointer':
JSP.handleEventPointer(is)
break
case 'line':
JSP.handleEventLine(is)
break
case 'remove':
JSP.handleEventRemove()
break
case 'screen':
JSP.handleEventScreen({ item, is })
break
case 'download':
v.$modal.dialog({
width: 350,
closable: false,
showMask: true,
maskClosable: true,
title: i18n.$t('Download'),
content: i18n.$t('Please confirm whether the workflow has been saved before downloading'),
ok: {
handle (e) {
DownChart.download({
dagThis: self.dag
})
}
},
cancel: {}
})
break
}
}
/**
* Echo data display
*/
Dag.prototype.backfill = function (arg) {
if (arg) {
const marginX = 100
const g = new dagre.graphlib.Graph()
g.setGraph({})
g.setDefaultEdgeLabel(function () { return {} })
for (const i in store.state.dag.locations) {
const location = store.state.dag.locations[i]
g.setNode(i, { label: i, width: Math.min(location.name.length * 7, 170), height: 150 })
}
for (const i in store.state.dag.connects) {
const connect = store.state.dag.connects[i]
g.setEdge(connect['endPointSourceId'], connect['endPointTargetId'])
}
dagre.layout(g)
const dataObject = {}
g.nodes().forEach(function (v) {
const node = g.node(v)
const obj = {}
obj.name = node.label
obj.x = node.x + marginX
obj.y = node.y
dataObject[node.label] = obj
})
jsPlumb.ready(() => {
JSP.init({
dag: this.dag,
instance: this.instance,
options: {
onRemoveNodes ($id) {
this.dag.removeEventModelById($id)
}
}
})
// Backfill
JSP.jspBackfill({
// connects
connects: _.cloneDeep(store.state.dag.connects),
// Node location information
locations: _.cloneDeep(dataObject),
// Node data
largeJson: _.cloneDeep(store.state.dag.tasks)
})
})
} else {
jsPlumb.ready(() => {
JSP.init({
dag: this.dag,
instance: this.instance,
options: {
onRemoveNodes ($id) {
this.dag.removeEventModelById($id)
}
}
})
// Backfill
JSP.jspBackfill({
// connects
connects: _.cloneDeep(store.state.dag.connects),
// Node location information
locations: _.cloneDeep(store.state.dag.locations),
// Node data
largeJson: _.cloneDeep(store.state.dag.tasks)
})
})
}
}
/**
* Get dag storage format data
*/
Dag.prototype.saveStore = function () {
return JSP.saveStore()
}
export default new Dag()
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,487 |
[Bug][API] Creating folders with multiple threads will result in multiple identical folders
|
Multiple threads calling the API to create folders will result in multiple identical folders.
The API for creating folders is expected to be thread-safe.
DolphinScheduler version is 1.3.1.


|
https://github.com/apache/dolphinscheduler/issues/3487
|
https://github.com/apache/dolphinscheduler/pull/3919
|
3361151155847de080e3c4d07ec0c784a8eae8bf
|
5018117edcbe6db128dad9c00e4eb7e9ad8312bf
| 2020-08-13T10:04:41Z |
java
| 2020-10-16T02:32:30Z |
sql/upgrade/1.3.3_schema/postgresql/dolphinscheduler_ddl.sql
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-- add t_ds_resources_un
CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_resources_un() RETURNS void AS $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM information_schema.KEY_COLUMN_USAGE
WHERE TABLE_NAME = 't_ds_resources'
AND CONSTRAINT_NAME = 't_ds_resources_un'
)
THEN
ALTER TABLE t_ds_resources ADD CONSTRAINT t_ds_resources_un UNIQUE (full_name,"type");
END IF;
END;
$$ LANGUAGE plpgsql;
SELECT uc_dolphin_T_t_ds_resources_un();
DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_resources_un();
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,894 |
[Bug][docker] Variable reference error in startup.sh
|
**Describe the bug**
In `docker/build/startup.sh`, variables cannot be referenced (expanded) inside single quotes.
```sh
# start database
initDatabase() {
echo "test ${DATABASE_TYPE} service"
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to ${DATABASE_TYPE}."
exit 1
fi
echo "Trying to connect to ${DATABASE_TYPE} at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 5
done
echo "connect ${DATABASE_TYPE} service"
if [ ${DATABASE_TYPE} = "mysql" ]; then
v=$(mysql -h${DATABASE_HOST} -P${DATABASE_PORT} -u${DATABASE_USERNAME} --password=${DATABASE_PASSWORD} -D ${DATABASE_DATABASE} -e "select 1" 2>&1)
if [ "$(echo '${v}' | grep 'ERROR' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
else
v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
fi
echo "import sql data"
${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh
}
```
**To Reproduce**
v="ERROR"
echo '${v}' | grep 'ERROR' | wc -l
**Expected behavior**
v="ERROR"
echo ${v} | grep 'ERROR' | wc -l
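For reference, a minimal standalone demonstration of the quoting difference (plain shell, no DolphinScheduler involved; the value of `v` is just an illustrative string):

```sh
#!/bin/bash
v="ERROR: can't connect to database"

# Single quotes: the literal text ${v} is piped to grep, so the count is always 0.
echo '${v}' | grep 'ERROR' | wc -l    # prints 0

# Double quotes (or no quotes): the variable is expanded and the error is detected.
echo "${v}" | grep 'ERROR' | wc -l    # prints 1
```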
**Which version of Dolphin Scheduler:**
-[1.3.2-release]
-[dev]
|
https://github.com/apache/dolphinscheduler/issues/3894
|
https://github.com/apache/dolphinscheduler/pull/3895
|
3d4238f66811dc6b83bb65e3a146251be9ddde95
|
3fdc5576e19b8102ea4f10a714cd3c85b27b2c4e
| 2020-10-13T06:24:43Z |
java
| 2020-10-17T13:57:16Z |
docker/build/Dockerfile
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FROM nginx:alpine
ARG VERSION
ENV TZ Asia/Shanghai
ENV LANG C.UTF-8
ENV DEBIAN_FRONTEND noninteractive
#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip tini kazoo.
#If installation is slow, you can replace alpine's mirror with aliyun's mirror, for example:
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
RUN apk update && \
apk add --update --no-cache dos2unix shadow bash openrc python2 python3 sudo vim wget iputils net-tools openssh-server py-pip tini && \
apk add --update --no-cache procps && \
openrc boot && \
pip install kazoo
#2. install jdk
RUN apk add --update --no-cache openjdk8
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
ENV PATH $JAVA_HOME/bin:$PATH
#3. add dolphinscheduler
ADD ./apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin.tar.gz /opt/
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-dolphinscheduler-bin/ /opt/dolphinscheduler/
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
#4. install database, if use mysql as your backend database, the `mysql-client` package should be installed
RUN apk add --update --no-cache postgresql postgresql-contrib
#5. modify nginx
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
rm -rf /etc/nginx/conf.d/*
COPY ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
#6. add configuration and modify permissions and set soft links
COPY ./checkpoint.sh /root/checkpoint.sh
COPY ./startup-init-conf.sh /root/startup-init-conf.sh
COPY ./startup.sh /root/startup.sh
COPY ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
COPY ./conf/dolphinscheduler/logback/* /opt/dolphinscheduler/conf/
COPY conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
RUN chmod +x /root/checkpoint.sh && \
chmod +x /root/startup-init-conf.sh && \
chmod +x /root/startup.sh && \
chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
chmod +x /opt/dolphinscheduler/script/*.sh && \
chmod +x /opt/dolphinscheduler/bin/*.sh && \
dos2unix /root/checkpoint.sh && \
dos2unix /root/startup-init-conf.sh && \
dos2unix /root/startup.sh && \
dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
dos2unix /opt/dolphinscheduler/script/*.sh && \
dos2unix /opt/dolphinscheduler/bin/*.sh && \
rm -rf /bin/sh && \
ln -s /bin/bash /bin/sh && \
mkdir -p /tmp/xls && \
#7. remove apk index cache and disable coredump for sudo
rm -rf /var/cache/apk/* && \
echo "Set disable_coredump false" >> /etc/sudo.conf
#8. expose port
EXPOSE 2181 2888 3888 5432 5678 1234 12345 50051 8888
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,894 |
[Bug][docker] Variable reference error in startup.sh
|
**Describe the bug**
In `docker/build/startup.sh`, variables cannot be referenced (expanded) inside single quotes.
```sh
# start database
initDatabase() {
echo "test ${DATABASE_TYPE} service"
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to ${DATABASE_TYPE}."
exit 1
fi
echo "Trying to connect to ${DATABASE_TYPE} at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 5
done
echo "connect ${DATABASE_TYPE} service"
if [ ${DATABASE_TYPE} = "mysql" ]; then
v=$(mysql -h${DATABASE_HOST} -P${DATABASE_PORT} -u${DATABASE_USERNAME} --password=${DATABASE_PASSWORD} -D ${DATABASE_DATABASE} -e "select 1" 2>&1)
if [ "$(echo '${v}' | grep 'ERROR' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
else
v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
fi
echo "import sql data"
${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh
}
```
**To Reproduce**
v="ERROR"
echo '${v}' | grep 'ERROR' | wc -l
**Expected behavior**
v="ERROR"
echo ${v} | grep 'ERROR' | wc -l
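For what it's worth, the mysql branch of `initDatabase()` could be written with the expansion fixed roughly like this (a sketch only, not necessarily the exact patch that was merged):

```sh
v=$(mysql -h${DATABASE_HOST} -P${DATABASE_PORT} -u${DATABASE_USERNAME} --password=${DATABASE_PASSWORD} -D ${DATABASE_DATABASE} -e "select 1" 2>&1)
# Double-quote ${v} so its value is expanded before grep inspects it.
if [ "$(echo "${v}" | grep 'ERROR' | wc -l)" -ge 1 ]; then
    echo "Error: Can't connect to database...${v}"
    exit 1
fi
```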
**Which version of Dolphin Scheduler:**
-[1.3.2-release]
-[dev]
|
https://github.com/apache/dolphinscheduler/issues/3894
|
https://github.com/apache/dolphinscheduler/pull/3895
|
3d4238f66811dc6b83bb65e3a146251be9ddde95
|
3fdc5576e19b8102ea4f10a714cd3c85b27b2c4e
| 2020-10-13T06:24:43Z |
java
| 2020-10-17T13:57:16Z |
docker/build/startup.sh
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
DOLPHINSCHEDULER_BIN=${DOLPHINSCHEDULER_HOME}/bin
DOLPHINSCHEDULER_SCRIPT=${DOLPHINSCHEDULER_HOME}/script
DOLPHINSCHEDULER_LOGS=${DOLPHINSCHEDULER_HOME}/logs
# start database
initDatabase() {
echo "test ${DATABASE_TYPE} service"
while ! nc -z ${DATABASE_HOST} ${DATABASE_PORT}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to ${DATABASE_TYPE}."
exit 1
fi
echo "Trying to connect to ${DATABASE_TYPE} at ${DATABASE_HOST}:${DATABASE_PORT}. Attempt $counter."
sleep 5
done
echo "connect ${DATABASE_TYPE} service"
if [ ${DATABASE_TYPE} = "mysql" ]; then
v=$(mysql -h${DATABASE_HOST} -P${DATABASE_PORT} -u${DATABASE_USERNAME} --password=${DATABASE_PASSWORD} -D ${DATABASE_DATABASE} -e "select 1" 2>&1)
if [ "$(echo '${v}' | grep 'ERROR' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
else
v=$(sudo -u postgres PGPASSWORD=${DATABASE_PASSWORD} psql -h ${DATABASE_HOST} -p ${DATABASE_PORT} -U ${DATABASE_USERNAME} -d ${DATABASE_DATABASE} -tAc "select 1")
if [ "$(echo '${v}' | grep 'FATAL' | wc -l)" -eq 1 ]; then
echo "Error: Can't connect to database...${v}"
exit 1
fi
fi
echo "import sql data"
${DOLPHINSCHEDULER_SCRIPT}/create-dolphinscheduler.sh
}
# start zk
initZK() {
echo "connect remote zookeeper"
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
while ! nc -z ${line%:*} ${line#*:}; do
counter=$((counter+1))
if [ $counter == 30 ]; then
echo "Error: Couldn't connect to zookeeper."
exit 1
fi
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
sleep 5
done
done
}
# start nginx
initNginx() {
echo "start nginx"
nginx &
}
# start master-server
initMasterServer() {
echo "start master-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop master-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start master-server
}
# start worker-server
initWorkerServer() {
echo "start worker-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop worker-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start worker-server
}
# start api-server
initApiServer() {
echo "start api-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop api-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start api-server
}
# start logger-server
initLoggerServer() {
echo "start logger-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop logger-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start logger-server
}
# start alert-server
initAlertServer() {
echo "start alert-server"
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh stop alert-server
${DOLPHINSCHEDULER_BIN}/dolphinscheduler-daemon.sh start alert-server
}
# print usage
printUsage() {
echo -e "Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system,"
echo -e "dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.\n"
echo -e "Usage: [ all | master-server | worker-server | api-server | alert-server | frontend ]\n"
printf "%-13s: %s\n" "all" "Run master-server, worker-server, api-server, alert-server and frontend."
printf "%-13s: %s\n" "master-server" "MasterServer is mainly responsible for DAG task split, task submission monitoring."
printf "%-13s: %s\n" "worker-server" "WorkerServer is mainly responsible for task execution and providing log services.."
printf "%-13s: %s\n" "api-server" "ApiServer is mainly responsible for processing requests from the front-end UI layer."
printf "%-13s: %s\n" "alert-server" "AlertServer mainly include Alarms."
printf "%-13s: %s\n" "frontend" "Frontend mainly provides various visual operation interfaces of the system."
}
# init config file
source /root/startup-init-conf.sh
LOGFILE=/var/log/nginx/access.log
case "$1" in
(all)
initZK
initDatabase
initMasterServer
initWorkerServer
initApiServer
initAlertServer
initLoggerServer
initNginx
LOGFILE=/var/log/nginx/access.log
;;
(master-server)
initZK
initDatabase
initMasterServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-master.log
;;
(worker-server)
initZK
initDatabase
initWorkerServer
initLoggerServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-worker.log
;;
(api-server)
initZK
initDatabase
initApiServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-api-server.log
;;
(alert-server)
initDatabase
initAlertServer
LOGFILE=${DOLPHINSCHEDULER_LOGS}/dolphinscheduler-alert.log
;;
(frontend)
initNginx
LOGFILE=/var/log/nginx/access.log
;;
(help)
printUsage
exit 1
;;
(*)
printUsage
exit 1
;;
esac
# init directories and log files
mkdir -p ${DOLPHINSCHEDULER_LOGS} && mkdir -p /var/log/nginx/ && cat /dev/null >> ${LOGFILE}
echo "tail begin"
exec bash -c "tail -n 1 -f ${LOGFILE}"
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,938 |
[CodeClean][DAO]Remove redundant comments
|
Remove the redundant Javadoc comment on ProcessInstanceMapper#queryProcessInstanceListPaging.

|
https://github.com/apache/dolphinscheduler/issues/3938
|
https://github.com/apache/dolphinscheduler/pull/3939
|
3fdc5576e19b8102ea4f10a714cd3c85b27b2c4e
|
fa355b23f39e14975d2d6ca1f0f6e2178a189b42
| 2020-10-17T16:58:17Z |
java
| 2020-10-18T14:36:04Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.mapper;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.ibatis.annotations.Param;
import java.util.Date;
import java.util.List;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
/**
* process instance mapper interface
*/
public interface ProcessInstanceMapper extends BaseMapper<ProcessInstance> {
/**
* query process instance detail info by id
* @param processId processId
* @return process instance
*/
ProcessInstance queryDetailById(@Param("processId") int processId);
/**
* query process instance by host and stateArray
* @param host host
* @param stateArray stateArray
* @return process instance list
*/
List<ProcessInstance> queryByHostAndStatus(@Param("host") String host,
@Param("states") int[] stateArray);
/**
* query process instance by tenantId and stateArray
* @param tenantId tenantId
* @param states states array
* @return process instance list
*/
List<ProcessInstance> queryByTenantIdAndStatus(@Param("tenantId") int tenantId,
@Param("states") int[] states);
/**
* query process instance by worker group and stateArray
* @param workerGroupId workerGroupId
* @param states states array
* @return process instance list
*/
List<ProcessInstance> queryByWorkerGroupIdAndStatus(@Param("workerGroupId") int workerGroupId,
@Param("states") int[] states);
/**
* process instance page
* @param page page
* @param projectId projectId
* @param processDefinitionId processDefinitionId
* @param searchVal searchVal
* @param statusArray statusArray
* @param host host
* @param startTime startTime
* @param endTime endTime
* @return process instance IPage
*/
/**
* process instance page
* @param page page
* @param projectId projectId
* @param processDefinitionId processDefinitionId
* @param searchVal searchVal
* @param executorId executorId
* @param statusArray statusArray
* @param host host
* @param startTime startTime
* @param endTime endTime
* @return process instance page
*/
IPage<ProcessInstance> queryProcessInstanceListPaging(Page<ProcessInstance> page,
@Param("projectId") int projectId,
@Param("processDefinitionId") Integer processDefinitionId,
@Param("searchVal") String searchVal,
@Param("executorId") Integer executorId,
@Param("states") int[] statusArray,
@Param("host") String host,
@Param("startTime") Date startTime,
@Param("endTime") Date endTime);
/**
* set failover by host and state array
* @param host host
* @param stateArray stateArray
* @return set result
*/
int setFailoverByHostAndStateArray(@Param("host") String host,
@Param("states") int[] stateArray);
/**
* update process instance by state
* @param originState originState
* @param destState destState
* @return update result
*/
int updateProcessInstanceByState(@Param("originState") ExecutionStatus originState,
@Param("destState") ExecutionStatus destState);
/**
* update process instance by tenantId
* @param originTenantId originTenantId
* @param destTenantId destTenantId
* @return update result
*/
int updateProcessInstanceByTenantId(@Param("originTenantId") int originTenantId,
@Param("destTenantId") int destTenantId);
/**
* update process instance by worker groupId
* @param originWorkerGroupId originWorkerGroupId
* @param destWorkerGroupId destWorkerGroupId
* @return update result
*/
int updateProcessInstanceByWorkerGroupId(@Param("originWorkerGroupId") int originWorkerGroupId, @Param("destWorkerGroupId") int destWorkerGroupId);
/**
* count process instance state by user
* @param startTime startTime
* @param endTime endTime
* @param projectIds projectIds
* @return ExecuteStatusCount list
*/
List<ExecuteStatusCount> countInstanceStateByUser(
@Param("startTime") Date startTime,
@Param("endTime") Date endTime,
@Param("projectIds") Integer[] projectIds);
/**
* query process instance by processDefinitionId
* @param processDefinitionId processDefinitionId
* @param size size
* @return process instance list
*/
List<ProcessInstance> queryByProcessDefineId(
@Param("processDefinitionId") int processDefinitionId,
@Param("size") int size);
/**
* query last scheduler process instance
* @param definitionId processDefinitionId
* @param startTime startTime
* @param endTime endTime
* @return process instance
*/
ProcessInstance queryLastSchedulerProcess(@Param("processDefinitionId") int definitionId,
@Param("startTime") Date startTime,
@Param("endTime") Date endTime);
/**
* query last running process instance
* @param definitionId definitionId
* @param startTime startTime
* @param endTime endTime
* @param stateArray stateArray
* @return process instance
*/
ProcessInstance queryLastRunningProcess(@Param("processDefinitionId") int definitionId,
@Param("startTime") Date startTime,
@Param("endTime") Date endTime,
@Param("states") int[] stateArray);
/**
* query last manual process instance
* @param definitionId definitionId
* @param startTime startTime
* @param endTime endTime
* @return process instance
*/
ProcessInstance queryLastManualProcess(@Param("processDefinitionId") int definitionId,
@Param("startTime") Date startTime,
@Param("endTime") Date endTime);
/**
* query top n process instance order by running duration
* @param size
* @param status process instance status
* @param startTime
* @param endTime
* @return ProcessInstance list
*/
List<ProcessInstance> queryTopNProcessInstance(@Param("size") int size,
@Param("startTime") Date startTime,
@Param("endTime") Date endTime,
@Param("status")ExecutionStatus status);
/**
* query process instance by processDefinitionId and stateArray
* @param processDefinitionId processDefinitionId
* @param states states array
* @return process instance list
*/
List<ProcessInstance> queryByProcessDefineIdAndStatus(
@Param("processDefinitionId") int processDefinitionId,
@Param("states") int[] states);
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,615 |
[Bug][master] After the task is executed successfully, but the next task has not been submitted, stop the master, the workflow will fail
|
1. Run a workflow.
2. After a task has executed successfully, but before the next task has been submitted, stop the master; the workflow will then fail.



**Which version of Dolphin Scheduler:**
-[1.3.2-release]
|
https://github.com/apache/dolphinscheduler/issues/3615
|
https://github.com/apache/dolphinscheduler/pull/3947
|
ccdaee9c04d5eb2a23ad85cdc5d56150babf57c6
|
4f94943b2d04c03c2dd9366cf3550f1595a1964a
| 2020-08-27T09:08:41Z |
java
| 2020-10-19T08:50:13Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import com.alibaba.fastjson.JSON;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.task.conditions.ConditionsParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.AlertManager;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* master exec thread,split dag
*/
public class MasterExecThread implements Runnable {
/**
* logger of MasterExecThread
*/
private static final Logger logger = LoggerFactory.getLogger(MasterExecThread.class);
/**
* process instance
*/
private ProcessInstance processInstance;
/**
     * running TaskNode
*/
private final Map<MasterBaseTaskExecThread,Future<Boolean>> activeTaskNode = new ConcurrentHashMap<>();
/**
* task exec service
*/
private final ExecutorService taskExecService;
/**
* submit failure nodes
*/
private boolean taskFailedSubmit = false;
/**
* recover node id list
*/
private List<TaskInstance> recoverNodeIdList = new ArrayList<>();
/**
* error task list
*/
private Map<String,TaskInstance> errorTaskList = new ConcurrentHashMap<>();
/**
* complete task list
*/
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
/**
* ready to submit task list
*/
private Map<String, TaskInstance> readyToSubmitTaskList = new ConcurrentHashMap<>();
/**
* depend failed task map
*/
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
/**
* forbidden task map
*/
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
/**
* skip task map
*/
private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();
/**
* recover tolerance fault task list
*/
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
/**
* alert manager
*/
private AlertManager alertManager = new AlertManager();
/**
* the object of DAG
*/
private DAG<String,TaskNode,TaskNodeRelation> dag;
/**
* process service
*/
private ProcessService processService;
/**
* master config
*/
private MasterConfig masterConfig;
/**
*
*/
private NettyRemotingClient nettyRemotingClient;
/**
* constructor of MasterExecThread
* @param processInstance processInstance
* @param processService processService
* @param nettyRemotingClient nettyRemotingClient
*/
public MasterExecThread(ProcessInstance processInstance, ProcessService processService, NettyRemotingClient nettyRemotingClient){
this.processService = processService;
this.processInstance = processInstance;
this.masterConfig = SpringApplicationContext.getBean(MasterConfig.class);
int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread",
masterTaskExecNum);
this.nettyRemotingClient = nettyRemotingClient;
}
@Override
public void run() {
// process instance is null
if (processInstance == null){
            logger.info("process instance does not exist");
return;
}
// check to see if it's done
if (processInstance.getState().typeIsFinished()){
logger.info("process instance is done : {}",processInstance.getId());
return;
}
try {
if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()){
// sub process complement data
executeComplementProcess();
}else{
// execute flow
executeProcess();
}
}catch (Exception e){
logger.error("master exec thread exception", e);
logger.error("process execute failed, process id:{}", processInstance.getId());
processInstance.setState(ExecutionStatus.FAILURE);
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
}finally {
taskExecService.shutdown();
// post handle
postHandle();
}
}
/**
* execute process
* @throws Exception exception
*/
private void executeProcess() throws Exception {
prepareProcess();
runProcess();
endProcess();
}
/**
* execute complement process
* @throws Exception exception
*/
private void executeComplementProcess() throws Exception {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
processService.saveProcessInstance(processInstance);
// get schedules
int processDefinitionId = processInstance.getProcessDefinitionId();
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
List<Date> listDate = Lists.newLinkedList();
if(!CollectionUtils.isEmpty(schedules)){
for (Schedule schedule : schedules) {
listDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedule.getCrontab()));
}
}
// get first fire date
Iterator<Date> iterator = null;
Date scheduleDate = null;
if(!CollectionUtils.isEmpty(listDate)) {
iterator = listDate.iterator();
scheduleDate = iterator.next();
processInstance.setScheduleTime(scheduleDate);
processService.updateProcessInstance(processInstance);
}else{
scheduleDate = processInstance.getScheduleTime();
if(scheduleDate == null){
scheduleDate = startDate;
}
}
while(Stopper.isRunning()){
logger.info("process {} start to complement {} data",
processInstance.getId(), DateUtils.dateToString(scheduleDate));
// prepare dag and other info
prepareProcess();
if(dag == null){
logger.error("process {} dag is null, please check out parameters",
processInstance.getId());
processInstance.setState(ExecutionStatus.SUCCESS);
processService.updateProcessInstance(processInstance);
return;
}
// execute process ,waiting for end
runProcess();
endProcess();
// process instance failure ,no more complements
if(!processInstance.getState().typeIsSuccess()){
logger.info("process {} state {}, complement not completely!",
processInstance.getId(), processInstance.getState());
break;
}
// current process instance success ,next execute
if(null == iterator){
// loop by day
scheduleDate = DateUtils.getSomeDay(scheduleDate, 1);
if(scheduleDate.after(endDate)){
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
}else{
// loop by schedule date
if(!iterator.hasNext()){
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
scheduleDate = iterator.next();
}
// flow end
// execute next process instance complement data
processInstance.setScheduleTime(scheduleDate);
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_START_NODE_STRING)){
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
}
processInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processInstance.getProcessDefinition().getGlobalParamMap(),
processInstance.getProcessDefinition().getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processInstance.setId(0);
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processService.saveProcessInstance(processInstance);
}
}
/**
* prepare process parameter
* @throws Exception exception
*/
private void prepareProcess() throws Exception {
// init task queue
initTaskQueue();
// gen process dag
buildFlowDag();
logger.info("prepare process :{} end", processInstance.getId());
}
/**
* process end handle
*/
private void endProcess() {
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
if(processInstance.getState().typeIsWaitingThread()){
processService.createRecoveryWaitingThreadCommand(null, processInstance);
}
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
alertManager.sendAlertProcessInstance(processInstance, taskInstances);
}
/**
* generate process dag
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
forbiddenTaskList = DagHelper.getForbiddenTaskNodeMaps(processInstance.getProcessInstanceJson());
// generate process to get DAG info
List<String> recoveryNameList = getRecoveryNodeNameList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(processInstance.getProcessInstanceJson(),
startNodeNameList, recoveryNameList, processInstance.getTaskDependType());
if(processDag == null){
logger.error("processDag is null");
return;
}
// generate process dag
dag = DagHelper.buildDagGraph(processDag);
}
/**
* init task queue
*/
private void initTaskQueue(){
taskFailedSubmit = false;
activeTaskNode.clear();
dependFailedTask.clear();
completeTaskList.clear();
errorTaskList.clear();
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance task : taskInstanceList){
if(task.isTaskComplete()){
completeTaskList.put(task.getName(), task);
}
if(task.getState().typeIsFailure() && !task.taskCanRetry()){
errorTaskList.put(task.getName(), task);
}
}
}
/**
* process post handle
*/
private void postHandle() {
logger.info("develop mode is: {}", CommonUtils.isDevelopMode());
if (!CommonUtils.isDevelopMode()) {
// get exec dir
String execLocalPath = org.apache.dolphinscheduler.common.utils.FileUtils
.getProcessExecDir(processInstance.getProcessDefinition().getProjectId(),
processInstance.getProcessDefinitionId(),
processInstance.getId());
try {
FileUtils.deleteDirectory(new File(execLocalPath));
} catch (IOException e) {
logger.error("delete exec dir failed ", e);
}
}
}
/**
* submit task to execute
* @param taskInstance task instance
* @return TaskInstance
*/
private TaskInstance submitTaskExec(TaskInstance taskInstance) {
MasterBaseTaskExecThread abstractExecThread = null;
if(taskInstance.isSubProcess()){
abstractExecThread = new SubProcessTaskExecThread(taskInstance);
}else if(taskInstance.isDependTask()){
abstractExecThread = new DependentTaskExecThread(taskInstance);
}else if(taskInstance.isConditionsTask()){
abstractExecThread = new ConditionsTaskExecThread(taskInstance);
}else {
abstractExecThread = new MasterTaskExecThread(taskInstance);
}
Future<Boolean> future = taskExecService.submit(abstractExecThread);
activeTaskNode.putIfAbsent(abstractExecThread, future);
return abstractExecThread.getTaskInstance();
}
/**
* find task instance in db.
* to avoid submitting more than one task with the same name at the same time.
* @param taskName task name
* @return TaskInstance
*/
private TaskInstance findTaskIfExists(String taskName){
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
for(TaskInstance taskInstance : taskInstanceList){
if(taskInstance.getName().equals(taskName)){
return taskInstance;
}
}
return null;
}
/**
* encapsulate a task node into a task instance
* @param processInstance process instance
* @param nodeName node name
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, String nodeName,
TaskNode taskNode) {
TaskInstance taskInstance = findTaskIfExists(nodeName);
if(taskInstance == null){
taskInstance = new TaskInstance();
// task name
taskInstance.setName(nodeName);
// process instance define id
taskInstance.setProcessDefinitionId(processInstance.getProcessDefinitionId());
// task instance state
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
// process instance id
taskInstance.setProcessInstanceId(processInstance.getId());
// task instance node json
taskInstance.setTaskJson(JSON.toJSONString(taskNode));
// task instance type
taskInstance.setTaskType(taskNode.getType());
// task instance whether alert
taskInstance.setAlertFlag(Flag.NO);
// task instance start time
taskInstance.setStartTime(new Date());
// task instance flag
taskInstance.setFlag(Flag.YES);
// task instance retry times
taskInstance.setRetryTimes(0);
// max task instance retry times
taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
// retry task instance interval
taskInstance.setRetryInterval(taskNode.getRetryInterval());
// task instance priority
if(taskNode.getTaskInstancePriority() == null){
taskInstance.setTaskInstancePriority(Priority.MEDIUM);
}else{
taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
}
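// resolve the worker group: use the task-level group if set, otherwise fall back to the process-level group, then to the default group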
String processWorkerGroup = processInstance.getWorkerGroup();
processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
taskInstance.setWorkerGroup(processWorkerGroup);
}else {
taskInstance.setWorkerGroup(taskWorkerGroup);
}
}
return taskInstance;
}
/**
* if all of the task's dependencies are skipped, skip it too.
* @param taskNode
* @return
*/
private boolean isTaskNodeNeedSkip(TaskNode taskNode){
if(CollectionUtils.isEmpty(taskNode.getDepList())){
return false;
}
for(String depNode : taskNode.getDepList()){
if(!skipTaskNodeList.containsKey(depNode)){
return false;
}
}
return true;
}
/**
* mark task nodes as skipped if all of their dependencies are skipped
* @param taskNodesSkipList
*/
private void setTaskNodeSkip(List<String> taskNodesSkipList){
for(String skipNode : taskNodesSkipList){
skipTaskNodeList.putIfAbsent(skipNode, dag.getNode(skipNode));
Collection<String> postNodeList = DagHelper.getStartVertex(skipNode, dag, completeTaskList);
List<String> postSkipList = new ArrayList<>();
for(String post : postNodeList){
TaskNode postNode = dag.getNode(post);
if(isTaskNodeNeedSkip(postNode)){
postSkipList.add(post);
}
}
setTaskNodeSkip(postSkipList);
}
}
/**
* parse a conditions task to find the branch to run,
* and set the skip flag on the other branch.
* @param nodeName
* @return
*/
private List<String> parseConditionTask(String nodeName){
List<String> conditionTaskList = new ArrayList<>();
TaskNode taskNode = dag.getNode(nodeName);
if(!taskNode.isConditionsTask()){
return conditionTaskList;
}
ConditionsParameters conditionsParameters =
JSONUtils.parseObject(taskNode.getConditionResult(), ConditionsParameters.class);
TaskInstance taskInstance = completeTaskList.get(nodeName);
if(taskInstance == null){
logger.error("task instance {} cannot find, please check it!", nodeName);
return conditionTaskList;
}
if(taskInstance.getState().typeIsSuccess()){
conditionTaskList = conditionsParameters.getSuccessNode();
setTaskNodeSkip(conditionsParameters.getFailedNode());
}else if(taskInstance.getState().typeIsFailure()){
conditionTaskList = conditionsParameters.getFailedNode();
setTaskNodeSkip(conditionsParameters.getSuccessNode());
}else{
conditionTaskList.add(nodeName);
}
return conditionTaskList;
}
/**
* parse the post-node list of the previous node:
* for a conditions node, return the branch chosen by its settings;
* if a post node has already completed, return that node's own post nodes
* @param previousNodeName
* @return
*/
private List<String> parsePostNodeList(String previousNodeName){
List<String> postNodeList = new ArrayList<>();
TaskNode taskNode = dag.getNode(previousNodeName);
if(taskNode != null && taskNode.isConditionsTask()){
return parseConditionTask(previousNodeName);
}
Collection<String> postNodeCollection = DagHelper.getStartVertex(previousNodeName, dag, completeTaskList);
List<String> postSkipList = new ArrayList<>();
// skip nodes that already succeeded and parse their successors instead
// if it is a conditions node:
// 1. parse the branch to run according to the conditions settings
// 2. set the skip flag on the other branch
for(String postNode : postNodeCollection){
if(completeTaskList.containsKey(postNode)){
TaskInstance postTaskInstance = completeTaskList.get(postNode);
if(dag.getNode(postNode).isConditionsTask()){
List<String> conditionTaskNodeList = parseConditionTask(postNode);
for(String conditions : conditionTaskNodeList){
postNodeList.addAll(parsePostNodeList(conditions));
}
}else if(postTaskInstance.getState().typeIsSuccess()){
postNodeList.addAll(parsePostNodeList(postNode));
}else{
postNodeList.add(postNode);
}
}else if(isTaskNodeNeedSkip(dag.getNode(postNode))){
postSkipList.add(postNode);
setTaskNodeSkip(postSkipList);
postSkipList.clear();
}else{
postNodeList.add(postNode);
}
}
return postNodeList;
}
/**
* submit post node
* @param parentNodeName parent node name
*/
private void submitPostNode(String parentNodeName){
List<String> submitTaskNodeList = parsePostNodeList(parentNodeName);
List<TaskInstance> taskInstances = new ArrayList<>();
for(String taskNode : submitTaskNodeList){
taskInstances.add(createTaskInstance(processInstance, taskNode,
dag.getNode(taskNode)));
}
// if the previous node succeeded, submit its post nodes
for(TaskInstance task : taskInstances){
if(readyToSubmitTaskList.containsKey(task.getName())){
continue;
}
if(completeTaskList.containsKey(task.getName())){
logger.info("task {} has already run success", task.getName());
continue;
}
if(task.getState().typeIsPause() || task.getState().typeIsCancel()){
logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
}else{
addTaskToStandByList(task);
}
}
}
/**
* determine whether the dependencies of the task node are complete
* @return DependResult
*/
private DependResult isTaskDepsComplete(String taskName) {
Collection<String> startNodes = dag.getBeginNode();
// if it is a start vertex, return success directly
if(startNodes.contains(taskName)){
return DependResult.SUCCESS;
}
TaskNode taskNode = dag.getNode(taskName);
List<String> depNameList = taskNode.getDepList();
for(String depsNode : depNameList ){
if(!dag.containsNode(depsNode)
|| skipTaskNodeList.containsKey(depsNode)
|| forbiddenTaskList.containsKey(depsNode)){
continue;
}
// all the dependencies must be completed
if(!completeTaskList.containsKey(depsNode)){
return DependResult.WAITING;
}
ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
if(depTaskState.typeIsPause() || depTaskState.typeIsCancel()){
return DependResult.WAITING;
}
// ignore task state if current task is condition
if(taskNode.isConditionsTask()){
continue;
}
if(!dependTaskSuccess(depsNode, taskName)){
return DependResult.FAILED;
}
}
logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray()));
return DependResult.SUCCESS;
}
/**
* the dependency node has completed, but for a conditions task we still need to check that the next node is on the chosen branch
* @param dependNodeName
* @param nextNodeName
* @return
*/
private boolean dependTaskSuccess(String dependNodeName, String nextNodeName){
if(dag.getNode(dependNodeName).isConditionsTask()){
//condition task need check the branch to run
List<String> nextTaskList = parseConditionTask(dependNodeName);
if(!nextTaskList.contains(nextNodeName)){
return false;
}
}else {
ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState();
if(depTaskState.typeIsFailure()){
return false;
}
}
return true;
}
/**
* query task instance by complete state
* @param state state
* @return task instance list
*/
private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state){
List<TaskInstance> resultList = new ArrayList<>();
for (Map.Entry<String, TaskInstance> entry: completeTaskList.entrySet()) {
if(entry.getValue().getState() == state){
resultList.add(entry.getValue());
}
}
return resultList;
}
/**
* whether there are ongoing tasks
* @param state state
* @return ExecutionStatus
*/
private ExecutionStatus runningState(ExecutionStatus state){
if(state == ExecutionStatus.READY_STOP ||
state == ExecutionStatus.READY_PAUSE ||
state == ExecutionStatus.WAITTING_THREAD){
// if the running task is not completed, the state remains unchanged
return state;
}else{
return ExecutionStatus.RUNNING_EXEUTION;
}
}
/**
* whether a failed task exists, including submit failures, dependency failures, and execution failures (after retries are exhausted)
*
* @return Boolean whether has failed task
*/
private boolean hasFailedTask(){
if(this.taskFailedSubmit){
return true;
}
if(this.errorTaskList.size() > 0){
return true;
}
return this.dependFailedTask.size() > 0;
}
/**
* process instance failure
*
* @return Boolean whether process instance failed
*/
private boolean processFailed(){
if(hasFailedTask()) {
if(processInstance.getFailureStrategy() == FailureStrategy.END){
return true;
}
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
return readyToSubmitTaskList.size() == 0 || activeTaskNode.size() == 0;
}
}
return false;
}
/**
* whether any task is in the waiting-thread state
* @return Boolean whether has waiting thread task
*/
private boolean hasWaitingThreadTask(){
List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITTING_THREAD);
return CollectionUtils.isNotEmpty(waitingList);
}
/**
* prepare for pause
* 1. a failed task awaiting retry in the standby queue returns failure directly
* 2. a paused task exists, complement is not finished, or tasks are pending submission: return pause
* 3. otherwise return success
* @return ExecutionStatus
*/
private ExecutionStatus processReadyPause(){
if(hasRetryTaskInStandBy()){
return ExecutionStatus.FAILURE;
}
List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
if(CollectionUtils.isNotEmpty(pauseList)
|| !isComplementEnd()
|| readyToSubmitTaskList.size() > 0){
return ExecutionStatus.PAUSE;
}else{
return ExecutionStatus.SUCCESS;
}
}
/**
* generate the latest process instance status by the tasks state
* @return process instance execution status
*/
private ExecutionStatus getProcessInstanceState(){
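// derive the new state from task results, checked in priority order: running (active or retrying tasks) > failure > waiting thread > ready-pause > ready-stop > success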
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
ExecutionStatus state = instance.getState();
if(activeTaskNode.size() > 0 || retryTaskExists()){
// active task and retry task exists
return runningState(state);
}
// process failure
if(processFailed()){
return ExecutionStatus.FAILURE;
}
// waiting thread
if(hasWaitingThreadTask()){
return ExecutionStatus.WAITTING_THREAD;
}
// pause
if(state == ExecutionStatus.READY_PAUSE){
return processReadyPause();
}
// stop
if(state == ExecutionStatus.READY_STOP){
List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
if(CollectionUtils.isNotEmpty(stopList)
|| CollectionUtils.isNotEmpty(killList)
|| !isComplementEnd()){
return ExecutionStatus.STOP;
}else{
return ExecutionStatus.SUCCESS;
}
}
// success
if(state == ExecutionStatus.RUNNING_EXEUTION){
List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL);
if(readyToSubmitTaskList.size() > 0){
// tasks are pending submission and none are retries, meaning dependencies are still being waited on
return ExecutionStatus.RUNNING_EXEUTION;
}else if(CollectionUtils.isNotEmpty(killTasks)){
// tasks maybe killed manually
return ExecutionStatus.FAILURE;
}else{
// if the waiting queue is empty and the status is in progress, then success
return ExecutionStatus.SUCCESS;
}
}
return state;
}
/**
* whether standby task list have retry tasks
* @return
*/
private boolean retryTaskExists() {
boolean result = false;
for(String taskName : readyToSubmitTaskList.keySet()){
TaskInstance task = readyToSubmitTaskList.get(taskName);
if(task.getState().typeIsFailure()){
result = true;
break;
}
}
return result;
}
/**
* whether complement end
* @return Boolean whether is complement end
*/
private boolean isComplementEnd() {
if(!processInstance.isComplementData()){
return true;
}
try {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
return processInstance.getScheduleTime().equals(endTime);
} catch (Exception e) {
logger.error("complement end failed ",e);
return false;
}
}
/**
* update the process instance state
* after each batch of tasks is executed, the status of the process instance is updated
*/
private void updateProcessInstanceState() {
ExecutionStatus state = getProcessInstanceState();
if(processInstance.getState() != state){
logger.info(
"work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}",
processInstance.getId(), processInstance.getName(),
processInstance.getState(), state,
processInstance.getCommandType());
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
instance.setState(state);
instance.setProcessDefinition(processInstance.getProcessDefinition());
processService.updateProcessInstance(instance);
processInstance = instance;
}
}
/**
* get task dependency result
* @param taskInstance task instance
* @return DependResult
*/
private DependResult getDependResultForTask(TaskInstance taskInstance){
return isTaskDepsComplete(taskInstance.getName());
}
/**
* add task to standby list
* @param taskInstance task instance
*/
private void addTaskToStandByList(TaskInstance taskInstance){
logger.info("add task to stand by list: {}", taskInstance.getName());
readyToSubmitTaskList.putIfAbsent(taskInstance.getName(), taskInstance);
}
/**
* remove task from stand by list
* @param taskInstance task instance
*/
private void removeTaskFromStandbyList(TaskInstance taskInstance){
logger.info("remove task from stand by list: {}", taskInstance.getName());
readyToSubmitTaskList.remove(taskInstance.getName());
}
/**
* has retry task in standby
* @return Boolean whether has retry task in standby
*/
private boolean hasRetryTaskInStandBy(){
for (Map.Entry<String, TaskInstance> entry: readyToSubmitTaskList.entrySet()) {
if(entry.getValue().getState().typeIsFailure()){
return true;
}
}
return false;
}
/**
* submit and watch the tasks, until the work flow stop
*/
private void runProcess(){
// submit start node
submitPostNode(null);
boolean sendTimeWarning = false;
while(!processInstance.isProcessInstanceStop()){
// send warning email if process time out.
if(!sendTimeWarning && checkProcessTimeOut(processInstance) ){
alertManager.sendProcessTimeoutAlert(processInstance,
processService.findProcessDefineById(processInstance.getProcessDefinitionId()));
sendTimeWarning = true;
}
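// poll the active task monitor threads: drop finished ones, submit post nodes for successful tasks,
// put retryable failures back on the standby list, and record unrecoverable failures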
for(Map.Entry<MasterBaseTaskExecThread,Future<Boolean>> entry: activeTaskNode.entrySet()) {
Future<Boolean> future = entry.getValue();
TaskInstance task = entry.getKey().getTaskInstance();
if(!future.isDone()){
continue;
}
// node monitor thread complete
task = this.processService.findTaskInstanceById(task.getId());
if(task == null){
this.taskFailedSubmit = true;
activeTaskNode.remove(entry.getKey());
continue;
}
// node monitor thread complete
if(task.getState().typeIsFinished()){
activeTaskNode.remove(entry.getKey());
}
logger.info("task :{}, id:{} complete, state is {} ",
task.getName(), task.getId(), task.getState());
// node success , post node submit
if(task.getState() == ExecutionStatus.SUCCESS){
completeTaskList.put(task.getName(), task);
submitPostNode(task.getName());
continue;
}
// node fails, retry first, and then execute the failure process
if(task.getState().typeIsFailure()){
if(task.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE){
this.recoverToleranceFaultTaskList.add(task);
}
if(task.taskCanRetry()){
addTaskToStandByList(task);
}else{
completeTaskList.put(task.getName(), task);
if( task.isConditionsTask()
|| DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
submitPostNode(task.getName());
}else{
errorTaskList.put(task.getName(), task);
if(processInstance.getFailureStrategy() == FailureStrategy.END){
killTheOtherTasks();
}
}
}
continue;
}
// other status stop/pause
completeTaskList.put(task.getName(), task);
}
// send alert
if(CollectionUtils.isNotEmpty(this.recoverToleranceFaultTaskList)){
alertManager.sendAlertWorkerToleranceFault(processInstance, recoverToleranceFaultTaskList);
this.recoverToleranceFaultTaskList.clear();
}
// updateProcessInstance completed task status
// failure priority is higher than pause
// if a task fails, other suspended tasks need to be reset kill
if(errorTaskList.size() > 0){
for(Map.Entry<String, TaskInstance> entry: completeTaskList.entrySet()) {
TaskInstance completeTask = entry.getValue();
if(completeTask.getState()== ExecutionStatus.PAUSE){
completeTask.setState(ExecutionStatus.KILL);
completeTaskList.put(entry.getKey(), completeTask);
processService.updateTaskInstance(completeTask);
}
}
}
if(canSubmitTaskToQueue()){
submitStandByTask();
}
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (InterruptedException e) {
logger.error(e.getMessage(),e);
}
updateProcessInstanceState();
}
logger.info("process:{} end, state :{}", processInstance.getId(), processInstance.getState());
}
/**
* check whether the process instance has timed out
* @param processInstance process instance
* @return true if the running time of the process instance exceeds its configured timeout
*/
private boolean checkProcessTimeOut(ProcessInstance processInstance) {
if(processInstance.getTimeout() == 0 ){
return false;
}
Date now = new Date();
long runningTime = DateUtils.diffMin(now, processInstance.getStartTime());
return runningTime > processInstance.getTimeout();
}
/**
* whether can submit task to queue
* @return boolean
*/
private boolean canSubmitTaskToQueue() {
return OSUtils.checkResource(masterConfig.getMasterMaxCpuloadAvg(), masterConfig.getMasterReservedMemory());
}
/**
* close the on going tasks
*/
private void killTheOtherTasks() {
logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(),
activeTaskNode.size());
for (Map.Entry<MasterBaseTaskExecThread, Future<Boolean>> entry : activeTaskNode.entrySet()) {
MasterBaseTaskExecThread taskExecThread = entry.getKey();
Future<Boolean> future = entry.getValue();
TaskInstance taskInstance = taskExecThread.getTaskInstance();
taskInstance = processService.findTaskInstanceById(taskInstance.getId());
if(taskInstance != null && taskInstance.getState().typeIsFinished()){
continue;
}
if (!future.isDone()) {
// record kill info
logger.info("kill process instance, id: {}, task: {}", processInstance.getId(), taskExecThread.getTaskInstance().getId());
// kill node
taskExecThread.kill();
}
}
}
/**
* whether the retry interval is timed out
* @param taskInstance task instance
* @return Boolean
*/
private boolean retryTaskIntervalOverTime(TaskInstance taskInstance){
if(taskInstance.getState() != ExecutionStatus.FAILURE){
return true;
}
if(taskInstance.getId() == 0 ||
taskInstance.getMaxRetryTimes() ==0 ||
taskInstance.getRetryInterval() == 0 ){
return true;
}
Date now = new Date();
long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime());
// return true only if the retry interval has elapsed since the task failed
return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval;
}
/**
* handling the list of tasks to be submitted
*/
private void submitStandByTask(){
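// walk the standby list: submit tasks whose dependencies have all succeeded (once any retry interval has elapsed), and drop tasks whose dependencies failed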
for(Map.Entry<String, TaskInstance> entry: readyToSubmitTaskList.entrySet()) {
TaskInstance task = entry.getValue();
DependResult dependResult = getDependResultForTask(task);
if(DependResult.SUCCESS == dependResult){
if(retryTaskIntervalOverTime(task)){
submitTaskExec(task);
removeTaskFromStandbyList(task);
}
}else if(DependResult.FAILED == dependResult){
// if the dependency fails, the current node is not submitted and the state changes to failure.
dependFailedTask.put(entry.getKey(), task);
removeTaskFromStandbyList(task);
logger.info("task {},id:{} depend result : {}",task.getName(), task.getId(), dependResult);
}
}
}
/**
* get recovery task instance
* @param taskId task id
* @return recovery task instance
*/
private TaskInstance getRecoveryTaskInstance(String taskId){
if(!StringUtils.isNotEmpty(taskId)){
return null;
}
try {
Integer intId = Integer.valueOf(taskId);
TaskInstance task = processService.findTaskInstanceById(intId);
if(task == null){
logger.error("start node id cannot be found: {}", taskId);
}else {
return task;
}
}catch (Exception e){
logger.error("get recovery task instance failed ",e);
}
return null;
}
/**
* get start task instance list
* @param cmdParam command param
* @return task instance list
*/
private List<TaskInstance> getStartTaskInstanceList(String cmdParam){
List<TaskInstance> instanceList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if(paramMap != null && paramMap.containsKey(CMDPARAM_RECOVERY_START_NODE_STRING)){
String[] idList = paramMap.get(CMDPARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA);
for(String nodeId : idList){
TaskInstance task = getRecoveryTaskInstance(nodeId);
if(task != null){
instanceList.add(task);
}
}
}
return instanceList;
}
/**
* parse "StartNodeNameList" from cmd param
* @param cmdParam command param
* @return start node name list
*/
private List<String> parseStartNodeName(String cmdParam){
List<String> startNodeNameList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if(paramMap == null){
return startNodeNameList;
}
if(paramMap.containsKey(CMDPARAM_START_NODE_NAMES)){
startNodeNameList = Arrays.asList(paramMap.get(CMDPARAM_START_NODE_NAMES).split(Constants.COMMA));
}
return startNodeNameList;
}
/**
* generate the recovery node name list from the recovery task instances
* parsed from the command param
* @return recovery node name list
*/
private List<String> getRecoveryNodeNameList(){
List<String> recoveryNodeNameList = new ArrayList<>();
if(CollectionUtils.isNotEmpty(recoverNodeIdList)) {
for (TaskInstance task : recoverNodeIdList) {
recoveryNodeNameList.add(task.getName());
}
}
return recoveryNodeNameList;
}
/**
* generate flow dag
* @param processDefinitionJson process definition json
* @param startNodeNameList start node name list
* @param recoveryNodeNameList recovery node name list
* @param depNodeType depend node type
* @return ProcessDag process dag
* @throws Exception exception
*/
public ProcessDag generateFlowDag(String processDefinitionJson,
List<String> startNodeNameList,
List<String> recoveryNodeNameList,
TaskDependType depNodeType)throws Exception{
return DagHelper.generateFlowDag(processDefinitionJson, startNodeNameList, recoveryNodeNameList, depNodeType);
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,966 |
Call submodule, send mail problem
|
When calling sub-modules with the warning option set to send an email on both success and failure, each sub-module sends its own email and another email is sent when the overall run ends; only the single email at the end of the overall run should be sent.

|
https://github.com/apache/dolphinscheduler/issues/3966
|
https://github.com/apache/dolphinscheduler/pull/3972
|
ccc8ba1167f4b447ee4c19bee42b813c32f61027
|
7a088b7325e173c2419398d3a5c3785839c4b72a
| 2020-10-21T09:20:21Z |
java
| 2020-10-22T08:09:55Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/AlertManager.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.enums.AlertType;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.DaoFactory;
import org.apache.dolphinscheduler.dao.entity.Alert;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.LinkedHashMap;
import java.util.List;
/**
* alert manager
*/
public class AlertManager {
/**
* logger of AlertManager
*/
private static final Logger logger = LoggerFactory.getLogger(AlertManager.class);
/**
* alert dao
*/
private AlertDao alertDao = DaoFactory.getDaoInstance(AlertDao.class);
/**
* convert command type to a readable name
*
* @param commandType command type
* @return command name
*/
private String getCommandCnName(CommandType commandType) {
switch (commandType) {
case RECOVER_TOLERANCE_FAULT_PROCESS:
return "recover tolerance fault process";
case RECOVER_SUSPENDED_PROCESS:
return "recover suspended process";
case START_CURRENT_TASK_PROCESS:
return "start current task process";
case START_FAILURE_TASK_PROCESS:
return "start failure task process";
case START_PROCESS:
return "start process";
case REPEAT_RUNNING:
return "repeat running";
case SCHEDULER:
return "scheduler";
case COMPLEMENT_DATA:
return "complement data";
case PAUSE:
return "pause";
case STOP:
return "stop";
default:
return "unknown type";
}
}
/**
* process instance format
*/
private static final String PROCESS_INSTANCE_FORMAT =
"\"id:%d\"," +
"\"name:%s\"," +
"\"job type: %s\"," +
"\"state: %s\"," +
"\"recovery:%s\"," +
"\"run time: %d\"," +
"\"start time: %s\"," +
"\"end time: %s\"," +
"\"host: %s\"" ;
/**
* get process instance content
* @param processInstance process instance
* @param taskInstances task instance list
* @return process instance format content
*/
public String getContentProcessInstance(ProcessInstance processInstance,
List<TaskInstance> taskInstances){
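// success: build a single summary row for the process instance; failure: build one row per task that did not succeed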
String res = "";
if(processInstance.getState().typeIsSuccess()){
res = String.format(PROCESS_INSTANCE_FORMAT,
processInstance.getId(),
processInstance.getName(),
getCommandCnName(processInstance.getCommandType()),
processInstance.getState().toString(),
processInstance.getRecovery().toString(),
processInstance.getRunTimes(),
DateUtils.dateToString(processInstance.getStartTime()),
DateUtils.dateToString(processInstance.getEndTime()),
processInstance.getHost()
);
res = "[" + res + "]";
}else if(processInstance.getState().typeIsFailure()){
List<LinkedHashMap> failedTaskList = new ArrayList<>();
for(TaskInstance task : taskInstances){
if(task.getState().typeIsSuccess()){
continue;
}
LinkedHashMap<String, String> failedTaskMap = new LinkedHashMap();
failedTaskMap.put("process instance id", String.valueOf(processInstance.getId()));
failedTaskMap.put("process instance name", processInstance.getName());
failedTaskMap.put("task id", String.valueOf(task.getId()));
failedTaskMap.put("task name", task.getName());
failedTaskMap.put("task type", task.getTaskType());
failedTaskMap.put("task state", task.getState().toString());
failedTaskMap.put("task start time", DateUtils.dateToString(task.getStartTime()));
failedTaskMap.put("task end time", DateUtils.dateToString(task.getEndTime()));
failedTaskMap.put("host", task.getHost());
failedTaskMap.put("log path", task.getLogPath());
failedTaskList.add(failedTaskMap);
}
res = JSONUtils.toJson(failedTaskList);
}
return res;
}
/**
* get worker fault tolerance content
*
* @param processInstance process instance
* @param toleranceTaskList tolerance task list
* @return worker tolerance content
*/
private String getWorkerToleranceContent(ProcessInstance processInstance, List<TaskInstance> toleranceTaskList){
List<LinkedHashMap<String, String>> toleranceTaskInstanceList = new ArrayList<>();
for(TaskInstance taskInstance: toleranceTaskList){
LinkedHashMap<String, String> toleranceWorkerContentMap = new LinkedHashMap();
toleranceWorkerContentMap.put("process name", processInstance.getName());
toleranceWorkerContentMap.put("task name", taskInstance.getName());
toleranceWorkerContentMap.put("host", taskInstance.getHost());
toleranceWorkerContentMap.put("task retry times", String.valueOf(taskInstance.getRetryTimes()));
toleranceTaskInstanceList.add(toleranceWorkerContentMap);
}
return JSONUtils.toJson(toleranceTaskInstanceList);
}
/**
* send worker alert fault tolerance
*
* @param processInstance process instance
* @param toleranceTaskList tolerance task list
*/
public void sendAlertWorkerToleranceFault(ProcessInstance processInstance, List<TaskInstance> toleranceTaskList){
try{
Alert alert = new Alert();
alert.setTitle("worker fault tolerance");
alert.setShowType(ShowType.TABLE);
String content = getWorkerToleranceContent(processInstance, toleranceTaskList);
alert.setContent(content);
alert.setAlertType(AlertType.EMAIL);
alert.setCreateTime(new Date());
alert.setAlertGroupId(processInstance.getWarningGroupId() == null ? 1:processInstance.getWarningGroupId());
alert.setReceivers(processInstance.getProcessDefinition().getReceivers());
alert.setReceiversCc(processInstance.getProcessDefinition().getReceiversCc());
alertDao.addAlert(alert);
logger.info("add alert to db , alert : {}", alert.toString());
}catch (Exception e){
logger.error("send alert failed:{} ", e.getMessage());
}
}
/**
* send process instance alert
* @param processInstance process instance
* @param taskInstances task instance list
*/
public void sendAlertProcessInstance(ProcessInstance processInstance,
List<TaskInstance> taskInstances){
boolean sendWarnning = false;
WarningType warningType = processInstance.getWarningType();
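// decide whether to send an alert based on the configured warning type and the final state of the process instance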
switch (warningType){
case ALL:
if(processInstance.getState().typeIsFinished()){
sendWarnning = true;
}
break;
case SUCCESS:
if(processInstance.getState().typeIsSuccess()){
sendWarnning = true;
}
break;
case FAILURE:
if(processInstance.getState().typeIsFailure()){
sendWarnning = true;
}
break;
default:
}
if(!sendWarnning){
return;
}
Alert alert = new Alert();
String cmdName = getCommandCnName(processInstance.getCommandType());
String success = processInstance.getState().typeIsSuccess() ? "success" :"failed";
alert.setTitle(cmdName + " " + success);
ShowType showType = processInstance.getState().typeIsSuccess() ? ShowType.TEXT : ShowType.TABLE;
alert.setShowType(showType);
String content = getContentProcessInstance(processInstance, taskInstances);
alert.setContent(content);
alert.setAlertType(AlertType.EMAIL);
alert.setAlertGroupId(processInstance.getWarningGroupId());
alert.setCreateTime(new Date());
alert.setReceivers(processInstance.getProcessDefinition().getReceivers());
alert.setReceiversCc(processInstance.getProcessDefinition().getReceiversCc());
alertDao.addAlert(alert);
logger.info("add alert to db , alert: {}", alert.toString());
}
/**
* send process timeout alert
*
* @param processInstance process instance
* @param processDefinition process definition
*/
public void sendProcessTimeoutAlert(ProcessInstance processInstance, ProcessDefinition processDefinition) {
alertDao.sendProcessTimeoutAlert(processInstance, processDefinition);
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,958 |
[Bug][api] files should not be created successfully in the directory of the authorized file
|
1. Authorize user A's /pg_cxc/pg_1.sh file to user B
2. User B can successfully create files in the pg_cxc directory

### Expected result
Since only a file inside the directory is authorized, user B should not have permission on the directory pg_cxc itself.
**Which version of Dolphin Scheduler:**
-[1.3.2]
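A minimal sketch of the expected check, using hypothetical helper names (`canCreateUnderDirectory` and `queryResourcesIdListByUserId` are illustrative, not the project's real API): creating a file under a directory should require that the directory itself, not merely a file inside it, is owned by or authorized to the user.

```java
// sketch only: require authorization on the parent directory itself
private boolean canCreateUnderDirectory(User loginUser, Resource parentDir) {
    if (parentDir == null || isAdmin(loginUser)) {
        return true;
    }
    if (parentDir.getUserId() == loginUser.getId()) {
        return true;
    }
    // hypothetical mapper query: ids of resources explicitly authorized to this user
    List<Integer> authorizedIds = resourceUserMapper.queryResourcesIdListByUserId(loginUser.getId());
    return authorizedIds != null && authorizedIds.contains(parentDir.getId());
}
```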
|
https://github.com/apache/dolphinscheduler/issues/3958
|
https://github.com/apache/dolphinscheduler/pull/3980
|
075d1638d0e46f6b6dd3658a6188ea482a0f4802
|
292b0fce0b5fc621a3355d43e989950dceeecd7e
| 2020-10-21T02:56:58Z |
java
| 2020-10-23T06:58:17Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* resources service
*/
@Service
public class ResourcesService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesService.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (DuplicateKeyException e) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirecotry(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
// check whether the resource name already exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String fullName, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
if (resources != null && resources.size() > 0) {
return true;
}
return false;
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @param file resource file
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type,
MultipartFile file) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (file == null && name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
// check whether the resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
if (file != null) {
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(FileUtils.suffix(originFullName))) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
if (!resource.isDirectory()) {
//get the origin file suffix
String originSuffix = FileUtils.suffix(originFullName);
String suffix = FileUtils.suffix(fullName);
boolean suffixIsChanged = false;
if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) {
suffixIsChanged = true;
}
if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) {
suffixIsChanged = true;
}
//verify whether suffix is changed
if (suffixIsChanged) {
//need verify whether this resource is authorized to other users
Map<String, Object> columnMap = new HashMap<>();
columnMap.put("resources_id", resourceId);
List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap);
if (CollectionUtils.isNotEmpty(resourcesUsers)) {
List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList());
List<User> users = userMapper.selectBatchIds(userIds);
String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString();
logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames);
putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames);
return result;
}
}
}
// update resource data
Date now = new Date();
resource.setAlias(name);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
if (file != null) {
resource.setFileName(file.getOriginalFilename());
resource.setSize(file.getSize());
}
try {
resourcesMapper.updateById(resource);
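// when a directory is renamed, propagate the new full name to all child resources and to any UDF functions bound to them;
// for a renamed UDF jar, update the bound UDF functions directly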
if (resource.isDirectory()) {
List<Integer> childrenResource = listAllChildren(resource,false);
if (CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList = new ArrayList<>();
Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]);
List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray);
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
}
} else if (ResourceType.UDF.equals(resource.getType())) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId});
if (CollectionUtils.isNotEmpty(udfFuncs)) {
udfFuncs = udfFuncs.stream().map(t -> {
t.setResourceName(fullName);
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
udfFunctionMapper.batchUpdateUdfFunc(udfFuncs);
}
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>(5);
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name) && file == null) {
return result;
}
if (file != null) {
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
if (!fullName.equals(originFullName)) {
try {
HadoopUtils.getInstance().delete(originHdfsFileName,false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(String.format("delete resource: %s failed.", originFullName));
}
}
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
public Map<String, Object> queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(5);
Page<Resource> page = new Page(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId= 0;
}
if (direcotryId != -1) {
Resource directory = resourcesMapper.selectById(direcotryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId,direcotryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
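// upload flow: validate that the file suffix matches the target name, copy the multipart file to a temporary local path, then copy it into HDFS under the tenant's resource directory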
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query resource list by program type
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceByProgramType(User loginUser, ResourceType type, ProgramType programType) {
Map<String, Object> result = new HashMap<>(5);
String suffix = ".jar";
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
if (programType != null) {
switch (programType) {
case JAVA:
break;
case SCALA:
break;
case PYTHON:
suffix = ".py";
break;
}
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
List<Resource> resources = new ResourceFilter(suffix,new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//get resource and hdfs path
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
        // get all resource ids referenced by released (online) process definitions
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
        // if resource type is UDF, need to check whether it is bound by UDF functions
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
     * @return verify result: SUCCESS if the resource name does not exist, otherwise the corresponding error status
*/
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if(tenant != null){
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
}else{
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
     * query resource by full name or pid and type
     * @param fullName resource full name
     * @param id resource id
     * @param type resource type
     * @return the resource queried by full name, or the parent resource when queried by id
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check preview or not by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
     * update resource content
     *
     * @param resourceId resource id
     * @param content content
     * @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check can edit by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// write file fail
logger.error("file {} fail, content is {}", localFilename, content);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
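            // overwrite: if a file already exists at the target path, remove it first so the new content replaces it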
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws Exception exception
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
int userId = resource.getUserId();
User user = userMapper.selectById(userId);
if(user == null){
logger.error("user id {} not exists", userId);
throw new RuntimeException(String.format("resource owner id %d not exist",userId));
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if(tenant == null){
logger.error("tenant id {} not exists", user.getTenantId());
throw new RuntimeException(String.format("The tenant id %d of resource owner not exist",user.getTenantId()));
}
String tenantCode = tenant.getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* list all file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list ;
if (resourceList != null && resourceList.size() > 0) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
}else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
//only admin can operate
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet = null;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)){
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get authorized resource list
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet = null;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
     * @return tenant code, or null if the user or tenant does not exist
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.selectById(userId);
if (user == null) {
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null){
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether add self to children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
List<Integer> children = resourcesMapper.listChildren(resourceId);
        for (int childId : children) {
            childList.add(childId);
            listAllChildren(childId, childList);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,956 |
[Bug][Master] When running a task, the resource file is lost, which results in an error
|
When running a shell task, the resource file is lost, which results in an error
TaskParams does not have a resourceList field, so when the task params JSON is converted through it the resourceList content is lost.
Not only shell tasks: the params of other task types may be overridden in the same way.
The code is as follows:
VarPoolUtils#setTaskNodeLocalParams
```
/**
* setTaskNodeLocalParams
* @param taskNode taskNode
* @param prop LocalParamName
* @param value LocalParamValue
*/
public static void setTaskNodeLocalParams(TaskNode taskNode, String prop, Object value) {
String taskParamsJson = taskNode.getParams();
TaskParams taskParams = JSONUtils.parseObject(taskParamsJson, TaskParams.class);
if (taskParams == null) {
return;
}
taskParams.setLocalParamValue(prop, value);
taskNode.setParams(JSONUtils.toJsonString(taskParams));
}
```
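One way to avoid losing resourceList is to stop binding the params JSON to the narrow TaskParams POJO and instead edit it as a generic JSON tree, so any fields the POJO does not model survive the round trip. The sketch below only illustrates that idea with plain Jackson (the class name `LocalParamJsonPatcher` is made up here); it is not a claim about what PR #3957 actually changed.
```
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class LocalParamJsonPatcher {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    /**
     * Update the value of one local param while keeping every field the POJO
     * does not know about (resourceList, rawScript, ...) intact in the JSON.
     */
    public static String setLocalParamValue(String taskParamsJson, String prop, Object value) throws Exception {
        JsonNode root = MAPPER.readTree(taskParamsJson);
        JsonNode localParams = root.path("localParams");
        if (localParams.isArray()) {
            for (JsonNode param : localParams) {
                if (param.isObject() && prop.equals(param.path("prop").asText())) {
                    ((ObjectNode) param).put("value", String.valueOf(value));
                }
            }
        }
        // unknown fields survive because the JSON was never bound to TaskParams
        return MAPPER.writeValueAsString(root);
    }
}
```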
|
https://github.com/apache/dolphinscheduler/issues/3956
|
https://github.com/apache/dolphinscheduler/pull/3957
|
8fd3932bff374a03ef159bd2a2f495728c8e5d4e
|
05b248f8e3323434c83851d26b1fcf6dc61eef4e
| 2020-10-20T11:23:46Z |
java
| 2020-10-24T09:50:33Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/TaskParams.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.task;
import java.util.Map;
public class TaskParams {
private String rawScript;
private Map<String, String>[] localParams;
public void setRawScript(String rawScript) {
this.rawScript = rawScript;
}
public void setLocalParams(Map<String, String>[] localParams) {
this.localParams = localParams;
}
public String getRawScript() {
return rawScript;
}
public void setLocalParamValue(String prop, Object value) {
if (localParams == null || value == null) {
return;
}
for (int i = 0; i < localParams.length; i++) {
if (localParams[i].get("prop").equals(prop)) {
localParams[i].put("value", (String)value);
}
}
}
public void setLocalParamValue(Map<String, Object> propToValue) {
if (localParams == null || propToValue == null) {
return;
}
for (int i = 0; i < localParams.length; i++) {
String prop = localParams[i].get("prop");
if (propToValue.containsKey(prop)) {
localParams[i].put("value",(String)propToValue.get(prop));
}
}
}
public String getLocalParamValue(String prop) {
if (localParams == null) {
return null;
}
for (int i = 0; i < localParams.length; i++) {
String tmpProp = localParams[i].get("prop");
if (tmpProp.equals(prop)) {
return localParams[i].get("value");
}
}
return null;
}
public Map<String, String>[] getLocalParams() {
return localParams;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,956 |
[Bug][Master] When running a task, the resource file is lost, which results in an error
|
When running a shell task, the resource file is lost, which results in an error
TaskParams does not have a resourceList field, so when the task params JSON is converted through it the resourceList content is lost.
Not only shell tasks: the params of other task types may be overridden in the same way.
The code is as follows:
VarPoolUtils#setTaskNodeLocalParams
```
/**
* setTaskNodeLocalParams
* @param taskNode taskNode
* @param prop LocalParamName
* @param value LocalParamValue
*/
public static void setTaskNodeLocalParams(TaskNode taskNode, String prop, Object value) {
String taskParamsJson = taskNode.getParams();
TaskParams taskParams = JSONUtils.parseObject(taskParamsJson, TaskParams.class);
if (taskParams == null) {
return;
}
taskParams.setLocalParamValue(prop, value);
taskNode.setParams(JSONUtils.toJsonString(taskParams));
}
```
|
https://github.com/apache/dolphinscheduler/issues/3956
|
https://github.com/apache/dolphinscheduler/pull/3957
|
8fd3932bff374a03ef159bd2a2f495728c8e5d4e
|
05b248f8e3323434c83851d26b1fcf6dc61eef4e
| 2020-10-20T11:23:46Z |
java
| 2020-10-24T09:50:33Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/VarPoolUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.task.TaskParams;
import java.text.ParseException;
import java.util.Map;
public class VarPoolUtils {
/**
* getTaskNodeLocalParam
* @param taskNode taskNode
* @param prop prop
* @return localParamForProp
*/
public static Object getTaskNodeLocalParam(TaskNode taskNode, String prop) {
String taskParamsJson = taskNode.getParams();
TaskParams taskParams = JSONUtils.parseObject(taskParamsJson, TaskParams.class);
if (taskParams == null) {
return null;
}
return taskParams.getLocalParamValue(prop);
}
/**
* setTaskNodeLocalParams
* @param taskNode taskNode
* @param prop LocalParamName
* @param value LocalParamValue
*/
public static void setTaskNodeLocalParams(TaskNode taskNode, String prop, Object value) {
String taskParamsJson = taskNode.getParams();
TaskParams taskParams = JSONUtils.parseObject(taskParamsJson, TaskParams.class);
if (taskParams == null) {
return;
}
taskParams.setLocalParamValue(prop, value);
taskNode.setParams(JSONUtils.toJsonString(taskParams));
}
/**
* setTaskNodeLocalParams
* @param taskNode taskNode
* @param propToValue propToValue
*/
public static void setTaskNodeLocalParams(TaskNode taskNode, Map<String, Object> propToValue) {
String taskParamsJson = taskNode.getParams();
TaskParams taskParams = JSONUtils.parseObject(taskParamsJson, TaskParams.class);
if (taskParams == null) {
return;
}
taskParams.setLocalParamValue(propToValue);
taskNode.setParams(JSONUtils.toJsonString(taskParams));
}
/**
* convertVarPoolToMap
* @param propToValue propToValue
* @param varPool varPool
* @throws ParseException ParseException
*/
public static void convertVarPoolToMap(Map<String, Object> propToValue, String varPool) throws ParseException {
if (varPool == null || propToValue == null) {
return;
}
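        // varPool is a flat string of prop,value pairs separated by $VarPool$,
        // e.g. "p1,66$VarPool$p2,69$VarPool$" (see VarPoolUtilsTest#testConvertVarPoolToMap)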
String[] splits = varPool.split("\\$VarPool\\$");
for (String kv : splits) {
String[] kvs = kv.split(",");
if (kvs.length == 2) {
propToValue.put(kvs[0], kvs[1]);
} else {
throw new ParseException(kv, 2);
}
}
}
/**
* convertPythonScriptPlaceholders
* @param rawScript rawScript
* @return String
* @throws StringIndexOutOfBoundsException StringIndexOutOfBoundsException
*/
public static String convertPythonScriptPlaceholders(String rawScript) throws StringIndexOutOfBoundsException {
int len = "${setShareVar(${".length();
int scriptStart = 0;
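        // rewrite each ${setShareVar(${prop},value)} placeholder into a python print of a ${setValue(prop,value)} line
        // (the expected output format is shown in VarPoolUtilsTest#testConvertPythonScriptPlaceholders)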
while ((scriptStart = rawScript.indexOf("${setShareVar(${", scriptStart)) != -1) {
int start = -1;
int end = rawScript.indexOf('}', scriptStart + len);
String prop = rawScript.substring(scriptStart + len, end);
start = rawScript.indexOf(',', end);
end = rawScript.indexOf(')', start);
String value = rawScript.substring(start + 1, end);
start = rawScript.indexOf('}', start) + 1;
end = rawScript.length();
String replaceScript = String.format("print(\"${{setValue({},{})}}\".format(\"%s\",%s))", prop, value);
rawScript = rawScript.substring(0, scriptStart) + replaceScript + rawScript.substring(start, end);
scriptStart += replaceScript.length();
}
return rawScript;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,956 |
[Bug][Master] When running a task, the resource file is lost, which results in an error
|
When running a shell task, the resource file is lost, which results in an error
TaskParams does not have a resourceList field, so when the task params JSON is converted through it the resourceList content is lost.
Not only shell tasks: the params of other task types may be overridden in the same way.
The code is as follows:
VarPoolUtils#setTaskNodeLocalParams
```
/**
* setTaskNodeLocalParams
* @param taskNode taskNode
* @param prop LocalParamName
* @param value LocalParamValue
*/
public static void setTaskNodeLocalParams(TaskNode taskNode, String prop, Object value) {
String taskParamsJson = taskNode.getParams();
TaskParams taskParams = JSONUtils.parseObject(taskParamsJson, TaskParams.class);
if (taskParams == null) {
return;
}
taskParams.setLocalParamValue(prop, value);
taskNode.setParams(JSONUtils.toJsonString(taskParams));
}
```
|
https://github.com/apache/dolphinscheduler/issues/3956
|
https://github.com/apache/dolphinscheduler/pull/3957
|
8fd3932bff374a03ef159bd2a2f495728c8e5d4e
|
05b248f8e3323434c83851d26b1fcf6dc61eef4e
| 2020-10-20T11:23:46Z |
java
| 2020-10-24T09:50:33Z |
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/VarPoolUtilsTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.model.TaskNode;
import java.util.concurrent.ConcurrentHashMap;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class VarPoolUtilsTest {
private static final Logger logger = LoggerFactory.getLogger(VarPoolUtilsTest.class);
@Test
public void testSetTaskNodeLocalParams() {
String taskJson = "{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\","
+ "\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-75298\",\"maxRetryTimes\":0,\"name\":\"a1\","
+ "\"params\":\"{\\\"rawScript\\\":\\\"print(\\\\\\\"this is python task \\\\\\\",${p0})\\\","
+ "\\\"localParams\\\":[{\\\"prop\\\":\\\"p1\\\",\\\"direct\\\":\\\"IN\\\",\\\"type\\\":\\\"VARCHAR\\\",\\\"value\\\":\\\"1\\\"}],"
+ "\\\"resourceList\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\","
+ "\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\","
+ "\"type\":\"PYTHON\",\"workerGroup\":\"default\"}";
TaskNode taskNode = JSONUtils.parseObject(taskJson, TaskNode.class);
VarPoolUtils.setTaskNodeLocalParams(taskNode, "p1", "test1");
Assert.assertEquals(VarPoolUtils.getTaskNodeLocalParam(taskNode, "p1"), "test1");
ConcurrentHashMap<String, Object> propToValue = new ConcurrentHashMap<String, Object>();
propToValue.put("p1", "test2");
VarPoolUtils.setTaskNodeLocalParams(taskNode, propToValue);
Assert.assertEquals(VarPoolUtils.getTaskNodeLocalParam(taskNode, "p1"), "test2");
}
@Test
public void testConvertVarPoolToMap() throws Exception {
String varPool = "p1,66$VarPool$p2,69$VarPool$";
ConcurrentHashMap<String, Object> propToValue = new ConcurrentHashMap<String, Object>();
VarPoolUtils.convertVarPoolToMap(propToValue, varPool);
Assert.assertEquals((String)propToValue.get("p1"), "66");
Assert.assertEquals((String)propToValue.get("p2"), "69");
logger.info(propToValue.toString());
}
@Test
public void testConvertPythonScriptPlaceholders() throws Exception {
String rawScript = "print(${p1});\n${setShareVar(${p1},3)};\n${setShareVar(${p2},4)};";
rawScript = VarPoolUtils.convertPythonScriptPlaceholders(rawScript);
Assert.assertEquals(rawScript, "print(${p1});\n"
+ "print(\"${{setValue({},{})}}\".format(\"p1\",3));\n"
+ "print(\"${{setValue({},{})}}\".format(\"p2\",4));");
logger.info(rawScript);
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,929 |
[Bug][master/worker] After the master and worker are fault-tolerant, the process instance fails after the task instance is successfully executed
|
### Steps to reproduce
1. Start 2 master services (master1, master2), 2 worker services (worker1, worker2)
2. Run the workflow, the process instance runs in master1, and the task runs in worker1
3. Stop master1, worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2. After all tasks are executed successfully, the process instance status is failed



**Which branch**
-[1.3.3-release]
|
https://github.com/apache/dolphinscheduler/issues/3929
|
https://github.com/apache/dolphinscheduler/pull/3999
|
c11d97f7786eb72d601769a2ba3e1f97453f7c87
|
6caac0f36623cee4fa1cb2edfc35c7db87b08aa9
| 2020-10-16T08:52:08Z |
java
| 2020-10-27T09:37:59Z |
dolphinscheduler-api/src/main/resources/logback-api.xml
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config start -->
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-api-server.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>64MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- api server logback config end -->
<logger name="org.apache.zookeeper" level="WARN"/>
<logger name="org.apache.hbase" level="WARN"/>
<logger name="org.apache.hadoop" level="WARN"/>
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="APILOGFILE"/>
</root>
</configuration>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,929 |
[Bug][master/worker] After the master and worker are fault-tolerant, the process instance fails after the task instance is successfully executed
|
### Steps to reproduce
1. Start 2 master services (master1, master2), 2 worker services (worker1, worker2)
2. Run the workflow, the process instance runs in master1, and the task runs in worker1
3. Stop master1, worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2. After all tasks are executed successfully, the process instance status is failed



**Which branch**
-[1.3.3-release]
|
https://github.com/apache/dolphinscheduler/issues/3929
|
https://github.com/apache/dolphinscheduler/pull/3999
|
c11d97f7786eb72d601769a2ba3e1f97453f7c87
|
6caac0f36623cee4fa1cb2edfc35c7db87b08aa9
| 2020-10-16T08:52:08Z |
java
| 2020-10-27T09:37:59Z |
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/utils/DagHelper.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.utils;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
/**
* dag tools
*/
public class DagHelper {
private static final Logger logger = LoggerFactory.getLogger(DagHelper.class);
/**
* generate flow node relation list by task node list;
* Edges that are not in the task Node List will not be added to the result
* @param taskNodeList taskNodeList
* @return task node relation list
*/
public static List<TaskNodeRelation> generateRelationListByFlowNodes(List<TaskNode> taskNodeList) {
List<TaskNodeRelation> nodeRelationList = new ArrayList<>();
for (TaskNode taskNode : taskNodeList) {
String preTasks = taskNode.getPreTasks();
List<String> preTaskList = JSONUtils.toList(preTasks, String.class);
if (preTaskList != null) {
for (String depNodeName : preTaskList) {
if (null != findNodeByName(taskNodeList, depNodeName)) {
nodeRelationList.add(new TaskNodeRelation(depNodeName, taskNode.getName()));
}
}
}
}
return nodeRelationList;
}
/**
* generate task nodes needed by dag
* @param taskNodeList taskNodeList
* @param startNodeNameList startNodeNameList
* @param recoveryNodeNameList recoveryNodeNameList
* @param taskDependType taskDependType
* @return task node list
*/
public static List<TaskNode> generateFlowNodeListByStartNode(List<TaskNode> taskNodeList, List<String> startNodeNameList,
List<String> recoveryNodeNameList, TaskDependType taskDependType) {
List<TaskNode> destFlowNodeList = new ArrayList<>();
List<String> startNodeList = startNodeNameList;
if(taskDependType != TaskDependType.TASK_POST
&& CollectionUtils.isEmpty(startNodeList)){
logger.error("start node list is empty! cannot continue run the process ");
return destFlowNodeList;
}
List<TaskNode> destTaskNodeList = new ArrayList<>();
List<TaskNode> tmpTaskNodeList = new ArrayList<>();
if (taskDependType == TaskDependType.TASK_POST
&& CollectionUtils.isNotEmpty(recoveryNodeNameList)) {
startNodeList = recoveryNodeNameList;
}
if (CollectionUtils.isEmpty(startNodeList)) {
// no special designation start nodes
tmpTaskNodeList = taskNodeList;
} else {
// specified start nodes or resume execution
for (String startNodeName : startNodeList) {
TaskNode startNode = findNodeByName(taskNodeList, startNodeName);
List<TaskNode> childNodeList = new ArrayList<>();
if (startNode == null) {
logger.error("start node name [{}] is not in task node list [{}] ",
startNodeName,
taskNodeList
);
continue;
} else if (TaskDependType.TASK_POST == taskDependType) {
childNodeList = getFlowNodeListPost(startNode, taskNodeList);
} else if (TaskDependType.TASK_PRE == taskDependType) {
childNodeList = getFlowNodeListPre(startNode, recoveryNodeNameList, taskNodeList);
} else {
childNodeList.add(startNode);
}
tmpTaskNodeList.addAll(childNodeList);
}
}
for (TaskNode taskNode : tmpTaskNodeList) {
if (null == findNodeByName(destTaskNodeList, taskNode.getName())) {
destTaskNodeList.add(taskNode);
}
}
return destTaskNodeList;
}
/**
* find all the nodes that depended on the start node
* @param startNode startNode
* @param taskNodeList taskNodeList
* @return task node list
*/
private static List<TaskNode> getFlowNodeListPost(TaskNode startNode, List<TaskNode> taskNodeList) {
List<TaskNode> resultList = new ArrayList<>();
for (TaskNode taskNode : taskNodeList) {
List<String> depList = taskNode.getDepList();
if (null != depList && null != startNode && depList.contains(startNode.getName())) {
resultList.addAll(getFlowNodeListPost(taskNode, taskNodeList));
}
}
resultList.add(startNode);
return resultList;
}
/**
* find all nodes that start nodes depend on.
* @param startNode startNode
* @param recoveryNodeNameList recoveryNodeNameList
* @param taskNodeList taskNodeList
* @return task node list
*/
private static List<TaskNode> getFlowNodeListPre(TaskNode startNode, List<String> recoveryNodeNameList, List<TaskNode> taskNodeList) {
List<TaskNode> resultList = new ArrayList<>();
List<String> depList = new ArrayList<>();
if (null != startNode) {
depList = startNode.getDepList();
resultList.add(startNode);
}
if (CollectionUtils.isEmpty(depList)) {
return resultList;
}
for (String depNodeName : depList) {
TaskNode start = findNodeByName(taskNodeList, depNodeName);
if (recoveryNodeNameList.contains(depNodeName)) {
resultList.add(start);
} else {
resultList.addAll(getFlowNodeListPre(start, recoveryNodeNameList, taskNodeList));
}
}
return resultList;
}
/**
* generate dag by start nodes and recovery nodes
* @param processDefinitionJson processDefinitionJson
* @param startNodeNameList startNodeNameList
* @param recoveryNodeNameList recoveryNodeNameList
* @param depNodeType depNodeType
* @return process dag
* @throws Exception if error throws Exception
*/
public static ProcessDag generateFlowDag(String processDefinitionJson,
List<String> startNodeNameList,
List<String> recoveryNodeNameList,
TaskDependType depNodeType) throws Exception {
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = new ArrayList<>();
if (null != processData) {
taskNodeList = processData.getTasks();
}
List<TaskNode> destTaskNodeList = generateFlowNodeListByStartNode(taskNodeList, startNodeNameList, recoveryNodeNameList, depNodeType);
if (destTaskNodeList.isEmpty()) {
return null;
}
List<TaskNodeRelation> taskNodeRelations = generateRelationListByFlowNodes(destTaskNodeList);
ProcessDag processDag = new ProcessDag();
processDag.setEdges(taskNodeRelations);
processDag.setNodes(destTaskNodeList);
return processDag;
}
/**
* parse the forbidden task nodes in process definition.
* @param processDefinitionJson processDefinitionJson
* @return task node map
*/
public static Map<String, TaskNode> getForbiddenTaskNodeMaps(String processDefinitionJson){
Map<String, TaskNode> forbidTaskNodeMap = new ConcurrentHashMap<>();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = new ArrayList<>();
if (null != processData) {
taskNodeList = processData.getTasks();
}
for(TaskNode node : taskNodeList){
if(node.isForbidden()){
forbidTaskNodeMap.putIfAbsent(node.getName(), node);
}
}
return forbidTaskNodeMap;
}
/**
* find node by node name
* @param nodeDetails nodeDetails
* @param nodeName nodeName
* @return task node
*/
public static TaskNode findNodeByName(List<TaskNode> nodeDetails, String nodeName) {
for (TaskNode taskNode : nodeDetails) {
if (taskNode.getName().equals(nodeName)) {
return taskNode;
}
}
return null;
}
/**
* get start vertex in one dag
     * it will find the post nodes if the start vertex is forbidden or already complete
* @param parentNodeName previous node
* @param dag dag
* @param completeTaskList completeTaskList
* @return start Vertex list
*/
public static Collection<String> getStartVertex(String parentNodeName, DAG<String, TaskNode, TaskNodeRelation> dag,
Map<String, TaskInstance> completeTaskList){
if(completeTaskList == null){
completeTaskList = new HashMap<>();
}
Collection<String> startVertexs = null;
if(StringUtils.isNotEmpty(parentNodeName)){
startVertexs = dag.getSubsequentNodes(parentNodeName);
}else{
startVertexs = dag.getBeginNode();
}
List<String> tmpStartVertexs = new ArrayList<>();
if(startVertexs!= null){
tmpStartVertexs.addAll(startVertexs);
}
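        // a start vertex that is forbidden or already complete is not submitted itself;
        // it is replaced below by those of its successors that are ready to submit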
for(String start : startVertexs){
TaskNode startNode = dag.getNode(start);
if(!startNode.isForbidden() && !completeTaskList.containsKey(start)){
// the start can be submit if not forbidden and not in complete tasks
continue;
}
// then submit the post nodes
Collection<String> postNodes = getStartVertex(start, dag, completeTaskList);
for(String post : postNodes){
TaskNode postNode = dag.getNode(post);
if(taskNodeCanSubmit(postNode, dag, completeTaskList)){
tmpStartVertexs.add(post);
}
}
tmpStartVertexs.remove(start);
}
return tmpStartVertexs;
}
/**
     * the task can be submitted when all the nodes it depends on are forbidden or complete
* @param taskNode taskNode
* @param dag dag
* @param completeTaskList completeTaskList
* @return can submit
*/
public static boolean taskNodeCanSubmit(TaskNode taskNode,
DAG<String, TaskNode, TaskNodeRelation> dag,
Map<String, TaskInstance> completeTaskList) {
List<String> dependList = taskNode.getDepList();
if(dependList == null){
return true;
}
for(String dependNodeName : dependList){
TaskNode dependNode = dag.getNode(dependNodeName);
if(!dependNode.isForbidden() && !completeTaskList.containsKey(dependNodeName)){
return false;
}
}
return true;
}
/***
* build dag graph
* @param processDag processDag
* @return dag
*/
public static DAG<String, TaskNode, TaskNodeRelation> buildDagGraph(ProcessDag processDag) {
DAG<String,TaskNode,TaskNodeRelation> dag = new DAG<>();
//add vertex
if (CollectionUtils.isNotEmpty(processDag.getNodes())){
for (TaskNode node : processDag.getNodes()){
dag.addNode(node.getName(),node);
}
}
//add edge
if (CollectionUtils.isNotEmpty(processDag.getEdges())){
for (TaskNodeRelation edge : processDag.getEdges()){
dag.addEdge(edge.getStartNode(),edge.getEndNode());
}
}
return dag;
}
/**
* get process dag
* @param taskNodeList task node list
* @return Process dag
*/
public static ProcessDag getProcessDag(List<TaskNode> taskNodeList) {
List<TaskNodeRelation> taskNodeRelations = new ArrayList<>();
// Traverse node information and build relationships
for (TaskNode taskNode : taskNodeList) {
String preTasks = taskNode.getPreTasks();
List<String> preTasksList = JSONUtils.toList(preTasks, String.class);
// If the dependency is not empty
if (preTasksList != null) {
for (String depNode : preTasksList) {
taskNodeRelations.add(new TaskNodeRelation(depNode, taskNode.getName()));
}
}
}
ProcessDag processDag = new ProcessDag();
processDag.setEdges(taskNodeRelations);
processDag.setNodes(taskNodeList);
return processDag;
}
/**
     * whether there is a conditions task after the parent node
* @param parentNodeName
* @return
*/
public static boolean haveConditionsAfterNode(String parentNodeName,
DAG<String, TaskNode, TaskNodeRelation> dag
){
boolean result = false;
Set<String> subsequentNodes = dag.getSubsequentNodes(parentNodeName);
if(CollectionUtils.isEmpty(subsequentNodes)){
return result;
}
for(String nodeName : subsequentNodes){
TaskNode taskNode = dag.getNode(nodeName);
List<String> preTasksList = JSONUtils.toList(taskNode.getPreTasks(), String.class);
if(preTasksList.contains(parentNodeName) && taskNode.isConditionsTask()){
return true;
}
}
return result;
}
/**
     * whether there is a conditions task after the parent node
* @param parentNodeName
* @return
*/
public static boolean haveConditionsAfterNode(String parentNodeName,
List<TaskNode> taskNodes
){
boolean result = false;
if(CollectionUtils.isEmpty(taskNodes)){
return result;
}
for(TaskNode taskNode : taskNodes){
List<String> preTasksList = JSONUtils.toList(taskNode.getPreTasks(), String.class);
if(preTasksList.contains(parentNodeName) && taskNode.isConditionsTask()){
return true;
}
}
return result;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,929 |
[Bug][master/worker] After the master and worker are fault-tolerant, the process instance fails after the task instance is successfully executed
|
### Steps to reproduce
1. Start 2 master services (master1, master2), 2 worker services (worker1, worker2)
2. Run the workflow, the process instance runs in master1, and the task runs in worker1
3. Stop master1, worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2. After all tasks are executed successfully, the process instance status is failed



**Which branch**
-[1.3.3-release]
|
https://github.com/apache/dolphinscheduler/issues/3929
|
https://github.com/apache/dolphinscheduler/pull/3999
|
c11d97f7786eb72d601769a2ba3e1f97453f7c87
|
6caac0f36623cee4fa1cb2edfc35c7db87b08aa9
| 2020-10-16T08:52:08Z |
java
| 2020-10-27T09:37:59Z |
dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/utils/DagHelperTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.dao.utils;
import com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessData;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* dag helper test
*/
public class DagHelperTest {
/**
* test task node can submit
* @throws JsonProcessingException if error throws JsonProcessingException
*/
@Test
public void testTaskNodeCanSubmit() throws JsonProcessingException {
//1->2->3->5
//4->3
DAG<String, TaskNode, TaskNodeRelation> dag = generateDag();
TaskNode taskNode3 = dag.getNode("3");
Map<String, TaskInstance > completeTaskList = new HashMap<>();
completeTaskList.putIfAbsent("1", new TaskInstance());
Boolean canSubmit = false;
        // 2 and 4 are forbidden, so 3 can be submitted
TaskNode node2 = dag.getNode("2");
node2.setRunFlag(Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
TaskNode nodex = dag.getNode("4");
nodex.setRunFlag(Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
canSubmit = DagHelper.taskNodeCanSubmit(taskNode3, dag, completeTaskList);
Assert.assertEquals(canSubmit, true);
        // 2 is complete, but 4 is no longer forbidden and not complete, so 3 cannot be submitted
completeTaskList.putIfAbsent("2", new TaskInstance());
TaskNode nodey = dag.getNode("4");
nodey.setRunFlag("");
canSubmit = DagHelper.taskNodeCanSubmit(taskNode3, dag, completeTaskList);
Assert.assertEquals(canSubmit, false);
        // 3 is forbidden, so 5 can be submitted
TaskNode node3 = dag.getNode("3");
node3.setRunFlag(Constants.FLOWNODE_RUN_FLAG_FORBIDDEN);
TaskNode node5 = dag.getNode("5");
canSubmit = DagHelper.taskNodeCanSubmit(node5, dag, completeTaskList);
Assert.assertEquals(canSubmit, true);
}
/**
* 1->2->3->5
* 4->3
* @return dag
* @throws JsonProcessingException if error throws JsonProcessingException
*/
private DAG<String, TaskNode, TaskNodeRelation> generateDag() throws JsonProcessingException {
List<TaskNode> taskNodeList = new ArrayList<>();
TaskNode node1 = new TaskNode();
node1.setId("1");
node1.setName("1");
taskNodeList.add(node1);
TaskNode node2 = new TaskNode();
node2.setId("2");
node2.setName("2");
List<String> dep2 = new ArrayList<>();
dep2.add("1");
node2.setDepList(dep2);
taskNodeList.add(node2);
TaskNode node4 = new TaskNode();
node4.setId("4");
node4.setName("4");
taskNodeList.add(node4);
TaskNode node3 = new TaskNode();
node3.setId("3");
node3.setName("3");
List<String> dep3 = new ArrayList<>();
dep3.add("2");
dep3.add("4");
node3.setDepList(dep3);
taskNodeList.add(node3);
TaskNode node5 = new TaskNode();
node5.setId("5");
node5.setName("5");
List<String> dep5 = new ArrayList<>();
dep5.add("3");
node5.setDepList(dep5);
taskNodeList.add(node5);
List<String> startNodes = new ArrayList<>();
List<String> recoveryNodes = new ArrayList<>();
List<TaskNode> destTaskNodeList = DagHelper.generateFlowNodeListByStartNode(taskNodeList,
startNodes, recoveryNodes, TaskDependType.TASK_POST);
List<TaskNodeRelation> taskNodeRelations = DagHelper.generateRelationListByFlowNodes(destTaskNodeList);
ProcessDag processDag = new ProcessDag();
processDag.setEdges(taskNodeRelations);
processDag.setNodes(destTaskNodeList);
return DagHelper.buildDagGraph(processDag);
}
@Test
public void testBuildDagGraph() {
String shellJson = "{\"globalParams\":[],\"tasks\":[{\"type\":\"SHELL\",\"id\":\"tasks-9527\",\"name\":\"shell-1\"," +
"\"params\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"#!/bin/bash\\necho \\\"shell-1\\\"\"}," +
"\"description\":\"\",\"runFlag\":\"NORMAL\",\"dependence\":{},\"maxRetryTimes\":\"0\",\"retryInterval\":\"1\"," +
"\"timeout\":{\"strategy\":\"\",\"interval\":1,\"enable\":false},\"taskInstancePriority\":\"MEDIUM\"," +
"\"workerGroupId\":-1,\"preTasks\":[]}],\"tenantId\":1,\"timeout\":0}";
ProcessData processData = JSONUtils.parseObject(shellJson, ProcessData.class);
assert processData != null;
List<TaskNode> taskNodeList = processData.getTasks();
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList);
DAG<String, TaskNode, TaskNodeRelation> dag = DagHelper.buildDagGraph(processDag);
Assert.assertNotNull(dag);
}
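/**
 * Additional sketch (not part of the original test class): with 1 and 2 complete and
 * 4 neither complete nor forbidden, 5 is not expected to be submittable, because its
 * dependency chain through 3 and 4 is still unresolved.
 */
@Test
public void testTaskNodeCanNotSubmitWhenDependencyUnresolved() throws JsonProcessingException {
//1->2->3->5
//4->3
DAG<String, TaskNode, TaskNodeRelation> dag = generateDag();
Map<String, TaskInstance> completeTaskList = new HashMap<>();
completeTaskList.putIfAbsent("1", new TaskInstance());
completeTaskList.putIfAbsent("2", new TaskInstance());
// 3 is neither complete nor forbidden, so 5 cannot be submitted yet
Boolean canSubmit = DagHelper.taskNodeCanSubmit(dag.getNode("5"), dag, completeTaskList);
Assert.assertFalse(canSubmit);
}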
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,929 |
[Bug][master/worker] After the master and worker are fault-tolerant, the process instance fails after the task instance is successfully executed
|
### Steps to reproduce
1. Start 2 master services (master1, master2), 2 worker services (worker1, worker2)
2. Run the workflow, the process instance runs in master1, and the task runs in worker1
3. Stop master1, worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2. After all tasks are executed successfully, the process instance status is failed
============================================
### Steps to reproduce
1. Start 2 master services (master1, master2) and 2 worker services (worker1, worker2)
2. Run the workflow; the process instance runs on master1 and the task runs on worker1
3. Stop the master1 and worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2; after all tasks execute successfully, the process instance status is failed
(screenshots attached to the original issue)
**Which branch**
-[1.3.3-release]
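
For context, the master run loop (see MasterExecThread.runProcess in the file contents below) re-queues a task that ends in NEED_FAULT_TOLERANCE instead of recording it as a terminal error, which is why the process instance is expected to finish successfully once every task succeeds. The snippet below is a minimal, self-contained sketch of the status semantics involved; the class name is made up for illustration, and it only assumes the dolphinscheduler-common module and JUnit 4 on the classpath.

```java
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.junit.Assert;
import org.junit.Test;

public class FaultToleranceStatusSketchTest {

    @Test
    public void needFaultToleranceIsAFailureTypeState() {
        // NEED_FAULT_TOLERANCE falls into the failure branch of the master loop,
        // so a tolerated task must be retried rather than treated as a terminal error
        Assert.assertTrue(ExecutionStatus.NEED_FAULT_TOLERANCE.typeIsFailure());
        // a successfully finished task must not be counted as a failure
        Assert.assertTrue(ExecutionStatus.SUCCESS.typeIsSuccess());
        Assert.assertFalse(ExecutionStatus.SUCCESS.typeIsFailure());
    }
}
```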
|
https://github.com/apache/dolphinscheduler/issues/3929
|
https://github.com/apache/dolphinscheduler/pull/3999
|
c11d97f7786eb72d601769a2ba3e1f97453f7c87
|
6caac0f36623cee4fa1cb2edfc35c7db87b08aa9
| 2020-10-16T08:52:08Z |
java
| 2020-10-27T09:37:59Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/ConditionsTaskExecThread.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.model.DependentItem;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
import org.apache.dolphinscheduler.common.task.dependent.DependentParameters;
import org.apache.dolphinscheduler.common.utils.DependentUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
public class ConditionsTaskExecThread extends MasterBaseTaskExecThread {
/**
* dependent parameters
*/
private DependentParameters dependentParameters;
/**
* complete task map
*/
private Map<String, ExecutionStatus> completeTaskList = new ConcurrentHashMap<>();
/**
* condition result
*/
private DependResult conditionResult;
/**
* constructor of ConditionsTaskExecThread
*
* @param taskInstance task instance
*/
public ConditionsTaskExecThread(TaskInstance taskInstance) {
super(taskInstance);
}
@Override
public Boolean submitWaitComplete() {
try{
this.taskInstance = submit();
logger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX,
taskInstance.getProcessDefinitionId(),
taskInstance.getProcessInstanceId(),
taskInstance.getId()));
String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, processService.formatTaskAppId(this.taskInstance));
Thread.currentThread().setName(threadLoggerInfoName);
initTaskParameters();
logger.info("conditions task start");
waitTaskQuit();
updateTaskState();
}catch (Exception e){
logger.error("conditions task run exception" , e);
}
return true;
}
private void waitTaskQuit() {
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(
taskInstance.getProcessInstanceId()
);
for(TaskInstance task : taskInstances){
completeTaskList.putIfAbsent(task.getName(), task.getState());
}
List<DependResult> modelResultList = new ArrayList<>();
for(DependentTaskModel dependentTaskModel : dependentParameters.getDependTaskList()){
List<DependResult> itemDependResult = new ArrayList<>();
for(DependentItem item : dependentTaskModel.getDependItemList()){
itemDependResult.add(getDependResultForItem(item));
}
DependResult modelResult = DependentUtils.getDependResultForRelation(dependentTaskModel.getRelation(), itemDependResult);
modelResultList.add(modelResult);
}
conditionResult = DependentUtils.getDependResultForRelation(
dependentParameters.getRelation(), modelResultList
);
logger.info("the conditions task depend result : {}", conditionResult);
}
/**
* update the task state according to the condition result
*/
private void updateTaskState() {
ExecutionStatus status;
if(this.cancel){
status = ExecutionStatus.KILL;
}else{
status = (conditionResult == DependResult.SUCCESS) ? ExecutionStatus.SUCCESS : ExecutionStatus.FAILURE;
}
taskInstance.setState(status);
taskInstance.setEndTime(new Date());
processService.updateTaskInstance(taskInstance);
}
private void initTaskParameters() {
this.taskInstance.setLogPath(getTaskLogPath(taskInstance));
this.taskInstance.setHost(OSUtils.getHost() + Constants.COLON + masterConfig.getListenPort());
taskInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
taskInstance.setStartTime(new Date());
this.processService.saveTaskInstance(taskInstance);
this.dependentParameters = JSONUtils.parseObject(this.taskInstance.getDependency(), DependentParameters.class);
}
/**
* get the depend result for a single depend item
* @param item depend item
* @return depend result
*/
private DependResult getDependResultForItem(DependentItem item){
DependResult dependResult = DependResult.SUCCESS;
if(!completeTaskList.containsKey(item.getDepTasks())){
logger.info("depend item: {} have not completed yet.", item.getDepTasks());
dependResult = DependResult.FAILED;
return dependResult;
}
ExecutionStatus executionStatus = completeTaskList.get(item.getDepTasks());
if(executionStatus != item.getStatus()){
logger.info("depend item : {} expect status: {}, actual status: {}" ,item.getDepTasks(), item.getStatus(), executionStatus);
dependResult = DependResult.FAILED;
}
logger.info("dependent item complete {} {},{}",
Constants.DEPENDENT_SPLIT, item.getDepTasks(), dependResult);
return dependResult;
}
}
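/*
 * Minimal sketch (hypothetical class, not part of the project): the mapping applied in
 * updateTaskState() above, isolated for clarity. A cancelled thread yields KILL, a
 * successful condition yields SUCCESS, anything else yields FAILURE.
 */
class ConditionsResultMappingSketch {
static ExecutionStatus map(boolean cancelled, DependResult conditionResult) {
if (cancelled) {
return ExecutionStatus.KILL;
}
return conditionResult == DependResult.SUCCESS ? ExecutionStatus.SUCCESS : ExecutionStatus.FAILURE;
}
}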
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,929 |
[Bug][master/worker] After the master and worker are fault-tolerant, the process instance fails after the task instance is successfully executed
|
### Steps to reproduce
1. Start 2 master services (master1, master2), 2 worker services (worker1, worker2)
2. Run the workflow, the process instance runs in master1, and the task runs in worker1
3. Stop master1, worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2. After all tasks are executed successfully, the process instance status is failed
============================================
### Steps to reproduce
1. Start 2 master services (master1, master2) and 2 worker services (worker1, worker2)
2. Run the workflow; the process instance runs on master1 and the task runs on worker1
3. Stop the master1 and worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2; after all tasks execute successfully, the process instance status is failed
(screenshots attached to the original issue)
**Which branch**
-[1.3.3-release]
|
https://github.com/apache/dolphinscheduler/issues/3929
|
https://github.com/apache/dolphinscheduler/pull/3999
|
c11d97f7786eb72d601769a2ba3e1f97453f7c87
|
6caac0f36623cee4fa1cb2edfc35c7db87b08aa9
| 2020-10-16T08:52:08Z |
java
| 2020-10-27T09:37:59Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import com.alibaba.fastjson.JSON;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.task.conditions.ConditionsParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.AlertManager;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* master exec thread,split dag
*/
public class MasterExecThread implements Runnable {
/**
* logger of MasterExecThread
*/
private static final Logger logger = LoggerFactory.getLogger(MasterExecThread.class);
/**
* process instance
*/
private ProcessInstance processInstance;
/**
* running task node map
*/
private final Map<MasterBaseTaskExecThread,Future<Boolean>> activeTaskNode = new ConcurrentHashMap<>();
/**
* task exec service
*/
private final ExecutorService taskExecService;
/**
* submit failure nodes
*/
private boolean taskFailedSubmit = false;
/**
* recover node id list
*/
private List<TaskInstance> recoverNodeIdList = new ArrayList<>();
/**
* error task list
*/
private Map<String,TaskInstance> errorTaskList = new ConcurrentHashMap<>();
/**
* complete task list
*/
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
/**
* ready to submit task list
*/
private Map<String, TaskInstance> readyToSubmitTaskList = new ConcurrentHashMap<>();
/**
* depend failed task map
*/
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
/**
* forbidden task map
*/
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
/**
* skip task map
*/
private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();
/**
* recover tolerance fault task list
*/
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
/**
* alert manager
*/
private AlertManager alertManager = new AlertManager();
/**
* the object of DAG
*/
private DAG<String,TaskNode,TaskNodeRelation> dag;
/**
* process service
*/
private ProcessService processService;
/**
* master config
*/
private MasterConfig masterConfig;
/**
* netty remoting client
*/
private NettyRemotingClient nettyRemotingClient;
/**
* constructor of MasterExecThread
* @param processInstance processInstance
* @param processService processService
* @param nettyRemotingClient nettyRemotingClient
*/
public MasterExecThread(ProcessInstance processInstance, ProcessService processService, NettyRemotingClient nettyRemotingClient){
this.processService = processService;
this.processInstance = processInstance;
this.masterConfig = SpringApplicationContext.getBean(MasterConfig.class);
int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread",
masterTaskExecNum);
this.nettyRemotingClient = nettyRemotingClient;
}
@Override
public void run() {
// process instance is null
if (processInstance == null){
logger.info("process instance is not exists");
return;
}
// check to see if it's done
if (processInstance.getState().typeIsFinished()){
logger.info("process instance is done : {}",processInstance.getId());
return;
}
try {
if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()){
// sub process complement data
executeComplementProcess();
}else{
// execute flow
executeProcess();
}
}catch (Exception e){
logger.error("master exec thread exception", e);
logger.error("process execute failed, process id:{}", processInstance.getId());
processInstance.setState(ExecutionStatus.FAILURE);
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
}finally {
taskExecService.shutdown();
// post handle
postHandle();
}
}
/**
* execute process
* @throws Exception exception
*/
private void executeProcess() throws Exception {
prepareProcess();
runProcess();
endProcess();
}
/**
* execute complement process
* @throws Exception exception
*/
private void executeComplementProcess() throws Exception {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
processService.saveProcessInstance(processInstance);
// get schedules
int processDefinitionId = processInstance.getProcessDefinitionId();
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
List<Date> listDate = Lists.newLinkedList();
if(!CollectionUtils.isEmpty(schedules)){
for (Schedule schedule : schedules) {
listDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedule.getCrontab()));
}
}
// get first fire date
Iterator<Date> iterator = null;
Date scheduleDate = null;
if(!CollectionUtils.isEmpty(listDate)) {
iterator = listDate.iterator();
scheduleDate = iterator.next();
processInstance.setScheduleTime(scheduleDate);
processService.updateProcessInstance(processInstance);
}else{
scheduleDate = processInstance.getScheduleTime();
if(scheduleDate == null){
scheduleDate = startDate;
}
}
while(Stopper.isRunning()){
logger.info("process {} start to complement {} data",
processInstance.getId(), DateUtils.dateToString(scheduleDate));
// prepare dag and other info
prepareProcess();
if(dag == null){
logger.error("process {} dag is null, please check out parameters",
processInstance.getId());
processInstance.setState(ExecutionStatus.SUCCESS);
processService.updateProcessInstance(processInstance);
return;
}
// execute process ,waiting for end
runProcess();
endProcess();
// process instance failure ,no more complements
if(!processInstance.getState().typeIsSuccess()){
logger.info("process {} state {}, complement not completely!",
processInstance.getId(), processInstance.getState());
break;
}
// current process instance success ,next execute
if(null == iterator){
// loop by day
scheduleDate = DateUtils.getSomeDay(scheduleDate, 1);
if(scheduleDate.after(endDate)){
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
}else{
// loop by schedule date
if(!iterator.hasNext()){
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
scheduleDate = iterator.next();
}
// flow end
// execute next process instance complement data
processInstance.setScheduleTime(scheduleDate);
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_START_NODE_STRING)){
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
}
processInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processInstance.getProcessDefinition().getGlobalParamMap(),
processInstance.getProcessDefinition().getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processInstance.setId(0);
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processService.saveProcessInstance(processInstance);
}
}
/**
* prepare process parameter
* @throws Exception exception
*/
private void prepareProcess() throws Exception {
// init task queue
initTaskQueue();
// gen process dag
buildFlowDag();
logger.info("prepare process :{} end", processInstance.getId());
}
/**
* process end handle
*/
private void endProcess() {
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
if(processInstance.getState().typeIsWaitingThread()){
processService.createRecoveryWaitingThreadCommand(null, processInstance);
}
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
alertManager.sendAlertProcessInstance(processInstance, taskInstances);
}
/**
* generate process dag
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
forbiddenTaskList = DagHelper.getForbiddenTaskNodeMaps(processInstance.getProcessInstanceJson());
// generate process to get DAG info
List<String> recoveryNameList = getRecoveryNodeNameList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(processInstance.getProcessInstanceJson(),
startNodeNameList, recoveryNameList, processInstance.getTaskDependType());
if(processDag == null){
logger.error("processDag is null");
return;
}
// generate process dag
dag = DagHelper.buildDagGraph(processDag);
}
/**
* init task queue
*/
private void initTaskQueue(){
taskFailedSubmit = false;
activeTaskNode.clear();
dependFailedTask.clear();
completeTaskList.clear();
errorTaskList.clear();
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance task : taskInstanceList){
if(task.isTaskComplete()){
completeTaskList.put(task.getName(), task);
}
if(task.getState().typeIsFailure() && !task.taskCanRetry()){
errorTaskList.put(task.getName(), task);
}
}
}
/**
* process post handle
*/
private void postHandle() {
logger.info("develop mode is: {}", CommonUtils.isDevelopMode());
if (!CommonUtils.isDevelopMode()) {
// get exec dir
String execLocalPath = org.apache.dolphinscheduler.common.utils.FileUtils
.getProcessExecDir(processInstance.getProcessDefinition().getProjectId(),
processInstance.getProcessDefinitionId(),
processInstance.getId());
try {
FileUtils.deleteDirectory(new File(execLocalPath));
} catch (IOException e) {
logger.error("delete exec dir failed ", e);
}
}
}
/**
* submit task to execute
* @param taskInstance task instance
* @return TaskInstance
*/
private TaskInstance submitTaskExec(TaskInstance taskInstance) {
MasterBaseTaskExecThread abstractExecThread = null;
if(taskInstance.isSubProcess()){
abstractExecThread = new SubProcessTaskExecThread(taskInstance);
}else if(taskInstance.isDependTask()){
abstractExecThread = new DependentTaskExecThread(taskInstance);
}else if(taskInstance.isConditionsTask()){
abstractExecThread = new ConditionsTaskExecThread(taskInstance);
}else {
abstractExecThread = new MasterTaskExecThread(taskInstance);
}
Future<Boolean> future = taskExecService.submit(abstractExecThread);
activeTaskNode.putIfAbsent(abstractExecThread, future);
return abstractExecThread.getTaskInstance();
}
/**
* find task instance in db.
* in case submit more than one same name task in the same time.
* @param taskName task name
* @return TaskInstance
*/
private TaskInstance findTaskIfExists(String taskName){
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
for(TaskInstance taskInstance : taskInstanceList){
if(taskInstance.getName().equals(taskName)){
return taskInstance;
}
}
return null;
}
/**
* encapsulation task
* @param processInstance process instance
* @param nodeName node name
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, String nodeName,
TaskNode taskNode) {
TaskInstance taskInstance = findTaskIfExists(nodeName);
if(taskInstance == null){
taskInstance = new TaskInstance();
// task name
taskInstance.setName(nodeName);
// process instance define id
taskInstance.setProcessDefinitionId(processInstance.getProcessDefinitionId());
// task instance state
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
// process instance id
taskInstance.setProcessInstanceId(processInstance.getId());
// task instance node json
taskInstance.setTaskJson(JSON.toJSONString(taskNode));
// task instance type
taskInstance.setTaskType(taskNode.getType());
// task instance whether alert
taskInstance.setAlertFlag(Flag.NO);
// task instance start time
taskInstance.setStartTime(null);
// task instance flag
taskInstance.setFlag(Flag.YES);
// task instance retry times
taskInstance.setRetryTimes(0);
// max task instance retry times
taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
// retry task instance interval
taskInstance.setRetryInterval(taskNode.getRetryInterval());
// task instance priority
if(taskNode.getTaskInstancePriority() == null){
taskInstance.setTaskInstancePriority(Priority.MEDIUM);
}else{
taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
}
String processWorkerGroup = processInstance.getWorkerGroup();
processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
taskInstance.setWorkerGroup(processWorkerGroup);
}else {
taskInstance.setWorkerGroup(taskWorkerGroup);
}
}
return taskInstance;
}
/**
* if all of the task dependence are skip, skip it too.
* @param taskNode
* @return
*/
private boolean isTaskNodeNeedSkip(TaskNode taskNode){
if(CollectionUtils.isEmpty(taskNode.getDepList())){
return false;
}
for(String depNode : taskNode.getDepList()){
if(!skipTaskNodeList.containsKey(depNode)){
return false;
}
}
return true;
}
/**
* set task node skip if dependence all skip
* @param taskNodesSkipList
*/
private void setTaskNodeSkip(List<String> taskNodesSkipList){
for(String skipNode : taskNodesSkipList){
skipTaskNodeList.putIfAbsent(skipNode, dag.getNode(skipNode));
Collection<String> postNodeList = DagHelper.getStartVertex(skipNode, dag, completeTaskList);
List<String> postSkipList = new ArrayList<>();
for(String post : postNodeList){
TaskNode postNode = dag.getNode(post);
if(isTaskNodeNeedSkip(postNode)){
postSkipList.add(post);
}
}
setTaskNodeSkip(postSkipList);
}
}
/**
* parse the conditions task, find the branch to run,
* and set the skip flag on the other branch.
* @param nodeName
* @return
*/
private List<String> parseConditionTask(String nodeName){
List<String> conditionTaskList = new ArrayList<>();
TaskNode taskNode = dag.getNode(nodeName);
if(!taskNode.isConditionsTask()){
return conditionTaskList;
}
ConditionsParameters conditionsParameters =
JSONUtils.parseObject(taskNode.getConditionResult(), ConditionsParameters.class);
TaskInstance taskInstance = completeTaskList.get(nodeName);
if(taskInstance == null){
logger.error("task instance {} cannot find, please check it!", nodeName);
return conditionTaskList;
}
if(taskInstance.getState().typeIsSuccess()){
conditionTaskList = conditionsParameters.getSuccessNode();
setTaskNodeSkip(conditionsParameters.getFailedNode());
}else if(taskInstance.getState().typeIsFailure()){
conditionTaskList = conditionsParameters.getFailedNode();
setTaskNodeSkip(conditionsParameters.getSuccessNode());
}else{
conditionTaskList.add(nodeName);
}
return conditionTaskList;
}
/**
* parse post node list of previous node
* if condition node: return process according to the settings
* if post node completed, return post nodes of the completed node
* @param previousNodeName
* @return
*/
private List<String> parsePostNodeList(String previousNodeName){
List<String> postNodeList = new ArrayList<>();
TaskNode taskNode = dag.getNode(previousNodeName);
if(taskNode != null && taskNode.isConditionsTask()){
return parseConditionTask(previousNodeName);
}
Collection<String> postNodeCollection = DagHelper.getStartVertex(previousNodeName, dag, completeTaskList);
List<String> postSkipList = new ArrayList<>();
// skip nodes that completed successfully and parse their post nodes
// if it is a conditions node:
// 1. parse the branch to run according to the conditions setting
// 2. set the skip flag on the other branch
for(String postNode : postNodeCollection){
if(completeTaskList.containsKey(postNode)){
TaskInstance postTaskInstance = completeTaskList.get(postNode);
if(dag.getNode(postNode).isConditionsTask()){
List<String> conditionTaskNodeList = parseConditionTask(postNode);
for(String conditions : conditionTaskNodeList){
postNodeList.addAll(parsePostNodeList(conditions));
}
}else if(postTaskInstance.getState().typeIsSuccess()){
postNodeList.addAll(parsePostNodeList(postNode));
}else{
postNodeList.add(postNode);
}
}else if(isTaskNodeNeedSkip(dag.getNode(postNode))){
postSkipList.add(postNode);
setTaskNodeSkip(postSkipList);
postSkipList.clear();
}else{
postNodeList.add(postNode);
}
}
return postNodeList;
}
/**
* submit post node
* @param parentNodeName parent node name
*/
private void submitPostNode(String parentNodeName){
List<String> submitTaskNodeList = parsePostNodeList(parentNodeName);
List<TaskInstance> taskInstances = new ArrayList<>();
for(String taskNode : submitTaskNodeList){
taskInstances.add(createTaskInstance(processInstance, taskNode,
dag.getNode(taskNode)));
}
// if the previous node succeeded, submit its post nodes
for(TaskInstance task : taskInstances){
if(readyToSubmitTaskList.containsKey(task.getName())){
continue;
}
if(completeTaskList.containsKey(task.getName())){
logger.info("task {} has already run success", task.getName());
continue;
}
if(task.getState().typeIsPause() || task.getState().typeIsCancel()){
logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
}else{
addTaskToStandByList(task);
}
}
}
/**
* determine whether the dependencies of the task node are complete
* @return DependResult
*/
private DependResult isTaskDepsComplete(String taskName) {
Collection<String> startNodes = dag.getBeginNode();
// if the task is a start vertex, return success directly
if(startNodes.contains(taskName)){
return DependResult.SUCCESS;
}
TaskNode taskNode = dag.getNode(taskName);
List<String> depNameList = taskNode.getDepList();
for(String depsNode : depNameList ){
if(!dag.containsNode(depsNode)
|| skipTaskNodeList.containsKey(depsNode)
|| forbiddenTaskList.containsKey(depsNode)){
continue;
}
// all the dependencies must be completed
if(!completeTaskList.containsKey(depsNode)){
return DependResult.WAITING;
}
ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
if(depTaskState.typeIsPause() || depTaskState.typeIsCancel()){
return DependResult.WAITING;
}
// ignore task state if current task is condition
if(taskNode.isConditionsTask()){
continue;
}
if(!dependTaskSuccess(depsNode, taskName)){
return DependResult.FAILED;
}
}
logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray()));
return DependResult.SUCCESS;
}
/**
* the depend node is completed, but we still need to check whether the next node is on the branch chosen by the conditions task
* @param dependNodeName
* @param nextNodeName
* @return
*/
private boolean dependTaskSuccess(String dependNodeName, String nextNodeName){
if(dag.getNode(dependNodeName).isConditionsTask()){
//condition task need check the branch to run
List<String> nextTaskList = parseConditionTask(dependNodeName);
if(!nextTaskList.contains(nextNodeName)){
return false;
}
}else {
ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState();
if(depTaskState.typeIsFailure()){
return false;
}
}
return true;
}
/**
* query task instance by complete state
* @param state state
* @return task instance list
*/
private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state){
List<TaskInstance> resultList = new ArrayList<>();
for (Map.Entry<String, TaskInstance> entry: completeTaskList.entrySet()) {
if(entry.getValue().getState() == state){
resultList.add(entry.getValue());
}
}
return resultList;
}
/**
* whether there are ongoing tasks
* @param state state
* @return ExecutionStatus
*/
private ExecutionStatus runningState(ExecutionStatus state){
if(state == ExecutionStatus.READY_STOP ||
state == ExecutionStatus.READY_PAUSE ||
state == ExecutionStatus.WAITTING_THREAD){
// if the running task is not completed, the state remains unchanged
return state;
}else{
return ExecutionStatus.RUNNING_EXEUTION;
}
}
/**
* whether a failed task exists: submit failure, dependency failure, or execution failure (after retries are exhausted)
*
* @return Boolean whether has failed task
*/
private boolean hasFailedTask(){
if(this.taskFailedSubmit){
return true;
}
if(this.errorTaskList.size() > 0){
return true;
}
return this.dependFailedTask.size() > 0;
}
/**
* process instance failure
*
* @return Boolean whether process instance failed
*/
private boolean processFailed(){
if(hasFailedTask()) {
if(processInstance.getFailureStrategy() == FailureStrategy.END){
return true;
}
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
return readyToSubmitTaskList.size() == 0 || activeTaskNode.size() == 0;
}
}
return false;
}
/**
* whether task for waiting thread
* @return Boolean whether has waiting thread task
*/
private boolean hasWaitingThreadTask(){
List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITTING_THREAD);
return CollectionUtils.isNotEmpty(waitingList);
}
/**
* prepare for pause
* 1. if a failed retry task exists in the standby queue, return failure directly
* 2. if a paused task exists, the complement is not finished, or tasks are still pending submission, return pause
* 3. otherwise return success
* @return ExecutionStatus
*/
private ExecutionStatus processReadyPause(){
if(hasRetryTaskInStandBy()){
return ExecutionStatus.FAILURE;
}
List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
if(CollectionUtils.isNotEmpty(pauseList)
|| !isComplementEnd()
|| readyToSubmitTaskList.size() > 0){
return ExecutionStatus.PAUSE;
}else{
return ExecutionStatus.SUCCESS;
}
}
/**
* generate the latest process instance status by the tasks state
* @return process instance execution status
*/
private ExecutionStatus getProcessInstanceState(){
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
ExecutionStatus state = instance.getState();
if(activeTaskNode.size() > 0 || retryTaskExists()){
// active task and retry task exists
return runningState(state);
}
// process failure
if(processFailed()){
return ExecutionStatus.FAILURE;
}
// waiting thread
if(hasWaitingThreadTask()){
return ExecutionStatus.WAITTING_THREAD;
}
// pause
if(state == ExecutionStatus.READY_PAUSE){
return processReadyPause();
}
// stop
if(state == ExecutionStatus.READY_STOP){
List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
if(CollectionUtils.isNotEmpty(stopList)
|| CollectionUtils.isNotEmpty(killList)
|| !isComplementEnd()){
return ExecutionStatus.STOP;
}else{
return ExecutionStatus.SUCCESS;
}
}
// success
if(state == ExecutionStatus.RUNNING_EXEUTION){
List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL);
if(readyToSubmitTaskList.size() > 0){
//tasks currently pending submission, no retries, indicating that depend is waiting to complete
return ExecutionStatus.RUNNING_EXEUTION;
}else if(CollectionUtils.isNotEmpty(killTasks)){
// tasks maybe killed manually
return ExecutionStatus.FAILURE;
}else{
// if the waiting queue is empty and the status is in progress, then success
return ExecutionStatus.SUCCESS;
}
}
return state;
}
/**
* whether standby task list have retry tasks
* @return
*/
private boolean retryTaskExists() {
boolean result = false;
for(String taskName : readyToSubmitTaskList.keySet()){
TaskInstance task = readyToSubmitTaskList.get(taskName);
if(task.getState().typeIsFailure()){
result = true;
break;
}
}
return result;
}
/**
* whether complement end
* @return Boolean whether is complement end
*/
private boolean isComplementEnd() {
if(!processInstance.isComplementData()){
return true;
}
try {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
return processInstance.getScheduleTime().equals(endTime);
} catch (Exception e) {
logger.error("complement end failed ",e);
return false;
}
}
/**
* updateProcessInstance process instance state
* after each batch of tasks is executed, the status of the process instance is updated
*/
private void updateProcessInstanceState() {
ExecutionStatus state = getProcessInstanceState();
if(processInstance.getState() != state){
logger.info(
"work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}",
processInstance.getId(), processInstance.getName(),
processInstance.getState(), state,
processInstance.getCommandType());
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
instance.setState(state);
instance.setProcessDefinition(processInstance.getProcessDefinition());
processService.updateProcessInstance(instance);
processInstance = instance;
}
}
/**
* get task dependency result
* @param taskInstance task instance
* @return DependResult
*/
private DependResult getDependResultForTask(TaskInstance taskInstance){
return isTaskDepsComplete(taskInstance.getName());
}
/**
* add task to standby list
* @param taskInstance task instance
*/
private void addTaskToStandByList(TaskInstance taskInstance){
logger.info("add task to stand by list: {}", taskInstance.getName());
readyToSubmitTaskList.putIfAbsent(taskInstance.getName(), taskInstance);
}
/**
* remove task from stand by list
* @param taskInstance task instance
*/
private void removeTaskFromStandbyList(TaskInstance taskInstance){
logger.info("remove task from stand by list: {}", taskInstance.getName());
readyToSubmitTaskList.remove(taskInstance.getName());
}
/**
* has retry task in standby
* @return Boolean whether has retry task in standby
*/
private boolean hasRetryTaskInStandBy(){
for (Map.Entry<String, TaskInstance> entry: readyToSubmitTaskList.entrySet()) {
if(entry.getValue().getState().typeIsFailure()){
return true;
}
}
return false;
}
/**
* submit and watch the tasks, until the work flow stop
*/
private void runProcess(){
// submit start node
submitPostNode(null);
boolean sendTimeWarning = false;
while(!processInstance.isProcessInstanceStop() && Stopper.isRunning()){
// send warning email if process time out.
if(!sendTimeWarning && checkProcessTimeOut(processInstance) ){
alertManager.sendProcessTimeoutAlert(processInstance,
processService.findProcessDefineById(processInstance.getProcessDefinitionId()));
sendTimeWarning = true;
}
for(Map.Entry<MasterBaseTaskExecThread,Future<Boolean>> entry: activeTaskNode.entrySet()) {
Future<Boolean> future = entry.getValue();
TaskInstance task = entry.getKey().getTaskInstance();
if(!future.isDone()){
continue;
}
// node monitor thread complete
task = this.processService.findTaskInstanceById(task.getId());
if(task == null){
this.taskFailedSubmit = true;
activeTaskNode.remove(entry.getKey());
continue;
}
// node monitor thread complete
if(task.getState().typeIsFinished()){
activeTaskNode.remove(entry.getKey());
}
logger.info("task :{}, id:{} complete, state is {} ",
task.getName(), task.getId(), task.getState());
// node success , post node submit
if(task.getState() == ExecutionStatus.SUCCESS){
completeTaskList.put(task.getName(), task);
submitPostNode(task.getName());
continue;
}
// node fails, retry first, and then execute the failure process
if(task.getState().typeIsFailure()){
if(task.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE){
this.recoverToleranceFaultTaskList.add(task);
}
if(task.taskCanRetry()){
addTaskToStandByList(task);
}else{
completeTaskList.put(task.getName(), task);
if( task.isConditionsTask()
|| DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
submitPostNode(task.getName());
}else{
errorTaskList.put(task.getName(), task);
if(processInstance.getFailureStrategy() == FailureStrategy.END){
killTheOtherTasks();
}
}
}
continue;
}
// other status stop/pause
completeTaskList.put(task.getName(), task);
}
// send alert
if(CollectionUtils.isNotEmpty(this.recoverToleranceFaultTaskList)){
alertManager.sendAlertWorkerToleranceFault(processInstance, recoverToleranceFaultTaskList);
this.recoverToleranceFaultTaskList.clear();
}
// updateProcessInstance completed task status
// failure priority is higher than pause
// if a task fails, other suspended tasks need to be reset kill
if(errorTaskList.size() > 0){
for(Map.Entry<String, TaskInstance> entry: completeTaskList.entrySet()) {
TaskInstance completeTask = entry.getValue();
if(completeTask.getState()== ExecutionStatus.PAUSE){
completeTask.setState(ExecutionStatus.KILL);
completeTaskList.put(entry.getKey(), completeTask);
processService.updateTaskInstance(completeTask);
}
}
}
if(canSubmitTaskToQueue()){
submitStandByTask();
}
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (InterruptedException e) {
logger.error(e.getMessage(),e);
}
updateProcessInstanceState();
}
logger.info("process:{} end, state :{}", processInstance.getId(), processInstance.getState());
}
/**
* check whether the process instance has timed out
* @param processInstance process instance
* @return true if the running time of the process instance exceeds its timeout
*/
private boolean checkProcessTimeOut(ProcessInstance processInstance) {
if(processInstance.getTimeout() == 0 ){
return false;
}
Date now = new Date();
long runningTime = DateUtils.diffMin(now, processInstance.getStartTime());
return runningTime > processInstance.getTimeout();
}
/**
* whether can submit task to queue
* @return boolean
*/
private boolean canSubmitTaskToQueue() {
return OSUtils.checkResource(masterConfig.getMasterMaxCpuloadAvg(), masterConfig.getMasterReservedMemory());
}
/**
* close the on going tasks
*/
private void killTheOtherTasks() {
logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(),
activeTaskNode.size());
for (Map.Entry<MasterBaseTaskExecThread, Future<Boolean>> entry : activeTaskNode.entrySet()) {
MasterBaseTaskExecThread taskExecThread = entry.getKey();
Future<Boolean> future = entry.getValue();
TaskInstance taskInstance = taskExecThread.getTaskInstance();
taskInstance = processService.findTaskInstanceById(taskInstance.getId());
if(taskInstance != null && taskInstance.getState().typeIsFinished()){
continue;
}
if (!future.isDone()) {
// record kill info
logger.info("kill process instance, id: {}, task: {}", processInstance.getId(), taskExecThread.getTaskInstance().getId());
// kill node
taskExecThread.kill();
}
}
}
/**
* whether the retry interval is timed out
* @param taskInstance task instance
* @return Boolean
*/
private boolean retryTaskIntervalOverTime(TaskInstance taskInstance){
if(taskInstance.getState() != ExecutionStatus.FAILURE){
return true;
}
if(taskInstance.getId() == 0 ||
taskInstance.getMaxRetryTimes() ==0 ||
taskInstance.getRetryInterval() == 0 ){
return true;
}
Date now = new Date();
long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime());
// return true only when the retry interval has elapsed since the task failed
return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval;
}
/**
* handling the list of tasks to be submitted
*/
private void submitStandByTask(){
for(Map.Entry<String, TaskInstance> entry: readyToSubmitTaskList.entrySet()) {
TaskInstance task = entry.getValue();
DependResult dependResult = getDependResultForTask(task);
if(DependResult.SUCCESS == dependResult){
if(retryTaskIntervalOverTime(task)){
submitTaskExec(task);
removeTaskFromStandbyList(task);
}
}else if(DependResult.FAILED == dependResult){
// if the dependency fails, the current node is not submitted and the state changes to failure.
dependFailedTask.put(entry.getKey(), task);
removeTaskFromStandbyList(task);
logger.info("task {},id:{} depend result : {}",task.getName(), task.getId(), dependResult);
}
}
}
/**
* get recovery task instance
* @param taskId task id
* @return recovery task instance
*/
private TaskInstance getRecoveryTaskInstance(String taskId){
if(!StringUtils.isNotEmpty(taskId)){
return null;
}
try {
Integer intId = Integer.valueOf(taskId);
TaskInstance task = processService.findTaskInstanceById(intId);
if(task == null){
logger.error("start node id cannot be found: {}", taskId);
}else {
return task;
}
}catch (Exception e){
logger.error("get recovery task instance failed ",e);
}
return null;
}
/**
* get start task instance list
* @param cmdParam command param
* @return task instance list
*/
private List<TaskInstance> getStartTaskInstanceList(String cmdParam){
List<TaskInstance> instanceList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if(paramMap != null && paramMap.containsKey(CMDPARAM_RECOVERY_START_NODE_STRING)){
String[] idList = paramMap.get(CMDPARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA);
for(String nodeId : idList){
TaskInstance task = getRecoveryTaskInstance(nodeId);
if(task != null){
instanceList.add(task);
}
}
}
return instanceList;
}
/**
* parse "StartNodeNameList" from cmd param
* @param cmdParam command param
* @return start node name list
*/
private List<String> parseStartNodeName(String cmdParam){
List<String> startNodeNameList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if(paramMap == null){
return startNodeNameList;
}
if(paramMap.containsKey(CMDPARAM_START_NODE_NAMES)){
startNodeNameList = Arrays.asList(paramMap.get(CMDPARAM_START_NODE_NAMES).split(Constants.COMMA));
}
return startNodeNameList;
}
/**
* get the recovery node name list from the task instances parsed out of the command param
* @return recovery node name list
*/
private List<String> getRecoveryNodeNameList(){
List<String> recoveryNodeNameList = new ArrayList<>();
if(CollectionUtils.isNotEmpty(recoverNodeIdList)) {
for (TaskInstance task : recoverNodeIdList) {
recoveryNodeNameList.add(task.getName());
}
}
return recoveryNodeNameList;
}
/**
* generate flow dag
* @param processDefinitionJson process definition json
* @param startNodeNameList start node name list
* @param recoveryNodeNameList recovery node name list
* @param depNodeType depend node type
* @return ProcessDag process dag
* @throws Exception exception
*/
public ProcessDag generateFlowDag(String processDefinitionJson,
List<String> startNodeNameList,
List<String> recoveryNodeNameList,
TaskDependType depNodeType)throws Exception{
return DagHelper.generateFlowDag(processDefinitionJson, startNodeNameList, recoveryNodeNameList, depNodeType);
}
}
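/*
 * Worked example (hypothetical class, not part of the project) of the arithmetic used by
 * retryTaskIntervalOverTime() above: retryInterval is configured in minutes, and assuming
 * SEC_2_MINUTES_TIME_UNIT is 60, a task with retryInterval = 2 becomes eligible for retry
 * once more than 120 seconds have passed since it failed.
 */
class RetryIntervalSketch {
static boolean retryIntervalElapsed(int retryIntervalMinutes, long secondsSinceFailure) {
return retryIntervalMinutes * 60L < secondsSinceFailure;
}
}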
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,929 |
[Bug][master/worker] After the master and worker are fault-tolerant, the process instance fails after the task instance is successfully executed
|
### Steps to reproduce
1. Start 2 master services (master1, master2), 2 worker services (worker1, worker2)
2. Run the workflow, the process instance runs in master1, and the task runs in worker1
3. Stop master1, worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2. After all tasks are executed successfully, the process instance status is failed
============================================
### Steps to reproduce
1. Start 2 master services (master1, master2) and 2 worker services (worker1, worker2)
2. Run the workflow; the process instance runs on master1 and the task runs on worker1
3. Stop the master1 and worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2; after all tasks execute successfully, the process instance status is failed
(screenshots attached to the original issue)
**Which branch**
-[1.3.3-release]
|
https://github.com/apache/dolphinscheduler/issues/3929
|
https://github.com/apache/dolphinscheduler/pull/3999
|
c11d97f7786eb72d601769a2ba3e1f97453f7c87
|
6caac0f36623cee4fa1cb2edfc35c7db87b08aa9
| 2020-10-16T08:52:08Z |
java
| 2020-10-27T09:37:59Z |
dolphinscheduler-server/src/main/resources/logback-master.xml
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<conversionRule conversionWord="messsage"
converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"/>
<Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
</Discriminator>
<sift>
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
</sift>
</appender>
<!-- master server logback config start -->
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-master.log</file>
<!--<filter class="org.apache.dolphinscheduler.server.log.MasterLogFilter">
<level>INFO</level>
</filter>-->
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- master server logback config end -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="MASTERLOGFILE"/>
</root>
</configuration>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,929 |
[Bug][master/worker] After the master and worker are fault-tolerant, the process instance fails after the task instance is successfully executed
|
### Steps to reproduce
1. Start 2 master services (master1, master2), 2 worker services (worker1, worker2)
2. Run the workflow, the process instance runs in master1, and the task runs in worker1
3. Stop master1, worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2. After all tasks are executed successfully, the process instance status is failed
============================================
### Steps to reproduce
1. Start 2 master services (master1, master2) and 2 worker services (worker1, worker2)
2. Run the workflow; the process instance runs on master1 and the task runs on worker1
3. Stop the master1 and worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2; after all tasks execute successfully, the process instance status is failed
(screenshots attached to the original issue)
**Which branch**
-[1.3.3-release]
|
https://github.com/apache/dolphinscheduler/issues/3929
|
https://github.com/apache/dolphinscheduler/pull/3999
|
c11d97f7786eb72d601769a2ba3e1f97453f7c87
|
6caac0f36623cee4fa1cb2edfc35c7db87b08aa9
| 2020-10-16T08:52:08Z |
java
| 2020-10-27T09:37:59Z |
dolphinscheduler-server/src/main/resources/logback-worker.xml
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
<property name="log.base" value="logs"/>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- worker server logback config start -->
<conversionRule conversionWord="messsage"
converterClass="org.apache.dolphinscheduler.server.log.SensitiveDataConverter"/>
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"/>
<Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
<key>taskAppId</key>
<logBase>${log.base}</logBase>
</Discriminator>
<sift>
<appender name="FILE-${taskAppId}" class="org.apache.dolphinscheduler.server.log.TaskLogAppender">
<file>${log.base}/${taskAppId}.log</file>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<append>true</append>
</appender>
</sift>
</appender>
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.base}/dolphinscheduler-worker.log</file>
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>INFO</level>
</filter>
<filter class="org.apache.dolphinscheduler.server.log.WorkerLogFilter"/>
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
<fileNamePattern>${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
<maxHistory>168</maxHistory>
<maxFileSize>200MB</maxFileSize>
</rollingPolicy>
<encoder>
<pattern>
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- worker server logback config end -->
<root level="INFO">
<appender-ref ref="STDOUT"/>
<appender-ref ref="TASKLOGFILE"/>
<appender-ref ref="WORKERLOGFILE"/>
</root>
</configuration>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,929 |
[Bug][master/worker] After the master and worker are fault-tolerant, the process instance fails after the task instance is successfully executed
|
### Steps to reproduce
1. Start 2 master services (master1, master2), 2 worker services (worker1, worker2)
2. Run the workflow, the process instance runs in master1, and the task runs in worker1
3. Stop master1, worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2. After all tasks are executed successfully, the process instance status is failed
============================================
### Steps to reproduce
1. Start 2 master services (master1, master2) and 2 worker services (worker1, worker2)
2. Run the workflow; the process instance runs on master1 and the task runs on worker1
3. Stop the master1 and worker1 services
4. After fault tolerance, the process instance runs on master2 and the task runs on worker2; after all tasks execute successfully, the process instance status is failed
(screenshots attached to the original issue)
**Which branch**
-[1.3.3-release]
|
https://github.com/apache/dolphinscheduler/issues/3929
|
https://github.com/apache/dolphinscheduler/pull/3999
|
c11d97f7786eb72d601769a2ba3e1f97453f7c87
|
6caac0f36623cee4fa1cb2edfc35c7db87b08aa9
| 2020-10-16T08:52:08Z |
java
| 2020-10-27T09:37:59Z |
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/MasterCommandTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.junit.Ignore;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
/**
* master test
*/
@Ignore
public class MasterCommandTest {
private final Logger logger = LoggerFactory.getLogger(MasterCommandTest.class);
private CommandMapper commandMapper;
private ProcessDefinitionMapper processDefinitionMapper;
@Test
public void StartFromFailedCommand(){
Command cmd = new Command();
cmd.setCommandType(CommandType.START_FAILURE_TASK_PROCESS);
cmd.setCommandParam("{\"ProcessInstanceId\":325}");
cmd.setProcessDefinitionId(63);
commandMapper.insert(cmd);
}
@Test
public void RecoverSuspendCommand(){
Command cmd = new Command();
cmd.setProcessDefinitionId(44);
cmd.setCommandParam("{\"ProcessInstanceId\":290}");
cmd.setCommandType(CommandType.RECOVER_SUSPENDED_PROCESS);
commandMapper.insert(cmd);
}
@Test
public void startNewProcessCommand(){
Command cmd = new Command();
cmd.setCommandType(CommandType.START_PROCESS);
cmd.setProcessDefinitionId(167);
cmd.setFailureStrategy(FailureStrategy.CONTINUE);
cmd.setWarningType(WarningType.NONE);
cmd.setWarningGroupId(4);
cmd.setExecutorId(19);
commandMapper.insert(cmd);
}
@Test
public void ToleranceCommand(){
Command cmd = new Command();
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
cmd.setCommandParam("{\"ProcessInstanceId\":816}");
cmd.setProcessDefinitionId(15);
commandMapper.insert(cmd);
}
@Test
public void insertCommand(){
Command cmd = new Command();
cmd.setCommandType(CommandType.START_PROCESS);
cmd.setFailureStrategy(FailureStrategy.CONTINUE);
cmd.setWarningType(WarningType.ALL);
cmd.setProcessDefinitionId(72);
cmd.setExecutorId(10);
commandMapper.insert(cmd);
}
@Test
public void testDagHelper(){
ProcessDefinition processDefinition = processDefinitionMapper.selectById(19);
try {
ProcessDag processDag = DagHelper.generateFlowDag(processDefinition.getProcessDefinitionJson(),
new ArrayList<>(), new ArrayList<>(), TaskDependType.TASK_POST);
DAG<String,TaskNode,TaskNodeRelation> dag = DagHelper.buildDagGraph(processDag);
Collection<String> start = DagHelper.getStartVertex("1", dag, null);
System.out.println(start.toString());
Map<String, TaskNode> forbidden = DagHelper.getForbiddenTaskNodeMaps(processDefinition.getProcessDefinitionJson());
System.out.println(forbidden);
} catch (Exception e) {
e.printStackTrace();
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,008 |
[Improvement][Swagger] The resources API document does not match the interface parameters
|
The resources API document does not match the interface parameters
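
As a concrete illustration of what "matching" would look like, the fragment below keeps the Swagger declarations in line with the request parameters actually taken by the directory-creation endpoint shown further down (type, name, description, pid, currentDir). It is only a sketch: the value keys used for pid and currentDir are assumed placeholders, not keys taken from the project's message bundles.

```java
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;

/**
 * Sketch only: Swagger parameter declarations aligned with the real request
 * parameters of a directory-creation endpoint.
 */
public class AlignedSwaggerDocSketch {

    @ApiOperation(value = "createDirectory", notes = "CREATE_RESOURCE_NOTES")
    @ApiImplicitParams({
        @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
        @ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
        @ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
        // the two value keys below are assumed placeholders for this sketch
        @ApiImplicitParam(name = "pid", value = "RESOURCE_PID", required = true, dataType = "Int", example = "10"),
        @ApiImplicitParam(name = "currentDir", value = "RESOURCE_CURRENT_DIR", required = true, dataType = "String")
    })
    public void createDirectory() {
        // no-op: this class only illustrates the annotation layout
    }
}
```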
|
https://github.com/apache/dolphinscheduler/issues/4008
|
https://github.com/apache/dolphinscheduler/pull/4009
|
c54d09605b262b9e1058199834b4b6c80a2ed7f5
|
f4c14dea53dada047b1833bdeceaa881ae0d6ff4
| 2020-10-29T07:50:34Z |
java
| 2020-11-02T02:08:23Z |
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ApiException;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.service.UdfFuncService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import springfox.documentation.annotations.ApiIgnore;
import java.util.Map;
import static org.apache.dolphinscheduler.api.enums.Status.*;
/**
* resources controller
*/
@Api(tags = "RESOURCES_TAG", position = 1)
@RestController
@RequestMapping("resources")
public class ResourcesController extends BaseController {
private static final Logger logger = LoggerFactory.getLogger(ResourcesController.class);
@Autowired
private ResourcesService resourceService;
@Autowired
private UdfFuncService udfFuncService;
    /**
     * create directory
     *
     * @param loginUser login user
     * @param type type
     * @param alias alias
     * @param description description
     * @param pid parent id
     * @param currentDir current directory
     * @return create result code
     */
@ApiOperation(value = "createDirctory", notes = "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/directory/create")
@ApiException(CREATE_RESOURCE_ERROR)
public Result createDirectory(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir) {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(), type, alias, description, pid, currentDir);
return resourceService.createDirectory(loginUser, alias, description, type, pid, currentDir);
}
/**
* create resource
*
* @param loginUser login user
* @param alias alias
* @param description description
* @param type type
* @param file file
* @return create result code
*/
@ApiOperation(value = "createResource", notes = "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/create")
@ApiException(CREATE_RESOURCE_ERROR)
public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam("file") MultipartFile file,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir) {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(), type, alias, description, file.getName(), file.getOriginalFilename());
return resourceService.createResource(loginUser, alias, description, type, file, pid, currentDir);
}
/**
* update resource
*
* @param loginUser login user
* @param alias alias
* @param resourceId resource id
* @param type resource type
* @param description description
* @return update result code
*/
@ApiOperation(value = "updateResource", notes = "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String")
})
@PostMapping(value = "/update")
@ApiException(UPDATE_RESOURCE_ERROR)
public Result updateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "name") String alias,
@RequestParam(value = "description", required = false) String description) {
logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}",
loginUser.getUserName(), type, alias, description);
return resourceService.updateResource(loginUser, resourceId, alias, description, type);
}
/**
* query resources list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceList", notes = "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType")
})
@GetMapping(value = "/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_ERROR)
public Result queryResourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type
) {
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type);
Map<String, Object> result = resourceService.queryResourceList(loginUser, type);
return returnDataList(result);
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
@ApiOperation(value = "queryResourceListPaging", notes = "QUERY_RESOURCE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "int"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
})
@GetMapping(value = "/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_PAGING)
public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "id") int id,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
) {
logger.info("query resource list, login user:{}, resource type:{}, search value:{}",
loginUser.getUserName(), type, searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = resourceService.queryResourceListPaging(loginUser, id, type, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
*/
@ApiOperation(value = "deleteResource", notes = "DELETE_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_RESOURCE_ERROR)
public Result deleteResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId
) throws Exception {
logger.info("login user {}, delete resource id: {}",
loginUser.getUserName(), resourceId);
return resourceService.delete(loginUser, resourceId);
}
/**
* verify resource by alias and type
*
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@ApiOperation(value = "verifyResourceName", notes = "VERIFY_RESOURCE_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/verify-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR)
public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "fullName") String fullName,
@RequestParam(value = "type") ResourceType type
) {
logger.info("login user {}, verfiy resource alias: {},resource type: {}",
loginUser.getUserName(), fullName, type);
return resourceService.verifyResourceName(fullName, type, loginUser);
}
/**
* query resources jar list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceJarList", notes = "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType")
})
@GetMapping(value = "/list/jar")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_RESOURCES_LIST_ERROR)
public Result queryResourceJarList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type
) {
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
Map<String, Object> result = resourceService.queryResourceJarList(loginUser, type);
return returnDataList(result);
}
/**
* query resource by full name and type
*
* @param loginUser login user
     * @param fullName resource full name
     * @param id resource id
     * @param type resource type
     * @return the resource if it exists
*/
@ApiOperation(value = "queryResource", notes = "QUERY_BY_RESOURCE_NAME")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/queryResource")
@ResponseStatus(HttpStatus.OK)
@ApiException(RESOURCE_NOT_EXIST)
public Result queryResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "fullName", required = false) String fullName,
@RequestParam(value = "id", required = false) Integer id,
@RequestParam(value = "type") ResourceType type
) {
logger.info("login user {}, query resource by full name: {} or id: {},resource type: {}",
loginUser.getUserName(), fullName, id, type);
return resourceService.queryResource(fullName, id, type);
}
/**
* view resource file online
*
* @param loginUser login user
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
@ApiOperation(value = "viewResource", notes = "VIEW_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "skipLineNum", value = "SKIP_LINE_NUM", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "limit", value = "LIMIT", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/view")
@ApiException(VIEW_RESOURCE_FILE_ON_LINE_ERROR)
public Result viewResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "skipLineNum") int skipLineNum,
@RequestParam(value = "limit") int limit
) {
logger.info("login user {}, view resource : {}, skipLineNum {} , limit {}",
loginUser.getUserName(), resourceId, skipLineNum, limit);
return resourceService.readResource(resourceId, skipLineNum, limit);
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param description description
* @param content content
* @return create result code
*/
@ApiOperation(value = "onlineCreateResource", notes = "ONLINE_CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType = "ResourceType"),
@ApiImplicitParam(name = "fileName", value = "RESOURCE_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "SUFFIX", required = true, dataType = "String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType = "String"),
@ApiImplicitParam(name = "content", value = "CONTENT", required = true, dataType = "String")
})
@PostMapping(value = "/online-create")
@ApiException(CREATE_RESOURCE_FILE_ON_LINE_ERROR)
public Result onlineCreateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value = "fileName") String fileName,
@RequestParam(value = "suffix") String fileSuffix,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "content") String content,
@RequestParam(value = "pid") int pid,
@RequestParam(value = "currentDir") String currentDir
) {
logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {},desc : {},content : {}",
loginUser.getUserName(), fileName, type, fileSuffix, description, content, pid, currentDir);
if (StringUtils.isEmpty(content)) {
logger.error("resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.onlineCreateResource(loginUser, type, fileName, fileSuffix, description, content, pid, currentDir);
}
/**
* edit resource file online
*
* @param loginUser login user
* @param resourceId resource id
* @param content content
* @return update result code
*/
@ApiOperation(value = "updateResourceContent", notes = "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100"),
@ApiImplicitParam(name = "content", value = "CONTENT", required = true, dataType = "String")
})
@PostMapping(value = "/update-content")
@ApiException(EDIT_RESOURCE_FILE_ON_LINE_ERROR)
public Result updateResourceContent(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "content") String content
) {
logger.info("login user {}, updateProcessInstance resource : {}",
loginUser.getUserName(), resourceId);
if (StringUtils.isEmpty(content)) {
logger.error("The resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.updateResourceContent(resourceId, content);
}
/**
* download resource file
*
* @param loginUser login user
* @param resourceId resource id
* @return resource content
*/
@ApiOperation(value = "downloadResource", notes = "DOWNLOAD_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/download")
@ResponseBody
@ApiException(DOWNLOAD_RESOURCE_FILE_ERROR)
public ResponseEntity downloadResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId) throws Exception {
logger.info("login user {}, download resource : {}",
loginUser.getUserName(), resourceId);
Resource file = resourceService.downloadResource(resourceId);
if (file == null) {
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.RESOURCE_NOT_EXIST.getMsg());
}
return ResponseEntity
.ok()
.header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
.body(file);
}
/**
* create udf function
*
* @param loginUser login user
* @param type udf type
* @param funcName function name
* @param argTypes argument types
* @param database database
* @param description description
* @param className class name
* @param resourceId resource id
* @return create result code
*/
@ApiOperation(value = "createUdfFunc", notes = "CREATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
@ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
@ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
@ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/udf-func/create")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(CREATE_UDF_FUNCTION_ERROR)
public Result createUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") UdfType type,
@RequestParam(value = "funcName") String funcName,
@RequestParam(value = "className") String className,
@RequestParam(value = "argTypes", required = false) String argTypes,
@RequestParam(value = "database", required = false) String database,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "resourceId") int resourceId) {
logger.info("login user {}, create udf function, type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(), type, funcName, argTypes, database, description, resourceId);
return udfFuncService.createUdfFunction(loginUser, funcName, className, argTypes, database, description, type, resourceId);
}
/**
* view udf function
*
* @param loginUser login user
* @param id resource id
* @return udf function detail
*/
@ApiOperation(value = "viewUIUdfFunction", notes = "VIEW_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/udf-func/update-ui")
@ResponseStatus(HttpStatus.OK)
@ApiException(VIEW_UDF_FUNCTION_ERROR)
public Result viewUIUdfFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("id") int id) {
logger.info("login user {}, query udf{}",
loginUser.getUserName(), id);
Map<String, Object> map = udfFuncService.queryUdfFuncDetail(id);
return returnDataList(map);
}
/**
* update udf function
*
* @param loginUser login user
* @param type resource type
* @param funcName function name
* @param argTypes argument types
* @param database data base
* @param description description
* @param resourceId resource id
* @param className class name
* @param udfFuncId udf function id
* @return update result code
*/
@ApiOperation(value = "updateUdfFunc", notes = "UPDATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType"),
@ApiImplicitParam(name = "funcName", value = "FUNC_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType = "String"),
@ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType = "String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType = "String"),
@ApiImplicitParam(name = "description", value = "UDF_DESC", dataType = "String"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@PostMapping(value = "/udf-func/update")
@ApiException(UPDATE_UDF_FUNCTION_ERROR)
public Result updateUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int udfFuncId,
@RequestParam(value = "type") UdfType type,
@RequestParam(value = "funcName") String funcName,
@RequestParam(value = "className") String className,
@RequestParam(value = "argTypes", required = false) String argTypes,
@RequestParam(value = "database", required = false) String database,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "resourceId") int resourceId) {
logger.info("login user {}, updateProcessInstance udf function id: {},type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(), udfFuncId, type, funcName, argTypes, database, description, resourceId);
Map<String, Object> result = udfFuncService.updateUdfFunc(udfFuncId, funcName, className, argTypes, database, description, type, resourceId);
return returnDataList(result);
}
/**
* query udf function list paging
*
* @param loginUser login user
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return udf function list page
*/
@ApiOperation(value = "queryUdfFuncListPaging", notes = "QUERY_UDF_FUNCTION_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType = "Int", example = "20")
})
@GetMapping(value = "/udf-func/list-paging")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_UDF_FUNCTION_LIST_PAGING_ERROR)
public Result queryUdfFuncList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
) {
logger.info("query udf functions list, login user:{},search value:{}",
loginUser.getUserName(), searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return returnDataListPaging(result);
}
result = udfFuncService.queryUdfFuncListPaging(loginUser, searchVal, pageNo, pageSize);
return returnDataListPaging(result);
}
/**
* query resource list by type
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceList", notes = "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType = "UdfType")
})
@GetMapping(value = "/udf-func/list")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_DATASOURCE_BY_TYPE_ERROR)
public Result queryResourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("type") UdfType type) {
logger.info("query datasource list, user:{}, type:{}", loginUser.getUserName(), type);
Map<String, Object> result = udfFuncService.queryResourceList(loginUser, type.ordinal());
return returnDataList(result);
}
/**
* verify udf function name can use or not
*
* @param loginUser login user
* @param name name
     * @return true if the name can be used, otherwise return false
*/
@ApiOperation(value = "verifyUdfFuncName", notes = "VERIFY_UDF_FUNCTION_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "name", value = "FUNC_NAME", required = true, dataType = "String")
})
@GetMapping(value = "/udf-func/verify-name")
@ResponseStatus(HttpStatus.OK)
@ApiException(VERIFY_UDF_FUNCTION_NAME_ERROR)
public Result verifyUdfFuncName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "name") String name
) {
logger.info("login user {}, verfiy udf function name: {}",
loginUser.getUserName(), name);
return udfFuncService.verifyUdfFuncByName(name);
}
/**
* delete udf function
*
* @param loginUser login user
* @param udfFuncId udf function id
* @return delete result code
*/
@ApiOperation(value = "deleteUdfFunc", notes = "DELETE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/udf-func/delete")
@ResponseStatus(HttpStatus.OK)
@ApiException(DELETE_UDF_FUNCTION_ERROR)
public Result deleteUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int udfFuncId
) {
logger.info("login user {}, delete udf function id: {}", loginUser.getUserName(), udfFuncId);
return udfFuncService.delete(udfFuncId);
}
/**
* authorized file resource list
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
@ApiOperation(value = "authorizedFile", notes = "AUTHORIZED_FILE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authed-file")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZED_FILE_RESOURCE_ERROR)
public Result authorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("authorized file resource, user: {}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizedFile(loginUser, userId);
return returnDataList(result);
}
/**
* unauthorized file resource list
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "authorizeResourceTree", notes = "AUTHORIZE_RESOURCE_TREE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authorize-resource-tree")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZE_RESOURCE_TREE)
public Result authorizeResourceTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("all resource file, user:{}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizeResourceTree(loginUser, userId);
return returnDataList(result);
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "unauthUDFFunc", notes = "UNAUTHORIZED_UDF_FUNC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/unauth-udf-func")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(UNAUTHORIZED_UDF_FUNCTION_ERROR)
public Result unauthUDFFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("unauthorized udf function, login user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.unauthorizedUDFFunction(loginUser, userId);
return returnDataList(result);
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
@ApiOperation(value = "authUDFFunc", notes = "AUTHORIZED_UDF_FUNC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100")
})
@GetMapping(value = "/authed-udf-func")
@ResponseStatus(HttpStatus.CREATED)
@ApiException(AUTHORIZED_UDF_FUNCTION_ERROR)
public Result authorizedUDFFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
logger.info("auth udf function, login user:{}, auth user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizedUDFFunction(loginUser, userId);
return returnDataList(result);
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,985 |
[Feature][Datax] Datax supports setting up running memory
|
DataX's default memory is 1g, which may not be enough when the amount of data is large.
So it needs to support setting the runtime JVM memory

|
https://github.com/apache/dolphinscheduler/issues/3985
|
https://github.com/apache/dolphinscheduler/pull/3986
|
89f1e93bcf936b527856f658e33fe38ead5ec8b9
|
fe3026627fc2d38da08ae396724cc61bc922374a
| 2020-10-24T07:25:07Z |
java
| 2020-11-16T02:55:20Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.task.datax;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
/**
* DataX parameter
*/
public class DataxParameters extends AbstractParameters {
    /**
     * whether to use a custom json config (0: no, 1: yes)
     */
    private int customConfig;
    /**
     * custom json, only used when customConfig equals 1
     */
private String json;
/**
* data source type,eg MYSQL, POSTGRES ...
*/
private String dsType;
/**
* datasource id
*/
private int dataSource;
/**
* data target type,eg MYSQL, POSTGRES ...
*/
private String dtType;
/**
* datatarget id
*/
private int dataTarget;
/**
* sql
*/
private String sql;
/**
* target table
*/
private String targetTable;
/**
* Pre Statements
*/
private List<String> preStatements;
/**
* Post Statements
*/
private List<String> postStatements;
/**
* speed byte num
*/
private int jobSpeedByte;
/**
* speed record count
*/
private int jobSpeedRecord;
public int getCustomConfig() {
return customConfig;
}
public void setCustomConfig(int customConfig) {
this.customConfig = customConfig;
}
public String getJson() {
return json;
}
public void setJson(String json) {
this.json = json;
}
public String getDsType() {
return dsType;
}
public void setDsType(String dsType) {
this.dsType = dsType;
}
public int getDataSource() {
return dataSource;
}
public void setDataSource(int dataSource) {
this.dataSource = dataSource;
}
public String getDtType() {
return dtType;
}
public void setDtType(String dtType) {
this.dtType = dtType;
}
public int getDataTarget() {
return dataTarget;
}
public void setDataTarget(int dataTarget) {
this.dataTarget = dataTarget;
}
public String getSql() {
return sql;
}
public void setSql(String sql) {
this.sql = sql;
}
public String getTargetTable() {
return targetTable;
}
public void setTargetTable(String targetTable) {
this.targetTable = targetTable;
}
public List<String> getPreStatements() {
return preStatements;
}
public void setPreStatements(List<String> preStatements) {
this.preStatements = preStatements;
}
public List<String> getPostStatements() {
return postStatements;
}
public void setPostStatements(List<String> postStatements) {
this.postStatements = postStatements;
}
public int getJobSpeedByte() {
return jobSpeedByte;
}
public void setJobSpeedByte(int jobSpeedByte) {
this.jobSpeedByte = jobSpeedByte;
}
public int getJobSpeedRecord() {
return jobSpeedRecord;
}
public void setJobSpeedRecord(int jobSpeedRecord) {
this.jobSpeedRecord = jobSpeedRecord;
}
@Override
public boolean checkParameters() {
if (customConfig == Flag.NO.ordinal()) {
return dataSource != 0
&& dataTarget != 0
&& StringUtils.isNotEmpty(sql)
&& StringUtils.isNotEmpty(targetTable);
} else {
return StringUtils.isNotEmpty(json);
}
}
@Override
public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
@Override
public String toString() {
return "DataxParameters{" +
"customConfig=" + customConfig +
", json='" + json + '\'' +
", dsType='" + dsType + '\'' +
", dataSource=" + dataSource +
", dtType='" + dtType + '\'' +
", dataTarget=" + dataTarget +
", sql='" + sql + '\'' +
", targetTable='" + targetTable + '\'' +
", preStatements=" + preStatements +
", postStatements=" + postStatements +
", jobSpeedByte=" + jobSpeedByte +
", jobSpeedRecord=" + jobSpeedRecord +
'}';
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,985 |
[Feature][Datax] Datax supports setting up running memory
|
DataX's default memory is 1g, which may not be enough when the amount of data is large.
So it needs to support setting the runtime JVM memory

|
https://github.com/apache/dolphinscheduler/issues/3985
|
https://github.com/apache/dolphinscheduler/pull/3986
|
89f1e93bcf936b527856f658e33fe38ead5ec8b9
|
fe3026627fc2d38da08ae396724cc61bc922374a
| 2020-10-24T07:25:07Z |
java
| 2020-11-16T02:55:20Z |
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/DataxParametersTest.java
| |
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,985 |
[Feature][Datax] Datax supports setting up running memory
|
DataX's default memory is 1g, which may not be enough when the amount of data is large.
So it needs to support setting the runtime JVM memory

|
https://github.com/apache/dolphinscheduler/issues/3985
|
https://github.com/apache/dolphinscheduler/pull/3986
|
89f1e93bcf936b527856f658e33fe38ead5ec8b9
|
fe3026627fc2d38da08ae396724cc61bc922374a
| 2020-10-24T07:25:07Z |
java
| 2020-11-16T02:55:20Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTask.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.datax;
import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
import com.alibaba.druid.sql.ast.expr.SQLPropertyExpr;
import com.alibaba.druid.sql.ast.statement.*;
import com.alibaba.druid.sql.parser.SQLStatementParser;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.commons.io.FileUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.DataxUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.CommandExecuteResult;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.slf4j.Logger;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.sql.*;
import java.util.*;
/**
* DataX task
*/
public class DataxTask extends AbstractTask {
/**
* python process(datax only supports version 2.7 by default)
*/
private static final String DATAX_PYTHON = "python2.7";
/**
* datax home path
*/
private static final String DATAX_HOME_EVN = "${DATAX_HOME}";
/**
* datax channel count
*/
private static final int DATAX_CHANNEL_COUNT = 1;
/**
* datax parameters
*/
private DataxParameters dataXParameters;
/**
* shell command executor
*/
private ShellCommandExecutor shellCommandExecutor;
/**
* taskExecutionContext
*/
private TaskExecutionContext taskExecutionContext;
/**
* constructor
* @param taskExecutionContext taskExecutionContext
* @param logger logger
*/
public DataxTask(TaskExecutionContext taskExecutionContext, Logger logger) {
super(taskExecutionContext, logger);
this.taskExecutionContext = taskExecutionContext;
this.shellCommandExecutor = new ShellCommandExecutor(this::logHandle,
taskExecutionContext,logger);
}
/**
* init DataX config
*/
@Override
public void init() {
logger.info("datax task params {}", taskExecutionContext.getTaskParams());
dataXParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), DataxParameters.class);
if (!dataXParameters.checkParameters()) {
throw new RuntimeException("datax task params is not valid");
}
}
/**
* run DataX process
*
* @throws Exception if error throws Exception
*/
@Override
public void handle() throws Exception {
try {
// set the name of the current thread
String threadLoggerInfoName = String.format("TaskLogInfo-%s", taskExecutionContext.getTaskAppId());
Thread.currentThread().setName(threadLoggerInfoName);
// combining local and global parameters
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
dataXParameters.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
            // run datax process
String jsonFilePath = buildDataxJsonFile(paramsMap);
String shellCommandFilePath = buildShellCommandFile(jsonFilePath, paramsMap);
CommandExecuteResult commandExecuteResult = shellCommandExecutor.run(shellCommandFilePath);
setExitStatusCode(commandExecuteResult.getExitStatusCode());
setAppIds(commandExecuteResult.getAppIds());
setProcessId(commandExecuteResult.getProcessId());
}
catch (Exception e) {
logger.error("datax task failure", e);
setExitStatusCode(Constants.EXIT_CODE_FAILURE);
throw e;
}
}
/**
* cancel DataX process
*
* @param cancelApplication cancelApplication
* @throws Exception if error throws Exception
*/
@Override
public void cancelApplication(boolean cancelApplication)
throws Exception {
// cancel process
shellCommandExecutor.cancelApplication();
}
/**
* build datax configuration file
*
* @return datax json file name
* @throws Exception if error throws Exception
*/
private String buildDataxJsonFile(Map<String, Property> paramsMap)
throws Exception {
// generate json
String fileName = String.format("%s/%s_job.json",
taskExecutionContext.getExecutePath(),
taskExecutionContext.getTaskAppId());
String json;
Path path = new File(fileName).toPath();
if (Files.exists(path)) {
return fileName;
}
if (dataXParameters.getCustomConfig() == Flag.YES.ordinal()){
json = dataXParameters.getJson().replaceAll("\\r\\n", "\n");
}else {
ObjectNode job = JSONUtils.createObjectNode();
job.putArray("content").addAll(buildDataxJobContentJson());
job.set("setting", buildDataxJobSettingJson());
ObjectNode root = JSONUtils.createObjectNode();
root.set("job", job);
root.set("core", buildDataxCoreJson());
json = root.toString();
}
// replace placeholder
json = ParameterUtils.convertParameterPlaceholders(json, ParamUtils.convert(paramsMap));
logger.debug("datax job json : {}", json);
// create datax json file
FileUtils.writeStringToFile(new File(fileName), json, StandardCharsets.UTF_8);
return fileName;
}
/**
* build datax job config
*
* @return collection of datax job config JSONObject
* @throws SQLException if error throws SQLException
*/
private List<ObjectNode> buildDataxJobContentJson() {
DataxTaskExecutionContext dataxTaskExecutionContext = taskExecutionContext.getDataxTaskExecutionContext();
BaseDataSource dataSourceCfg = DataSourceFactory.getDatasource(DbType.of(dataxTaskExecutionContext.getSourcetype()),
dataxTaskExecutionContext.getSourceConnectionParams());
BaseDataSource dataTargetCfg = DataSourceFactory.getDatasource(DbType.of(dataxTaskExecutionContext.getTargetType()),
dataxTaskExecutionContext.getTargetConnectionParams());
List<ObjectNode> readerConnArr = new ArrayList<>();
ObjectNode readerConn = JSONUtils.createObjectNode();
ArrayNode sqlArr = readerConn.putArray("querySql");
for (String sql : new String[]{dataXParameters.getSql()}) {
sqlArr.add(sql);
}
ArrayNode urlArr = readerConn.putArray("jdbcUrl");
for (String url : new String[]{dataSourceCfg.getJdbcUrl()}) {
urlArr.add(url);
}
readerConnArr.add(readerConn);
ObjectNode readerParam = JSONUtils.createObjectNode();
readerParam.put("username", dataSourceCfg.getUser());
readerParam.put("password", dataSourceCfg.getPassword());
readerParam.putArray("connection").addAll(readerConnArr);
ObjectNode reader = JSONUtils.createObjectNode();
reader.put("name", DataxUtils.getReaderPluginName(DbType.of(dataxTaskExecutionContext.getSourcetype())));
reader.set("parameter", readerParam);
List<ObjectNode> writerConnArr = new ArrayList<>();
ObjectNode writerConn = JSONUtils.createObjectNode();
ArrayNode tableArr = writerConn.putArray("table");
for (String table : new String[]{dataXParameters.getTargetTable()}) {
tableArr.add(table);
}
writerConn.put("jdbcUrl", dataTargetCfg.getJdbcUrl());
writerConnArr.add(writerConn);
ObjectNode writerParam = JSONUtils.createObjectNode();
writerParam.put("username", dataTargetCfg.getUser());
writerParam.put("password", dataTargetCfg.getPassword());
String[] columns = parsingSqlColumnNames(DbType.of(dataxTaskExecutionContext.getSourcetype()),
DbType.of(dataxTaskExecutionContext.getTargetType()),
dataSourceCfg, dataXParameters.getSql());
ArrayNode columnArr = writerParam.putArray("column");
for (String column : columns) {
columnArr.add(column);
}
writerParam.putArray("connection").addAll(writerConnArr);
if (CollectionUtils.isNotEmpty(dataXParameters.getPreStatements())) {
ArrayNode preSqlArr = writerParam.putArray("preSql");
for (String preSql : dataXParameters.getPreStatements()) {
preSqlArr.add(preSql);
}
}
if (CollectionUtils.isNotEmpty(dataXParameters.getPostStatements())) {
ArrayNode postSqlArr = writerParam.putArray("postSql");
for (String postSql : dataXParameters.getPostStatements()) {
postSqlArr.add(postSql);
}
}
ObjectNode writer = JSONUtils.createObjectNode();
writer.put("name", DataxUtils.getWriterPluginName(DbType.of(dataxTaskExecutionContext.getTargetType())));
writer.set("parameter", writerParam);
List<ObjectNode> contentList = new ArrayList<>();
ObjectNode content = JSONUtils.createObjectNode();
content.put("reader", reader.toString());
content.put("writer", writer.toString());
contentList.add(content);
return contentList;
}
/**
* build datax setting config
*
* @return datax setting config JSONObject
*/
private ObjectNode buildDataxJobSettingJson() {
ObjectNode speed = JSONUtils.createObjectNode();
speed.put("channel", DATAX_CHANNEL_COUNT);
if (dataXParameters.getJobSpeedByte() > 0) {
speed.put("byte", dataXParameters.getJobSpeedByte());
}
if (dataXParameters.getJobSpeedRecord() > 0) {
speed.put("record", dataXParameters.getJobSpeedRecord());
}
ObjectNode errorLimit = JSONUtils.createObjectNode();
errorLimit.put("record", 0);
errorLimit.put("percentage", 0);
ObjectNode setting = JSONUtils.createObjectNode();
setting.put("speed", speed.toString());
setting.put("errorLimit", errorLimit.toString());
return setting;
}
private ObjectNode buildDataxCoreJson() {
ObjectNode speed = JSONUtils.createObjectNode();
speed.put("channel", DATAX_CHANNEL_COUNT);
if (dataXParameters.getJobSpeedByte() > 0) {
speed.put("byte", dataXParameters.getJobSpeedByte());
}
if (dataXParameters.getJobSpeedRecord() > 0) {
speed.put("record", dataXParameters.getJobSpeedRecord());
}
ObjectNode channel = JSONUtils.createObjectNode();
channel.set("speed", speed);
ObjectNode transport = JSONUtils.createObjectNode();
transport.set("channel", channel);
ObjectNode core = JSONUtils.createObjectNode();
core.set("transport", transport);
return core;
}
/**
* create command
*
* @return shell command file name
* @throws Exception if error throws Exception
*/
private String buildShellCommandFile(String jobConfigFilePath, Map<String, Property> paramsMap)
throws Exception {
// generate scripts
String fileName = String.format("%s/%s_node.%s",
taskExecutionContext.getExecutePath(),
taskExecutionContext.getTaskAppId(),
OSUtils.isWindows() ? "bat" : "sh");
Path path = new File(fileName).toPath();
if (Files.exists(path)) {
return fileName;
}
// datax python command
StringBuilder sbr = new StringBuilder();
sbr.append(DATAX_PYTHON);
sbr.append(" ");
sbr.append(DATAX_HOME_EVN);
sbr.append(" ");
sbr.append(jobConfigFilePath);
// replace placeholder
String dataxCommand = ParameterUtils.convertParameterPlaceholders(sbr.toString(), ParamUtils.convert(paramsMap));
logger.debug("raw script : {}", dataxCommand);
// create shell command file
Set<PosixFilePermission> perms = PosixFilePermissions.fromString(Constants.RWXR_XR_X);
FileAttribute<Set<PosixFilePermission>> attr = PosixFilePermissions.asFileAttribute(perms);
if (OSUtils.isWindows()) {
Files.createFile(path);
} else {
Files.createFile(path, attr);
}
Files.write(path, dataxCommand.getBytes(), StandardOpenOption.APPEND);
return fileName;
}
/**
* parsing synchronized column names in SQL statements
*
* @param dsType
* the database type of the data source
* @param dtType
* the database type of the data target
* @param dataSourceCfg
* the database connection parameters of the data source
* @param sql
* sql for data synchronization
* @return Keyword converted column names
*/
private String[] parsingSqlColumnNames(DbType dsType, DbType dtType, BaseDataSource dataSourceCfg, String sql) {
String[] columnNames = tryGrammaticalAnalysisSqlColumnNames(dsType, sql);
if (columnNames == null || columnNames.length == 0) {
logger.info("try to execute sql analysis query column name");
columnNames = tryExecuteSqlResolveColumnNames(dataSourceCfg, sql);
}
notNull(columnNames, String.format("parsing sql columns failed : %s", sql));
return DataxUtils.convertKeywordsColumns(dtType, columnNames);
}
/**
* try grammatical parsing column
*
* @param dbType
* database type
* @param sql
* sql for data synchronization
* @return column name array
* @throws RuntimeException if error throws RuntimeException
*/
private String[] tryGrammaticalAnalysisSqlColumnNames(DbType dbType, String sql) {
String[] columnNames;
try {
SQLStatementParser parser = DataxUtils.getSqlStatementParser(dbType, sql);
notNull(parser, String.format("database driver [%s] is not support", dbType.toString()));
SQLStatement sqlStatement = parser.parseStatement();
SQLSelectStatement sqlSelectStatement = (SQLSelectStatement)sqlStatement;
SQLSelect sqlSelect = sqlSelectStatement.getSelect();
List<SQLSelectItem> selectItemList = null;
if (sqlSelect.getQuery() instanceof SQLSelectQueryBlock) {
SQLSelectQueryBlock block = (SQLSelectQueryBlock)sqlSelect.getQuery();
selectItemList = block.getSelectList();
} else if (sqlSelect.getQuery() instanceof SQLUnionQuery) {
SQLUnionQuery unionQuery = (SQLUnionQuery)sqlSelect.getQuery();
SQLSelectQueryBlock block = (SQLSelectQueryBlock)unionQuery.getRight();
selectItemList = block.getSelectList();
}
notNull(selectItemList,
String.format("select query type [%s] is not support", sqlSelect.getQuery().toString()));
columnNames = new String[selectItemList.size()];
for (int i = 0; i < selectItemList.size(); i++ ) {
SQLSelectItem item = selectItemList.get(i);
String columnName = null;
if (item.getAlias() != null) {
columnName = item.getAlias();
} else if (item.getExpr() != null) {
if (item.getExpr() instanceof SQLPropertyExpr) {
SQLPropertyExpr expr = (SQLPropertyExpr)item.getExpr();
columnName = expr.getName();
} else if (item.getExpr() instanceof SQLIdentifierExpr) {
SQLIdentifierExpr expr = (SQLIdentifierExpr)item.getExpr();
columnName = expr.getName();
}
} else {
throw new RuntimeException(
String.format("grammatical analysis sql column [ %s ] failed", item.toString()));
}
if (columnName == null) {
throw new RuntimeException(
String.format("grammatical analysis sql column [ %s ] failed", item.toString()));
}
columnNames[i] = columnName;
}
}
catch (Exception e) {
logger.warn(e.getMessage(), e);
return null;
}
return columnNames;
}
/**
* try to execute sql to resolve column names
*
* @param baseDataSource
* the database connection parameters
* @param sql
* sql for data synchronization
* @return column name array
*/
public String[] tryExecuteSqlResolveColumnNames(BaseDataSource baseDataSource, String sql) {
String[] columnNames;
sql = String.format("SELECT t.* FROM ( %s ) t WHERE 0 = 1", sql);
sql = sql.replace(";", "");
try (
Connection connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(), baseDataSource.getUser(),
baseDataSource.getPassword());
PreparedStatement stmt = connection.prepareStatement(sql);
ResultSet resultSet = stmt.executeQuery()) {
ResultSetMetaData md = resultSet.getMetaData();
int num = md.getColumnCount();
columnNames = new String[num];
for (int i = 1; i <= num; i++ ) {
columnNames[i - 1] = md.getColumnName(i);
}
}
catch (SQLException e) {
logger.warn(e.getMessage(), e);
return null;
}
return columnNames;
}
@Override
public AbstractParameters getParameters() {
return dataXParameters;
}
private void notNull(Object obj, String message) {
if (obj == null) {
throw new RuntimeException(message);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,985 |
[Feature][Datax] Datax supports setting up running memory
|
DataX's default memory is 1g, which may not be enough when the amount of data is large.
So it needs to support setting the runtime JVM memory

|
https://github.com/apache/dolphinscheduler/issues/3985
|
https://github.com/apache/dolphinscheduler/pull/3986
|
89f1e93bcf936b527856f658e33fe38ead5ec8b9
|
fe3026627fc2d38da08ae396724cc61bc922374a
| 2020-10-24T07:25:07Z |
java
| 2020-11-16T02:55:20Z |
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/datax/DataxTaskTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.datax;
import java.lang.reflect.Method;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.DataxUtils;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
import static org.apache.dolphinscheduler.common.enums.CommandType.START_PROCESS;
/**
* DataxTask Tester.
*/
public class DataxTaskTest {
private static final Logger logger = LoggerFactory.getLogger(DataxTaskTest.class);
private static final String CONNECTION_PARAMS = "{\"user\":\"root\",\"password\":\"123456\",\"address\":\"jdbc:mysql://127.0.0.1:3306\",\"database\":\"test\",\"jdbcUrl\":\"jdbc:mysql://127.0.0.1:3306/test\"}";
private DataxTask dataxTask;
private ProcessService processService;
private ShellCommandExecutor shellCommandExecutor;
private ApplicationContext applicationContext;
private TaskExecutionContext taskExecutionContext;
private TaskProps props = new TaskProps();
@Before
public void before()
throws Exception {
setTaskParems(0);
}
private void setTaskParems(Integer customConfig) {
processService = Mockito.mock(ProcessService.class);
shellCommandExecutor = Mockito.mock(ShellCommandExecutor.class);
applicationContext = Mockito.mock(ApplicationContext.class);
SpringApplicationContext springApplicationContext = new SpringApplicationContext();
springApplicationContext.setApplicationContext(applicationContext);
Mockito.when(applicationContext.getBean(ProcessService.class)).thenReturn(processService);
TaskProps props = new TaskProps();
props.setExecutePath("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstanceId(1);
props.setTenantCode("1");
props.setEnvFile(".dolphinscheduler_env.sh");
props.setTaskStartTime(new Date());
props.setTaskTimeout(0);
if (customConfig == 1) {
props.setTaskParams(
"{\"customConfig\":1, \"localParams\":[{\"prop\":\"test\",\"value\":\"38294729\"}],\"json\":\"{\\\"job\\\":{\\\"setting\\\":{\\\"speed\\\":{\\\"byte\\\":1048576},\\\"errorLimit\\\":{\\\"record\\\":0,\\\"percentage\\\":0.02}},\\\"content\\\":[{\\\"reader\\\":{\\\"name\\\":\\\"rdbmsreader\\\",\\\"parameter\\\":{\\\"username\\\":\\\"xxx\\\",\\\"password\\\":\\\"${test}\\\",\\\"column\\\":[\\\"id\\\",\\\"name\\\"],\\\"splitPk\\\":\\\"pk\\\",\\\"connection\\\":[{\\\"querySql\\\":[\\\"SELECT * from dual\\\"],\\\"jdbcUrl\\\":[\\\"jdbc:dm://ip:port/database\\\"]}],\\\"fetchSize\\\":1024,\\\"where\\\":\\\"1 = 1\\\"}},\\\"writer\\\":{\\\"name\\\":\\\"streamwriter\\\",\\\"parameter\\\":{\\\"print\\\":true}}}]}}\"}");
// "{\"customConfig\":1,\"json\":\"{\\\"job\\\":{\\\"setting\\\":{\\\"speed\\\":{\\\"byte\\\":1048576},\\\"errorLimit\\\":{\\\"record\\\":0,\\\"percentage\\\":0.02}},\\\"content\\\":[{\\\"reader\\\":{\\\"name\\\":\\\"rdbmsreader\\\",\\\"parameter\\\":{\\\"username\\\":\\\"xxx\\\",\\\"password\\\":\\\"xxx\\\",\\\"column\\\":[\\\"id\\\",\\\"name\\\"],\\\"splitPk\\\":\\\"pk\\\",\\\"connection\\\":[{\\\"querySql\\\":[\\\"SELECT * from dual\\\"],\\\"jdbcUrl\\\":[\\\"jdbc:dm://ip:port/database\\\"]}],\\\"fetchSize\\\":1024,\\\"where\\\":\\\"1 = 1\\\"}},\\\"writer\\\":{\\\"name\\\":\\\"streamwriter\\\",\\\"parameter\\\":{\\\"print\\\":true}}}]}}\"}");
} else {
props.setTaskParams(
"{\"customConfig\":0,\"targetTable\":\"test\",\"postStatements\":[],\"jobSpeedByte\":1024,\"jobSpeedRecord\":1000,\"dtType\":\"MYSQL\",\"dataSource\":1,\"dsType\":\"MYSQL\",\"dataTarget\":2,\"jobSpeedByte\":0,\"sql\":\"select 1 as test from dual\",\"preStatements\":[\"delete from test\"],\"postStatements\":[\"delete from test\"]}");
}
taskExecutionContext = Mockito.mock(TaskExecutionContext.class);
Mockito.when(taskExecutionContext.getTaskParams()).thenReturn(props.getTaskParams());
Mockito.when(taskExecutionContext.getExecutePath()).thenReturn("/tmp");
Mockito.when(taskExecutionContext.getTaskAppId()).thenReturn(UUID.randomUUID().toString());
Mockito.when(taskExecutionContext.getTenantCode()).thenReturn("root");
Mockito.when(taskExecutionContext.getStartTime()).thenReturn(new Date());
Mockito.when(taskExecutionContext.getTaskTimeout()).thenReturn(10000);
Mockito.when(taskExecutionContext.getLogPath()).thenReturn("/tmp/dx");
DataxTaskExecutionContext dataxTaskExecutionContext = new DataxTaskExecutionContext();
dataxTaskExecutionContext.setSourcetype(0);
dataxTaskExecutionContext.setTargetType(0);
dataxTaskExecutionContext.setSourceConnectionParams(CONNECTION_PARAMS);
dataxTaskExecutionContext.setTargetConnectionParams(CONNECTION_PARAMS);
Mockito.when(taskExecutionContext.getDataxTaskExecutionContext()).thenReturn(dataxTaskExecutionContext);
dataxTask = PowerMockito.spy(new DataxTask(taskExecutionContext, logger));
dataxTask.init();
props.setCmdTypeIfComplement(START_PROCESS);
Mockito.when(processService.findDataSourceById(1)).thenReturn(getDataSource());
Mockito.when(processService.findDataSourceById(2)).thenReturn(getDataSource());
Mockito.when(processService.findProcessInstanceByTaskId(1)).thenReturn(getProcessInstance());
String fileName = String.format("%s/%s_node.sh", props.getExecutePath(), props.getTaskAppId());
try {
Mockito.when(shellCommandExecutor.run(fileName)).thenReturn(null);
} catch (Exception e) {
e.printStackTrace();
}
dataxTask = PowerMockito.spy(new DataxTask(taskExecutionContext, logger));
dataxTask.init();
}
private DataSource getDataSource() {
DataSource dataSource = new DataSource();
dataSource.setType(DbType.MYSQL);
dataSource.setConnectionParams(CONNECTION_PARAMS);
dataSource.setUserId(1);
return dataSource;
}
private ProcessInstance getProcessInstance() {
ProcessInstance processInstance = new ProcessInstance();
processInstance.setCommandType(START_PROCESS);
processInstance.setScheduleTime(new Date());
return processInstance;
}
@After
public void after()
throws Exception {
}
/**
* Method: DataxTask()
*/
@Test
public void testDataxTask()
throws Exception {
TaskProps props = new TaskProps();
props.setExecutePath("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstanceId(1);
props.setTenantCode("1");
Assert.assertNotNull(new DataxTask(null, logger));
}
/**
* Method: init
*/
@Test
public void testInit()
throws Exception {
try {
dataxTask.init();
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: handle()
*/
@Test
public void testHandle()
throws Exception {
}
/**
* Method: cancelApplication()
*/
@Test
public void testCancelApplication()
throws Exception {
try {
dataxTask.cancelApplication(true);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: parsingSqlColumnNames(DbType dsType, DbType dtType, BaseDataSource
* dataSourceCfg, String sql)
*/
@Test
public void testParsingSqlColumnNames()
throws Exception {
try {
BaseDataSource dataSource = DataSourceFactory.getDatasource(getDataSource().getType(),
getDataSource().getConnectionParams());
Method method = DataxTask.class.getDeclaredMethod("parsingSqlColumnNames", DbType.class, DbType.class, BaseDataSource.class, String.class);
method.setAccessible(true);
String[] columns = (String[]) method.invoke(dataxTask, DbType.MYSQL, DbType.MYSQL, dataSource, "select 1 as a, 2 as `table` from dual");
Assert.assertNotNull(columns);
Assert.assertTrue(columns.length == 2);
Assert.assertEquals("[`a`, `table`]", Arrays.toString(columns));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: tryGrammaticalAnalysisSqlColumnNames(DbType dbType, String sql)
*/
@Test
public void testTryGrammaticalAnalysisSqlColumnNames()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("tryGrammaticalAnalysisSqlColumnNames", DbType.class, String.class);
method.setAccessible(true);
String[] columns = (String[]) method.invoke(dataxTask, DbType.MYSQL, "select t1.a, t1.b from test t1 union all select a, t2.b from (select a, b from test) t2");
Assert.assertNotNull(columns);
Assert.assertTrue(columns.length == 2);
Assert.assertEquals("[a, b]", Arrays.toString(columns));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: tryExecuteSqlResolveColumnNames(BaseDataSource baseDataSource,
* String sql)
*/
@Test
public void testTryExecuteSqlResolveColumnNames()
throws Exception {
// TODO: Test goes here...
}
/**
* Method: buildDataxJsonFile()
*/
@Test
public void testBuildDataxJsonFile()
throws Exception {
try {
setTaskParems(1);
Method method = DataxTask.class.getDeclaredMethod("buildDataxJsonFile");
method.setAccessible(true);
String filePath = (String) method.invoke(dataxTask, null);
Assert.assertNotNull(filePath);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJsonFile()
*/
@Test
public void testBuildDataxJsonFile0()
throws Exception {
try {
setTaskParems(0);
Method method = DataxTask.class.getDeclaredMethod("buildDataxJsonFile");
method.setAccessible(true);
String filePath = (String) method.invoke(dataxTask, null);
Assert.assertNotNull(filePath);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJobContentJson()
*/
@Test
public void testBuildDataxJobContentJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxJobContentJson");
method.setAccessible(true);
List<ObjectNode> contentList = (List<ObjectNode>) method.invoke(dataxTask, null);
Assert.assertNotNull(contentList);
ObjectNode content = contentList.get(0);
JsonNode reader = JSONUtils.parseObject(content.path("reader").asText());
Assert.assertNotNull(reader);
String readerPluginName = reader.path("name").asText();
Assert.assertEquals(DataxUtils.DATAX_READER_PLUGIN_MYSQL, readerPluginName);
JsonNode writer = JSONUtils.parseObject(content.path("writer").asText());
Assert.assertNotNull(writer);
String writerPluginName = writer.path("name").asText();
Assert.assertEquals(DataxUtils.DATAX_WRITER_PLUGIN_MYSQL, writerPluginName);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxJobSettingJson()
*/
@Test
public void testBuildDataxJobSettingJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxJobSettingJson");
method.setAccessible(true);
JsonNode setting = (JsonNode) method.invoke(dataxTask, null);
Assert.assertNotNull(setting);
Assert.assertNotNull(setting.get("speed"));
Assert.assertNotNull(setting.get("errorLimit"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildDataxCoreJson()
*/
@Test
public void testBuildDataxCoreJson()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildDataxCoreJson");
method.setAccessible(true);
ObjectNode coreConfig = (ObjectNode) method.invoke(dataxTask, null);
Assert.assertNotNull(coreConfig);
Assert.assertNotNull(coreConfig.get("transport"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: buildShellCommandFile(String jobConfigFilePath)
*/
@Test
public void testBuildShellCommandFile()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("buildShellCommandFile", String.class);
method.setAccessible(true);
Assert.assertNotNull(method.invoke(dataxTask, "test.json"));
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
/**
* Method: getParameters
*/
@Test
public void testGetParameters()
throws Exception {
Assert.assertTrue(dataxTask.getParameters() != null);
}
/**
* Method: notNull(Object obj, String message)
*/
@Test
public void testNotNull()
throws Exception {
try {
Method method = DataxTask.class.getDeclaredMethod("notNull", Object.class, String.class);
method.setAccessible(true);
method.invoke(dataxTask, "abc", "test throw RuntimeException");
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,985 |
[Feature][Datax] Datax supports setting up running memory
|
DataX's default JVM memory is 1g; when the amount of data is large, this is not enough.
So the runtime JVM memory needs to be configurable for DataX tasks.

|
https://github.com/apache/dolphinscheduler/issues/3985
|
https://github.com/apache/dolphinscheduler/pull/3986
|
89f1e93bcf936b527856f658e33fe38ead5ec8b9
|
fe3026627fc2d38da08ae396724cc61bc922374a
| 2020-10-24T07:25:07Z |
java
| 2020-11-16T02:55:20Z |
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/datax.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="datax-model">
<m-list-box>
<div slot="text">{{$t('Custom template')}}</div>
<div slot="content">
<label class="label-box">
<div style="padding-top: 5px;">
<x-switch v-model="enable" @on-click="_onSwitch" :disabled="isDetails"></x-switch>
</div>
</label>
</div>
</m-list-box>
<div v-if="!enable">
<m-list-box>
<div slot="text">{{$t('Datasource')}}</div>
<div slot="content">
<m-datasource
ref="refDs"
@on-dsData="_onDsData"
:supportType="['MYSQL','POSTGRESQL', 'ORACLE', 'SQLSERVER']"
:data="{ type:dsType,datasource:datasource }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('SQL Statement')}}</div>
<div slot="content">
<div class="from-mirror">
<textarea
id="code-sql-mirror"
name="code-sql-mirror"
style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="ans-icon-max" @click="setEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}</div>
<div slot="content">
<m-datasource
ref="refDt"
@on-dsData="_onDtData"
:supportType="['MYSQL','POSTGRESQL', 'ORACLE', 'SQLSERVER']"
:data="{ type:dtType,datasource:datatarget }">
</m-datasource>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetTable')}}</div>
<div slot="content">
<x-input
type="input"
v-model="targetTable"
:placeholder="$t('Please enter the table of target')"
autocomplete="off">
</x-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}{{$t('Pre Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPreStatements"
@on-statement-list="_onPreStatements"
:statement-list="preStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('TargetDataBase')}}{{$t('Post Statement')}}</div>
<div slot="content">
<m-statement-list
ref="refPostStatements"
@on-statement-list="_onPostStatements"
:statement-list="postStatements">
</m-statement-list>
</div>
</m-list-box>
<m-list-box>
<div slot="text">
<span>{{$t('SpeedByte')}}</span>
</div>
<div slot="content">
<m-select-input v-model="jobSpeedByte" :list="[0,1,10,50,100,512]">
</m-select-input>
<span>({{$t('0 means unlimited by byte')}})</span>
</div>
</m-list-box>
<m-list-box>
<div slot="text">
<span>{{$t('SpeedRecord')}}</span>
</div>
<div slot="content">
<m-select-input v-model="jobSpeedRecord" :list="[0,500,1000,1500,2000,2500,3000]">
</m-select-input>
<span>({{$t('0 means unlimited by count')}})</span>
</div>
</m-list-box>
</div>
<div v-else>
<m-list-box>
<div slot="text">json</div>
<div slot="content">
<div class="from-mirror">
<textarea
id="code-json-mirror"
name="code-json-mirror"
style="opacity: 0;">
</textarea>
<a class="ans-modal-box-max">
<em class="ans-icon-max" @click="setJsonEditorVal"></em>
</a>
</div>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mListBox from './_source/listBox'
import mScriptBox from './_source/scriptBox'
import mDatasource from './_source/datasource'
import mLocalParams from './_source/localParams'
import mStatementList from './_source/statementList'
import disabledState from '@/module/mixin/disabledState'
import mSelectInput from '../_source/selectInput'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
let jsonEditor
export default {
name: 'datax',
data () {
return {
// Data Custom template
enable: false,
// Data source type
dsType: '',
// data source
datasource: '',
// Data source type
dtType: '',
// data source
datatarget: '',
// Return to the selected data source
rtDatasource: '',
// Return to the selected data target
rtDatatarget: '',
// Sql statement
sql: '',
json: '',
// target table
targetTable: '',
// Pre statements
preStatements: [],
// Post statements
postStatements: [],
// speed byte
jobSpeedByte: 0,
// speed record
jobSpeedRecord: 1000,
// Custom parameter
localParams: [],
customConfig: 0,
}
},
mixins: [disabledState],
props: {
backfillItem: Object,
createNodeId: Number
},
methods: {
setEditorVal() {
let self = this
let modal = self.$modal.dialog({
className: 'scriptModal',
closable: false,
showMask: true,
maskClosable: true,
onClose: function() {
},
render (h) {
return h(mScriptBox, {
on: {
getSriptBoxValue (val) {
editor.setValue(val)
},
closeAble () {
// this.$modal.destroy()
modal.remove()
}
},
props: {
item: editor.getValue()
}
})
}
})
},
setJsonEditorVal() {
let self = this
let modal = self.$modal.dialog({
className: 'scriptModal',
closable: false,
showMask: true,
maskClosable: true,
onClose: function() {
},
render (h) {
return h(mScriptBox, {
on: {
getSriptBoxValue (val) {
jsonEditor.setValue(val)
},
closeAble () {
// this.$modal.destroy()
modal.remove()
}
},
props: {
item: jsonEditor.getValue()
}
})
}
})
},
_onSwitch (is) {
if(is) {
this.customConfig = 1
setTimeout(() => {
this._handlerJsonEditor()
}, 200)
} else {
this.customConfig = 0
setTimeout(() => {
this._handlerEditor()
}, 200)
}
},
/**
* return data source
*/
_onDsData (o) {
this.dsType = o.type
this.rtDatasource = o.datasource
},
/**
* return data target
*/
_onDtData (o) {
this.dtType = o.type
this.rtDatatarget = o.datasource
},
/**
* return pre statements
*/
_onPreStatements (a) {
this.preStatements = a
},
/**
* return post statements
*/
_onPostStatements (a) {
this.postStatements = a
},
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
/**
* verification
*/
_verification () {
if(this.customConfig) {
if (!jsonEditor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a JSON Statement(required)')}`)
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
customConfig: this.customConfig,
json: jsonEditor.getValue(),
localParams: this.localParams
})
return true
} else {
if (!editor.getValue()) {
this.$message.warning(`${i18n.$t('Please enter a SQL Statement(required)')}`)
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDs._verifDatasource()) {
return false
}
// datasource Subcomponent verification
if (!this.$refs.refDt._verifDatasource()) {
return false
}
if (!this.targetTable) {
this.$message.warning(`${i18n.$t('Please enter a Target Table(required)')}`)
return false
}
// preStatements Subcomponent verification
if (!this.$refs.refPreStatements._verifProp()) {
return false
}
// postStatements Subcomponent verification
if (!this.$refs.refPostStatements._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
customConfig: this.customConfig,
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
sql: editor.getValue(),
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements
})
return true
}
},
/**
* Processing code highlighting
*/
_handlerEditor () {
this._destroyEditor()
// editor
editor = codemirror('code-sql-mirror', {
mode: 'sql',
readOnly: this.isDetails
})
this.keypress = () => {
if (!editor.getOption('readOnly')) {
editor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
editor.on('keypress', this.keypress)
editor.on('changes', () => {
this._cacheParams()
})
editor.setValue(this.sql)
return editor
},
_handlerJsonEditor () {
this._destroyJsonEditor()
// jsonEditor
jsonEditor = codemirror('code-json-mirror', {
mode: 'json',
readOnly: this.isDetails
})
this.keypress = () => {
if (!jsonEditor.getOption('readOnly')) {
jsonEditor.showHint({
completeSingle: false
})
}
}
// Monitor keyboard
jsonEditor.on('keypress', this.keypress)
jsonEditor.on('changes', () => {
// this._cacheParams()
})
jsonEditor.setValue(this.json)
return jsonEditor
},
_cacheParams () {
this.$emit('on-cache-params', {
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
sql: editor?editor.getValue():'',
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements
});
},
_destroyEditor () {
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
editor.off($('.code-sql-mirror'), 'changes', this.changes)
}
},
_destroyJsonEditor () {
if (jsonEditor) {
jsonEditor.toTextArea() // Uninstall
jsonEditor.off($('.code-json-mirror'), 'keypress', this.keypress)
jsonEditor.off($('.code-json-mirror'), 'changes', this.changes)
}
}
},
created () {
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
// backfill
if(o.params.customConfig == 0) {
this.customConfig = 0
this.enable = false
this.dsType = o.params.dsType || ''
this.datasource = o.params.dataSource || ''
this.dtType = o.params.dtType || ''
this.datatarget = o.params.dataTarget || ''
this.sql = o.params.sql || ''
this.targetTable = o.params.targetTable || ''
this.jobSpeedByte = o.params.jobSpeedByte / 1024 || 0
this.jobSpeedRecord = o.params.jobSpeedRecord || 0
this.preStatements = o.params.preStatements || []
this.postStatements = o.params.postStatements || []
} else {
this.customConfig = 1
this.enable = true
this.json = o.params.json || ''
this.localParams = o.params.localParams || []
}
}
},
mounted () {
if(this.customConfig) {
setTimeout(() => {
this._handlerJsonEditor()
}, 200)
} else {
setTimeout(() => {
this._handlerEditor()
}, 200)
}
},
destroyed () {
/**
* Destroy the editor instance
*/
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
}
if (jsonEditor) {
jsonEditor.toTextArea() // Uninstall
jsonEditor.off($('.code-json-mirror'), 'keypress', this.keypress)
}
},
watch: {
//Watch the cacheParams
cacheParams (val) {
this._cacheParams();
}
},
computed: {
cacheParams () {
return {
dsType: this.dsType,
dataSource: this.rtDatasource,
dtType: this.dtType,
dataTarget: this.rtDatatarget,
targetTable: this.targetTable,
jobSpeedByte: this.jobSpeedByte * 1024,
jobSpeedRecord: this.jobSpeedRecord,
preStatements: this.preStatements,
postStatements: this.postStatements
}
}
},
components: { mListBox, mDatasource, mLocalParams, mStatementList, mSelectInput }
}
</script>
<style lang="scss" rel="stylesheet/scss" scope>
.ans-modal-box-max {
position: absolute;
right: -12px;
top: -16px;
}
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,985 |
[Feature][Datax] Datax supports setting up running memory
|
DataX's default JVM memory is 1g; when the amount of data is large, this is not enough.
So the runtime JVM memory needs to be configurable for DataX tasks.

|
https://github.com/apache/dolphinscheduler/issues/3985
|
https://github.com/apache/dolphinscheduler/pull/3986
|
89f1e93bcf936b527856f658e33fe38ead5ec8b9
|
fe3026627fc2d38da08ae396724cc61bc922374a
| 2020-10-24T07:25:07Z |
java
| 2020-11-16T02:55:20Z |
dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export default {
'User Name': 'User Name',
'Please enter user name': 'Please enter user name',
Password: 'Password',
'Please enter your password': 'Please enter your password',
'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22',
Login: 'Login',
Home: 'Home',
'Failed to create node to save': 'Failed to create node to save',
'Global parameters': 'Global parameters',
'Local parameters': 'Local parameters',
'Copy success': 'Copy success',
'The browser does not support automatic copying': 'The browser does not support automatic copying',
'Whether to save the DAG graph': 'Whether to save the DAG graph',
'Current node settings': 'Current node settings',
'View history': 'View history',
'View log': 'View log',
'Force success': 'Force success',
'Enter this child node': 'Enter this child node',
'Node name': 'Node name',
'Run flag': 'Run flag',
Normal: 'Normal',
'Prohibition execution': 'Prohibition execution',
'Please enter description': 'Please enter description',
'Number of failed retries': 'Number of failed retries',
Times: 'Times',
'Failed retry interval': 'Failed retry interval',
Minute: 'Minute',
'Delay execution time': 'Delay execution time',
'Delay execution': 'Delay execution',
Cancel: 'Cancel',
'Confirm add': 'Confirm add',
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process',
'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process',
'Please enter name (required)': 'Please enter name (required)',
'Name already exists': 'Name already exists',
'Download Log': 'Download Log',
'Refresh Log': 'Refresh Log',
'Enter full screen': 'Enter full screen',
'Cancel full screen': 'Cancel full screen',
Close: 'Close',
'Update log success': 'Update log success',
'No more logs': 'No more logs',
'No log': 'No log',
'Loading Log...': 'Loading Log...',
'Set the DAG diagram name': 'Set the DAG diagram name',
'Please enter description(optional)': 'Please enter description(optional)',
'Set global': 'Set global',
'Whether to update the process definition': 'Whether to update the process definition',
Add: 'Add',
'DAG graph name cannot be empty': 'DAG graph name cannot be empty',
'Create Datasource': 'Create Datasource',
'Project Home': 'Project Home',
'Project Manage': 'Project',
'Create Project': 'Create Project',
'Cron Manage': 'Cron Manage',
'Copy Workflow': 'Copy Workflow',
'Tenant Manage': 'Tenant Manage',
'Create Tenant': 'Create Tenant',
'User Manage': 'User Manage',
'Create User': 'Create User',
'User Information': 'User Information',
'Edit Password': 'Edit Password',
success: 'success',
failed: 'failed',
delete: 'delete',
'Please choose': 'Please choose',
'Please enter a positive integer': 'Please enter a positive integer',
'Program Type': 'Program Type',
'Main class': 'Main class',
'Main jar package': 'Main jar package',
'Please enter main jar package': 'Please enter main jar package',
'Main package': 'Main package',
'Please enter main package': 'Please enter main package',
'Command-line parameters': 'Command-line parameters',
'Please enter Command-line parameters': 'Please enter Command-line parameters',
'Other parameters': 'Other parameters',
'Please enter other parameters': 'Please enter other parameters',
Resources: 'Resources',
'Custom Parameters': 'Custom Parameters',
'Custom template': 'Custom template',
Datasource: 'Datasource',
methods: 'methods',
'Please enter method(optional)': 'Please enter method(optional)',
Script: 'Script',
'Please enter script(required)': 'Please enter script(required)',
'Deploy Mode': 'Deploy Mode',
'Flink Version': 'Flink Version',
'Driver core number': 'Driver core number',
'Please enter driver core number': 'Please enter driver core number',
'Driver memory use': 'Driver memory use',
'Please enter driver memory use': 'Please enter driver memory use',
'Number of Executors': 'Number of Executors',
'Please enter the number of Executor': 'Please enter the number of Executor',
'Executor memory': 'Executor memory',
'Please enter the Executor memory': 'Please enter the Executor memory',
'Executor core number': 'Executor core number',
'Please enter Executor core number': 'Please enter Executor core number',
'The number of Executors should be a positive integer': 'The number of Executors should be a positive integer',
'Memory should be a positive integer': 'Memory should be a positive integer',
'Please enter ExecutorPlease enter Executor core number': 'Please enter Executor core number',
'Core number should be positive integer': 'Core number should be positive integer',
'Please enter jobManager memory': 'Please enter jobManager memory',
'Please enter the taskManager memory': 'Please enter the taskManager memory',
'Please enter solt number': 'Please enter slot number',
'Please enter taskManager number': 'Please enter taskManager number',
'SQL Type': 'SQL Type',
Title: 'Title',
'Please enter the title of email': 'Please enter the title of email',
Table: 'Table',
TableMode: 'Table',
Attachment: 'Attachment',
'SQL Parameter': 'SQL Parameter',
'SQL Statement': 'SQL Statement',
'UDF Function': 'UDF Function',
'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)',
'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)',
'One form or attachment must be selected': 'One form or attachment must be selected',
'Recipient required': 'Recipient required',
'Mail subject required': 'Mail subject required',
'Child Node': 'Child Node',
'Please select a sub-Process': 'Please select a sub-Process',
Edit: 'Edit',
'Switch To This Version': 'Switch To This Version',
'Datasource Name': 'Datasource Name',
'Please enter datasource name': 'Please enter datasource name',
IP: 'IP',
'Please enter IP': 'Please enter IP',
Port: 'Port',
'Please enter port': 'Please enter port',
'Database Name': 'Database Name',
'Please enter database name': 'Please enter database name',
'Oracle Connect Type': 'ServiceName or SID',
'Oracle Service Name': 'ServiceName',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc connect parameters',
'Test Connect': 'Test Connect',
'Please enter resource name': 'Please enter resource name',
'Please enter resource folder name': 'Please enter resource folder name',
'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement',
'Please enter IP/hostname': 'Please enter IP/hostname',
'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format',
'#': '#',
'Datasource Type': 'Datasource Type',
'Datasource Parameter': 'Datasource Parameter',
'Create Time': 'Create Time',
'Update Time': 'Update Time',
Operation: 'Operation',
'Current Version': 'Current Version',
'Click to view': 'Click to view',
'Delete?': 'Delete?',
'Switch Version Successfully': 'Switch Version Successfully',
'Confirm Switch To This Version?': 'Confirm Switch To This Version?',
Confirm: 'Confirm',
'Task status statistics': 'Task Status Statistics',
Number: 'Number',
State: 'State',
'Process Status Statistics': 'Process Status Statistics',
'Process Definition Statistics': 'Process Definition Statistics',
'Project Name': 'Project Name',
'Please enter name': 'Please enter name',
'Owned Users': 'Owned Users',
'Process Pid': 'Process Pid',
'Zk registration directory': 'Zk registration directory',
cpuUsage: 'cpuUsage',
memoryUsage: 'memoryUsage',
'Last heartbeat time': 'Last heartbeat time',
'Edit Tenant': 'Edit Tenant',
'Tenant Code': 'Tenant Code',
Queue: 'Yarn Queue',
'Tenant Name': 'Tenant Name',
'Please select a queue': 'default is tenant association queue',
'Please enter the tenant code in English': 'Please enter the tenant code in English',
'Please enter tenant code in English': 'Please enter tenant code in English',
'Please enter tenant code': 'Please enter tenant code',
'Please enter tenant Name': 'Please enter tenant Name',
'The tenant code. Only letters or a combination of letters and numbers are allowed': 'The tenant code. Only letters or a combination of letters and numbers are allowed',
'The tenant code cannot be all numbers': 'The tenant code cannot be all numbers',
'Edit User': 'Edit User',
Tenant: 'Tenant',
Email: 'Email',
Phone: 'Phone',
'User Type':'User Type',
'Please enter phone number': 'Please enter phone number',
'Please enter main class': 'Please enter main class',
'Please enter email': 'Please enter email',
'Please enter the correct email format': 'Please enter the correct email format',
'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format',
Project: 'Project',
Authorize: 'Authorize',
'File resources': 'File resources',
'UDF resources': 'UDF resources',
'Please select UDF resources directory': 'Please select UDF resources directory',
'UDF resources directory': 'UDF resources directory',
'Upload File Size': 'Upload File size cannot exceed 1g',
'Edit alarm group': 'Edit alarm group',
'Create alarm group': 'Create alarm group',
'Group Name': 'Group Name',
'Please enter group name': 'Please enter group name',
'Group Type': 'Group Type',
Remarks: 'Remarks',
SMS: 'SMS',
'Managing Users': 'Managing Users',
Permission: 'Permission',
Administrator: 'Administrator',
'Confirm Password': 'Confirm Password',
'Please enter confirm password': 'Please enter confirm password',
'Password cannot be in Chinese': 'Password cannot be in Chinese',
'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password',
'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese',
'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password',
'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password',
'Please select the datasource': 'Please select the datasource',
'Please select resources': 'Please select resources',
Query: 'Query',
'Non Query': 'Non Query',
'prop(required)': 'prop(required)',
'value(optional)': 'value(optional)',
'value(required)': 'value(required)',
'prop is empty': 'prop is empty',
'value is empty': 'value is empty',
'prop is repeat': 'prop is repeat',
'Start Time': 'Start Time',
'End Time': 'End Time',
crontab: 'crontab',
'Failure Strategy': 'Failure Strategy',
online: 'online',
offline: 'offline',
'Task Status': 'Task Status',
'Process Instance': 'Process Instance',
'Task Instance': 'Task Instance',
'Select date range': 'Select date range',
Date: 'Date',
waiting: 'waiting',
execution: 'execution',
finish: 'finish',
'Create File': 'Create File',
'Create folder': 'Create folder',
'File Name': 'File Name',
'Folder Name': 'Folder Name',
'File Format': 'File Format',
'Folder Format': 'Folder Format',
'File Content': 'File Content',
Create: 'Create',
'Please enter the resource content': 'Please enter the resource content',
'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines',
'File Details': 'File Details',
'Download Details': 'Download Details',
Return: 'Return',
Save: 'Save',
'File Manage': 'File Manage',
'Upload Files': 'Upload Files',
'Create UDF Function': 'Create UDF Function',
'Upload UDF Resources': 'Upload UDF Resources',
'Service-Master': 'Service-Master',
'Service-Worker': 'Service-Worker',
'Process Name': 'Process Name',
Executor: 'Executor',
'Run Type': 'Run Type',
'Scheduling Time': 'Scheduling Time',
'Run Times': 'Run Times',
host: 'host',
'fault-tolerant sign': 'fault-tolerant sign',
Rerun: 'Rerun',
'Recovery Failed': 'Recovery Failed',
Stop: 'Stop',
Pause: 'Pause',
'Recovery Suspend': 'Recovery Suspend',
Gantt: 'Gantt',
Name: 'Name',
'Node Type': 'Node Type',
'Submit Time': 'Submit Time',
Duration: 'Duration',
'Retry Count': 'Retry Count',
'Task Name': 'Task Name',
'Task Date': 'Task Date',
'Source Table': 'Source Table',
'Record Number': 'Record Number',
'Target Table': 'Target Table',
'Online viewing type is not supported': 'Online viewing type is not supported',
Size: 'Size',
Rename: 'Rename',
Download: 'Download',
Export: 'Export',
'Version Info': 'Version Info',
Submit: 'Submit',
'Edit UDF Function': 'Edit UDF Function',
type: 'type',
'UDF Function Name': 'UDF Function Name',
FILE: 'FILE',
UDF: 'UDF',
'File Subdirectory': 'File Subdirectory',
'Please enter a function name': 'Please enter a function name',
'Package Name': 'Package Name',
'Please enter a Package name': 'Please enter a Package name',
Parameter: 'Parameter',
'Please enter a parameter': 'Please enter a parameter',
'UDF Resources': 'UDF Resources',
'Upload Resources': 'Upload Resources',
Instructions: 'Instructions',
'Please enter a instructions': 'Please enter instructions',
'Please enter a UDF function name': 'Please enter a UDF function name',
'Select UDF Resources': 'Select UDF Resources',
'Class Name': 'Class Name',
'Jar Package': 'Jar Package',
'Library Name': 'Library Name',
'UDF Resource Name': 'UDF Resource Name',
'File Size': 'File Size',
Description: 'Description',
'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items',
'Select Line Connection': 'Select Line Connection',
'Delete selected lines or nodes': 'Delete selected lines or nodes',
'Full Screen': 'Full Screen',
Unpublished: 'Unpublished',
'Start Process': 'Start Process',
'Execute from the current node': 'Execute from the current node',
'Recover tolerance fault process': 'Recover tolerance fault process',
'Resume the suspension process': 'Resume the suspension process',
'Execute from the failed nodes': 'Execute from the failed nodes',
'Complement Data': 'Complement Data',
slot: 'slot',
taskManager: 'taskManager',
jobManagerMemory: 'jobManagerMemory',
taskManagerMemory: 'taskManagerMemory',
'Scheduling execution': 'Scheduling execution',
'Recovery waiting thread': 'Recovery waiting thread',
'Submitted successfully': 'Submitted successfully',
Executing: 'Executing',
'Ready to pause': 'Ready to pause',
'Ready to stop': 'Ready to stop',
'Need fault tolerance': 'Need fault tolerance',
kill: 'kill',
'Waiting for thread': 'Waiting for thread',
'Waiting for dependence': 'Waiting for dependence',
Start: 'Start',
Copy: 'Copy',
'Copy name': 'Copy name',
Delete: 'Delete',
'Please enter keyword': 'Please enter keyword',
'File Upload': 'File Upload',
'File ReUpload': 'File ReUpload',
'Drag the file into the current upload window': 'Drag the file into the current upload window',
'Drag area upload': 'Drag area upload',
Upload: 'Upload',
'ReUpload File': 'Re-upload file',
'Please enter file name': 'Please enter file name',
'Please select the file to upload': 'Please select the file to upload',
'Resources manage': 'Resources',
Security: 'Security',
Logout: 'Logout',
'No data': 'No data',
'Uploading...': 'Uploading...',
'Loading...': 'Loading...',
List: 'List',
'Unable to download without proper url': 'Unable to download without proper url',
Process: 'Process',
'Process definition': 'Process definition',
'Task record': 'Task record',
'Warning group manage': 'Warning group manage',
'Servers manage': 'Servers manage',
'UDF manage': 'UDF manage',
'Resource manage': 'Resource manage',
'Function manage': 'Function manage',
'Edit password': 'Edit password',
'Ordinary users': 'Ordinary users',
'Create process': 'Create process',
'Import process': 'Import process',
'Timing state': 'Timing state',
Timing: 'Timing',
TreeView: 'TreeView',
'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat',
'Mailbox input is illegal': 'Mailbox input is illegal',
'Please set the parameters before starting': 'Please set the parameters before starting',
Continue: 'Continue',
End: 'End',
'Node execution': 'Node execution',
'Backward execution': 'Backward execution',
'Forward execution': 'Forward execution',
'Execute only the current node': 'Execute only the current node',
'Notification strategy': 'Notification strategy',
'Notification group': 'Notification group',
'Please select a notification group': 'Please select a notification group',
Recipient: 'Recipient',
Cc: 'Cc',
'Whether it is a complement process?': 'Whether it is a complement process?',
'Schedule date': 'Schedule date',
'Mode of execution': 'Mode of execution',
'Serial execution': 'Serial execution',
'Parallel execution': 'Parallel execution',
'Set parameters before timing': 'Set parameters before timing',
'Start and stop time': 'Start and stop time',
'Please select time': 'Please select time',
'Please enter crontab': 'Please enter crontab',
none_1: 'none',
success_1: 'success',
failure_1: 'failure',
All_1: 'All',
Toolbar: 'Toolbar',
'View variables': 'View variables',
'Format DAG': 'Format DAG',
'Refresh DAG status': 'Refresh DAG status',
Return_1: 'Return',
'Please enter format': 'Please enter format',
'connection parameter': 'connection parameter',
'Process definition details': 'Process definition details',
'Create process definition': 'Create process definition',
'Scheduled task list': 'Scheduled task list',
'Process instance details': 'Process instance details',
'Create Resource': 'Create Resource',
'User Center': 'User Center',
'Please enter method': 'Please enter method',
none: 'none',
name: 'name',
'Process priority': 'Process priority',
'Task priority': 'Task priority',
'Task timeout alarm': 'Task timeout alarm',
'Timeout strategy': 'Timeout strategy',
'Timeout alarm': 'Timeout alarm',
'Timeout failure': 'Timeout failure',
'Timeout period': 'Timeout period',
'Waiting Dependent complete': 'Waiting Dependent complete',
'Waiting Dependent start': 'Waiting Dependent start',
'Check interval': 'Check interval',
'Timeout must be longer than check interval': 'Timeout must be longer than check interval',
'Timeout strategy must be selected': 'Timeout strategy must be selected',
'Timeout must be a positive integer': 'Timeout must be a positive integer',
'Forced success': 'Forced success',
'Add dependency': 'Add dependency',
and: 'and',
or: 'or',
month: 'month',
week: 'week',
day: 'day',
hour: 'hour',
Running: 'Running',
'Waiting for dependency to complete': 'Waiting for dependency to complete',
Selected: 'Selected',
CurrentHour: 'CurrentHour',
Last1Hour: 'Last1Hour',
Last2Hours: 'Last2Hours',
Last3Hours: 'Last3Hours',
Last24Hours: 'Last24Hours',
today: 'today',
Last1Days: 'Last1Days',
Last2Days: 'Last2Days',
Last3Days: 'Last3Days',
Last7Days: 'Last7Days',
ThisWeek: 'ThisWeek',
LastWeek: 'LastWeek',
LastMonday: 'LastMonday',
LastTuesday: 'LastTuesday',
LastWednesday: 'LastWednesday',
LastThursday: 'LastThursday',
LastFriday: 'LastFriday',
LastSaturday: 'LastSaturday',
LastSunday: 'LastSunday',
ThisMonth: 'ThisMonth',
LastMonth: 'LastMonth',
LastMonthBegin: 'LastMonthBegin',
LastMonthEnd: 'LastMonthEnd',
'Refresh status succeeded': 'Refresh status succeeded',
'Queue manage': 'Yarn Queue manage',
'Create queue': 'Create queue',
'Edit queue': 'Edit queue',
'Datasource manage': 'Datasource',
'History task record': 'History task record',
'Please go online': 'Please go online',
'Queue value': 'Queue value',
'Please enter queue value': 'Please enter queue value',
'Worker group manage': 'Worker group manage',
'Create worker group': 'Create worker group',
'Edit worker group': 'Edit worker group',
'Token manage': 'Token manage',
'Create token': 'Create token',
'Edit token': 'Edit token',
'Please enter the IP address separated by commas': 'Please enter the IP address separated by commas',
'Note: Multiple IP addresses have been comma separated': 'Note: Multiple IP addresses must be comma separated',
'Failure time': 'Failure time',
'Expiration time': 'Expiration time',
User: 'User',
'Please enter token': 'Please enter token',
'Generate token': 'Generate token',
Monitor: 'Monitor',
Group: 'Group',
'Queue statistics': 'Queue statistics',
'Command status statistics': 'Command status statistics',
'Task kill': 'Task Kill',
'Task queue': 'Task queue',
'Error command count': 'Error command count',
'Normal command count': 'Normal command count',
Manage: ' Manage',
'Number of connections': 'Number of connections',
Sent: 'Sent',
Received: 'Received',
'Min latency': 'Min latency',
'Avg latency': 'Avg latency',
'Max latency': 'Max latency',
'Node count': 'Node count',
'Query time': 'Query time',
'Node self-test status': 'Node self-test status',
'Health status': 'Health status',
'Max connections': 'Max connections',
'Threads connections': 'Threads connections',
'Max used connections': 'Max used connections',
'Threads running connections': 'Threads running connections',
'Worker group': 'Worker group',
'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0',
'Pre Statement': 'Pre Statement',
'Post Statement': 'Post Statement',
'Statement cannot be empty': 'Statement cannot be empty',
'Process Define Count': 'Work flow Define Count',
'Process Instance Running Count': 'Process Instance Running Count',
'command number of waiting for running': 'command number of waiting for running',
'failure command number': 'failure command number',
'tasks number of waiting running': 'tasks number of waiting running',
'task number of ready to kill': 'task number of ready to kill',
'Statistics manage': 'Statistics Manage',
statistics: 'Statistics',
'select tenant': 'select tenant',
'Please enter Principal': 'Please enter Principal',
'The start time must not be the same as the end': 'The start time must not be the same as the end',
'Startup parameter': 'Startup parameter',
'Startup type': 'Startup type',
'warning of timeout': 'warning of timeout',
'Next five execution times': 'Next five execution times',
'Execute time': 'Execute time',
'Complement range': 'Complement range',
'Http Url': 'Http Url',
'Http Method': 'Http Method',
'Http Parameters': 'Http Parameters',
'Http Parameters Key': 'Http Parameters Key',
'Http Parameters Position': 'Http Parameters Position',
'Http Parameters Value': 'Http Parameters Value',
'Http Check Condition': 'Http Check Condition',
'Http Condition': 'Http Condition',
'Please Enter Http Url': 'Please Enter Http Url(required)',
'Please Enter Http Condition': 'Please Enter Http Condition',
'There is no data for this period of time': 'There is no data for this period of time',
'IP address cannot be empty': 'IP address cannot be empty',
'Please enter the correct IP': 'Please enter the correct IP',
'Please generate token': 'Please generate token',
'Spark Version': 'Spark Version',
TargetDataBase: 'target database',
TargetTable: 'target table',
'Please enter the table of target': 'Please enter the table of target',
'Please enter a Target Table(required)': 'Please enter a Target Table(required)',
SpeedByte: 'speed(byte count)',
SpeedRecord: 'speed(record count)',
'0 means unlimited by byte': '0 means unlimited',
'0 means unlimited by count': '0 means unlimited',
'Modify User': 'Modify User',
'Whether directory': 'Whether directory',
'Yes': 'Yes',
'No': 'No',
'Hadoop Custom Params': 'Hadoop Params',
'Sqoop Advanced Parameters': 'Sqoop Params',
'Sqoop Job Name': 'Job Name',
'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)',
'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)',
'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)',
'Please enter Target Dir(required)': 'Please enter Target Dir(required)',
'Please enter Export Dir(required)': 'Please enter Export Dir(required)',
'Please enter Hive Database(required)': 'Please enter Hive Database(required)',
'Please enter Hive Table(required)': 'Please enter Hive Table(required)',
'Please enter Hive Partition Keys': 'Please enter Hive Partition Keys',
'Please enter Hive Partition Values': 'Please enter Hive Partition Values',
'Please enter Replace Delimiter': 'Please enter Replace Delimiter',
'Please enter Fields Terminated': 'Please enter Fields Terminated',
'Please enter Lines Terminated': 'Please enter Lines Terminated',
'Please enter Concurrency': 'Please enter Concurrency',
'Please enter Update Key': 'Please enter Update Key',
'Please enter Job Name(required)': 'Please enter Job Name(required)',
'Please enter Custom Shell(required)': 'Please enter Custom Shell(required)',
Direct: 'Direct',
Type: 'Type',
ModelType: 'ModelType',
ColumnType: 'ColumnType',
Database: 'Database',
Column: 'Column',
'Map Column Hive': 'Map Column Hive',
'Map Column Java': 'Map Column Java',
'Export Dir': 'Export Dir',
'Hive partition Keys': 'Hive partition Keys',
'Hive partition Values': 'Hive partition Values',
FieldsTerminated: 'FieldsTerminated',
LinesTerminated: 'LinesTerminated',
IsUpdate: 'IsUpdate',
UpdateKey: 'UpdateKey',
UpdateMode: 'UpdateMode',
'Target Dir': 'Target Dir',
DeleteTargetDir: 'DeleteTargetDir',
FileType: 'FileType',
CompressionCodec: 'CompressionCodec',
CreateHiveTable: 'CreateHiveTable',
DropDelimiter: 'DropDelimiter',
OverWriteSrc: 'OverWriteSrc',
ReplaceDelimiter: 'ReplaceDelimiter',
Concurrency: 'Concurrency',
Form: 'Form',
OnlyUpdate: 'OnlyUpdate',
AllowInsert: 'AllowInsert',
'Data Source': 'Data Source',
'Data Target': 'Data Target',
'All Columns': 'All Columns',
'Some Columns': 'Some Columns',
'Branch flow': 'Branch flow',
'Custom Job': 'Custom Job',
'Custom Script': 'Custom Script',
'Cannot select the same node for successful branch flow and failed branch flow': 'Cannot select the same node for successful branch flow and failed branch flow',
'Successful branch flow and failed branch flow are required': 'Conditions node: successful and failed branch flows are required',
'Unauthorized or deleted resources': 'Unauthorized or deleted resources',
'Please delete all non-existent resources': 'Please delete all non-existent resources',
'Kinship': 'Workflow relationship',
'Reset': 'Reset',
'KinshipStateActive': 'Active',
'KinshipState1': 'Online',
'KinshipState0': 'Workflow is not online',
'KinshipState10': 'Scheduling is not online',
'Dag label display control': 'Dag label display control',
'Enable': 'Enable',
'Timeout Settings': 'Timeout Settings',
'Connect Timeout': 'Connect Timeout',
'Socket Timeout': 'Socket Timeout',
'Connect timeout be a positive integer': 'Connect timeout should be a positive integer',
'Socket Timeout be a positive integer': 'Socket timeout should be a positive integer',
'ms': 'ms',
'Disable': 'Disable',
'No resources exist': 'No resources exist',
'Please delete all non-existing resources': 'Please delete all non-existing resources',
'The Worker group no longer exists, please select the correct Worker group!': 'The Worker group no longer exists, please select the correct Worker group!',
'Please confirm whether the workflow has been saved before downloading': 'Please confirm whether the workflow has been saved before downloading',
'User name length is between 3 and 39': 'User name length is between 3 and 39',
'Please Enter Url': 'Please Enter Url eg. 127.0.0.1:7077',
'Master': 'Master',
'Please select the waterdrop resources':'Please select the waterdrop resources',
zkDirectory: 'zkDirectory',
'Directory detail': 'Directory detail',
'Connection name': 'Connection name',
'Current connection settings': 'Current connection settings',
'Please save the DAG before formatting': 'Please save the DAG before formatting',
'Batch copy': 'Batch copy',
'Related items': 'Related items',
'Project name is required': 'Project name is required',
'Batch move': 'Batch move',
Version: 'Version',
'Pre tasks': 'Pre tasks',
'The workflow canvas is abnormal and cannot be saved, please recreate': 'The workflow canvas is abnormal and cannot be saved, please recreate'
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,985 |
[Feature][Datax] Datax supports setting up running memory
|
DataX's default JVM memory is 1g; when the amount of data is large, this is not enough.
So the runtime JVM memory needs to be configurable for DataX tasks.

|
https://github.com/apache/dolphinscheduler/issues/3985
|
https://github.com/apache/dolphinscheduler/pull/3986
|
89f1e93bcf936b527856f658e33fe38ead5ec8b9
|
fe3026627fc2d38da08ae396724cc61bc922374a
| 2020-10-24T07:25:07Z |
java
| 2020-11-16T02:55:20Z |
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export default {
'User Name': '用户名',
'Please enter user name': '请输入用户名',
Password: '密码',
'Please enter your password': '请输入密码',
'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间',
Login: '登录',
Home: '首页',
'Failed to create node to save': '未创建节点保存失败',
'Global parameters': '全局参数',
'Local parameters': '局部参数',
'Copy success': '复制成功',
'The browser does not support automatic copying': '该浏览器不支持自动复制',
'Whether to save the DAG graph': '是否保存DAG图',
'Current node settings': '当前节点设置',
'View history': '查看历史',
'View log': '查看日志',
'Force success': '强制成功',
'Enter this child node': '进入该子节点',
'Node name': '节点名称',
'Please enter name (required)': '请输入名称(必填)',
'Run flag': '运行标志',
Normal: '正常',
'Prohibition execution': '禁止执行',
'Please enter description': '请输入描述',
'Number of failed retries': '失败重试次数',
Times: '次',
'Failed retry interval': '失败重试间隔',
Minute: '分',
'Delay execution time': '延时执行时间',
Cancel: '取消',
'Confirm add': '确认添加',
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流',
'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流',
'Name already exists': '名称已存在请重新输入',
'Download Log': '下载日志',
'Refresh Log': '刷新日志',
'Enter full screen': '进入全屏',
'Cancel full screen': '取消全屏',
Close: '关闭',
'Update log success': '更新日志成功',
'No more logs': '暂无更多日志',
'No log': '暂无日志',
'Loading Log...': '正在努力请求日志中...',
'Set the DAG diagram name': '设置DAG图名称',
'Please enter description(optional)': '请输入描述(选填)',
'Set global': '设置全局',
'Whether to update the process definition': '是否更新流程定义',
Add: '添加',
'DAG graph name cannot be empty': 'DAG图名称不能为空',
'Create Datasource': '创建数据源',
'Project Home': '项目首页',
'Project Manage': '项目管理',
'Create Project': '创建项目',
'Cron Manage': '定时管理',
'Copy Workflow': '复制工作流',
'Tenant Manage': '租户管理',
'Create Tenant': '创建租户',
'User Manage': '用户管理',
'Create User': '创建用户',
'User Information': '用户信息',
'Edit Password': '密码修改',
success: '成功',
failed: '失败',
delete: '删除',
'Please choose': '请选择',
'Please enter a positive integer': '请输入正整数',
'Program Type': '程序类型',
'Main class': '主函数的class',
'Main jar package': '主jar包',
'Please enter main jar package': '请选择主jar包',
'Command-line parameters': '命令行参数',
'Please enter Command-line parameters': '请输入命令行参数',
'Other parameters': '其他参数',
'Please enter other parameters': '请输入其他参数',
Resources: '资源',
'Custom Parameters': '自定义参数',
'Custom template': '自定义模版',
'Please enter main class': '请填写主函数的class',
Datasource: '数据源',
methods: '方法',
'Please enter method(optional)': '请输入方法(选填)',
Script: '脚本',
'Please enter script(required)': '请输入脚本(必填)',
'Deploy Mode': '部署方式',
'Flink Version': 'Flink版本',
'Driver core number': 'Driver内核数',
'Please enter driver core number': '请输入Driver内核数',
'Driver memory use': 'Driver内存数',
'Please enter driver memory use': '请输入Driver内存数',
'Number of Executors': 'Executor数量',
'Please enter the number of Executor': '请输入Executor数量',
'Executor memory': 'Executor内存数',
'Please enter the Executor memory': '请输入Executor内存数',
'Executor core number': 'Executor内核数',
'Please enter Executor core number': '请输入Executor内核数',
'The number of Executors should be a positive integer': 'Executor数量为正整数',
'Memory should be a positive integer': '内存数为数字',
'Please enter ExecutorPlease enter Executor core number': '请填写Executor内核数',
'Core number should be positive integer': '内核数为正整数',
'Please enter jobManager memory': '请输入JobManager内存数',
'Please enter the taskManager memory': '请输入TaskManager内存数',
  'Please enter solt number': '请输入slot数量',
'Please enter taskManager number': '请输入taskManager数量',
'SQL Type': 'sql类型',
Title: '主题',
'Please enter the title of email': '请输入邮件主题',
Table: '表名',
TableMode: '表格',
Attachment: '附件',
'SQL Parameter': 'sql参数',
'SQL Statement': 'sql语句',
'UDF Function': 'UDF函数',
FILE: '文件',
UDF: 'UDF',
'File Subdirectory': '文件子目录',
'Please enter a SQL Statement(required)': '请输入sql语句(必填)',
'Please enter a JSON Statement(required)': '请输入json语句(必填)',
'One form or attachment must be selected': '表格、附件必须勾选一个',
'Recipient required': '收件人邮箱必填',
'Mail subject required': '邮件主题必填',
'Child Node': '子节点',
'Please select a sub-Process': '请选择子工作流',
Edit: '编辑',
'Switch To This Version': '切换到该版本',
'Datasource Name': '数据源名称',
'Please enter datasource name': '请输入数据源名称',
IP: 'IP主机名',
'Please enter IP': '请输入IP主机名',
Port: '端口',
'Please enter port': '请输入端口',
'Database Name': '数据库名',
'Please enter database name': '请输入数据库名',
'Oracle Connect Type': '服务名或SID',
'Oracle Service Name': '服务名',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc连接参数',
'Test Connect': '测试连接',
'Please enter resource name': '请输入数据源名称',
'Please enter resource folder name': '请输入资源文件夹名称',
'Please enter a non-query SQL statement': '请输入非查询sql语句',
'Please enter IP/hostname': '请输入IP/主机名',
'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式',
'#': '编号',
'Datasource Type': '数据源类型',
'Datasource Parameter': '数据源参数',
'Create Time': '创建时间',
'Update Time': '更新时间',
Operation: '操作',
'Current Version': '当前版本',
'Click to view': '点击查看',
'Delete?': '确定删除吗?',
'Switch Version Successfully': '切换版本成功',
'Confirm Switch To This Version?': '确定切换到该版本吗?',
Confirm: '确定',
'Task status statistics': '任务状态统计',
Number: '数量',
State: '状态',
'Process Status Statistics': '流程状态统计',
'Process Definition Statistics': '流程定义统计',
'Project Name': '项目名称',
'Please enter name': '请输入名称',
'Owned Users': '所属用户',
'Process Pid': '进程Pid',
'Zk registration directory': 'zk注册目录',
cpuUsage: 'cpuUsage',
memoryUsage: 'memoryUsage',
'Last heartbeat time': '最后心跳时间',
'Edit Tenant': '编辑租户',
'Tenant Code': '租户编码',
'Tenant Name': '租户名称',
Queue: '队列',
'Please enter the tenant code in English': '请输入租户编码只允许英文',
'Please enter tenant code in English': '请输入英文租户编码',
'Please enter tenant code': '请输入租户编码',
'Please enter tenant Name': '请输入租户名称',
'The tenant code. Only letters or a combination of letters and numbers are allowed': '租户编码只允许字母或字母与数字组合',
'Edit User': '编辑用户',
Tenant: '租户',
Email: '邮件',
Phone: '手机',
'User Type':'用户类型',
'Please enter phone number': '请输入手机',
'Please enter email': '请输入邮箱',
'Please enter the correct email format': '请输入正确的邮箱格式',
'Please enter the correct mobile phone format': '请输入正确的手机格式',
Project: '项目',
Authorize: '授权',
'File resources': '文件资源',
'UDF resources': 'UDF资源',
'UDF resources directory': 'UDF资源目录',
'Please select UDF resources directory': '请选择UDF资源目录',
'Edit alarm group': '编辑告警组',
'Create alarm group': '创建告警组',
'Group Name': '组名称',
'Please enter group name': '请输入组名称',
'Group Type': '组类型',
Remarks: '备注',
SMS: '短信',
'Managing Users': '管理用户',
Permission: '权限',
Administrator: '管理员',
'Confirm Password': '确认密码',
'Please enter confirm password': '请输入确认密码',
'Password cannot be in Chinese': '密码不能为中文',
'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码',
'Confirmation password cannot be in Chinese': '确认密码不能为中文',
'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码',
'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认',
'Please select the datasource': '请选择数据源',
'Please select resources': '请选择资源',
Query: '查询',
'Non Query': '非查询',
'prop(required)': 'prop(必填)',
'value(optional)': 'value(选填)',
'value(required)': 'value(必填)',
'prop is empty': 'prop不能为空',
'value is empty': 'value不能为空',
'prop is repeat': 'prop中有重复',
'Start Time': '开始时间',
'End Time': '结束时间',
crontab: 'crontab',
'Failure Strategy': '失败策略',
online: '上线',
offline: '下线',
'Task Status': '任务状态',
'Process Instance': '工作流实例',
'Task Instance': '任务实例',
'Select date range': '选择日期区间',
Date: '日期',
waiting: '等待',
execution: '执行中',
finish: '完成',
'Create File': '创建文件',
'Create folder': '创建文件夹',
'File Name': '文件名称',
'Folder Name': '文件夹名称',
'File Format': '文件格式',
'Folder Format': '文件夹格式',
'File Content': '文件内容',
'Upload File Size': '文件大小不能超过1G',
Create: '创建',
'Please enter the resource content': '请输入资源内容',
'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行',
'File Details': '文件详情',
'Download Details': '下载详情',
Return: '返回',
Save: '保存',
'File Manage': '文件管理',
'Upload Files': '上传文件',
'Create UDF Function': '创建UDF函数',
'Upload UDF Resources': '上传UDF资源',
'Service-Master': '服务管理-Master',
'Service-Worker': '服务管理-Worker',
'Process Name': '工作流名称',
Executor: '执行用户',
'Run Type': '运行类型',
'Scheduling Time': '调度时间',
'Run Times': '运行次数',
host: 'host',
'fault-tolerant sign': '容错标识',
Rerun: '重跑',
'Recovery Failed': '恢复失败',
Stop: '停止',
Pause: '暂停',
'Recovery Suspend': '恢复运行',
Gantt: '甘特图',
Name: '名称',
'Node Type': '节点类型',
'Submit Time': '提交时间',
Duration: '运行时长',
'Retry Count': '重试次数',
'Task Name': '任务名称',
'Task Date': '任务日期',
'Source Table': '源表',
'Record Number': '记录数',
'Target Table': '目标表',
'Online viewing type is not supported': '不支持在线查看类型',
Size: '大小',
Rename: '重命名',
Download: '下载',
Export: '导出',
'Version Info': '版本信息',
Submit: '提交',
'Edit UDF Function': '编辑UDF函数',
type: '类型',
'UDF Function Name': 'UDF函数名称',
'Please enter a function name': '请输入函数名',
'Package Name': '包名类名',
'Please enter a Package name': '请输入包名类名',
Parameter: '参数',
'Please enter a parameter': '请输入参数',
'UDF Resources': 'UDF资源',
'Upload Resources': '上传资源',
Instructions: '使用说明',
'Please enter a instructions': '请输入使用说明',
'Please enter a UDF function name': '请输入UDF函数名称',
'Select UDF Resources': '请选择UDF资源',
'Class Name': '类名',
'Jar Package': 'jar包',
'Library Name': '库名',
'UDF Resource Name': 'UDF资源名称',
'File Size': '文件大小',
Description: '描述',
'Drag Nodes and Selected Items': '拖动节点和选中项',
'Select Line Connection': '选择线条连接',
'Delete selected lines or nodes': '删除选中的线或节点',
'Full Screen': '全屏',
Unpublished: '未发布',
'Start Process': '启动工作流',
'Execute from the current node': '从当前节点开始执行',
'Recover tolerance fault process': '恢复被容错的工作流',
'Resume the suspension process': '恢复运行流程',
'Execute from the failed nodes': '从失败节点开始执行',
'Complement Data': '补数',
'Scheduling execution': '调度执行',
'Recovery waiting thread': '恢复等待线程',
'Submitted successfully': '提交成功',
Executing: '正在执行',
'Ready to pause': '准备暂停',
'Ready to stop': '准备停止',
'Need fault tolerance': '需要容错',
kill: 'kill',
'Waiting for thread': '等待线程',
'Waiting for dependence': '等待依赖',
Start: '运行',
Copy: '复制节点',
'Copy name': '复制名称',
Delete: '删除',
'Please enter keyword': '请输入关键词',
'File Upload': '文件上传',
'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!',
'Drag area upload': '拖动区域上传',
Upload: '上传',
'ReUpload File': '重新上传文件',
'Please enter file name': '请输入文件名',
'Please select the file to upload': '请选择要上传的文件',
'Resources manage': '资源中心',
Security: '安全中心',
Logout: '退出',
'No data': '查询无数据',
'Uploading...': '文件上传中',
'Loading...': '正在努力加载中...',
List: '列表',
'Unable to download without proper url': '无下载url无法下载',
Process: '工作流',
'Process definition': '工作流定义',
'Task record': '任务记录',
'Warning group manage': '告警组管理',
'Servers manage': '服务管理',
'UDF manage': 'UDF管理',
'Resource manage': '资源管理',
'Function manage': '函数管理',
'Edit password': '修改密码',
'Ordinary users': '普通用户',
'Create process': '创建工作流',
'Import process': '导入工作流',
'Timing state': '定时状态',
Timing: '定时',
TreeView: '树形图',
'Mailbox already exists! Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复',
'Mailbox input is illegal': '邮箱输入不合法',
'Please set the parameters before starting': '启动前请先设置参数',
Continue: '继续',
End: '结束',
'Node execution': '节点执行',
'Backward execution': '向后执行',
'Forward execution': '向前执行',
'Execute only the current node': '仅执行当前节点',
'Notification strategy': '通知策略',
'Notification group': '通知组',
'Please select a notification group': '请选择通知组',
Recipient: '收件人',
Cc: '抄送人',
'Whether it is a complement process?': '是否补数',
'Schedule date': '调度日期',
'Mode of execution': '执行方式',
'Serial execution': '串行执行',
'Parallel execution': '并行执行',
'Set parameters before timing': '定时前请先设置参数',
'Start and stop time': '起止时间',
'Please select time': '请选择时间',
'Please enter crontab': '请输入crontab',
none_1: '都不发',
success_1: '成功发',
failure_1: '失败发',
All_1: '成功或失败都发',
Toolbar: '工具栏',
'View variables': '查看变量',
'Format DAG': '格式化DAG',
'Refresh DAG status': '刷新DAG状态',
Return_1: '返回上一节点',
'Please enter format': '请输入格式为',
'connection parameter': '连接参数',
'Process definition details': '流程定义详情',
'Create process definition': '创建流程定义',
'Scheduled task list': '定时任务列表',
'Process instance details': '流程实例详情',
'Create Resource': '创建资源',
'User Center': '用户中心',
'Please enter method': '请输入方法',
none: '无',
name: '名称',
'Process priority': '流程优先级',
'Task priority': '任务优先级',
'Task timeout alarm': '任务超时告警',
'Timeout strategy': '超时策略',
'Timeout alarm': '超时告警',
'Timeout failure': '超时失败',
'Timeout period': '超时时长',
'Waiting Dependent complete': '等待依赖完成',
'Waiting Dependent start': '等待依赖启动',
'Check interval': '检查间隔',
'Timeout strategy must be selected': '超时策略必须选一个',
'Timeout must be a positive integer': '超时时长必须为正整数',
'Timeout must be longer than check interval': '超时时间必须比检查间隔长',
'Add dependency': '添加依赖',
and: '且',
or: '或',
month: '月',
week: '周',
day: '日',
hour: '时',
Running: '正在运行',
'Waiting for dependency to complete': '等待依赖完成',
'Delay execution': '延时执行',
  'Forced success': '强制成功',
Selected: '已选',
CurrentHour: '当前小时',
Last1Hour: '前1小时',
Last2Hours: '前2小时',
Last3Hours: '前3小时',
Last24Hours: '前24小时',
today: '今天',
Last1Days: '昨天',
Last2Days: '前两天',
Last3Days: '前三天',
Last7Days: '前七天',
ThisWeek: '本周',
LastWeek: '上周',
LastMonday: '上周一',
LastTuesday: '上周二',
LastWednesday: '上周三',
LastThursday: '上周四',
LastFriday: '上周五',
LastSaturday: '上周六',
LastSunday: '上周日',
ThisMonth: '本月',
LastMonth: '上月',
LastMonthBegin: '上月初',
LastMonthEnd: '上月末',
'Refresh status succeeded': '刷新状态成功',
'Queue manage': 'Yarn 队列管理',
'Create queue': '创建队列',
'Edit queue': '编辑队列',
'Datasource manage': '数据源中心',
'History task record': '历史任务记录',
'Please go online': '不要忘记上线',
'Queue value': '队列值',
'Please enter queue value': '请输入队列值',
'Worker group manage': 'Worker分组管理',
'Create worker group': '创建Worker分组',
'Edit worker group': '编辑Worker分组',
'Token manage': '令牌管理',
'Create token': '创建令牌',
'Edit token': '编辑令牌',
'Please enter the IP address separated by commas': '请输入IP地址多个用英文逗号隔开',
'Note: Multiple IP addresses have been comma separated': '注意:多个IP地址以英文逗号分割',
'Failure time': '失效时间',
'Expiration time': '失效时间',
User: '用户',
'Please enter token': '请输入令牌',
'Generate token': '生成令牌',
Monitor: '监控中心',
Group: '分组',
'Queue statistics': '队列统计',
'Command status statistics': '命令状态统计',
'Task kill': '等待kill任务',
'Task queue': '等待执行任务',
'Error command count': '错误指令数',
'Normal command count': '正确指令数',
Manage: '管理',
'Number of connections': '连接数',
Sent: '发送量',
Received: '接收量',
'Min latency': '最低延时',
'Avg latency': '平均延时',
'Max latency': '最大延时',
'Node count': '节点数',
'Query time': '当前查询时间',
'Node self-test status': '节点自检状态',
'Health status': '健康状态',
'Max connections': '最大连接数',
'Threads connections': '当前连接数',
'Max used connections': '同时使用连接最大数',
'Threads running connections': '数据库当前活跃连接数',
'Worker group': 'Worker分组',
'Please enter a positive integer greater than 0': '请输入大于 0 的正整数',
'Pre Statement': '前置sql',
'Post Statement': '后置sql',
'Statement cannot be empty': '语句不能为空',
'Process Define Count': '工作流定义数',
'Process Instance Running Count': '正在运行的流程数',
'Please select a queue': '默认为租户关联队列',
'command number of waiting for running': '待执行的命令数',
'failure command number': '执行失败的命令数',
'tasks number of waiting running': '待运行任务数',
'task number of ready to kill': '待杀死任务数',
'Statistics manage': '统计管理',
statistics: '统计',
'select tenant': '选择租户',
'Please enter Principal': '请输入Principal',
'The start time must not be the same as the end': '开始时间和结束时间不能相同',
'Startup parameter': '启动参数',
'Startup type': '启动类型',
'warning of timeout': '超时告警',
'Next five execution times': '接下来五次执行时间',
'Execute time': '执行时间',
'Complement range': '补数范围',
slot: 'slot数量',
  taskManager: 'taskManager数量',
jobManagerMemory: 'jobManager内存数',
taskManagerMemory: 'taskManager内存数',
'Http Url': '请求地址',
'Http Method': '请求类型',
'Http Parameters': '请求参数',
'Http Parameters Key': '参数名',
'Http Parameters Position': '参数位置',
'Http Parameters Value': '参数值',
'Http Check Condition': '校验条件',
'Http Condition': '校验内容',
'Please Enter Http Url': '请填写请求地址(必填)',
'Please Enter Http Condition': '请填写校验内容',
'There is no data for this period of time': '该时间段无数据',
'IP address cannot be empty': 'IP地址不能为空',
'Please enter the correct IP': '请输入正确的IP',
'Please generate token': '请生成Token',
'Spark Version': 'Spark版本',
TargetDataBase: '目标库',
TargetTable: '目标表',
'Please enter the table of target': '请输入目标表名',
'Please enter a Target Table(required)': '请输入目标表(必填)',
SpeedByte: '限流(字节数)',
SpeedRecord: '限流(记录数)',
'0 means unlimited by byte': 'KB,0代表不限制',
'0 means unlimited by count': '0代表不限制',
'Modify User': '修改用户',
'Whether directory': '是否文件夹',
Yes: '是',
No: '否',
'Hadoop Custom Params': 'Hadoop参数',
'Sqoop Advanced Parameters': 'Sqoop参数',
'Sqoop Job Name': '任务名称',
'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)',
'Please enter Mysql Table(required)': '请输入Mysql表名(必填)',
'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开',
'Please enter Target Dir(required)': '请输入目标路径(必填)',
'Please enter Export Dir(required)': '请输入数据源路径(必填)',
'Please enter Hive Database(required)': '请输入Hive数据库(必填)',
'Please enter Hive Table(required)': '请输入Hive表名(必填)',
'Please enter Hive Partition Keys': '请输入分区键',
'Please enter Hive Partition Values': '请输入分区值',
'Please enter Replace Delimiter': '请输入替换分隔符',
'Please enter Fields Terminated': '请输入列分隔符',
'Please enter Lines Terminated': '请输入行分隔符',
'Please enter Concurrency': '请输入并发度',
'Please enter Update Key': '请输入更新列',
'Please enter Job Name(required)': '请输入任务名称(必填)',
'Please enter Custom Shell(required)': '请输入自定义脚本',
Direct: '流向',
Type: '类型',
ModelType: '模式',
ColumnType: '列类型',
Database: '数据库',
Column: '列',
'Map Column Hive': 'Hive类型映射',
'Map Column Java': 'Java类型映射',
'Export Dir': '数据源路径',
'Hive partition Keys': 'Hive 分区键',
'Hive partition Values': 'Hive 分区值',
FieldsTerminated: '列分隔符',
LinesTerminated: '行分隔符',
IsUpdate: '是否更新',
UpdateKey: '更新列',
UpdateMode: '更新类型',
'Target Dir': '目标路径',
DeleteTargetDir: '是否删除目录',
FileType: '保存格式',
CompressionCodec: '压缩类型',
CreateHiveTable: '是否创建新表',
DropDelimiter: '是否删除分隔符',
OverWriteSrc: '是否覆盖数据源',
ReplaceDelimiter: '替换分隔符',
Concurrency: '并发度',
Form: '表单',
OnlyUpdate: '只更新',
AllowInsert: '无更新便插入',
'Data Source': '数据来源',
'Data Target': '数据目的',
'All Columns': '全表导入',
'Some Columns': '选择列',
'Branch flow': '分支流转',
'Custom Job': '自定义任务',
'Custom Script': '自定义脚本',
'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点',
'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填',
'No resources exist': '不存在资源',
'Please delete all non-existing resources': '请删除所有不存在资源',
'Unauthorized or deleted resources': '未授权或已删除资源',
'Please delete all non-existent resources': '请删除所有未授权或已删除资源',
'Kinship': '工作流关系',
'Reset': '重置',
'KinshipStateActive': '当前选择',
'KinshipState1': '已上线',
'KinshipState0': '工作流未上线',
'KinshipState10': '调度未上线',
'Dag label display control': 'Dag节点名称显隐',
'Enable': '启用',
'Disable': '停用',
'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!',
'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存',
'User name length is between 3 and 39': '用户名长度在3~39之间',
'Timeout Settings': '超时设置',
'Connect Timeout':'连接超时',
'Socket Timeout':'Socket超时',
'Connect timeout be a positive integer': '连接超时必须为数字',
'Socket Timeout be a positive integer': 'Socket超时必须为数字',
'ms':'毫秒',
'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077',
'Master': 'Master',
'Please select the waterdrop resources':'请选择waterdrop配置文件',
zkDirectory: 'zk注册目录',
'Directory detail': '查看目录详情',
'Connection name': '连线名',
'Current connection settings': '当前连线设置',
'Please save the DAG before formatting': '格式化前请先保存DAG',
'Batch copy': '批量复制',
'Related items': '关联项目',
'Project name is required': '项目名称必填',
'Batch move': '批量移动',
Version: '版本',
'Pre tasks': '前置任务',
'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建'
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,985 |
[Feature][Datax] DataX supports setting runtime JVM memory
|
DataX's default JVM memory is 1 GB; when the amount of data is large, that is not enough.
So DolphinScheduler needs to support setting the runtime JVM memory for DataX tasks.

|
https://github.com/apache/dolphinscheduler/issues/3985
|
https://github.com/apache/dolphinscheduler/pull/3986
|
89f1e93bcf936b527856f658e33fe38ead5ec8b9
|
fe3026627fc2d38da08ae396724cc61bc922374a
| 2020-10-24T07:25:07Z |
java
| 2020-11-16T02:55:20Z |
pom.xml
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.3.4-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>[email protected]</post>
<subscribe>[email protected]</subscribe>
<unsubscribe>[email protected]</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.18.RELEASE</spring.version>
<spring.boot.version>2.1.17.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.2.3</quartz.version>
<jackson.version>2.9.8</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.7.0</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>20.0</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<jsp-2.1.version>6.1.14</jsp-2.1.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<apache.rat.version>0.13</apache.rat.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<guava-retry.version>2.0.0</guava-retry.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-plugin-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<!-- for api module -->
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>${jsp-2.1.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/template/AlertTemplateFactoryTest.java</include>
<include>**/alert/template/impl/DefaultHTMLTemplateTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/ExcelUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/utils/JSONUtilsTest.java</include>
<!--<include>**/alert/utils/MailUtilsTest.java</include>-->
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
                        <include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/PasswordAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessDefinitionVersionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/process/ProcessBuilderForWin32Test.java</include>
<include>**/common/utils/process/ProcessEnvironmentForWin32Test.java</include>
<include>**/common/utils/process/ProcessImplForWin32Test.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/FilePluginManagerTest</include>
<include>**/common/plugin/PluginClassLoaderTest</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/JsonSerializerTest.java</include>
<include>**/remote/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/remote/NettyUtilTest.java</include>
<include>**/remote/ResponseFutureTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/HostTest.java</include>
<!--<include>**/server/utils/FlinkArgsUtilsTest.java</include>-->
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<!--<include>**/server/worker/task/datax/DataxTaskTest.java</include>-->
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/zk/CuratorZookeeperClientTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
                        <!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionVersionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserAlertGroupMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/model/AlertDataTest.java</include>
<include>**/plugin/model/AlertInfoTest.java</include>
<include>**/plugin/utils/PropertyUtilsTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<version>${apache.rat.version}</version>
<configuration>
<excludeSubProjects>false</excludeSubProjects>
<addDefaultLicenseMatchers>false</addDefaultLicenseMatchers>
<licenses>
<license implementation="org.apache.rat.analysis.license.SimplePatternBasedLicense">
<licenseFamilyCategory>AL20</licenseFamilyCategory>
<licenseFamilyName>Apache License, 2.0</licenseFamilyName>
<patterns>
<pattern>Licensed to the Apache Software Foundation (ASF)</pattern>
</patterns>
</license>
</licenses>
<licenseFamilies>
<licenseFamily implementation="org.apache.rat.license.SimpleLicenseFamily">
<familyName>Apache License, 2.0</familyName>
</licenseFamily>
</licenseFamilies>
<excludes>
<exclude>**/node_modules/**</exclude>
<exclude>**/node/**</exclude>
<exclude>**/dist/**</exclude>
<exclude>**/licenses/**</exclude>
<exclude>.github/**</exclude>
<exclude>**/sql/soft_version</exclude>
<exclude>**/common/utils/ScriptRunner.java</exclude>
<exclude>**/*.json</exclude>
<!-- document files -->
<exclude>**/*.md</exclude>
                        <exclude>**/*.MD</exclude>
<exclude>**/*.txt</exclude>
<exclude>**/docs/**</exclude>
<exclude>**/*.babelrc</exclude>
<exclude>**/*.eslintrc</exclude>
<exclude>**/.mvn/jvm.config</exclude>
<exclude>**/.mvn/wrapper/**</exclude>
</excludes>
<consoleOutput>true</consoleOutput>
</configuration>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-plugin-api</module>
<module>dolphinscheduler-microbench</module>
</modules>
</project>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,054 |
[Bug][Api] Adding or editing a schedule set to the last week of the month reports an error when previewing or saving
|
1. Add or edit a schedule (timing) and select the last week of the month option.
2. Click the preview or save button; an error is reported.




**Expect Result**
0 15 10 ? * 6L 2020-2025 fires at 10:15 am on the last Friday of each month, from 2020 through 2025.
**Which version of Dolphin Scheduler:**
-[1.3.2]
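As a quick way to reproduce the expectation above, a minimal standalone check; it assumes the Quartz library (`org.quartz.CronExpression`) that the root pom already manages, and the class `LastWeekOfMonthCronCheck` is only an illustration, not part of the fix in the linked PR.

```java
import java.text.ParseException;
import java.util.Date;

import org.quartz.CronExpression;

/**
 * Parses the "last Friday of each month" expression from the expected result and
 * prints its next few fire times, roughly what the preview button should display.
 */
public class LastWeekOfMonthCronCheck {

    public static void main(String[] args) throws ParseException {
        String cron = "0 15 10 ? * 6L 2020-2025";
        // a well-formed last-week-of-month expression should be reported as valid
        System.out.println("valid: " + CronExpression.isValidExpression(cron));

        CronExpression expression = new CronExpression(cron);
        Date next = expression.getNextValidTimeAfter(new Date());
        for (int i = 0; i < 5 && next != null; i++) {
            System.out.println(next);
            next = expression.getNextValidTimeAfter(next);
        }
    }
}
```

Since Quartz itself accepts this expression, and the linked PR touches the front-end crontab component (day.vue), the failing piece is presumably the expression string the UI builds rather than the scheduler's parsing.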
|
https://github.com/apache/dolphinscheduler/issues/4054
|
https://github.com/apache/dolphinscheduler/pull/4081
|
3c89c9ad74e55e3aca1a36a4341202e31ceecbf7
|
f152cae7f6beee585bc43a2b0bee2686c21d03c4
| 2020-11-11T09:56:20Z |
java
| 2020-11-19T02:59:25Z |
dolphinscheduler-ui/src/js/module/components/crontab/source/_times/day.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="day-model">
<div class="v-crontab-from-model">
<x-radio-group v-model="radioDay" vertical>
<div class="list-box">
<x-radio label="everyDay">
<span class="text">{{$t('每一天')}}</span>
</x-radio>
</div>
<div class="list-box">
<x-radio label="WkintervalWeek">
<span class="text">{{$t('每隔')}}</span>
<m-input-number :min="0" :max="7" :props-value="parseInt(WkintervalWeekPerformVal)" @on-number="onWkintervalWeekPerform"></m-input-number>
<span class="text">{{$t('天执行 从')}}</span>
<x-select :placeholder="$t('请选择具体小时数')" style="width: 200px" v-model="WkintervalWeekStartVal">
<x-option
v-for="item in selectWeekList"
:key="item.value"
:value="item.value"
:label="item.label">
</x-option>
</x-select>
<span class="text">{{$t('开始')}}</span>
</x-radio>
</div>
<div class="list-box">
<x-radio label="intervalDay">
<span class="text">{{$t('每隔')}}</span>
<m-input-number :min="0" :max="31" :props-value="parseInt(intervalDayPerformVal)" @on-number="onIntervalDayPerform"></m-input-number>
<span class="text">{{$t('天执行 从')}}</span>
<m-input-number :min="1" :max="31" :props-value="parseInt(intervalDayStartVal)" @on-number="onIntervalDayStart"></m-input-number>
<span class="text">{{$t('天开始')}}</span>
</x-radio>
</div>
<div class="list-box">
<x-radio label="WkspecificWeek">
<!--<span class="text">(周)</span>-->
<span class="text">{{$t('具体星期几(可多选)')}}</span>
<x-select multiple :placeholder="$t('请选择具体周几')" v-model="WkspecificWeekVal">
<x-option
v-for="item in selectSpecificWeekList"
:key="item.value"
:value="item.value"
:label="item.label">
</x-option>
</x-select>
</x-radio>
</div>
<div class="list-box">
<x-radio label="specificDay">
<span class="text">{{$t('具体天数(可多选)')}}</span>
<x-select multiple :placeholder="$t('请选择具体天数')" v-model="WkspecificDayVal">
<x-option
v-for="item in selectSpecificDayList"
:key="item.value"
:value="item.value"
:label="item.label">
</x-option>
</x-select>
</x-radio>
</div>
<div class="list-box">
<x-radio label="monthLastDays">
<span class="text">{{$t('在这个月的最后一天')}}</span>
</x-radio>
</div>
<div class="list-box">
<x-radio label="monthLastWorkingDays">
<span class="text">{{$t('在这个月的最后一个工作日')}}</span>
</x-radio>
</div>
<div class="list-box">
<x-radio label="monthLastWeeks">
<span class="text">{{$t('在这个月的最后一个')}}</span>
<x-select :placeholder="$t('请选择具体周几')" v-model="monthLastWeeksVal">
<x-option
v-for="item in monthLastWeeksList"
:key="item.value"
:value="item.value"
:label="item.label">
</x-option>
</x-select>
</x-radio>
</div>
<div class="list-box">
<x-radio label="monthTailBefore">
<m-input-number :min="0" :max="31" :props-value="parseInt(monthTailBeforeVal)" @on-number="onMonthTailBefore"></m-input-number>
<span class="text">{{$t('在本月底前')}}</span>
</x-radio>
</div>
<div class="list-box">
<x-radio label="recentlyWorkingDaysMonth">
<span class="text">{{$t('最近的工作日(周一至周五)至本月')}}</span>
<m-input-number :min="0" :max="31" :props-value="parseInt(recentlyWorkingDaysMonthVal)" @on-number="onRecentlyWorkingDaysMonth"></m-input-number>
<span class="text">{{$t('日')}}</span>
</x-radio>
</div>
<div class="list-box">
<x-radio label="WkmonthNumWeeks">
<!--<span class="text">(周)</span>-->
<span class="text">{{$t('在这个月的第')}}</span>
<m-input-number :min="0" :max="31" :props-value="parseInt(WkmonthNumWeeksDayVal)" @on-number="onWkmonthNumWeeksDay"></m-input-number>
<x-select :placeholder="$t('请选择具体周几')" style="width: 200px" v-model="WkmonthNumWeeksWeekVal">
<x-option
v-for="item in WkmonthNumWeeksWeekList"
:key="item.value"
:value="item.value"
:label="item.label">
</x-option>
</x-select>
</x-radio>
</div>
</x-radio-group>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '../_source/i18n'
import { selectList, isStr, isWeek } from '../util/index'
import mInputNumber from '../_source/input-number'
export default {
name: 'day',
mixins: [i18n],
data () {
return {
radioDay: 'everyDay',
dayValue: '*',
weekValue: '?',
everyDayVal: '*',
WkintervalWeekPerformVal: 2, // Every few days
WkintervalWeekStartVal: 2, // What day of the week
selectWeekList: _.map(_.cloneDeep(selectList['week']), v => {
return {
value: v.value,
label: `${this.$t(v.label)}`
}
}),
intervalDayPerformVal: 1, // Every other day
intervalDayStartVal: 1, // From the day
WkspecificWeekVal: [], // Specific day of the week
selectSpecificWeekList: selectList['specificWeek'],
WkspecificDayVal: [], // Specific day of the week
selectSpecificDayList: selectList['day'],
monthLastDaysVal: 'L',
monthLastWorkingDays: 'LW',
monthLastWeeksVal: '1L',
monthLastWeeksList: _.map(_.cloneDeep(selectList['lastWeeks']), v => {
return {
value: v.value,
label: `${this.$t(v.label)}`
}
}),
monthTailBeforeVal: 1,
recentlyWorkingDaysMonthVal: 1,
WkmonthNumWeeksDayVal: 1,
WkmonthNumWeeksWeekVal: 1,
WkmonthNumWeeksWeekList: _.map(_.cloneDeep(selectList['week']), v => {
return {
value: v.value,
label: `${this.$t(v.label)}`
}
})
}
},
props: {
dayVal: String,
weekVal: String
},
methods: {
// Every few weeks
onWkintervalWeekPerform (val) {
this.WkintervalWeekPerformVal = val
if (this.radioDay === 'WkintervalWeek') {
this.dayValue = `?`
this.weekValue = `${this.WkintervalWeekStartVal}/${this.WkintervalWeekPerformVal}`
}
},
// Every other day
onIntervalDayPerform (val) {
this.intervalDayPerformVal = val
if (this.radioDay === 'intervalDay') {
this.dayValue = `${this.intervalDayStartVal}/${this.intervalDayPerformVal}`
}
},
// From week day
onIntervalDayStart (val) {
this.intervalDayStartVal = val
if (this.radioDay === 'intervalDay') {
this.dayValue = `${this.intervalDayStartVal}/${this.intervalDayPerformVal}`
}
},
// By the end of this month
onMonthTailBefore (val) {
this.monthTailBeforeVal = val
if (this.radioDay === 'monthTailBefore') {
this.dayValue = `L-${this.monthTailBeforeVal}`
}
},
// Last working day
onRecentlyWorkingDaysMonth (val) {
this.recentlyWorkingDaysMonthVal = val
if (this.radioDay === 'recentlyWorkingDaysMonth') {
this.dayValue = `${this.recentlyWorkingDaysMonthVal}W`
}
},
// On the day of this month
onWkmonthNumWeeksDay (val) {
this.WkmonthNumWeeksDayVal = val
this.weekValue = `${this.WkmonthNumWeeksWeekVal}#${this.WkmonthNumWeeksDayVal}`
},
// Reset every day
everyDayReset () {
this.dayValue = _.cloneDeep(this.everyDayVal)
},
// Reset interval week starts from *
WkintervalWeekReset () {
this.weekValue = `${this.WkintervalWeekStartVal}/${this.WkintervalWeekPerformVal}`
},
// Reset interval days from *
intervalDayReset () {
this.dayValue = `${this.intervalDayStartVal}/${this.intervalDayPerformVal}`
},
// Specific week (multiple choices)
WkspecificWeekReset () {
this.weekValue = this.WkspecificWeekVal.length ? this.WkspecificWeekVal.join(',') : '*'
},
// Specific days (multiple choices)
specificDayReset () {
this.dayValue = this.WkspecificDayVal.length ? this.WkspecificDayVal.join(',') : '*'
},
// On the last day of the month
monthLastDaysReset () {
this.dayValue = _.cloneDeep(this.monthLastDaysVal)
},
// On the last working day of the month
monthLastWorkingDaysReset () {
this.dayValue = _.cloneDeep(this.monthLastWorkingDays)
},
// At the end of the month*
monthLastWeeksReset () {
this.dayValue = _.cloneDeep(this.monthLastWeeksVal)
},
// By the end of this month
monthTailBeforeReset () {
this.dayValue = `L-${this.monthTailBeforeVal}`
},
// Last working day (Monday to Friday) to this month
recentlyWorkingDaysMonthReset () {
this.dayValue = `${this.recentlyWorkingDaysMonthVal}W`
},
// On the day of this month
WkmonthNumReset () {
this.weekValue = `${this.WkmonthNumWeeksWeekVal}#${this.WkmonthNumWeeksDayVal}`
}
},
watch: {
dayValue (val) {
this.$emit('on-day-value', val)
// console.log('dayValue=> ' + val)
},
weekValue (val) {
this.$emit('on-week-value', val)
// console.log('weekValue=> ' + val)
},
// Selected type
radioDay (val) {
switch (val) {
case 'everyDay':
this.weekValue = '?'
this.everyDayReset()
break
case 'WkintervalWeek':
this.dayValue = '?'
this.WkintervalWeekReset()
break
case 'intervalDay':
this.weekValue = '?'
this.intervalDayReset()
break
case 'WkspecificWeek':
this.dayValue = '?'
this.WkspecificWeekReset()
break
case 'specificDay':
this.weekValue = '?'
this.specificDayReset()
break
case 'monthLastDays':
this.weekValue = '?'
this.monthLastDaysReset()
break
case 'monthLastWorkingDays':
this.weekValue = '?'
this.monthLastWorkingDaysReset()
break
case 'monthLastWeeks':
this.weekValue = '?'
this.monthLastWeeksReset()
break
case 'monthTailBefore':
this.weekValue = '?'
this.monthTailBeforeReset()
break
case 'recentlyWorkingDaysMonth':
this.weekValue = '?'
this.recentlyWorkingDaysMonthReset()
break
case 'WkmonthNumWeeks':
this.dayValue = '?'
this.WkmonthNumReset()
break
}
},
WkintervalWeekStartVal (val) {
if (this.radioDay === 'WkintervalWeek') {
this.dayValue = `?`
this.weekValue = `${val}/${this.WkintervalWeekPerformVal}`
}
},
// Specific day of the week (multiple choice)
WkspecificWeekVal (val) {
if (this.radioDay === 'WkspecificWeek') {
this.dayValue = `?`
this.weekValue = val.join(',')
}
},
// Specific days (multiple choices)
WkspecificDayVal (val) {
if (this.radioDay === 'specificDay') {
this.weekValue = `?`
this.dayValue = val.join(',')
}
},
monthLastWeeksVal (val) {
if (this.radioDay === 'monthLastWeeks') {
this.weekValue = `?`
this.dayValue = val
}
},
WkmonthNumWeeksWeekVal (val) {
if (this.radioDay === 'WkmonthNumWeeks') {
this.dayValue = `?`
this.weekValue = `${val}#${this.WkmonthNumWeeksDayVal}`
}
}
},
beforeCreate () {
},
created () {
let $dayVal = _.cloneDeep(this.dayVal)
let $weekVal = _.cloneDeep(this.weekVal)
let isWeek1 = $weekVal.indexOf('/') !== -1
let isWeek2 = $weekVal.indexOf('#') !== -1
// Initialization
if ($dayVal === '*' && $weekVal === '?') {
console.log('Initialization')
this.radioDay = 'everyDay'
return
}
// week
if (isWeek1 || isWeek2 || isWeek($weekVal)) {
this.dayValue = `?`
/**
* Processing by sequence number (excluding days)
* @param [
* WkintervalWeek=>(/),
* WkspecificWeek=>(TUE,WED),
* WkmonthNumWeeks=>(#)
* ]
*/
let hanleWeekOne = () => {
console.log('1/3')
let a = isStr($weekVal, '/')
this.WkintervalWeekStartVal = parseInt(a[0])
this.WkintervalWeekPerformVal = parseInt(a[1])
this.dayValue = `?`
this.weekValue = `${this.WkintervalWeekPerformVal}/${this.WkintervalWeekStartVal}`
this.radioDay = 'WkintervalWeek'
}
let hanleWeekTwo = () => {
console.log('TUE,WED')
this.WkspecificWeekVal = $weekVal.split(',')
this.radioDay = 'WkspecificWeek'
}
let hanleWeekThree = () => {
console.log('6#5')
let a = isStr($weekVal, '#')
this.WkmonthNumWeeksWeekVal = parseInt(a[0])
this.WkmonthNumWeeksDayVal = parseInt(a[1])
this.radioDay = 'WkmonthNumWeeks'
}
// Processing week
if (isStr($weekVal, '/')) {
hanleWeekOne()
} else if (isStr($weekVal, '#')) {
hanleWeekThree()
} else if (isWeek($weekVal)) {
hanleWeekTwo()
}
} else {
this.weekValue = `?`
/**
* Processing by sequence number (excluding week)
* @param [
* everyDay=>(*),
* intervalDay=>(1/1),
* specificDay=>(1,2,5,3,4),
* monthLastDays=>(L),
* monthLastWorkingDays=>(LW),
* monthLastWeeks=>(3L),
* monthTailBefore=>(L-4),
* recentlyWorkingDaysMonth=>(6W)
* ]
*/
const hanleDayOne = () => {
console.log('*')
}
const hanleDayTwo = () => {
console.log('1/1')
let a = isStr($dayVal, '/')
this.intervalDayStartVal = parseInt(a[0])
this.intervalDayPerformVal = parseInt(a[1])
this.radioDay = 'intervalDay'
}
const hanleDayThree = () => {
console.log('1,2,5,3,4')
this.WkspecificDayVal = $dayVal.split(',')
this.radioDay = 'specificDay'
}
const hanleDayFour = () => {
console.log('L')
this.radioDay = 'monthLastDays'
}
const hanleDayFive = () => {
console.log('LW')
this.radioDay = 'monthLastWorkingDays'
}
const hanleDaySix = () => {
console.log('3L')
this.monthLastWeeksVal = $dayVal
this.radioDay = 'monthLastWeeks'
}
const hanleDaySeven = () => {
console.log('L-4')
let a = isStr($dayVal, '-')
this.monthTailBeforeVal = parseInt(a[1])
this.radioDay = 'monthTailBefore'
}
const hanleDayEight = () => {
console.log('6W')
this.recentlyWorkingDaysMonthVal = parseInt($dayVal.slice(0, $dayVal.length - 1))
this.radioDay = 'recentlyWorkingDaysMonth'
}
if ($dayVal === '*') {
hanleDayOne()
} else if (isStr($dayVal, '/')) {
hanleDayTwo()
} else if ($dayVal === 'L') {
hanleDayFour()
} else if ($dayVal === 'LW') {
hanleDayFive()
} else if ($dayVal.charAt($dayVal.length - 1) === 'L') {
hanleDaySix()
} else if (isStr($dayVal, '-')) {
hanleDaySeven()
} else if ($dayVal.charAt($dayVal.length - 1) === 'W') {
hanleDayEight()
} else {
hanleDayThree()
}
}
},
beforeMount () {
},
mounted () {
},
beforeUpdate () {
},
updated () {
},
beforeDestroy () {
},
destroyed () {
},
computed: {},
components: { mInputNumber }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.day-model {
.ans-radio-group-vertical {
.ans-radio-wrapper {
margin: 5px 0;
display: inline-block
}
}
}
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,104 |
[Bug][Sqoop] when jdbc connect string contains special characters, import fail
|
**Describe the bug**
When the JDBC connection string contains special characters, the import fails.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Configure a Sqoop import whose JDBC connection string contains special characters such as "&"
2. Run the Sqoop task
3. The task fails with a "command not found" error
**Expected behavior**
The Sqoop import from the RDBMS succeeds.
**Which version of Dolphin Scheduler:**
-[ds-1.3.3]
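As a rough illustration of the failure mode (not the project's actual fix), the sketch below runs an equivalent command line through `bash -c`: unquoted, the `&` in the connection string is treated as a shell control operator, which produces the "command not found" style error; quoted, the URL stays a single argument. The URL is a made-up example.

```java
import java.io.IOException;

public class AmpersandDemo {
    public static void main(String[] args) throws IOException, InterruptedException {
        String url = "jdbc:mysql://host:3306/test?useSSL=false&characterEncoding=utf8";
        // Unquoted: bash splits the line at '&', backgrounds the first half and tries
        // to run the remainder ("characterEncoding=utf8 --username root"), which ends
        // in a "--username: command not found" style error.
        run("echo --connect " + url + " --username root");
        // Quoted: the whole URL is passed through as a single word.
        run("echo --connect \"" + url + "\" --username root");
    }

    private static void run(String command) throws IOException, InterruptedException {
        Process p = new ProcessBuilder("bash", "-c", command).inheritIO().start();
        p.waitFor();
    }
}
```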
|
https://github.com/apache/dolphinscheduler/issues/4104
|
https://github.com/apache/dolphinscheduler/pull/4105
|
656ec295b9e09468fc6a871df821ba42436c5e57
|
1cf40e1d1e4379e6b50a92871987d59291ccfd50
| 2020-11-25T15:22:52Z |
java
| 2020-11-27T01:46:23Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/generator/sources/MysqlSourceGenerator.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.sqoop.generator.sources;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.SqoopQueryType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.sqoop.SqoopParameters;
import org.apache.dolphinscheduler.common.task.sqoop.sources.SourceMysqlParameter;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.server.entity.SqoopTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.worker.task.sqoop.SqoopConstants;
import org.apache.dolphinscheduler.server.worker.task.sqoop.generator.ISourceGenerator;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* mysql source generator
*/
public class MysqlSourceGenerator implements ISourceGenerator {
private static final Logger logger = LoggerFactory.getLogger(MysqlSourceGenerator.class);
@Override
public String generate(SqoopParameters sqoopParameters, TaskExecutionContext taskExecutionContext) {
StringBuilder mysqlSourceSb = new StringBuilder();
try {
SourceMysqlParameter sourceMysqlParameter = JSONUtils.parseObject(sqoopParameters.getSourceParams(), SourceMysqlParameter.class);
SqoopTaskExecutionContext sqoopTaskExecutionContext = taskExecutionContext.getSqoopTaskExecutionContext();
if (null != sourceMysqlParameter) {
BaseDataSource baseDataSource = DataSourceFactory.getDatasource(DbType.of(sqoopTaskExecutionContext.getSourcetype()),
sqoopTaskExecutionContext.getSourceConnectionParams());
if (null != baseDataSource) {
mysqlSourceSb.append(Constants.SPACE).append(SqoopConstants.DB_CONNECT)
.append(Constants.SPACE).append(baseDataSource.getJdbcUrl())
.append(Constants.SPACE).append(SqoopConstants.DB_USERNAME)
.append(Constants.SPACE).append(baseDataSource.getUser())
.append(Constants.SPACE).append(SqoopConstants.DB_PWD)
.append(Constants.SPACE).append(Constants.DOUBLE_QUOTES).append(baseDataSource.getPassword()).append(Constants.DOUBLE_QUOTES);
//sqoop table & sql query
if (sourceMysqlParameter.getSrcQueryType() == SqoopQueryType.FORM.getCode()) {
if (StringUtils.isNotEmpty(sourceMysqlParameter.getSrcTable())) {
mysqlSourceSb.append(Constants.SPACE).append(SqoopConstants.TABLE)
.append(Constants.SPACE).append(sourceMysqlParameter.getSrcTable());
}
if (StringUtils.isNotEmpty(sourceMysqlParameter.getSrcColumns())) {
mysqlSourceSb.append(Constants.SPACE).append(SqoopConstants.COLUMNS)
.append(Constants.SPACE).append(sourceMysqlParameter.getSrcColumns());
}
} else if (sourceMysqlParameter.getSrcQueryType() == SqoopQueryType.SQL.getCode()
&& StringUtils.isNotEmpty(sourceMysqlParameter.getSrcQuerySql())) {
String srcQuery = sourceMysqlParameter.getSrcQuerySql();
mysqlSourceSb.append(Constants.SPACE).append(SqoopConstants.QUERY)
.append(Constants.SPACE).append(Constants.DOUBLE_QUOTES).append(srcQuery);
if (srcQuery.toLowerCase().contains(SqoopConstants.QUERY_WHERE)) {
mysqlSourceSb.append(Constants.SPACE).append(SqoopConstants.QUERY_CONDITION).append(Constants.DOUBLE_QUOTES);
} else {
mysqlSourceSb.append(Constants.SPACE).append(SqoopConstants.QUERY_WITHOUT_CONDITION).append(Constants.DOUBLE_QUOTES);
}
}
//sqoop hive map column
List<Property> mapColumnHive = sourceMysqlParameter.getMapColumnHive();
if (null != mapColumnHive && !mapColumnHive.isEmpty()) {
StringBuilder columnMap = new StringBuilder();
for (Property item : mapColumnHive) {
columnMap.append(item.getProp()).append(Constants.EQUAL_SIGN).append(item.getValue()).append(Constants.COMMA);
}
if (StringUtils.isNotEmpty(columnMap.toString())) {
mysqlSourceSb.append(Constants.SPACE).append(SqoopConstants.MAP_COLUMN_HIVE)
.append(Constants.SPACE).append(columnMap.substring(0, columnMap.length() - 1));
}
}
//sqoop map column java
List<Property> mapColumnJava = sourceMysqlParameter.getMapColumnJava();
if (null != mapColumnJava && !mapColumnJava.isEmpty()) {
StringBuilder columnMap = new StringBuilder();
for (Property item : mapColumnJava) {
columnMap.append(item.getProp()).append(Constants.EQUAL_SIGN).append(item.getValue()).append(Constants.COMMA);
}
if (StringUtils.isNotEmpty(columnMap.toString())) {
mysqlSourceSb.append(Constants.SPACE).append(SqoopConstants.MAP_COLUMN_JAVA)
.append(Constants.SPACE).append(columnMap.substring(0, columnMap.length() - 1));
}
}
}
}
} catch (Exception e) {
logger.error(String.format("Sqoop task mysql source params build failed: [%s]", e.getMessage()));
}
return mysqlSourceSb.toString();
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,104 |
[Bug][Sqoop] when jdbc connect string contains special characters, import fail
|
**Describe the bug**
When the JDBC connection string contains special characters, the import fails.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Configure a Sqoop import whose JDBC connection string contains special characters such as "&"
2. Run the Sqoop task
3. The task fails with a "command not found" error
**Expected behavior**
The Sqoop import from the RDBMS succeeds.
**Which version of Dolphin Scheduler:**
-[ds-1.3.3]
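One possible direction, sketched below as an assumption rather than the actual change in the linked PR, is for the generators to wrap the JDBC URL in double quotes before appending it to the command, so shell metacharacters in the query string are preserved. `quoteJdbcUrl` is a hypothetical helper, not an existing project method:

```java
public class JdbcUrlQuotingSketch {

    // Hypothetical helper (not part of DolphinScheduler): wrap the JDBC URL in
    // double quotes so that '&' and similar characters survive shell parsing.
    static String quoteJdbcUrl(String jdbcUrl) {
        if (jdbcUrl == null || jdbcUrl.isEmpty() || jdbcUrl.startsWith("\"")) {
            return jdbcUrl;
        }
        return "\"" + jdbcUrl + "\"";
    }

    public static void main(String[] args) {
        String url = "jdbc:mysql://192.168.0.111:3306/test?useSSL=false&characterEncoding=utf8";
        // The generator would then emit: --connect "jdbc:mysql://...&..."
        System.out.println("--connect " + quoteJdbcUrl(url));
    }
}
```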
|
https://github.com/apache/dolphinscheduler/issues/4104
|
https://github.com/apache/dolphinscheduler/pull/4105
|
656ec295b9e09468fc6a871df821ba42436c5e57
|
1cf40e1d1e4379e6b50a92871987d59291ccfd50
| 2020-11-25T15:22:52Z |
java
| 2020-11-27T01:46:23Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/generator/targets/MysqlTargetGenerator.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.sqoop.generator.targets;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.task.sqoop.SqoopParameters;
import org.apache.dolphinscheduler.common.task.sqoop.targets.TargetMysqlParameter;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.server.entity.SqoopTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.worker.task.sqoop.SqoopConstants;
import org.apache.dolphinscheduler.server.worker.task.sqoop.generator.ITargetGenerator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* mysql target generator
*/
public class MysqlTargetGenerator implements ITargetGenerator {
private static final Logger logger = LoggerFactory.getLogger(MysqlTargetGenerator.class);
@Override
public String generate(SqoopParameters sqoopParameters, TaskExecutionContext taskExecutionContext) {
StringBuilder mysqlTargetSb = new StringBuilder();
try {
TargetMysqlParameter targetMysqlParameter =
JSONUtils.parseObject(sqoopParameters.getTargetParams(), TargetMysqlParameter.class);
SqoopTaskExecutionContext sqoopTaskExecutionContext = taskExecutionContext.getSqoopTaskExecutionContext();
if (null != targetMysqlParameter && targetMysqlParameter.getTargetDatasource() != 0) {
// get datasource
BaseDataSource baseDataSource = DataSourceFactory.getDatasource(DbType.of(sqoopTaskExecutionContext.getTargetType()),
sqoopTaskExecutionContext.getTargetConnectionParams());
if (null != baseDataSource) {
mysqlTargetSb.append(Constants.SPACE).append(SqoopConstants.DB_CONNECT)
.append(Constants.SPACE).append(baseDataSource.getJdbcUrl())
.append(Constants.SPACE).append(SqoopConstants.DB_USERNAME)
.append(Constants.SPACE).append(baseDataSource.getUser())
.append(Constants.SPACE).append(SqoopConstants.DB_PWD)
.append(Constants.SPACE).append(Constants.DOUBLE_QUOTES).append(baseDataSource.getPassword()).append(Constants.DOUBLE_QUOTES)
.append(Constants.SPACE).append(SqoopConstants.TABLE)
.append(Constants.SPACE).append(targetMysqlParameter.getTargetTable());
if (StringUtils.isNotEmpty(targetMysqlParameter.getTargetColumns())) {
mysqlTargetSb.append(Constants.SPACE).append(SqoopConstants.COLUMNS)
.append(Constants.SPACE).append(targetMysqlParameter.getTargetColumns());
}
if (StringUtils.isNotEmpty(targetMysqlParameter.getFieldsTerminated())) {
mysqlTargetSb.append(Constants.SPACE).append(SqoopConstants.FIELDS_TERMINATED_BY);
if (targetMysqlParameter.getFieldsTerminated().contains("'")) {
mysqlTargetSb.append(Constants.SPACE).append(targetMysqlParameter.getFieldsTerminated());
} else {
mysqlTargetSb.append(Constants.SPACE).append(Constants.SINGLE_QUOTES).append(targetMysqlParameter.getFieldsTerminated()).append(Constants.SINGLE_QUOTES);
}
}
if (StringUtils.isNotEmpty(targetMysqlParameter.getLinesTerminated())) {
mysqlTargetSb.append(Constants.SPACE).append(SqoopConstants.LINES_TERMINATED_BY);
if (targetMysqlParameter.getLinesTerminated().contains(Constants.SINGLE_QUOTES)) {
mysqlTargetSb.append(Constants.SPACE).append(targetMysqlParameter.getLinesTerminated());
} else {
mysqlTargetSb.append(Constants.SPACE).append(Constants.SINGLE_QUOTES).append(targetMysqlParameter.getLinesTerminated()).append(Constants.SINGLE_QUOTES);
}
}
if (targetMysqlParameter.getIsUpdate()
&& StringUtils.isNotEmpty(targetMysqlParameter.getTargetUpdateKey())
&& StringUtils.isNotEmpty(targetMysqlParameter.getTargetUpdateMode())) {
mysqlTargetSb.append(Constants.SPACE).append(SqoopConstants.UPDATE_KEY)
.append(Constants.SPACE).append(targetMysqlParameter.getTargetUpdateKey())
.append(Constants.SPACE).append(SqoopConstants.UPDATE_MODE)
.append(Constants.SPACE).append(targetMysqlParameter.getTargetUpdateMode());
}
}
}
} catch (Exception e) {
logger.error(String.format("Sqoop mysql target params build failed: [%s]", e.getMessage()));
}
return mysqlTargetSb.toString();
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,104 |
[Bug][Sqoop] when jdbc connect string contains special characters, import fail
|
**Describe the bug**
When the JDBC connection string contains special characters, the import fails.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Configure a Sqoop import whose JDBC connection string contains special characters such as "&"
2. Run the Sqoop task
3. The task fails with a "command not found" error
**Expected behavior**
The Sqoop import from the RDBMS succeeds.
**Which version of Dolphin Scheduler:**
-[ds-1.3.3]
|
https://github.com/apache/dolphinscheduler/issues/4104
|
https://github.com/apache/dolphinscheduler/pull/4105
|
656ec295b9e09468fc6a871df821ba42436c5e57
|
1cf40e1d1e4379e6b50a92871987d59291ccfd50
| 2020-11-25T15:22:52Z |
java
| 2020-11-27T01:46:23Z |
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTaskTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.sqoop;
import org.apache.dolphinscheduler.common.task.sqoop.SqoopParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.server.entity.SqoopTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.worker.task.sqoop.generator.SqoopJobGenerator;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import java.util.Date;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
/**
* sqoop task test
*/
@RunWith(MockitoJUnitRunner.Silent.class)
public class SqoopTaskTest {
private static final Logger logger = LoggerFactory.getLogger(SqoopTaskTest.class);
private SqoopTask sqoopTask;
@Before
public void before() {
ProcessService processService = Mockito.mock(ProcessService.class);
ApplicationContext applicationContext = Mockito.mock(ApplicationContext.class);
SpringApplicationContext springApplicationContext = new SpringApplicationContext();
springApplicationContext.setApplicationContext(applicationContext);
Mockito.when(applicationContext.getBean(ProcessService.class)).thenReturn(processService);
TaskExecutionContext taskExecutionContext = new TaskExecutionContext();
taskExecutionContext.setTaskAppId(String.valueOf(System.currentTimeMillis()));
taskExecutionContext.setTenantCode("1");
taskExecutionContext.setEnvFile(".dolphinscheduler_env.sh");
taskExecutionContext.setStartTime(new Date());
taskExecutionContext.setTaskTimeout(0);
taskExecutionContext.setTaskParams("{\"jobName\":\"sqoop_import\",\"jobType\":\"TEMPLATE\",\"concurrency\":1,"
+ "\"modelType\":\"import\",\"sourceType\":\"MYSQL\",\"targetType\":\"HIVE\",\"sourceParams\":\"{\\\"srcDatasource\\\":2,\\\"srcTable\\\":\\\"person_2\\\","
+ "\\\"srcQueryType\\\":\\\"1\\\",\\\"srcQuerySql\\\":\\\"SELECT * FROM person_2\\\",\\\"srcColumnType\\\":\\\"0\\\","
+ "\\\"srcColumns\\\":\\\"\\\",\\\"srcConditionList\\\":[],\\\"mapColumnHive\\\":[],"
+ "\\\"mapColumnJava\\\":[{\\\"prop\\\":\\\"id\\\",\\\"direct\\\":\\\"IN\\\",\\\"type\\\":\\\"VARCHAR\\\",\\\"value\\\":\\\"Integer\\\"}]}\""
+ ",\"targetParams\":\"{\\\"hiveDatabase\\\":\\\"stg\\\",\\\"hiveTable\\\":\\\"person_internal_2\\\",\\\"createHiveTable\\\":true,"
+ "\\\"dropDelimiter\\\":false,\\\"hiveOverWrite\\\":true,\\\"replaceDelimiter\\\":\\\"\\\",\\\"hivePartitionKey\\\":\\\"date\\\","
+ "\\\"hivePartitionValue\\\":\\\"2020-02-16\\\"}\",\"localParams\":[]}");
sqoopTask = new SqoopTask(taskExecutionContext, logger);
//test sqoop task init method
sqoopTask.init();
}
/**
* test SqoopJobGenerator
*/
@Test
public void testGenerator() {
TaskExecutionContext mysqlTaskExecutionContext = getMysqlTaskExecutionContext();
//sqoop TEMPLATE job
//import mysql to HDFS with hadoop
String mysqlToHdfs =
"{\"jobName\":\"sqoop_import\",\"hadoopCustomParams\":[{\"prop\":\"mapreduce.map.memory.mb\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"4096\"}],"
+ "\"sqoopAdvancedParams\":[{\"prop\":\"--direct\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"\"}],\"jobType\":\"TEMPLATE\",\"concurrency\":1,"
+ "\"modelType\":\"import\",\"sourceType\":\"MYSQL\",\"targetType\":\"HDFS\","
+ "\"sourceParams\":\"{\\\"srcDatasource\\\":2,\\\"srcTable\\\":\\\"person_2\\\",\\\"srcQueryType\\\":\\\"0\\\",\\\"srcQuerySql\\\":\\\"\\\",\\\"srcColumnType\\\":\\\"0\\\","
+ "\\\"srcColumns\\\":\\\"\\\",\\\"srcConditionList\\\":[],\\\"mapColumnHive\\\":[],\\\"mapColumnJava\\\":[]}\",\"targetParams\":\"{\\\"targetPath\\\":\\\"/ods/tmp/test/person7\\\","
+ "\\\"deleteTargetDir\\\":true,\\\"fileType\\\":\\\"--as-textfile\\\",\\\"compressionCodec\\\":\\\"\\\",\\\"fieldsTerminated\\\":\\\"@\\\","
+ "\\\"linesTerminated\\\":\\\"\\\\\\\\n\\\"}\",\"localParams\":[]}";
SqoopParameters mysqlToHdfsParams = JSONUtils.parseObject(mysqlToHdfs, SqoopParameters.class);
SqoopJobGenerator generator = new SqoopJobGenerator();
String mysqlToHdfsScript = generator.generateSqoopJob(mysqlToHdfsParams, mysqlTaskExecutionContext);
String mysqlToHdfsExpected =
"sqoop import -D mapred.job.name=sqoop_import -D mapreduce.map.memory.mb=4096 --direct -m 1 --connect jdbc:mysql://192.168.0.111:3306/test "
+ "--username kylo --password \"123456\" --table person_2 --target-dir /ods/tmp/test/person7 --as-textfile "
+ "--delete-target-dir --fields-terminated-by '@' --lines-terminated-by '\\n' --null-non-string 'NULL' --null-string 'NULL'";
Assert.assertEquals(mysqlToHdfsExpected, mysqlToHdfsScript);
//export hdfs to mysql using update mode
String hdfsToMysql = "{\"jobName\":\"sqoop_import\",\"jobType\":\"TEMPLATE\",\"concurrency\":1,\"modelType\":\"export\",\"sourceType\":\"HDFS\","
+ "\"targetType\":\"MYSQL\",\"sourceParams\":\"{\\\"exportDir\\\":\\\"/ods/tmp/test/person7\\\"}\","
+ "\"targetParams\":\"{\\\"targetDatasource\\\":2,\\\"targetTable\\\":\\\"person_3\\\",\\\"targetColumns\\\":\\\"id,name,age,sex,create_time\\\","
+ "\\\"preQuery\\\":\\\"\\\",\\\"isUpdate\\\":true,\\\"targetUpdateKey\\\":\\\"id\\\",\\\"targetUpdateMode\\\":\\\"allowinsert\\\","
+ "\\\"fieldsTerminated\\\":\\\"@\\\",\\\"linesTerminated\\\":\\\"\\\\\\\\n\\\"}\",\"localParams\":[]}";
SqoopParameters hdfsToMysqlParams = JSONUtils.parseObject(hdfsToMysql, SqoopParameters.class);
String hdfsToMysqlScript = generator.generateSqoopJob(hdfsToMysqlParams, mysqlTaskExecutionContext);
String hdfsToMysqlScriptExpected =
"sqoop export -D mapred.job.name=sqoop_import -m 1 --export-dir /ods/tmp/test/person7 --connect jdbc:mysql://192.168.0.111:3306/test "
+ "--username kylo --password \"123456\" --table person_3 --columns id,name,age,sex,create_time --fields-terminated-by '@' "
+ "--lines-terminated-by '\\n' --update-key id --update-mode allowinsert";
Assert.assertEquals(hdfsToMysqlScriptExpected, hdfsToMysqlScript);
//export hive to mysql
String hiveToMysql =
"{\"jobName\":\"sqoop_import\",\"jobType\":\"TEMPLATE\",\"concurrency\":1,\"modelType\":\"export\",\"sourceType\":\"HIVE\","
+ "\"targetType\":\"MYSQL\",\"sourceParams\":\"{\\\"hiveDatabase\\\":\\\"stg\\\",\\\"hiveTable\\\":\\\"person_internal\\\","
+ "\\\"hivePartitionKey\\\":\\\"date\\\",\\\"hivePartitionValue\\\":\\\"2020-02-17\\\"}\","
+ "\"targetParams\":\"{\\\"targetDatasource\\\":2,\\\"targetTable\\\":\\\"person_3\\\",\\\"targetColumns\\\":\\\"\\\",\\\"preQuery\\\":\\\"\\\","
+ "\\\"isUpdate\\\":false,\\\"targetUpdateKey\\\":\\\"\\\",\\\"targetUpdateMode\\\":\\\"allowinsert\\\",\\\"fieldsTerminated\\\":\\\"@\\\","
+ "\\\"linesTerminated\\\":\\\"\\\\\\\\n\\\"}\",\"localParams\":[]}";
SqoopParameters hiveToMysqlParams = JSONUtils.parseObject(hiveToMysql, SqoopParameters.class);
String hiveToMysqlScript = generator.generateSqoopJob(hiveToMysqlParams, mysqlTaskExecutionContext);
String hiveToMysqlExpected =
"sqoop export -D mapred.job.name=sqoop_import -m 1 --hcatalog-database stg --hcatalog-table person_internal --hcatalog-partition-keys date "
+ "--hcatalog-partition-values 2020-02-17 --connect jdbc:mysql://192.168.0.111:3306/test --username kylo --password \"123456\" --table person_3 "
+ "--fields-terminated-by '@' --lines-terminated-by '\\n'";
Assert.assertEquals(hiveToMysqlExpected, hiveToMysqlScript);
//import mysql to hive
String mysqlToHive =
"{\"jobName\":\"sqoop_import\",\"jobType\":\"TEMPLATE\",\"concurrency\":1,\"modelType\":\"import\",\"sourceType\":\"MYSQL\",\"targetType\":\"HIVE\","
+ "\"sourceParams\":\"{\\\"srcDatasource\\\":2,\\\"srcTable\\\":\\\"person_2\\\",\\\"srcQueryType\\\":\\\"1\\\","
+ "\\\"srcQuerySql\\\":\\\"SELECT * FROM person_2\\\",\\\"srcColumnType\\\":\\\"0\\\",\\\"srcColumns\\\":\\\"\\\",\\\"srcConditionList\\\":[],"
+ "\\\"mapColumnHive\\\":[],\\\"mapColumnJava\\\":[{\\\"prop\\\":\\\"id\\\",\\\"direct\\\":\\\"IN\\\",\\\"type\\\":\\\"VARCHAR\\\",\\\"value\\\":\\\"Integer\\\"}]}\","
+ "\"targetParams\":\"{\\\"hiveDatabase\\\":\\\"stg\\\",\\\"hiveTable\\\":\\\"person_internal_2\\\",\\\"createHiveTable\\\":true,\\\"dropDelimiter\\\":false,"
+ "\\\"hiveOverWrite\\\":true,\\\"replaceDelimiter\\\":\\\"\\\",\\\"hivePartitionKey\\\":\\\"date\\\",\\\"hivePartitionValue\\\":\\\"2020-02-16\\\"}\",\"localParams\":[]}";
SqoopParameters mysqlToHiveParams = JSONUtils.parseObject(mysqlToHive, SqoopParameters.class);
String mysqlToHiveScript = generator.generateSqoopJob(mysqlToHiveParams, mysqlTaskExecutionContext);
String mysqlToHiveExpected =
"sqoop import -D mapred.job.name=sqoop_import -m 1 --connect jdbc:mysql://192.168.0.111:3306/test --username kylo --password \"123456\" "
+ "--query \"SELECT * FROM person_2 WHERE \\$CONDITIONS\" --map-column-java id=Integer --hive-import --hive-table stg.person_internal_2 "
+ "--create-hive-table --hive-overwrite --delete-target-dir --hive-partition-key date --hive-partition-value 2020-02-16";
Assert.assertEquals(mysqlToHiveExpected, mysqlToHiveScript);
//sqoop CUSTOM job
String sqoopCustomString = "{\"jobType\":\"CUSTOM\",\"localParams\":[],\"customShell\":\"sqoop import\"}";
SqoopParameters sqoopCustomParams = JSONUtils.parseObject(sqoopCustomString, SqoopParameters.class);
String sqoopCustomScript = generator.generateSqoopJob(sqoopCustomParams, new TaskExecutionContext());
String sqoopCustomExpected = "sqoop import";
Assert.assertEquals(sqoopCustomExpected, sqoopCustomScript);
}
/**
* get taskExecutionContext include mysql
*
* @return TaskExecutionContext
*/
private TaskExecutionContext getMysqlTaskExecutionContext() {
TaskExecutionContext taskExecutionContext = new TaskExecutionContext();
SqoopTaskExecutionContext sqoopTaskExecutionContext = new SqoopTaskExecutionContext();
String mysqlSourceConnectionParams =
"{\"address\":\"jdbc:mysql://192.168.0.111:3306\",\"database\":\"test\",\"jdbcUrl\":\"jdbc:mysql://192.168.0.111:3306/test\",\"user\":\"kylo\",\"password\":\"123456\"}";
String mysqlTargetConnectionParams =
"{\"address\":\"jdbc:mysql://192.168.0.111:3306\",\"database\":\"test\",\"jdbcUrl\":\"jdbc:mysql://192.168.0.111:3306/test\",\"user\":\"kylo\",\"password\":\"123456\"}";
sqoopTaskExecutionContext.setDataSourceId(2);
sqoopTaskExecutionContext.setDataTargetId(2);
sqoopTaskExecutionContext.setSourcetype(0);
sqoopTaskExecutionContext.setTargetConnectionParams(mysqlTargetConnectionParams);
sqoopTaskExecutionContext.setSourceConnectionParams(mysqlSourceConnectionParams);
sqoopTaskExecutionContext.setTargetType(0);
taskExecutionContext.setSqoopTaskExecutionContext(sqoopTaskExecutionContext);
return taskExecutionContext;
}
@Test
public void testGetParameters() {
Assert.assertNotNull(sqoopTask.getParameters());
}
/**
* Method: init
*/
@Test
public void testInit() {
try {
sqoopTask.init();
} catch (Exception e) {
Assert.fail(e.getMessage());
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,478 |
[Bug][Common] read the file garbled
|
When reading the file, no character encoding is specified, which results in garbled content.


|
https://github.com/apache/dolphinscheduler/issues/3478
|
https://github.com/apache/dolphinscheduler/pull/3479
|
1cf40e1d1e4379e6b50a92871987d59291ccfd50
|
bb13f2eae78e4b829816068e8ca3d337d97a0fd2
| 2020-08-13T03:13:35Z |
java
| 2020-11-27T13:55:28Z |
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.PropertyUtils;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.mock.web.MockMultipartFile;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({"sun.security.*", "javax.net.*"})
@PrepareForTest({HadoopUtils.class,PropertyUtils.class, FileUtils.class,org.apache.dolphinscheduler.api.utils.FileUtils.class})
public class ResourcesServiceTest {
private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceTest.class);
@InjectMocks
private ResourcesService resourcesService;
@Mock
private ResourceMapper resourcesMapper;
@Mock
private TenantMapper tenantMapper;
@Mock
private ResourceUserMapper resourceUserMapper;
@Mock
private HadoopUtils hadoopUtils;
@Mock
private UserMapper userMapper;
@Mock
private UdfFuncMapper udfFunctionMapper;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Before
public void setUp() {
PowerMockito.mockStatic(HadoopUtils.class);
PowerMockito.mockStatic(FileUtils.class);
PowerMockito.mockStatic(org.apache.dolphinscheduler.api.utils.FileUtils.class);
try {
// new HadoopUtils
PowerMockito.whenNew(HadoopUtils.class).withNoArguments().thenReturn(hadoopUtils);
} catch (Exception e) {
e.printStackTrace();
}
PowerMockito.when(HadoopUtils.getInstance()).thenReturn(hadoopUtils);
PowerMockito.mockStatic(PropertyUtils.class);
}
@Test
public void testCreateResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_FILE_IS_EMPTY
MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf",new String().getBytes());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_FORBID_CHANGE
mockMultipartFile = new MockMultipartFile("test.pdf","test.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("test.pdf")).thenReturn("pdf");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(),result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf","ResourcesServiceTest.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.pdf")).thenReturn("pdf");
result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(),result.getMsg());
}
@Test
public void testCreateDirectory(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//PARENT_RESOURCE_NOT_EXIST
user.setId(1);
user.setTenantId(1);
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.queryResourceList("/directoryTest", 0, 0)).thenReturn(getResourceList());
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
}
@Test
public void testUpdateResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.updateResource(user,1,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.updateResource(user,0,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//USER_NO_OPERATION_PERM
result = resourcesService.updateResource(user,1,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
user.setId(1);
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsFileName(Mockito.any(), Mockito.any(),Mockito.anyString())).thenReturn("test1");
try {
Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(false);
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user, 1, "ResourcesServiceTest1.jar", "ResourcesServiceTest", ResourceType.UDF,null);
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
user.setId(1);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
Mockito.when(HadoopUtils.getInstance().exists(Mockito.any())).thenReturn(true);
} catch (IOException e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
//RESOURCE_EXIST
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.FILE,null);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
//USER_NOT_EXIST
Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF,null);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF,null);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test");
try {
PowerMockito.when(HadoopUtils.getInstance().copy(Mockito.anyString(),Mockito.anyString(),true,true)).thenReturn(true);
} catch (Exception e) {
logger.error(e.getMessage(),e);
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF,null);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testQueryResourceListPaging(){
User loginUser = new User();
loginUser.setUserType(UserType.ADMIN_USER);
IPage<Resource> resourcePage = new Page<>(1,10);
resourcePage.setTotal(1);
resourcePage.setRecords(getResourceList());
Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class),
Mockito.eq(0),Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,-1,ResourceType.FILE,"test",1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
PageInfo pageInfo = (PageInfo) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(pageInfo.getLists()));
}
@Test
public void testQueryResourceList(){
User loginUser = new User();
loginUser.setId(0);
loginUser.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceListAuthored(0, 0,0)).thenReturn(getResourceList());
Map<String, Object> result = resourcesService.queryResourceList(loginUser, ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
List<Resource> resourceList = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resourceList));
}
@Test
public void testDelete(){
User loginUser = new User();
loginUser.setId(0);
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
// HDFS_NOT_STARTUP
Result result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
result = resourcesService.delete(loginUser,2);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
// USER_NO_OPERATION_PERM
result = resourcesService.delete(loginUser,2);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setTenantId(2);
Mockito.when(userMapper.selectById(Mockito.anyInt())).thenReturn(loginUser);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//SUCCESS
loginUser.setTenantId(1);
Mockito.when(hadoopUtils.delete(Mockito.anyString(), Mockito.anyBoolean())).thenReturn(true);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
} catch (Exception e) {
logger.error("delete error",e);
Assert.assertTrue(false);
}
}
@Test
public void testVerifyResourceName(){
User user = new User();
user.setId(1);
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest.jar", 0, 0)).thenReturn(getResourceList());
Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
String unExistFullName = "/test.jar";
try {
Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.verifyResourceName("/test.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_FILE_EXIST
user.setTenantId(1);
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
logger.error("hadoop error",e);
}
PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test");
result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_EXIST.getCode()==result.getCode());
//SUCCESS
result = resourcesService.verifyResourceName("test2",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testReadResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
//HDFS_NOT_STARTUP
Result result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.readResource(2,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//USER_NOT_EXIST
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode()==result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_FILE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
Mockito.when(hadoopUtils.exists(Mockito.anyString())).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_FILE_NOT_EXIST.getCode()==result.getCode());
//SUCCESS
try {
Mockito.when(hadoopUtils.exists(null)).thenReturn(true);
Mockito.when(hadoopUtils.catFile(null,1,10)).thenReturn(getContent());
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testOnlineCreateResource() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
PowerMockito.when(HadoopUtils.getHdfsResDir("hdfsdDir")).thenReturn("hdfsDir");
PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir");
User user = getUser();
//HDFS_NOT_STARTUP
Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//RuntimeException
try {
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content",-1,"/");
}catch (RuntimeException ex){
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage());
}
//SUCCESS
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testUpdateResourceContent(){
User loginUser = new User();
loginUser.setId(0);
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
// HDFS_NOT_STARTUP
Result result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
result = resourcesService.updateResourceContent(2,"content");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//USER_NOT_EXIST
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertTrue(Status.TENANT_NOT_EXIST.getCode() == result.getCode());
//SUCCESS
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testDownloadResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(userMapper.selectById(1)).thenReturn(getUser());
org.springframework.core.io.Resource resourceMock = Mockito.mock(org.springframework.core.io.Resource.class);
try {
//resource null
org.springframework.core.io.Resource resource = resourcesService.downloadResource(1);
Assert.assertNull(resource);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(Mockito.any())).thenReturn(resourceMock);
resource = resourcesService.downloadResource(1);
Assert.assertNotNull(resource);
} catch (Exception e) {
logger.error("DownloadResource error",e);
Assert.assertTrue(false);
}
}
@Test
public void testUnauthorizedFile(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.unauthorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceExceptUserId(1)).thenReturn(getResourceList());
result = resourcesService.unauthorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resources));
}
@Test
public void testUnauthorizedUDFFunction(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.unauthorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(udfFunctionMapper.queryUdfFuncExceptUserId(1)).thenReturn(getUdfFuncList());
result = resourcesService.unauthorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs));
}
@Test
public void testAuthorizedUDFFunction(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.authorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(udfFunctionMapper.queryAuthedUdfFunc(1)).thenReturn(getUdfFuncList());
result = resourcesService.authorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs));
}
@Test
public void testAuthorizedFile(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.authorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryAuthorizedResourceList(1)).thenReturn(getResourceList());
result = resourcesService.authorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resources));
}
private List<Resource> getResourceList(){
List<Resource> resources = new ArrayList<>();
resources.add(getResource());
return resources;
}
private Tenant getTenant() {
Tenant tenant = new Tenant();
tenant.setTenantCode("123");
return tenant;
}
private Resource getResource(){
Resource resource = new Resource();
resource.setPid(-1);
resource.setUserId(1);
resource.setDescription("ResourcesServiceTest.jar");
resource.setAlias("ResourcesServiceTest.jar");
resource.setFullName("/ResourcesServiceTest.jar");
resource.setType(ResourceType.FILE);
return resource;
}
private Resource getUdfResource(){
Resource resource = new Resource();
resource.setUserId(1);
resource.setDescription("udfTest");
resource.setAlias("udfTest.jar");
resource.setFullName("/udfTest.jar");
resource.setType(ResourceType.UDF);
return resource;
}
private UdfFunc getUdfFunc(){
UdfFunc udfFunc = new UdfFunc();
udfFunc.setId(1);
return udfFunc;
}
private List<UdfFunc> getUdfFuncList(){
List<UdfFunc> udfFuncs = new ArrayList<>();
udfFuncs.add(getUdfFunc());
return udfFuncs;
}
private User getUser(){
User user = new User();
user.setId(1);
user.setTenantId(1);
return user;
}
private List<String> getContent(){
List<String> contentList = new ArrayList<>();
contentList.add("test");
return contentList;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,478 |
[Bug][Common] read the file garbled
|
When reading the file, no encoding is specified, resulting in garbled code


|
https://github.com/apache/dolphinscheduler/issues/3478
|
https://github.com/apache/dolphinscheduler/pull/3479
|
1cf40e1d1e4379e6b50a92871987d59291ccfd50
|
bb13f2eae78e4b829816068e8ca3d337d97a0fd2
| 2020-08-13T03:13:35Z |
java
| 2020-11-27T13:55:28Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/FileUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import static org.apache.dolphinscheduler.common.Constants.DATA_BASEDIR_PATH;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_VIEW_SUFFIXS;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE;
import static org.apache.dolphinscheduler.common.Constants.YYYYMMDDHHMMSS;
import org.apache.commons.io.Charsets;
import org.apache.commons.io.IOUtils;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.nio.charset.UnsupportedCharsetException;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* file utils
*/
public class FileUtils {
public static final Logger logger = LoggerFactory.getLogger(FileUtils.class);
public static final String DATA_BASEDIR = PropertyUtils.getString(DATA_BASEDIR_PATH, "/tmp/dolphinscheduler");
public static final ThreadLocal<Logger> taskLoggerThreadLocal = new ThreadLocal<>();
private FileUtils() {
throw new UnsupportedOperationException("Construct FileUtils");
}
/**
* get file suffix
*
* @param filename file name
* @return file suffix
*/
public static String suffix(String filename) {
String fileSuffix = "";
if (StringUtils.isNotEmpty(filename)) {
int lastIndex = filename.lastIndexOf('.');
if (lastIndex > 0) {
fileSuffix = filename.substring(lastIndex + 1);
}
}
return fileSuffix;
}
/**
* get download file absolute path and name
*
* @param filename file name
* @return download file name
*/
public static String getDownloadFilename(String filename) {
String fileName = String.format("%s/download/%s/%s", DATA_BASEDIR, DateUtils.getCurrentTime(YYYYMMDDHHMMSS), filename);
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* get upload file absolute path and name
*
* @param tenantCode tenant code
* @param filename file name
* @return local file path
*/
public static String getUploadFilename(String tenantCode, String filename) {
String fileName = String.format("%s/%s/resources/%s", DATA_BASEDIR, tenantCode, filename);
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* directory of process execution
*
* @param projectId project id
* @param processDefineId process definition id
* @param processInstanceId process instance id
* @param taskInstanceId task instance id
* @return directory of process execution
*/
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId, int taskInstanceId) {
String fileName = String.format("%s/exec/process/%s/%s/%s/%s", DATA_BASEDIR, Integer.toString(projectId),
Integer.toString(processDefineId), Integer.toString(processInstanceId), Integer.toString(taskInstanceId));
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* directory of process instances
*
* @param projectId project id
* @param processDefineId process definition id
* @param processInstanceId process instance id
* @return directory of process instances
*/
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId) {
String fileName = String.format("%s/exec/process/%s/%s/%s", DATA_BASEDIR, Integer.toString(projectId),
Integer.toString(processDefineId), Integer.toString(processInstanceId));
File file = new File(fileName);
if (!file.getParentFile().exists()) {
file.getParentFile().mkdirs();
}
return fileName;
}
/**
* @return get suffixes for resource files that support online viewing
*/
public static String getResourceViewSuffixs() {
return PropertyUtils.getString(RESOURCE_VIEW_SUFFIXS, RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE);
}
/**
* create directory and user
*
* @param execLocalPath execute local path
* @param userName user name
* @throws IOException errors
*/
public static void createWorkDirAndUserIfAbsent(String execLocalPath, String userName) throws IOException {
//if work dir exists, first delete
File execLocalPathFile = new File(execLocalPath);
if (execLocalPathFile.exists()) {
org.apache.commons.io.FileUtils.forceDelete(execLocalPathFile);
}
//create work dir
org.apache.commons.io.FileUtils.forceMkdir(execLocalPathFile);
String mkdirLog = "create dir success " + execLocalPath;
LoggerUtils.logInfo(Optional.ofNullable(logger), mkdirLog);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), mkdirLog);
//if this user does not exist, then create it
OSUtils.taskLoggerThreadLocal.set(taskLoggerThreadLocal.get());
try {
if (!OSUtils.getUserList().contains(userName)) {
boolean isSuccessCreateUser = OSUtils.createUser(userName);
String infoLog;
if (isSuccessCreateUser) {
infoLog = String.format("create user name success %s", userName);
} else {
infoLog = String.format("create user name fail %s", userName);
}
LoggerUtils.logInfo(Optional.ofNullable(logger), infoLog);
LoggerUtils.logInfo(Optional.ofNullable(taskLoggerThreadLocal.get()), infoLog);
}
} catch (Throwable e) {
LoggerUtils.logError(Optional.ofNullable(logger), e);
LoggerUtils.logError(Optional.ofNullable(taskLoggerThreadLocal.get()), e);
}
OSUtils.taskLoggerThreadLocal.remove();
}
/**
* write content to a file; if the parent path does not exist, it will try its best to create it
*
* @param content content
* @param filePath target file path
* @return true if write success
*/
public static boolean writeContent2File(String content, String filePath) {
boolean flag = true;
BufferedReader bufferedReader = null;
BufferedWriter bufferedWriter = null;
try {
File distFile = new File(filePath);
if (!distFile.getParentFile().exists() && !distFile.getParentFile().mkdirs()) {
FileUtils.logger.error("mkdir parent failed");
return false;
}
bufferedReader = new BufferedReader(new StringReader(content));
bufferedWriter = new BufferedWriter(new FileWriter(distFile));
char[] buf = new char[1024];
int len;
while ((len = bufferedReader.read(buf)) != -1) {
bufferedWriter.write(buf, 0, len);
}
bufferedWriter.flush();
bufferedReader.close();
bufferedWriter.close();
} catch (IOException e) {
FileUtils.logger.error(e.getMessage(), e);
flag = false;
return flag;
} finally {
IOUtils.closeQuietly(bufferedWriter);
IOUtils.closeQuietly(bufferedReader);
}
return flag;
}
/**
* Writes a String to a file creating the file if it does not exist.
* <p>
* NOTE: As from v1.3, the parent directories of the file will be created
* if they do not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @throws IOException in case of an I/O error
* @throws java.io.UnsupportedEncodingException if the encoding is not supported by the VM
* @since 2.4
*/
public static void writeStringToFile(File file, String data, Charset encoding) throws IOException {
writeStringToFile(file, data, encoding, false);
}
/**
* Writes a String to a file creating the file if it does not exist.
* <p>
* NOTE: As from v1.3, the parent directories of the file will be created
* if they do not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @throws IOException in case of an I/O error
* @throws java.io.UnsupportedEncodingException if the encoding is not supported by the VM
*/
public static void writeStringToFile(File file, String data, String encoding) throws IOException {
writeStringToFile(file, data, encoding, false);
}
/**
* Writes a String to a file creating the file if it does not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @since 2.3
*/
public static void writeStringToFile(File file, String data, Charset encoding, boolean append) throws IOException {
OutputStream out = null;
try {
out = openOutputStream(file, append);
IOUtils.write(data, out, encoding);
out.close(); // don't swallow close Exception if copy completes normally
} finally {
IOUtils.closeQuietly(out);
}
}
/**
* Writes a String to a file creating the file if it does not exist.
*
* @param file the file to write
* @param data the content to write to the file
* @param encoding the encoding to use, {@code null} means platform default
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @throws UnsupportedCharsetException thrown instead of {@link UnsupportedEncodingException} in version 2.2 if the encoding is not
* supported by the VM
* @since 2.1
*/
public static void writeStringToFile(File file, String data, String encoding, boolean append) throws IOException {
writeStringToFile(file, data, Charsets.toCharset(encoding), append);
}
/**
* Writes a String to a file creating the file if it does not exist using the default encoding for the VM.
*
* @param file the file to write
* @param data the content to write to the file
* @throws IOException in case of an I/O error
*/
public static void writeStringToFile(File file, String data) throws IOException {
writeStringToFile(file, data, Charset.defaultCharset(), false);
}
/**
* Writes a String to a file creating the file if it does not exist using the default encoding for the VM.
*
* @param file the file to write
* @param data the content to write to the file
* @param append if {@code true}, then the String will be added to the
* end of the file rather than overwriting
* @throws IOException in case of an I/O error
* @since 2.1
*/
public static void writeStringToFile(File file, String data, boolean append) throws IOException {
writeStringToFile(file, data, Charset.defaultCharset(), append);
}
/**
* Opens a {@link FileOutputStream} for the specified file, checking and
* creating the parent directory if it does not exist.
* <p>
* At the end of the method either the stream will be successfully opened,
* or an exception will have been thrown.
* <p>
* The parent directory will be created if it does not exist.
* The file will be created if it does not exist.
* An exception is thrown if the file object exists but is a directory.
* An exception is thrown if the file exists but cannot be written to.
* An exception is thrown if the parent directory cannot be created.
*
* @param file the file to open for output, must not be {@code null}
* @return a new {@link FileOutputStream} for the specified file
* @throws IOException if the file object is a directory
* @throws IOException if the file cannot be written to
* @throws IOException if a parent directory needs creating but that fails
* @since 1.3
*/
public static FileOutputStream openOutputStream(File file) throws IOException {
return openOutputStream(file, false);
}
/**
* Opens a {@link FileOutputStream} for the specified file, checking and
* creating the parent directory if it does not exist.
* <p>
* At the end of the method either the stream will be successfully opened,
* or an exception will have been thrown.
* <p>
* The parent directory will be created if it does not exist.
* The file will be created if it does not exist.
* An exception is thrown if the file object exists but is a directory.
* An exception is thrown if the file exists but cannot be written to.
* An exception is thrown if the parent directory cannot be created.
*
* @param file the file to open for output, must not be {@code null}
* @param append if {@code true}, then bytes will be added to the
* end of the file rather than overwriting
* @return a new {@link FileOutputStream} for the specified file
* @throws IOException if the file object is a directory
* @throws IOException if the file cannot be written to
* @throws IOException if a parent directory needs creating but that fails
* @since 2.1
*/
public static FileOutputStream openOutputStream(File file, boolean append) throws IOException {
if (file.exists()) {
if (file.isDirectory()) {
throw new IOException("File '" + file + "' exists but is a directory");
}
if (!file.canWrite()) {
throw new IOException("File '" + file + "' cannot be written to");
}
} else {
File parent = file.getParentFile();
if (parent != null && !parent.mkdirs() && !parent.isDirectory()) {
throw new IOException("Directory '" + parent + "' could not be created");
}
}
return new FileOutputStream(file, append);
}
/**
* deletes a directory recursively
*
* @param dir directory
* @throws IOException in case deletion is unsuccessful
*/
public static void deleteDir(String dir) throws IOException {
org.apache.commons.io.FileUtils.deleteDirectory(new File(dir));
}
/**
* Deletes a file. If file is a directory, delete it and all sub-directories.
* <p>
* The difference between File.delete() and this method are:
* <ul>
* <li>A directory to be deleted does not have to be empty.</li>
* <li>You get exceptions when a file or directory cannot be deleted.
* (java.io.File methods returns a boolean)</li>
* </ul>
*
* @param filename file name
* @throws IOException in case deletion is unsuccessful
*/
public static void deleteFile(String filename) throws IOException {
org.apache.commons.io.FileUtils.forceDelete(new File(filename));
}
/**
* Gets all the parent subdirectories of the parentDir directory
*
* @param parentDir parent dir
* @return all dirs
*/
public static File[] getAllDir(String parentDir) {
if (parentDir == null || "".equals(parentDir)) {
throw new RuntimeException("parentDir can not be empty");
}
File file = new File(parentDir);
if (!file.exists() || !file.isDirectory()) {
throw new RuntimeException("parentDir not exist, or is not a directory:" + parentDir);
}
return file.listFiles(File::isDirectory);
}
/**
* Get Content
*
* @param inputStream input stream
* @return string of input stream
*/
public static String readFile2Str(InputStream inputStream) {
try {
ByteArrayOutputStream output = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int length;
while ((length = inputStream.read(buffer)) != -1) {
output.write(buffer, 0, length);
}
return output.toString();
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw new RuntimeException(e);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,478 |
[Bug][Common] read the file garbled
|
When reading the file, no encoding is specified, resulting in garbled code


|
https://github.com/apache/dolphinscheduler/issues/3478
|
https://github.com/apache/dolphinscheduler/pull/3479
|
1cf40e1d1e4379e6b50a92871987d59291ccfd50
|
bb13f2eae78e4b829816068e8ca3d337d97a0fd2
| 2020-08-13T03:13:35Z |
java
| 2020-11-27T13:55:28Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_UPLOAD_PATH;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResUploadType;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.client.cli.RMAdminCLI;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.file.Files;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
/**
* hadoop utils
* single instance
*/
public class HadoopUtils implements Closeable {
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
public static final String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
public static final String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
public static final String jobHistoryAddress = PropertyUtils.getString(Constants.YARN_JOB_HISTORY_STATUS_ADDRESS);
private static final String HADOOP_UTILS_KEY = "HADOOP_UTILS_KEY";
private static final LoadingCache<String, HadoopUtils> cache = CacheBuilder
.newBuilder()
.expireAfterWrite(PropertyUtils.getInt(Constants.KERBEROS_EXPIRE_TIME, 2), TimeUnit.HOURS)
.build(new CacheLoader<String, HadoopUtils>() {
@Override
public HadoopUtils load(String key) throws Exception {
return new HadoopUtils();
}
});
private static volatile boolean yarnEnabled = false;
private Configuration configuration;
private FileSystem fs;
private HadoopUtils() {
init();
initHdfsPath();
}
public static HadoopUtils getInstance() {
return cache.getUnchecked(HADOOP_UTILS_KEY);
}
/**
* init dolphinscheduler root path in hdfs
*/
private void initHdfsPath() {
Path path = new Path(resourceUploadPath);
try {
if (!fs.exists(path)) {
fs.mkdirs(path);
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* init hadoop configuration
*/
private void init() {
try {
configuration = new Configuration();
String resourceStorageType = PropertyUtils.getUpperCaseString(Constants.RESOURCE_STORAGE_TYPE);
ResUploadType resUploadType = ResUploadType.valueOf(resourceStorageType);
if (resUploadType == ResUploadType.HDFS) {
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) {
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
hdfsUser = "";
UserGroupInformation.setConfiguration(configuration);
UserGroupInformation.loginUserFromKeytab(PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_USERNAME),
PropertyUtils.getString(Constants.LOGIN_USER_KEY_TAB_PATH));
}
String defaultFS = configuration.get(Constants.FS_DEFAULTFS);
//first get the key from core-site.xml and hdfs-site.xml; if null, then try to get it from the properties file
// the default is the local file system
if (defaultFS.startsWith("file")) {
String defaultFSProp = PropertyUtils.getString(Constants.FS_DEFAULTFS);
if (StringUtils.isNotBlank(defaultFSProp)) {
Map<String, String> fsRelatedProps = PropertyUtils.getPrefixedProperties("fs.");
configuration.set(Constants.FS_DEFAULTFS, defaultFSProp);
fsRelatedProps.forEach((key, value) -> configuration.set(key, value));
} else {
logger.error("property:{} can not to be empty, please set!", Constants.FS_DEFAULTFS);
throw new RuntimeException(
String.format("property: %s can not to be empty, please set!", Constants.FS_DEFAULTFS)
);
}
} else {
logger.info("get property:{} -> {}, from core-site.xml hdfs-site.xml ", Constants.FS_DEFAULTFS, defaultFS);
}
if (fs == null) {
if (StringUtils.isNotEmpty(hdfsUser)) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(hdfsUser);
ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
@Override
public Boolean run() throws Exception {
fs = FileSystem.get(configuration);
return true;
}
});
} else {
logger.warn("hdfs.root.user is not set value!");
fs = FileSystem.get(configuration);
}
}
} else if (resUploadType == ResUploadType.S3) {
System.setProperty(Constants.AWS_S3_V4, Constants.STRING_TRUE);
configuration.set(Constants.FS_DEFAULTFS, PropertyUtils.getString(Constants.FS_DEFAULTFS));
configuration.set(Constants.FS_S3A_ENDPOINT, PropertyUtils.getString(Constants.FS_S3A_ENDPOINT));
configuration.set(Constants.FS_S3A_ACCESS_KEY, PropertyUtils.getString(Constants.FS_S3A_ACCESS_KEY));
configuration.set(Constants.FS_S3A_SECRET_KEY, PropertyUtils.getString(Constants.FS_S3A_SECRET_KEY));
fs = FileSystem.get(configuration);
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
/**
* @return Configuration
*/
public Configuration getConfiguration() {
return configuration;
}
/**
* get application url
*
* @param applicationId application id
* @return url of application
*/
public String getApplicationUrl(String applicationId) throws Exception {
/**
* if rmHaIds contains xx, it means resourcemanager is not used
* otherwise:
* if rmHaIds is empty, single resourcemanager enabled
* if rmHaIds not empty: resourcemanager HA enabled
*/
String appUrl = "";
if (StringUtils.isEmpty(rmHaIds)) {
//single resourcemanager enabled
appUrl = appAddress;
yarnEnabled = true;
} else {
//resourcemanager HA enabled
appUrl = getAppAddress(appAddress, rmHaIds);
yarnEnabled = true;
logger.info("application url : {}", appUrl);
}
if (StringUtils.isBlank(appUrl)) {
throw new Exception("application url is blank");
}
return String.format(appUrl, applicationId);
}
public String getJobHistoryUrl(String applicationId) {
//eg:application_1587475402360_712719 -> job_1587475402360_712719
String jobId = applicationId.replace("application", "job");
return String.format(jobHistoryAddress, jobId);
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @return byte[] byte array
* @throws IOException errors
*/
public byte[] catFile(String hdfsFilePath) throws IOException {
if (StringUtils.isBlank(hdfsFilePath)) {
logger.error("hdfs file path:{} is blank", hdfsFilePath);
return new byte[0];
}
try (FSDataInputStream fsDataInputStream = fs.open(new Path(hdfsFilePath))) {
return IOUtils.toByteArray(fsDataInputStream);
}
}
/**
* cat file on hdfs
*
* @param hdfsFilePath hdfs file path
* @param skipLineNums skip line numbers
* @param limit read how many lines
* @return content of file
* @throws IOException errors
*/
public List<String> catFile(String hdfsFilePath, int skipLineNums, int limit) throws IOException {
if (StringUtils.isBlank(hdfsFilePath)) {
logger.error("hdfs file path:{} is blank", hdfsFilePath);
return Collections.emptyList();
}
try (FSDataInputStream in = fs.open(new Path(hdfsFilePath))) {
BufferedReader br = new BufferedReader(new InputStreamReader(in));
Stream<String> stream = br.lines().skip(skipLineNums).limit(limit);
return stream.collect(Collectors.toList());
}
}
/**
* make the given file and all non-existent parents into
* directories. Has the semantics of Unix 'mkdir -p'.
* Existence of the directory hierarchy is not an error.
*
* @param hdfsPath path to create
* @return mkdir result
* @throws IOException errors
*/
public boolean mkdir(String hdfsPath) throws IOException {
return fs.mkdirs(new Path(hdfsPath));
}
/**
* copy files between FileSystems
*
* @param srcPath source hdfs path
* @param dstPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
* @return if success or not
* @throws IOException errors
*/
public boolean copy(String srcPath, String dstPath, boolean deleteSource, boolean overwrite) throws IOException {
return FileUtil.copy(fs, new Path(srcPath), fs, new Path(dstPath), deleteSource, overwrite, fs.getConf());
}
/**
* the src file is on the local disk. Add it to FS at
* the given dst name.
*
* @param srcFile local file
* @param dstHdfsPath destination hdfs path
* @param deleteSource whether to delete the src
* @param overwrite whether to overwrite an existing file
* @return if success or not
* @throws IOException errors
*/
public boolean copyLocalToHdfs(String srcFile, String dstHdfsPath, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcFile);
Path dstPath = new Path(dstHdfsPath);
fs.copyFromLocalFile(deleteSource, overwrite, srcPath, dstPath);
return true;
}
/**
* copy hdfs file to local
*
* @param srcHdfsFilePath source hdfs file path
* @param dstFile destination file
* @param deleteSource delete source
* @param overwrite overwrite
* @return result of copy hdfs file to local
* @throws IOException errors
*/
public boolean copyHdfsToLocal(String srcHdfsFilePath, String dstFile, boolean deleteSource, boolean overwrite) throws IOException {
Path srcPath = new Path(srcHdfsFilePath);
File dstPath = new File(dstFile);
if (dstPath.exists()) {
if (dstPath.isFile()) {
if (overwrite) {
Files.delete(dstPath.toPath());
}
} else {
logger.error("destination file must be a file");
}
}
if (!dstPath.getParentFile().exists()) {
dstPath.getParentFile().mkdirs();
}
return FileUtil.copy(fs, srcPath, dstPath, deleteSource, fs.getConf());
}
/**
* delete a file
*
* @param hdfsFilePath the path to delete.
* @param recursive if path is a directory and set to
* true, the directory is deleted else throws an exception. In
* case of a file the recursive can be set to either true or false.
* @return true if delete is successful else false.
* @throws IOException errors
*/
public boolean delete(String hdfsFilePath, boolean recursive) throws IOException {
return fs.delete(new Path(hdfsFilePath), recursive);
}
/**
* check if exists
*
* @param hdfsFilePath source file path
* @return result of exists or not
* @throws IOException errors
*/
public boolean exists(String hdfsFilePath) throws IOException {
return fs.exists(new Path(hdfsFilePath));
}
/**
* Gets a list of files in the directory
*
* @param filePath file path
* @return {@link FileStatus} file status
* @throws Exception errors
*/
public FileStatus[] listFileStatus(String filePath) throws Exception {
try {
return fs.listStatus(new Path(filePath));
} catch (IOException e) {
logger.error("Get file list exception", e);
throw new Exception("Get file list exception", e);
}
}
/**
* Renames Path src to Path dst. Can take place on local fs
* or remote DFS.
*
* @param src path to be renamed
* @param dst new path after rename
* @return true if rename is successful
* @throws IOException on failure
*/
public boolean rename(String src, String dst) throws IOException {
return fs.rename(new Path(src), new Path(dst));
}
/**
* hadoop resourcemanager enabled or not
*
* @return result
*/
public boolean isYarnEnabled() {
return yarnEnabled;
}
/**
* get the state of an application
*
* @param applicationId application id
* @return the application status; the result may be null, and parse exceptions are possible
*/
public ExecutionStatus getApplicationStatus(String applicationId) throws Exception {
if (StringUtils.isEmpty(applicationId)) {
return null;
}
String result = Constants.FAILED;
String applicationUrl = getApplicationUrl(applicationId);
logger.info("applicationUrl={}", applicationUrl);
String responseContent;
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE, false)) {
responseContent = KerberosHttpClient.get(applicationUrl);
} else {
responseContent = HttpUtils.get(applicationUrl);
}
if (responseContent != null) {
ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
if (!jsonObject.has("app")) {
return ExecutionStatus.FAILURE;
}
result = jsonObject.path("app").path("finalStatus").asText();
} else {
//may be in job history
String jobHistoryUrl = getJobHistoryUrl(applicationId);
logger.info("jobHistoryUrl={}", jobHistoryUrl);
responseContent = HttpUtils.get(jobHistoryUrl);
if (null != responseContent) {
ObjectNode jsonObject = JSONUtils.parseObject(responseContent);
if (!jsonObject.has("job")) {
return ExecutionStatus.FAILURE;
}
result = jsonObject.path("job").path("state").asText();
} else {
return ExecutionStatus.FAILURE;
}
}
switch (result) {
case Constants.ACCEPTED:
return ExecutionStatus.SUBMITTED_SUCCESS;
case Constants.SUCCEEDED:
return ExecutionStatus.SUCCESS;
case Constants.NEW:
case Constants.NEW_SAVING:
case Constants.SUBMITTED:
case Constants.FAILED:
return ExecutionStatus.FAILURE;
case Constants.KILLED:
return ExecutionStatus.KILL;
case Constants.RUNNING:
default:
return ExecutionStatus.RUNNING_EXECUTION;
}
}
/**
* get data hdfs path
*
* @return data hdfs path
*/
public static String getHdfsDataBasePath() {
if ("/".equals(resourceUploadPath)) {
// if basepath is configured to /, the generated url may be //default/resources (with extra leading /)
return "";
} else {
return resourceUploadPath;
}
}
/**
* hdfs resource dir
*
* @param tenantCode tenant code
* @param resourceType resource type
* @return hdfs resource dir
*/
public static String getHdfsDir(ResourceType resourceType, String tenantCode) {
String hdfsDir = "";
if (resourceType.equals(ResourceType.FILE)) {
hdfsDir = getHdfsResDir(tenantCode);
} else if (resourceType.equals(ResourceType.UDF)) {
hdfsDir = getHdfsUdfDir(tenantCode);
}
return hdfsDir;
}
/**
* hdfs resource dir
*
* @param tenantCode tenant code
* @return hdfs resource dir
*/
public static String getHdfsResDir(String tenantCode) {
return String.format("%s/resources", getHdfsTenantDir(tenantCode));
}
/**
* hdfs user dir
*
* @param tenantCode tenant code
* @param userId user id
* @return hdfs resource dir
*/
public static String getHdfsUserDir(String tenantCode, int userId) {
return String.format("%s/home/%d", getHdfsTenantDir(tenantCode), userId);
}
/**
* hdfs udf dir
*
* @param tenantCode tenant code
* @return get udf dir on hdfs
*/
public static String getHdfsUdfDir(String tenantCode) {
return String.format("%s/udfs", getHdfsTenantDir(tenantCode));
}
/**
* get hdfs file name
*
* @param resourceType resource type
* @param tenantCode tenant code
* @param fileName file name
* @return hdfs file name
*/
public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsDir(resourceType, tenantCode), fileName);
}
/**
* get absolute path and name for resource file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for file on hdfs
*/
public static String getHdfsResourceFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
}
/**
* get absolute path and name for udf file on hdfs
*
* @param tenantCode tenant code
* @param fileName file name
* @return get absolute path and name for udf file on hdfs
*/
public static String getHdfsUdfFileName(String tenantCode, String fileName) {
if (fileName.startsWith("/")) {
fileName = fileName.replaceFirst("/", "");
}
return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
}
/**
* @param tenantCode tenant code
* @return file directory of tenants on hdfs
*/
public static String getHdfsTenantDir(String tenantCode) {
return String.format("%s/%s", getHdfsDataBasePath(), tenantCode);
}
/**
* getAppAddress
*
* @param appAddress app address
* @param rmHa resource manager ha
* @return app address
*/
public static String getAppAddress(String appAddress, String rmHa) {
//get active ResourceManager
String activeRM = YarnHAAdminUtils.getActiveRMName(rmHa);
String[] split1 = appAddress.split(Constants.DOUBLE_SLASH);
if (split1.length != 2) {
return null;
}
String start = split1[0] + Constants.DOUBLE_SLASH;
String[] split2 = split1[1].split(Constants.COLON);
if (split2.length != 2) {
return null;
}
String end = Constants.COLON + split2[1];
return start + activeRM + end;
}
@Override
public void close() throws IOException {
if (fs != null) {
try {
fs.close();
} catch (IOException e) {
logger.error("Close HadoopUtils instance failed", e);
throw new IOException("Close HadoopUtils instance failed", e);
}
}
}
/**
* yarn ha admin utils
*/
private static final class YarnHAAdminUtils extends RMAdminCLI {
/**
* get active resourcemanager
*
* @param rmIds comma-separated resourcemanager ids
* @return the id of the active resourcemanager, or null if it cannot be determined
*/
public static String getActiveRMName(String rmIds) {
String[] rmIdArr = rmIds.split(Constants.COMMA);
int activeResourceManagerPort = PropertyUtils.getInt(Constants.HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT, 8088);
String yarnUrl = "http://%s:" + activeResourceManagerPort + "/ws/v1/cluster/info";
String state = null;
try {
/**
* send http get request to rm1
*/
state = getRMState(String.format(yarnUrl, rmIdArr[0]));
if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmIdArr[0];
} else if (Constants.HADOOP_RM_STATE_STANDBY.equals(state)) {
state = getRMState(String.format(yarnUrl, rmIdArr[1]));
if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmIdArr[1];
}
} else {
return null;
}
} catch (Exception e) {
state = getRMState(String.format(yarnUrl, rmIdArr[1]));
if (Constants.HADOOP_RM_STATE_ACTIVE.equals(state)) {
return rmIdArr[1];
}
}
return null;
}
/**
* get ResourceManager state
*
* @param url resourcemanager cluster info url
* @return the resourcemanager HA state, or null if the request fails or the response cannot be parsed
*/
public static String getRMState(String url) {
String retStr = HttpUtils.get(url);
if (StringUtils.isEmpty(retStr)) {
return null;
}
//to json
ObjectNode jsonObject = JSONUtils.parseObject(retStr);
//get ResourceManager state
if (!jsonObject.has("clusterInfo")) {
return null;
}
return jsonObject.get("clusterInfo").path("haState").asText();
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,478 |
[Bug][Common] read the file garbled
|
When reading the file, no encoding is specified, resulting in garbled code


|
https://github.com/apache/dolphinscheduler/issues/3478
|
https://github.com/apache/dolphinscheduler/pull/3479
|
1cf40e1d1e4379e6b50a92871987d59291ccfd50
|
bb13f2eae78e4b829816068e8ca3d337d97a0fd2
| 2020-08-13T03:13:35Z |
java
| 2020-11-27T13:55:28Z |
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/FileUtilsTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import static org.apache.dolphinscheduler.common.Constants.YYYYMMDDHHMMSS;
@RunWith(PowerMockRunner.class)
@PrepareForTest(DateUtils.class)
public class FileUtilsTest {
@Test
public void suffix() {
Assert.assertEquals("java", FileUtils.suffix("ninfor.java"));
Assert.assertEquals("", FileUtils.suffix(null));
Assert.assertEquals("", FileUtils.suffix(""));
Assert.assertEquals("", FileUtils.suffix("ninfor-java"));
}
@Test
public void testGetDownloadFilename() {
PowerMockito.mockStatic(DateUtils.class);
PowerMockito.when(DateUtils.getCurrentTime(YYYYMMDDHHMMSS)).thenReturn("20190101101059");
Assert.assertEquals("/tmp/dolphinscheduler/download/20190101101059/test",
FileUtils.getDownloadFilename("test"));
}
@Test
public void testGetUploadFilename() {
Assert.assertEquals("/tmp/dolphinscheduler/aaa/resources/bbb",
FileUtils.getUploadFilename("aaa","bbb"));
}
@Test
public void testGetProcessExecDir() {
String dir = FileUtils.getProcessExecDir(1,2,3, 4);
Assert.assertEquals("/tmp/dolphinscheduler/exec/process/1/2/3/4", dir);
dir = FileUtils.getProcessExecDir(1,2,3);
Assert.assertEquals("/tmp/dolphinscheduler/exec/process/1/2/3", dir);
}
@Test
public void testCreateWorkDirAndUserIfAbsent() {
try {
FileUtils.createWorkDirAndUserIfAbsent("/tmp/createWorkDirAndUserIfAbsent", "test123");
Assert.assertTrue(true);
} catch (Exception e) {
Assert.assertTrue(false);
}
}
@Test
public void testSetValue() {
try {
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"true");
Assert.assertTrue(PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE));
PropertyUtils.setValue(Constants.DATASOURCE_ENCRYPTION_ENABLE,"false");
Assert.assertFalse(PropertyUtils.getBoolean(Constants.DATASOURCE_ENCRYPTION_ENABLE));
} catch (Exception e) {
Assert.assertTrue(false);
}
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,144 |
There is a vulnerability in jackson-databind 2.9.8, upgrade recommended
|
https://github.com/apache/incubator-dolphinscheduler/blob/aa0974fd1f759e96430d3f1b8dac291d6ea7388c/pom.xml#L68
CVE-2019-14379 CVE-2019-14540 CVE-2019-16335 CVE-2019-16942 CVE-2019-16943
Recommended upgrade version:
2.9.10.6
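As a quick sanity check after bumping the dependency (a hedged sketch, not part of the project; the class name is illustrative, the calls are standard jackson-core/JDK APIs), the resolved jackson-databind build can be printed at runtime:

```java
import com.fasterxml.jackson.core.Version;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.security.CodeSource;

// Illustrative check: confirm which jackson-databind build is actually on the classpath.
public class JacksonVersionCheck {
    public static void main(String[] args) {
        // Reported artifact version (major.minor.patch); micro-patch releases such as
        // 2.9.10.6 are easiest to confirm from the jar file name printed below.
        Version version = new ObjectMapper().version();
        System.out.println("jackson-databind version: " + version);

        // Jar that ObjectMapper was loaded from; getCodeSource() can be null for
        // classes loaded from the boot classpath.
        CodeSource source = ObjectMapper.class.getProtectionDomain().getCodeSource();
        System.out.println("loaded from: " + (source == null ? "unknown" : source.getLocation()));
    }
}
```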
|
https://github.com/apache/dolphinscheduler/issues/4144
|
https://github.com/apache/dolphinscheduler/pull/4153
|
a522da068f8c188e0d792f84416a52e5040de459
|
1a854f8056cbc19a1520e1134cf48070e96720cf
| 2020-12-02T08:19:05Z |
java
| 2020-12-03T07:25:49Z |
dolphinscheduler-dist/release-docs/LICENSE
|
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=======================================================================
Apache DolphinScheduler Subcomponents:
The Apache DolphinScheduler project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for these
subcomponents is subject to the terms and conditions of the following
licenses.
========================================================================
Apache 2.0 licenses
========================================================================
The following components are provided under the Apache License. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
ant-1.6.5 https://mvnrepository.com/artifact/ant/ant/1.6.5, Apache 2.0
apacheds-i18n 2.0.0-M15: https://mvnrepository.com/artifact/org.apache.directory.server/apacheds-i18n/2.0.0-M15, Apache 2.0
apacheds-kerberos-codec 2.0.0-M15: https://mvnrepository.com/artifact/org.apache.directory.server/apacheds-kerberos-codec/2.0.0-M15, Apache 2.0
apache-el 8.5.35.1: https://mvnrepository.com/artifact/org.mortbay.jasper/apache-el/8.5.35.1, Apache 2.0
api-asn1-api 1.0.0-M20: https://mvnrepository.com/artifact/org.apache.directory.api/api-asn1-api/1.0.0-M20, Apache 2.0
api-util 1.0.0-M20: https://mvnrepository.com/artifact/org.apache.directory.api/api-util/1.0.0-M20, Apache 2.0
audience-annotations 0.5.0: https://mvnrepository.com/artifact/org.apache.yetus/audience-annotations/0.5.0, Apache 2.0
avro 1.7.4: https://github.com/apache/avro, Apache 2.0
aws-sdk-java 1.7.4: https://mvnrepository.com/artifact/com.amazonaws/aws-java-sdk/1.7.4, Apache 2.0
bonecp 0.8.0.RELEASE: https://github.com/wwadge/bonecp, Apache 2.0
byte-buddy 1.9.10: https://mvnrepository.com/artifact/net.bytebuddy/byte-buddy/1.9.10, Apache 2.0
classmate 1.4.0: https://mvnrepository.com/artifact/com.fasterxml/classmate/1.4.0, Apache 2.0
clickhouse-jdbc 0.1.52: https://mvnrepository.com/artifact/ru.yandex.clickhouse/clickhouse-jdbc/0.1.52, Apache 2.0
commons-beanutils 1.7.0 https://mvnrepository.com/artifact/commons-beanutils/commons-beanutils/1.7.0, Apache 2.0
commons-cli 1.2: https://mvnrepository.com/artifact/commons-cli/commons-cli/1.2, Apache 2.0
commons-codec 1.11: https://mvnrepository.com/artifact/commons-codec/commons-codec/1.11, Apache 2.0
commons-collections 3.2.2: https://mvnrepository.com/artifact/commons-collections/commons-collections/3.2.2, Apache 2.0
commons-collections4 4.1: https://mvnrepository.com/artifact/org.apache.commons/commons-collections4/4.1, Apache 2.0
commons-compress 1.4.1: https://mvnrepository.com/artifact/org.apache.commons/commons-compress/1.4.1, Apache 2.0
commons-configuration 1.10: https://mvnrepository.com/artifact/commons-configuration/commons-configuration/1.10, Apache 2.0
commons-daemon 1.0.13 https://mvnrepository.com/artifact/commons-daemon/commons-daemon/1.0.13, Apache 2.0
commons-dbcp 1.4: https://github.com/apache/commons-dbcp, Apache 2.0
commons-email 1.5: https://github.com/apache/commons-email, Apache 2.0
commons-httpclient 3.0.1: https://mvnrepository.com/artifact/commons-httpclient/commons-httpclient/3.0.1, Apache 2.0
commons-io 2.4: https://github.com/apache/commons-io, Apache 2.0
commons-lang 2.6: https://github.com/apache/commons-lang, Apache 2.0
commons-logging 1.1.1: https://github.com/apache/commons-logging, Apache 2.0
commons-math3 3.1.1: https://github.com/apache/commons-math, Apache 2.0
commons-net 3.1: https://github.com/apache/commons-net, Apache 2.0
commons-pool 1.6: https://github.com/apache/commons-pool, Apache 2.0
cron-utils 5.0.5: https://mvnrepository.com/artifact/com.cronutils/cron-utils/5.0.5, Apache 2.0
curator-client 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-client/4.3.0, Apache 2.0
curator-framework 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-framework/4.3.0, Apache 2.0
curator-recipes 4.3.0: https://mvnrepository.com/artifact/org.apache.curator/curator-recipes/4.3.0, Apache 2.0
datanucleus-api-jdo 4.2.1: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-api-jdo/4.2.1, Apache 2.0
datanucleus-core 4.1.6: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-core/4.1.6, Apache 2.0
datanucleus-rdbms 4.1.7: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-rdbms/4.1.7, Apache 2.0
derby 10.14.2.0: https://github.com/apache/derby, Apache 2.0
druid 1.1.14: https://mvnrepository.com/artifact/com.alibaba/druid/1.1.14, Apache 2.0
gson 2.8.5: https://github.com/google/gson, Apache 2.0
guava 20.0: https://mvnrepository.com/artifact/com.google.guava/guava/20.0, Apache 2.0
guice 3.0: https://mvnrepository.com/artifact/com.google.inject/guice/3.0, Apache 2.0
guice-servlet 3.0: https://mvnrepository.com/artifact/com.google.inject.extensions/guice-servlet/3.0, Apache 2.0
hadoop-annotations 2.7.3:https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-annotations/2.7.3, Apache 2.0
hadoop-auth 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-auth/2.7.3, Apache 2.0
hadoop-aws 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-aws/2.7.3, Apache 2.0
hadoop-client 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client/2.7.3, Apache 2.0
hadoop-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common/2.7.3, Apache 2.0
hadoop-hdfs 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs/2.7.3, Apache 2.0
hadoop-mapreduce-client-app 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-app/2.7.3, Apache 2.0
hadoop-mapreduce-client-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-common/2.7.3, Apache 2.0
hadoop-mapreduce-client-core 2.7.3: https://mvnrepository.com/artifact/io.hops/hadoop-mapreduce-client-core/2.7.3, Apache 2.0
hadoop-mapreduce-client-jobclient 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-jobclient/2.7.3, Apache 2.0
hadoop-mapreduce-client-shuffle 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-shuffle/2.7.3, Apache 2.0
hadoop-yarn-api 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-api/2.7.3, Apache 2.0
hadoop-yarn-client 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-client/2.7.3, Apache 2.0
hadoop-yarn-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-common/2.7.3, Apache 2.0
hadoop-yarn-server-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-server-common/2.7.3, Apache 2.0
hibernate-validator 6.0.14.Final: https://github.com/hibernate/hibernate-validator, Apache 2.0
HikariCP 3.2.0: https://mvnrepository.com/artifact/com.zaxxer/HikariCP/3.2.0, Apache 2.0
hive-common 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-common/2.1.0, Apache 2.0
hive-jdbc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-jdbc/2.1.0, Apache 2.0
hive-metastore 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-metastore/2.1.0, Apache 2.0
hive-orc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-orc/2.1.0, Apache 2.0
hive-serde 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-serde/2.1.0, Apache 2.0
hive-service 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-service/2.1.0, Apache 2.0
hive-service-rpc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-service-rpc/2.1.0, Apache 2.0
hive-storage-api 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-storage-api/2.1.0, Apache 2.0
htrace-core 3.1.0-incubating: https://mvnrepository.com/artifact/org.apache.htrace/htrace-core/3.1.0-incubating, Apache 2.0
httpclient 4.4.1: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpclient/4.4.1, Apache 2.0
httpcore 4.4.1: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpcore/4.4.1, Apache 2.0
httpmime 4.5.7: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpmime/4.5.7, Apache 2.0
jackson-annotations 2.9.8: https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-annotations/2.9.8, Apache 2.0
jackson-core 2.9.8: https://github.com/FasterXML/jackson-core, Apache 2.0
jackson-core-asl 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-core-asl/1.9.13, Apache 2.0
jackson-databind 2.9.8: https://github.com/FasterXML/jackson-databind, Apache 2.0
jackson-datatype-jdk8 2.9.8: https://mvnrepository.com/artifact/com.fasterxml.jackson.datatype/jackson-datatype-jdk8/2.9.8, Apache 2.0
jackson-datatype-jsr310 2.9.8: https://mvnrepository.com/artifact/com.fasterxml.jackson.datatype/jackson-datatype-jsr310/2.9.8, Apache 2.0
jackson-jaxrs 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-jaxrs/1.9.13, Apache 2.0 and LGPL 2.1
jackson-mapper-asl 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-mapper-asl/1.9.13, Apache 2.0
jackson-module-parameter-names 2.9.8: https://mvnrepository.com/artifact/com.fasterxml.jackson.module/jackson-module-parameter-names/2.9.8, Apache 2.0
jackson-xc 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-xc/1.9.13, Apache 2.0 and LGPL 2.1
javax.inject 1: https://mvnrepository.com/artifact/javax.inject/javax.inject/1, Apache 2.0
javax.jdo-3.2.0-m3: https://mvnrepository.com/artifact/org.datanucleus/javax.jdo/3.2.0-m3, Apache 2.0
java-xmlbuilder 0.4 : https://mvnrepository.com/artifact/com.jamesmurty.utils/java-xmlbuilder/0.4, Apache 2.0
jboss-logging 3.3.2.Final: https://mvnrepository.com/artifact/org.jboss.logging/jboss-logging/3.3.2.Final, Apache 2.0
jdo-api 3.0.1: https://mvnrepository.com/artifact/javax.jdo/jdo-api/3.0.1, Apache 2.0
jets3t 0.9.0: https://mvnrepository.com/artifact/net.java.dev.jets3t/jets3t/0.9.0, Apache 2.0
jettison 1.1: https://github.com/jettison-json/jettison, Apache 2.0
jetty 6.1.26: https://mvnrepository.com/artifact/org.mortbay.jetty/jetty/6.1.26, Apache 2.0 and EPL 1.0
jetty-continuation 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-continuation/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-http 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-http/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-io 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-io/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-security 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-security/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-server 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-server/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-servlet 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-servlet/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-servlets 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-servlets/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-util 6.1.26: https://mvnrepository.com/artifact/org.mortbay.jetty/jetty-util/6.1.26, Apache 2.0 and EPL 1.0
jetty-util 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-util/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-webapp 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-webapp/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-xml 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-xml/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jna 4.5.2: https://mvnrepository.com/artifact/net.java.dev.jna/jna/4.5.2, Apache 2.0 and LGPL 2.1
jna-platform 4.5.2: https://mvnrepository.com/artifact/net.java.dev.jna/jna-platform/4.5.2, Apache 2.0 and LGPL 2.1
joda-time 2.10.1: https://github.com/JodaOrg/joda-time, Apache 2.0
jpam 1.1: https://mvnrepository.com/artifact/net.sf.jpam/jpam/1.1, Apache 2.0
jsqlparser 2.1: https://github.com/JSQLParser/JSqlParser, Apache 2.0 or LGPL 2.1
jsp-api-2.1 6.1.14: https://mvnrepository.com/artifact/org.mortbay.jetty/jsp-api-2.1/6.1.14, Apache 2.0
jsr305 3.0.0: https://mvnrepository.com/artifact/com.google.code.findbugs/jsr305, Apache 2.0
libfb303 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libfb303/0.9.3, Apache 2.0
libthrift 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libthrift/0.9.3, Apache 2.0
log4j-api 2.11.2: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-api/2.11.2, Apache 2.0
log4j-core-2.11.2: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-core/2.11.2, Apache 2.0
log4j 1.2.17: https://mvnrepository.com/artifact/log4j/log4j/1.2.17, Apache 2.0
log4j-1.2-api 2.11.2: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-1.2-api/2.11.2, Apache 2.0
lz4 1.3.0: https://mvnrepository.com/artifact/net.jpountz.lz4/lz4/1.3.0, Apache 2.0
mapstruct 1.2.0.Final: https://github.com/mapstruct/mapstruct, Apache 2.0
mybatis 3.5.2 https://mvnrepository.com/artifact/org.mybatis/mybatis/3.5.2, Apache 2.0
mybatis-plus 3.2.0: https://github.com/baomidou/mybatis-plus, Apache 2.0
mybatis-plus-annotation 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-annotation/3.2.0, Apache 2.0
mybatis-plus-boot-starter 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-boot-starter/3.2.0, Apache 2.0
mybatis-plus-core 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-core/3.2.0, Apache 2.0
mybatis-plus-extension 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-extension/3.2.0, Apache 2.0
mybatis-spring 2.0.2: https://mvnrepository.com/artifact/org.mybatis/mybatis-spring/2.0.2, Apache 2.0
netty 3.6.2.Final: https://github.com/netty/netty, Apache 2.0
netty-all 4.1.33.Final: https://github.com/netty/netty/blob/netty-4.1.33.Final/LICENSE.txt, Apache 2.0
opencsv 2.3: https://mvnrepository.com/artifact/net.sf.opencsv/opencsv/2.3, Apache 2.0
parquet-hadoop-bundle 1.8.1: https://mvnrepository.com/artifact/org.apache.parquet/parquet-hadoop-bundle/1.8.1, Apache 2.0
poi 3.17: https://mvnrepository.com/artifact/org.apache.poi/poi/3.17, Apache 2.0
quartz 2.3.0: https://mvnrepository.com/artifact/org.quartz-scheduler/quartz/2.3.0, Apache 2.0
quartz-jobs 2.3.0: https://mvnrepository.com/artifact/org.quartz-scheduler/quartz-jobs/2.3.0, Apache 2.0
snakeyaml 1.23: https://mvnrepository.com/artifact/org.yaml/snakeyaml/1.23, Apache 2.0
snappy 0.2: https://mvnrepository.com/artifact/org.iq80.snappy/snappy/0.2, Apache 2.0
snappy-java 1.0.4.1: https://github.com/xerial/snappy-java, Apache 2.0
spring-aop 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-aop/5.1.18.RELEASE, Apache 2.0
spring-beans 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-beans/5.1.18.RELEASE, Apache 2.0
spring-boot 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot/2.1.17.RELEASE, Apache 2.0
spring-boot-autoconfigure 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-autoconfigure/2.1.17.RELEASE, Apache 2.0
spring-boot-starter 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter/2.1.17.RELEASE, Apache 2.0
spring-boot-starter-aop 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-aop/2.1.17.RELEASE, Apache 2.0
spring-boot-starter-jdbc 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-jdbc/2.1.17.RELEASE, Apache 2.0
spring-boot-starter-jetty 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-jetty/2.1.17.RELEASE, Apache 2.0
spring-boot-starter-json 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-json/2.1.17.RELEASE, Apache 2.0
spring-boot-starter-logging 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-logging/2.1.17.RELEASE, Apache 2.0
spring-boot-starter-web 2.1.17.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-web/2.1.17.RELEASE, Apache 2.0
spring-context 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-context/5.1.18.RELEASE, Apache 2.0
spring-core 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-core/5.1.18.RELEASE, Apache 2.0
spring-expression 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-expression/5.1.18.RELEASE, Apache 2.0
springfox-core 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-core, Apache 2.0
springfox-schema 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-schema, Apache 2.0
springfox-spi 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-spi, Apache 2.0
springfox-spring-web 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-spring-web/2.9.2, Apache 2.0
springfox-swagger2 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger2/2.9.2, Apache 2.0
springfox-swagger-common 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger-common/2.9.2, Apache 2.0
springfox-swagger-ui 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger-ui/2.9.2, Apache 2.0
spring-jcl 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-jcl/5.1.18.RELEASE, Apache 2.0
spring-jdbc 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-jdbc/5.1.18.RELEASE, Apache 2.0
spring-plugin-core 1.2.0.RELEASE: https://mvnrepository.com/artifact/org.springframework.plugin/spring-plugin-core/1.2.0.RELEASE, Apache 2.0
spring-plugin-metadata 1.2.0.RELEASE: https://mvnrepository.com/artifact/org.springframework.plugin/spring-plugin-metadata/1.2.0.RELEASE, Apache 2.0
spring-tx 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-tx/5.1.18.RELEASE, Apache 2.0
spring-web 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-web/5.1.18.RELEASE, Apache 2.0
spring-webmvc 5.1.18.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-webmvc/5.1.18.RELEASE, Apache 2.0
swagger-annotations 1.5.20: https://mvnrepository.com/artifact/io.swagger/swagger-annotations/1.5.20, Apache 2.0
swagger-bootstrap-ui 1.9.3: https://mvnrepository.com/artifact/com.github.xiaoymin/swagger-bootstrap-ui/1.9.3, Apache 2.0
swagger-models 1.5.20: https://mvnrepository.com/artifact/io.swagger/swagger-models/1.5.20, Apache 2.0
tephra-api 0.6.0: https://mvnrepository.com/artifact/co.cask.tephra/tephra-api/0.6.0, Apache 2.0
validation-api 2.0.1.Final: https://mvnrepository.com/artifact/javax.validation/validation-api/2.0.1.Final, Apache 2.0
xercesImpl 2.9.1: https://mvnrepository.com/artifact/xerces/xercesImpl/2.9.1, Apache 2.0
xml-apis 1.4.01: https://mvnrepository.com/artifact/xml-apis/xml-apis/1.4.01, Apache 2.0 and W3C
zookeeper 3.4.14: https://mvnrepository.com/artifact/org.apache.zookeeper/zookeeper/3.4.14, Apache 2.0
presto-jdbc 0.238.1: https://mvnrepository.com/artifact/com.facebook.presto/presto-jdbc/0.238.1, Apache 2.0
HikariCP-java6 2.3.13: https://mvnrepository.com/artifact/com.zaxxer/HikariCP-java6/2.3.13, Apache 2.0
========================================================================
BSD licenses
========================================================================
The following components are provided under a BSD license. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
asm 3.1: https://github.com/jdf/javalin/tree/master/lib/asm-3.1, BSD
javolution 5.5.1: https://mvnrepository.com/artifact/javolution/javolution/5.5.1, BSD
jline 0.9.94: https://github.com/jline/jline3, BSD
jsch 0.1.42: https://mvnrepository.com/artifact/com.jcraft/jsch/0.1.42, BSD
leveldbjni-all 1.8: https://github.com/fusesource/leveldbjni, BSD-3-Clause
postgresql 42.1.4: https://mvnrepository.com/artifact/org.postgresql/postgresql/42.1.4, BSD 2-clause
protobuf-java 2.5.0: https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java/2.5.0, BSD 2-clause
paranamer 2.3: https://mvnrepository.com/artifact/com.thoughtworks.paranamer/paranamer/2.3, BSD
threetenbp 1.3.6: https://mvnrepository.com/artifact/org.threeten/threetenbp/1.3.6, BSD 3-clause
xmlenc 0.52: https://mvnrepository.com/artifact/xmlenc/xmlenc/0.52, BSD
hamcrest-core 1.3: https://mvnrepository.com/artifact/org.hamcrest/hamcrest-core/1.3, BSD 2-Clause
========================================================================
CDDL licenses
========================================================================
The following components are provided under the CDDL License. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
activation 1.1: https://mvnrepository.com/artifact/javax.activation/activation/1.1 CDDL 1.0
javax.activation-api 1.2.0: https://mvnrepository.com/artifact/javax.activation/javax.activation-api/1.2.0, CDDL and LGPL 2.0
javax.annotation-api 1.3.2: https://mvnrepository.com/artifact/javax.annotation/javax.annotation-api/1.3.2, CDDL + GPLv2
javax.mail 1.6.2: https://mvnrepository.com/artifact/com.sun.mail/javax.mail/1.6.2, CDDL/GPLv2
javax.servlet-api 3.1.0: https://mvnrepository.com/artifact/javax.servlet/javax.servlet-api/3.1.0, CDDL + GPLv2
jaxb-api 2.3.1: https://mvnrepository.com/artifact/javax.xml.bind/jaxb-api/2.3.1, CDDL 1.1
jaxb-impl 2.2.3-1: https://mvnrepository.com/artifact/com.sun.xml.bind/jaxb-impl/2.2.3-1, CDDL and GPL 1.1
jersey-client 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-client/1.9, CDDL 1.1 and GPL 1.1
jersey-core 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-core/1.9, CDDL 1.1 and GPL 1.1
jersey-guice 1.9: https://mvnrepository.com/artifact/com.sun.jersey.contribs/jersey-guice/1.9, CDDL 1.1 and GPL 1.1
jersey-json 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-json/1.9, CDDL 1.1 and GPL 1.1
jersey-server 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-server/1.9, CDDL 1.1 and GPL 1.1
jsp-2.1 6.1.14 https://mvnrepository.com/artifact/org.mortbay.jetty/jsp-2.1/6.1.14, CDDL 1.0
jta 1.1: https://mvnrepository.com/artifact/javax.transaction/jta/1.1, CDDL 1.0
transaction-api 1.1: https://mvnrepository.com/artifact/javax.transaction/transaction-api/1.1, CDDL 1.0
========================================================================
EPL licenses
========================================================================
The following components are provided under the EPL License. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
aspectjweaver 1.9.2:https://mvnrepository.com/artifact/org.aspectj/aspectjweaver/1.9.2, EPL 1.0
core-3.1.1 https://mvnrepository.com/artifact/org.eclipse.jdt/core/3.1.1 Eclipse Public License v1.0
logback-classic 1.2.3: https://mvnrepository.com/artifact/ch.qos.logback/logback-classic/1.2.3, EPL 1.0 and LGPL 2.1
logback-core 1.2.3: https://mvnrepository.com/artifact/ch.qos.logback/logback-core/1.2.3, EPL 1.0 and LGPL 2.1
oshi-core 3.5.0: https://mvnrepository.com/artifact/com.github.oshi/oshi-core/3.5.0, EPL 1.0
junit 4.12: https://mvnrepository.com/artifact/junit/junit/4.12, EPL 1.0
h2-1.4.200 https://github.com/h2database/h2database/blob/master/LICENSE.txt, MPL 2.0 or EPL 1.0
mchange-commons-java 0.2.11: https://mvnrepository.com/artifact/com.mchange/mchange-commons-java/0.2.11, EPL 1.0
c3p0 0.9.5.2: https://mvnrepository.com/artifact/com.mchange/c3p0/0.9.5.2, EPL 1.0
========================================================================
MIT licenses
========================================================================
The following components are provided under the MIT license. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
jul-to-slf4j 1.7.25: https://mvnrepository.com/artifact/org.slf4j/jul-to-slf4j/1.7.25, MIT
mssql-jdbc 6.1.0.jre8: https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc/6.1.0.jre8, MIT
slf4j-api 1.7.5: https://mvnrepository.com/artifact/org.slf4j/slf4j-api/1.7.5, MIT
========================================================================
MPL 1.1 licenses
========================================================================
The following components are provided under the MPL 1.1 license. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
jamon-runtime 2.3.1: https://mvnrepository.com/artifact/org.jamon/jamon-runtime/2.3.1, MPL-1.1
========================================================================
Public Domain licenses
========================================================================
xz 1.0: https://mvnrepository.com/artifact/org.tukaani/xz/1.0, Public Domain
aopalliance 1.0: https://mvnrepository.com/artifact/aopalliance/aopalliance/1.0, Public Domain
========================================================================
UI related licenses
========================================================================
The following components are used in the UI. See project link for details.
The text of each license is also included at licenses/ui-licenses/LICENSE-[project].txt.
========================================
MIT licenses
========================================
ans-UI 1.1.7: https://github.com/analysys/ans-ui MIT
axios 0.16.2: https://github.com/axios/axios MIT
bootstrap 3.3.7: https://github.com/twbs/bootstrap MIT
canvg 1.5.1: https://github.com/canvg/canvg MIT
clipboard 2.0.1: https://github.com/zenorocha/clipboard.js MIT
codemirror 5.43.0: https://github.com/codemirror/CodeMirror MIT
dayjs 1.7.8: https://github.com/iamkun/dayjs MIT
html2canvas 0.5.0-beta4: https://github.com/niklasvh/html2canvas MIT
jquery 3.3.1: https://github.com/jquery/jquery MIT
jquery-ui 1.12.1: https://github.com/jquery/jquery-ui MIT
js-cookie 2.2.1: https://github.com/js-cookie/js-cookie MIT
jsplumb 2.8.6: https://github.com/jsplumb/jsplumb MIT and GPLv2
lodash 4.17.11: https://github.com/lodash/lodash MIT
vue-treeselect 0.4.0: https://github.com/riophae/vue-treeselect MIT
vue 2.5.17: https://github.com/vuejs/vue MIT
vue-router 2.7.0: https://github.com/vuejs/vue-router MIT
vuex 3.0.0: https://github.com/vuejs/vuex MIT
vuex-router-sync 4.1.2: https://github.com/vuejs/vuex-router-sync MIT
dagre 0.8.5: https://github.com/dagrejs/dagre MIT
========================================
Apache 2.0 licenses
========================================
echarts 4.1.0: https://github.com/apache/incubator-echarts Apache-2.0
========================================
BSD licenses
========================================
d3 3.5.17: https://github.com/d3/d3 BSD-3-Clause
| closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,144 | There is a vulnerability in jackson-databind 2.9.8, upgrade recommended | https://github.com/apache/incubator-dolphinscheduler/blob/aa0974fd1f759e96430d3f1b8dac291d6ea7388c/pom.xml#L68 CVE-2019-14379 CVE-2019-14540 CVE-2019-16335 CVE-2019-16942 CVE-2019-16943 Recommended upgrade version: 2.9.10.6 | https://github.com/apache/dolphinscheduler/issues/4144 | https://github.com/apache/dolphinscheduler/pull/4153 | a522da068f8c188e0d792f84416a52e5040de459 | 1a854f8056cbc19a1520e1134cf48070e96720cf | 2020-12-02T08:19:05Z | java | 2020-12-03T07:25:49Z | dolphinscheduler-dist/release-docs/licenses/LICENSE-HikariCP-java6.txt |
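The record above (and its two duplicates further down, which only differ in the updated license file) recommends bumping jackson-databind from 2.9.8 to 2.9.10.6 in the root pom.xml. The following Maven fragment is an illustrative sketch only, not the actual diff from PR 4153; the jackson.version property name is an assumption made for the example.

    <!-- Illustrative only: pin jackson-databind to a patched release in the root pom.xml.
         The jackson.version property name is assumed, not taken from the real DolphinScheduler pom. -->
    <properties>
        <jackson.version>2.9.10.6</jackson.version>
    </properties>

    <dependencyManagement>
        <dependencies>
            <!-- 2.9.10.6 postdates the fixes for CVE-2019-14379, CVE-2019-14540,
                 CVE-2019-16335, CVE-2019-16942 and CVE-2019-16943. -->
            <dependency>
                <groupId>com.fasterxml.jackson.core</groupId>
                <artifactId>jackson-databind</artifactId>
                <version>${jackson.version}</version>
            </dependency>
        </dependencies>
    </dependencyManagement>

Only jackson-databind is pinned in this sketch because the 2.9.10.x micro releases exist for jackson-databind alone; jackson-core and jackson-annotations stop at 2.9.10 in that line.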
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,144 | There is a vulnerability in jackson-databind 2.9.8, upgrade recommended | https://github.com/apache/incubator-dolphinscheduler/blob/aa0974fd1f759e96430d3f1b8dac291d6ea7388c/pom.xml#L68 CVE-2019-14379 CVE-2019-14540 CVE-2019-16335 CVE-2019-16942 CVE-2019-16943 Recommended upgrade version: 2.9.10.6 | https://github.com/apache/dolphinscheduler/issues/4144 | https://github.com/apache/dolphinscheduler/pull/4153 | a522da068f8c188e0d792f84416a52e5040de459 | 1a854f8056cbc19a1520e1134cf48070e96720cf | 2020-12-02T08:19:05Z | java | 2020-12-03T07:25:49Z | dolphinscheduler-dist/release-docs/licenses/LICENSE-c3p0.txt |
Eclipse Public License - v 1.0
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM
CONSTITUTES RECIPIENT’S ACCEPTANCE OF THIS AGREEMENT.
1. DEFINITIONS
"Contribution" means:
a) in the case of the initial Contributor, the initial code and documentation
distributed under this Agreement, and
b) in the case of each subsequent Contributor:
i) changes to the Program, and
ii) additions to the Program;
where such changes and/or additions to the Program originate from and are
distributed by that particular Contributor. A Contribution 'originates' from
a Contributor if it was added to the Program by such Contributor itself or
anyone acting on such Contributor’s behalf. Contributions do not include
additions to the Program which: (i) are separate modules of software distributed
in conjunction with the Program under their own license agreement, and (ii) are
not derivative works of the Program.
"Contributor" means any person or entity that distributes the Program.
"Licensed Patents " mean patent claims licensable by a Contributor which are
necessarily infringed by the use or sale of its Contribution alone or when
combined with the Program.
"Program" means the Contributions distributed in accordance with this Agreement.
"Recipient" means anyone who receives the Program under this Agreement,
including all Contributors.
2. GRANT OF RIGHTS
a) Subject to the terms of this Agreement, each Contributor hereby grants
Recipient a non-exclusive, worldwide, royalty-free copyright license to
reproduce, prepare derivative works of, publicly display, publicly perform,
distribute and sublicense the Contribution of such Contributor, if any, and such
derivative works, in source code and object code form.
b) Subject to the terms of this Agreement, each Contributor hereby grants
Recipient a non-exclusive, worldwide, royalty-free patent license under
Licensed Patents to make, use, sell, offer to sell, import and otherwise
transfer the Contribution of such Contributor, if any, in source code and
object code form. This patent license shall apply to the combination of the
Contribution and the Program if, at the time the Contribution is added by the
Contributor, such addition of the Contribution causes such combination to be
covered by the Licensed Patents. The patent license shall not apply to any
other combinations which include the Contribution. No hardware per se is
licensed hereunder.
c) Recipient understands that although each Contributor grants the licenses to
its Contributions set forth herein, no assurances are provided by any
Contributor that the Program does not infringe the patent or other intellectual
property rights of any other entity. Each Contributor disclaims any liability
to Recipient for claims brought by any other entity based on infringement of
intellectual property rights or otherwise. As a condition to exercising the
rights and licenses granted hereunder, each Recipient hereby assumes sole
responsibility to secure any other intellectual property rights needed, if any.
For example, if a third party patent license is required to allow Recipient to
distribute the Program, it is Recipient’s responsibility to acquire that
license before distributing the Program.
d) Each Contributor represents that to its knowledge it has sufficient
copyright rights in its Contribution, if any, to grant the copyright license
set forth in this Agreement.
3. REQUIREMENTS
A Contributor may choose to distribute the Program in object code form under
its own license agreement, provided that:
a) it complies with the terms and conditions of this Agreement; and
b) its license agreement:
i) effectively disclaims on behalf of all Contributors all warranties and
conditions, express and implied, including warranties or conditions of
title and non-infringement, and implied warranties or conditions of
merchantability and fitness for a particular purpose;
ii) effectively excludes on behalf of all Contributors all liability for
damages, including direct, indirect, special, incidental and
consequential damages, such as lost profits;
iii) states that any provisions which differ from this Agreement are
offered by that Contributor alone and not by any other party; and
iv) states that source code for the Program is available from such
Contributor, and informs licensees how to obtain it in a reasonable
manner on or through a medium customarily used for software exchange.
When the Program is made available in source code form:
a) it must be made available under this Agreement; and
b) a copy of this Agreement must be included with each copy of the Program.
Contributors may not remove or alter any copyright notices contained within
the Program.
Each Contributor must identify itself as the originator of its Contribution,
if any, in a manner that reasonably allows subsequent Recipients to identify
the originator of the Contribution.
4. COMMERCIAL DISTRIBUTION
Commercial distributors of software may accept certain responsibilities with
respect to end users, business partners and the like. While this license is
intended to facilitate the commercial use of the Program, the Contributor who
includes the Program in a commercial product offering should do so in a manner
which does not create potential liability for other Contributors. Therefore,
if a Contributor includes the Program in a commercial product offering, such
Contributor ("Commercial Contributor") hereby agrees to defend and indemnify
every other Contributor ("Indemnified Contributor") against any losses, damages
and costs (collectively "Losses") arising from claims, lawsuits and other legal
actions brought by a third party against the Indemnified Contributor to the
extent caused by the acts or omissions of such Commercial Contributor in
connection with its distribution of the Program in a commercial product
offering. The obligations in this section do not apply to any claims or Losses
relating to any actual or alleged intellectual property infringement. In order
to qualify, an Indemnified Contributor must: a) promptly notify the Commercial
Contributor in writing of such claim, and b) allow the Commercial Contributor
to control, and cooperate with the Commercial Contributor in, the defense and
any related settlement negotiations. The Indemnified Contributor may participate
in any such claim at its own expense.
For example, a Contributor might include the Program in a commercial product
offering, Product X. That Contributor is then a Commercial Contributor. If that
Commercial Contributor then makes performance claims, or offers warranties
related to Product X, those performance claims and warranties are such
Commercial Contributor’s responsibility alone. Under this section, the
Commercial Contributor would have to defend claims against the other
Contributors related to those performance claims and warranties, and if a court
requires any other Contributor to pay any damages as a result, the Commercial
Contributor must pay those damages.
5. NO WARRANTY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each
Recipient is solely responsible for determining the appropriateness of using
and distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement, including but not limited to the
risks and costs of program errors, compliance with applicable laws, damage to
or loss of data, programs or equipment, and unavailability or interruption of
operations.
6. DISCLAIMER OF LIABILITY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY
CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST
PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS
GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
7. GENERAL
If any provision of this Agreement is invalid or unenforceable under applicable
law, it shall not affect the validity or enforceability of the remainder of the
terms of this Agreement, and without further action by the parties hereto, such
provision shall be reformed to the minimum extent necessary to make such
provision valid and enforceable.
If Recipient institutes patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Program itself
(excluding combinations of the Program with other software or hardware)
infringes such Recipient’s patent(s), then such Recipient’s rights granted
under Section 2(b) shall terminate as of the date such litigation is filed.
All Recipient’s rights under this Agreement shall terminate if it fails to
comply with any of the material terms or conditions of this Agreement and does
not cure such failure in a reasonable period of time after becoming aware of
such noncompliance. If all Recipient’s rights under this Agreement terminate,
Recipient agrees to cease use and distribution of the Program as soon as
reasonably practicable. However, Recipient’s obligations under this Agreement
and any licenses granted by Recipient relating to the Program shall continue
and survive.
Everyone is permitted to copy and distribute copies of this Agreement, but in
order to avoid inconsistency the Agreement is copyrighted and may only be
modified in the following manner. The Agreement Steward reserves the right to
publish new versions (including revisions) of this Agreement from time to time.
No one other than the Agreement Steward has the right to modify this Agreement.
The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation
may assign the responsibility to serve as the Agreement Steward to a suitable
separate entity. Each new version of the Agreement will be given a
distinguishing version number. The Program (including Contributions) may always
be distributed subject to the version of the Agreement under which it was
received. In addition, after a new version of the Agreement is published,
Contributor may elect to distribute the Program (including its Contributions)
under the new version. Except as expressly stated in Sections 2(a) and 2(b)
above, Recipient receives no rights or licenses to the intellectual property of
any Contributor under this Agreement, whether expressly, by implication,
estoppel or otherwise. All rights in the Program not expressly granted under
this Agreement are reserved.
This Agreement is governed by the laws of the State of New York and the
intellectual property laws of the United States of America. No party to this
Agreement will bring a legal action under this Agreement more than one year
after the cause of action arose. Each party waives its rights to a jury trial
in any resulting litigation.
| closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 4,144 | There is a vulnerability in jackson-databind 2.9.8, upgrade recommended | https://github.com/apache/incubator-dolphinscheduler/blob/aa0974fd1f759e96430d3f1b8dac291d6ea7388c/pom.xml#L68 CVE-2019-14379 CVE-2019-14540 CVE-2019-16335 CVE-2019-16942 CVE-2019-16943 Recommended upgrade version: 2.9.10.6 | https://github.com/apache/dolphinscheduler/issues/4144 | https://github.com/apache/dolphinscheduler/pull/4153 | a522da068f8c188e0d792f84416a52e5040de459 | 1a854f8056cbc19a1520e1134cf48070e96720cf | 2020-12-02T08:19:05Z | java | 2020-12-03T07:25:49Z | dolphinscheduler-dist/release-docs/licenses/LICENSE-mchange-commons-java.txt |
Eclipse Public License - v 1.0
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM
CONSTITUTES RECIPIENT’S ACCEPTANCE OF THIS AGREEMENT.
1. DEFINITIONS
"Contribution" means:
a) in the case of the initial Contributor, the initial code and documentation
distributed under this Agreement, and
b) in the case of each subsequent Contributor:
i) changes to the Program, and
ii) additions to the Program;
where such changes and/or additions to the Program originate from and are
distributed by that particular Contributor. A Contribution 'originates' from
a Contributor if it was added to the Program by such Contributor itself or
anyone acting on such Contributor’s behalf. Contributions do not include
additions to the Program which: (i) are separate modules of software distributed
in conjunction with the Program under their own license agreement, and (ii) are
not derivative works of the Program.
"Contributor" means any person or entity that distributes the Program.
"Licensed Patents " mean patent claims licensable by a Contributor which are
necessarily infringed by the use or sale of its Contribution alone or when
combined with the Program.
"Program" means the Contributions distributed in accordance with this Agreement.
"Recipient" means anyone who receives the Program under this Agreement,
including all Contributors.
2. GRANT OF RIGHTS
a) Subject to the terms of this Agreement, each Contributor hereby grants
Recipient a non-exclusive, worldwide, royalty-free copyright license to
reproduce, prepare derivative works of, publicly display, publicly perform,
distribute and sublicense the Contribution of such Contributor, if any, and such
derivative works, in source code and object code form.
b) Subject to the terms of this Agreement, each Contributor hereby grants
Recipient a non-exclusive, worldwide, royalty-free patent license under
Licensed Patents to make, use, sell, offer to sell, import and otherwise
transfer the Contribution of such Contributor, if any, in source code and
object code form. This patent license shall apply to the combination of the
Contribution and the Program if, at the time the Contribution is added by the
Contributor, such addition of the Contribution causes such combination to be
covered by the Licensed Patents. The patent license shall not apply to any
other combinations which include the Contribution. No hardware per se is
licensed hereunder.
c) Recipient understands that although each Contributor grants the licenses to
its Contributions set forth herein, no assurances are provided by any
Contributor that the Program does not infringe the patent or other intellectual
property rights of any other entity. Each Contributor disclaims any liability
to Recipient for claims brought by any other entity based on infringement of
intellectual property rights or otherwise. As a condition to exercising the
rights and licenses granted hereunder, each Recipient hereby assumes sole
responsibility to secure any other intellectual property rights needed, if any.
For example, if a third party patent license is required to allow Recipient to
distribute the Program, it is Recipient’s responsibility to acquire that
license before distributing the Program.
d) Each Contributor represents that to its knowledge it has sufficient
copyright rights in its Contribution, if any, to grant the copyright license
set forth in this Agreement.
3. REQUIREMENTS
A Contributor may choose to distribute the Program in object code form under
its own license agreement, provided that:
a) it complies with the terms and conditions of this Agreement; and
b) its license agreement:
i) effectively disclaims on behalf of all Contributors all warranties and
conditions, express and implied, including warranties or conditions of
title and non-infringement, and implied warranties or conditions of
merchantability and fitness for a particular purpose;
ii) effectively excludes on behalf of all Contributors all liability for
damages, including direct, indirect, special, incidental and
consequential damages, such as lost profits;
iii) states that any provisions which differ from this Agreement are
offered by that Contributor alone and not by any other party; and
iv) states that source code for the Program is available from such
Contributor, and informs licensees how to obtain it in a reasonable
manner on or through a medium customarily used for software exchange.
When the Program is made available in source code form:
a) it must be made available under this Agreement; and
b) a copy of this Agreement must be included with each copy of the Program.
Contributors may not remove or alter any copyright notices contained within
the Program.
Each Contributor must identify itself as the originator of its Contribution,
if any, in a manner that reasonably allows subsequent Recipients to identify
the originator of the Contribution.
4. COMMERCIAL DISTRIBUTION
Commercial distributors of software may accept certain responsibilities with
respect to end users, business partners and the like. While this license is
intended to facilitate the commercial use of the Program, the Contributor who
includes the Program in a commercial product offering should do so in a manner
which does not create potential liability for other Contributors. Therefore,
if a Contributor includes the Program in a commercial product offering, such
Contributor ("Commercial Contributor") hereby agrees to defend and indemnify
every other Contributor ("Indemnified Contributor") against any losses, damages
and costs (collectively "Losses") arising from claims, lawsuits and other legal
actions brought by a third party against the Indemnified Contributor to the
extent caused by the acts or omissions of such Commercial Contributor in
connection with its distribution of the Program in a commercial product
offering. The obligations in this section do not apply to any claims or Losses
relating to any actual or alleged intellectual property infringement. In order
to qualify, an Indemnified Contributor must: a) promptly notify the Commercial
Contributor in writing of such claim, and b) allow the Commercial Contributor
to control, and cooperate with the Commercial Contributor in, the defense and
any related settlement negotiations. The Indemnified Contributor may participate
in any such claim at its own expense.
For example, a Contributor might include the Program in a commercial product
offering, Product X. That Contributor is then a Commercial Contributor. If that
Commercial Contributor then makes performance claims, or offers warranties
related to Product X, those performance claims and warranties are such
Commercial Contributor’s responsibility alone. Under this section, the
Commercial Contributor would have to defend claims against the other
Contributors related to those performance claims and warranties, and if a court
requires any other Contributor to pay any damages as a result, the Commercial
Contributor must pay those damages.
5. NO WARRANTY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each
Recipient is solely responsible for determining the appropriateness of using
and distributing the Program and assumes all risks associated with its
exercise of rights under this Agreement , including but not limited to the
risks and costs of program errors, compliance with applicable laws, damage to
or loss of data, programs or equipment, and unavailability or interruption of
operations.
6. DISCLAIMER OF LIABILITY
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY
CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST
PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS
GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
7. GENERAL
If any provision of this Agreement is invalid or unenforceable under applicable
law, it shall not affect the validity or enforceability of the remainder of the
terms of this Agreement, and without further action by the parties hereto, such
provision shall be reformed to the minimum extent necessary to make such
provision valid and enforceable.
If Recipient institutes patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Program itself
(excluding combinations of the Program with other software or hardware)
infringes such Recipient’s patent(s), then such Recipient’s rights granted
under Section 2(b) shall terminate as of the date such litigation is filed.
All Recipient’s rights under this Agreement shall terminate if it fails to
comply with any of the material terms or conditions of this Agreement and does
not cure such failure in a reasonable period of time after becoming aware of
such noncompliance. If all Recipient’s rights under this Agreement terminate,
Recipient agrees to cease use and distribution of the Program as soon as
reasonably practicable. However, Recipient’s obligations under this Agreement
and any licenses granted by Recipient relating to the Program shall continue
and survive.
Everyone is permitted to copy and distribute copies of this Agreement, but in
order to avoid inconsistency the Agreement is copyrighted and may only be
modified in the following manner. The Agreement Steward reserves the right to
publish new versions (including revisions) of this Agreement from time to time.
No one other than the Agreement Steward has the right to modify this Agreement.
The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation
may assign the responsibility to serve as the Agreement Steward to a suitable
separate entity. Each new version of the Agreement will be given a
distinguishing version number. The Program (including Contributions) may always
be distributed subject to the version of the Agreement under which it was
received. In addition, after a new version of the Agreement is published,
Contributor may elect to distribute the Program (including its Contributions)
under the new version. Except as expressly stated in Sections 2(a) and 2(b)
above, Recipient receives no rights or licenses to the intellectual property of
any Contributor under this Agreement, whether expressly, by implication,
estoppel or otherwise. All rights in the Program not expressly granted under
this Agreement are reserved.
This Agreement is governed by the laws of the State of New York and the
intellectual property laws of the United States of America. No party to this
Agreement will bring a legal action under this Agreement more than one year
after the cause of action arose. Each party waives its rights to a jury trial
in any resulting litigation.
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,144 |
There is a vulnerability in jackson-databind 2.9.8, upgrade recommended
|
https://github.com/apache/incubator-dolphinscheduler/blob/aa0974fd1f759e96430d3f1b8dac291d6ea7388c/pom.xml#L68
CVE-2019-14379 CVE-2019-14540 CVE-2019-16335 CVE-2019-16942 CVE-2019-16943
Recommended upgrade version:
2.9.10.6
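The actual fix is to bump the `<jackson.version>` property in the root pom (currently 2.9.8) to a patched release such as the suggested 2.9.10.6. As a quick way to confirm that the override really lands on the runtime classpath, a small version probe like the sketch below can be run against the assembled distribution. This is only an illustrative snippet, not part of DolphinScheduler; it relies on Jackson's own `PackageVersion` classes from jackson-core and jackson-databind, and the class name is made up.

```java
import com.fasterxml.jackson.core.Version;

// Sketch: print the Jackson versions actually resolved on the classpath so a
// dependency override (e.g. jackson.version -> 2.9.10.6) can be verified.
public class JacksonVersionCheck {
    public static void main(String[] args) {
        Version core = com.fasterxml.jackson.core.json.PackageVersion.VERSION;
        Version databind = com.fasterxml.jackson.databind.cfg.PackageVersion.VERSION;
        System.out.println("jackson-core     : " + core.toFullString());
        System.out.println("jackson-databind : " + databind.toFullString());
        // Fail loudly if a 2.9.x release older than 2.9.10 is still pulled in transitively.
        if (databind.getMajorVersion() == 2 && databind.getMinorVersion() == 9
                && databind.getPatchLevel() < 10) {
            throw new IllegalStateException("Vulnerable jackson-databind on classpath: " + databind);
        }
    }
}
```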
|
https://github.com/apache/dolphinscheduler/issues/4144
|
https://github.com/apache/dolphinscheduler/pull/4153
|
a522da068f8c188e0d792f84416a52e5040de459
|
1a854f8056cbc19a1520e1134cf48070e96720cf
| 2020-12-02T08:19:05Z |
java
| 2020-12-03T07:25:49Z |
pom.xml
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.3.4-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>[email protected]</post>
<subscribe>[email protected]</subscribe>
<unsubscribe>[email protected]</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.18.RELEASE</spring.version>
<spring.boot.version>2.1.17.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.9.8</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.7.0</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>20.0</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<jsp-2.1.version>6.1.14</jsp-2.1.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<apache.rat.version>0.13</apache.rat.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<guava-retry.version>2.0.0</guava-retry.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-plugin-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<!-- for api module -->
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>${jsp-2.1.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/template/AlertTemplateFactoryTest.java</include>
<include>**/alert/template/impl/DefaultHTMLTemplateTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/ExcelUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/utils/JSONUtilsTest.java</include>
<!--<include>**/alert/utils/MailUtilsTest.java</include>-->
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
<include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/PasswordAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessDefinitionVersionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/process/ProcessBuilderForWin32Test.java</include>
<include>**/common/utils/process/ProcessEnvironmentForWin32Test.java</include>
<include>**/common/utils/process/ProcessImplForWin32Test.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/FilePluginManagerTest</include>
<include>**/common/plugin/PluginClassLoaderTest</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/JsonSerializerTest.java</include>
<include>**/remote/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/remote/NettyUtilTest.java</include>
<include>**/remote/ResponseFutureTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/HostTest.java</include>
<!--<include>**/server/utils/FlinkArgsUtilsTest.java</include>-->
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<!--<include>**/server/worker/task/datax/DataxTaskTest.java</include>-->
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/zk/CuratorZookeeperClientTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
<!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionVersionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserAlertGroupMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/model/AlertDataTest.java</include>
<include>**/plugin/model/AlertInfoTest.java</include>
<include>**/plugin/utils/PropertyUtilsTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<version>${apache.rat.version}</version>
<configuration>
<excludeSubProjects>false</excludeSubProjects>
<addDefaultLicenseMatchers>false</addDefaultLicenseMatchers>
<licenses>
<license implementation="org.apache.rat.analysis.license.SimplePatternBasedLicense">
<licenseFamilyCategory>AL20</licenseFamilyCategory>
<licenseFamilyName>Apache License, 2.0</licenseFamilyName>
<patterns>
<pattern>Licensed to the Apache Software Foundation (ASF)</pattern>
</patterns>
</license>
</licenses>
<licenseFamilies>
<licenseFamily implementation="org.apache.rat.license.SimpleLicenseFamily">
<familyName>Apache License, 2.0</familyName>
</licenseFamily>
</licenseFamilies>
<excludes>
<exclude>**/node_modules/**</exclude>
<exclude>**/node/**</exclude>
<exclude>**/dist/**</exclude>
<exclude>**/licenses/**</exclude>
<exclude>.github/**</exclude>
<exclude>**/sql/soft_version</exclude>
<exclude>**/common/utils/ScriptRunner.java</exclude>
<exclude>**/*.json</exclude>
<!-- document files -->
<exclude>**/*.md</exclude>
<exclude>**/*.MD</exclude>
<exclude>**/*.txt</exclude>
<exclude>**/docs/**</exclude>
<exclude>**/*.babelrc</exclude>
<exclude>**/*.eslintrc</exclude>
<exclude>**/.mvn/jvm.config</exclude>
<exclude>**/.mvn/wrapper/**</exclude>
</excludes>
<consoleOutput>true</consoleOutput>
</configuration>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-plugin-api</module>
<module>dolphinscheduler-microbench</module>
</modules>
</project>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,144 |
There is a vulnerability in jackson-databind 2.9.8, upgrade recommended
|
https://github.com/apache/incubator-dolphinscheduler/blob/aa0974fd1f759e96430d3f1b8dac291d6ea7388c/pom.xml#L68
CVE-2019-14379 CVE-2019-14540 CVE-2019-16335 CVE-2019-16942 CVE-2019-16943
Recommended upgrade version:
2.9.10.6
|
https://github.com/apache/dolphinscheduler/issues/4144
|
https://github.com/apache/dolphinscheduler/pull/4153
|
a522da068f8c188e0d792f84416a52e5040de459
|
1a854f8056cbc19a1520e1134cf48070e96720cf
| 2020-12-02T08:19:05Z |
java
| 2020-12-03T07:25:49Z |
tools/dependencies/known-dependencies.txt
|
HikariCP-3.2.0.jar
activation-1.1.jar
ant-1.6.5.jar
aopalliance-1.0.jar
apache-el-8.5.54.jar
apacheds-i18n-2.0.0-M15.jar
apacheds-kerberos-codec-2.0.0-M15.jar
api-asn1-api-1.0.0-M20.jar
api-util-1.0.0-M20.jar
asm-3.1.jar
aspectjweaver-1.9.6.jar
audience-annotations-0.5.0.jar
avro-1.7.4.jar
aws-java-sdk-1.7.4.jar
bonecp-0.8.0.RELEASE.jar
byte-buddy-1.9.16.jar
classmate-1.4.0.jar
clickhouse-jdbc-0.1.52.jar
commons-cli-1.2.jar
commons-codec-1.11.jar
commons-collections-3.2.2.jar
commons-collections4-4.1.jar
commons-compress-1.4.1.jar
commons-compiler-3.0.16.jar
commons-configuration-1.10.jar
commons-daemon-1.0.13.jar
commons-beanutils-1.7.0.jar
commons-dbcp-1.4.jar
commons-email-1.5.jar
commons-httpclient-3.0.1.jar
commons-io-2.4.jar
commons-lang-2.6.jar
commons-logging-1.1.1.jar
commons-math3-3.1.1.jar
commons-net-3.1.jar
commons-pool-1.6.jar
core-3.1.1.jar
cron-utils-5.0.5.jar
curator-client-4.3.0.jar
curator-framework-4.3.0.jar
curator-recipes-4.3.0.jar
datanucleus-api-jdo-4.2.1.jar
datanucleus-core-4.1.6.jar
datanucleus-rdbms-4.1.7.jar
derby-10.14.2.0.jar
druid-1.1.22.jar
gson-2.8.6.jar
guava-20.0.jar
guice-3.0.jar
guice-servlet-3.0.jar
h2-1.4.200.jar
hadoop-annotations-2.7.3.jar
hadoop-auth-2.7.3.jar
hadoop-aws-2.7.3.jar
hadoop-client-2.7.3.jar
hadoop-common-2.7.3.jar
hadoop-hdfs-2.7.3.jar
hadoop-mapreduce-client-app-2.7.3.jar
hadoop-mapreduce-client-common-2.7.3.jar
hadoop-mapreduce-client-core-2.7.3.jar
hadoop-mapreduce-client-jobclient-2.7.3.jar
hadoop-mapreduce-client-shuffle-2.7.3.jar
hadoop-yarn-api-2.7.3.jar
hadoop-yarn-client-2.7.3.jar
hadoop-yarn-common-2.7.3.jar
hadoop-yarn-server-common-2.7.3.jar
hamcrest-core-1.3.jar
hibernate-validator-6.0.20.Final.jar
hive-common-2.1.0.jar
hive-jdbc-2.1.0.jar
hive-metastore-2.1.0.jar
hive-orc-2.1.0.jar
hive-serde-2.1.0.jar
hive-service-2.1.0.jar
hive-service-rpc-2.1.0.jar
hive-storage-api-2.1.0.jar
htrace-core-3.1.0-incubating.jar
httpclient-4.4.1.jar
httpcore-4.4.1.jar
httpmime-4.5.12.jar
jackson-annotations-2.9.8.jar
jackson-core-2.9.8.jar
jackson-core-asl-1.9.13.jar
jackson-databind-2.9.8.jar
jackson-datatype-jdk8-2.9.10.jar
jackson-datatype-jsr310-2.9.10.jar
jackson-jaxrs-1.9.13.jar
jackson-mapper-asl-1.9.13.jar
jackson-module-parameter-names-2.9.10.jar
jackson-xc-1.9.13.jar
jamon-runtime-2.3.1.jar
janino-3.0.16.jar
java-xmlbuilder-0.4.jar
javax.activation-api-1.2.0.jar
javax.annotation-api-1.3.2.jar
javax.inject-1.jar
javax.jdo-3.2.0-m3.jar
javax.mail-1.6.2.jar
javax.servlet-api-3.1.0.jar
javolution-5.5.1.jar
jaxb-api-2.3.1.jar
jaxb-impl-2.2.3-1.jar
jboss-logging-3.3.3.Final.jar
jdo-api-3.0.1.jar
jersey-client-1.9.jar
jersey-core-1.9.jar
jersey-guice-1.9.jar
jersey-json-1.9.jar
jersey-server-1.9.jar
jets3t-0.9.0.jar
jettison-1.1.jar
jetty-6.1.26.jar
jetty-continuation-9.4.31.v20200723.jar
jetty-http-9.4.31.v20200723.jar
jetty-io-9.4.31.v20200723.jar
jetty-security-9.4.31.v20200723.jar
jetty-server-9.4.31.v20200723.jar
jetty-servlet-9.4.31.v20200723.jar
jetty-servlets-9.4.31.v20200723.jar
jetty-util-6.1.26.jar
jetty-util-9.4.31.v20200723.jar
jetty-webapp-9.4.31.v20200723.jar
jetty-xml-9.4.31.v20200723.jar
jline-0.9.94.jar
jna-4.5.2.jar
jna-platform-4.5.2.jar
joda-time-2.10.6.jar
jpam-1.1.jar
jsch-0.1.42.jar
jsp-2.1-6.1.14.jar
jsp-api-2.1-6.1.14.jar
jsp-api-2.1.jar
jsqlparser-2.1.jar
jsr305-3.0.0.jar
jta-1.1.jar
jul-to-slf4j-1.7.30.jar
junit-4.12.jar
leveldbjni-all-1.8.jar
libfb303-0.9.3.jar
libthrift-0.9.3.jar
log4j-1.2-api-2.11.2.jar
log4j-1.2.17.jar
log4j-api-2.11.2.jar
log4j-core-2.11.2.jar
logback-classic-1.2.3.jar
logback-core-1.2.3.jar
lz4-1.3.0.jar
mapstruct-1.2.0.Final.jar
mssql-jdbc-6.1.0.jre8.jar
mybatis-3.5.2.jar
mybatis-plus-3.2.0.jar
mybatis-plus-annotation-3.2.0.jar
mybatis-plus-boot-starter-3.2.0.jar
mybatis-plus-core-3.2.0.jar
mybatis-plus-extension-3.2.0.jar
mybatis-spring-2.0.2.jar
netty-3.6.2.Final.jar
netty-all-4.1.52.Final.jar
opencsv-2.3.jar
oshi-core-3.5.0.jar
paranamer-2.3.jar
parquet-hadoop-bundle-1.8.1.jar
poi-3.17.jar
postgresql-42.1.4.jar
protobuf-java-2.5.0.jar
quartz-2.3.0.jar
quartz-jobs-2.3.0.jar
slf4j-api-1.7.5.jar
snakeyaml-1.23.jar
snappy-0.2.jar
snappy-java-1.0.4.1.jar
spring-aop-5.1.18.RELEASE.jar
spring-beans-5.1.18.RELEASE.jar
spring-boot-2.1.17.RELEASE.jar
spring-boot-autoconfigure-2.1.17.RELEASE.jar
spring-boot-starter-2.1.17.RELEASE.jar
spring-boot-starter-aop-2.1.17.RELEASE.jar
spring-boot-starter-jdbc-2.1.17.RELEASE.jar
spring-boot-starter-jetty-2.1.17.RELEASE.jar
spring-boot-starter-json-2.1.17.RELEASE.jar
spring-boot-starter-logging-2.1.17.RELEASE.jar
spring-boot-starter-web-2.1.17.RELEASE.jar
spring-context-5.1.18.RELEASE.jar
spring-core-5.1.18.RELEASE.jar
spring-expression-5.1.18.RELEASE.jar
spring-jcl-5.1.18.RELEASE.jar
spring-jdbc-5.1.18.RELEASE.jar
spring-plugin-core-1.2.0.RELEASE.jar
spring-plugin-metadata-1.2.0.RELEASE.jar
spring-tx-5.1.18.RELEASE.jar
spring-web-5.1.18.RELEASE.jar
spring-webmvc-5.1.18.RELEASE.jar
springfox-core-2.9.2.jar
springfox-schema-2.9.2.jar
springfox-spi-2.9.2.jar
springfox-spring-web-2.9.2.jar
springfox-swagger-common-2.9.2.jar
springfox-swagger-ui-2.9.2.jar
springfox-swagger2-2.9.2.jar
swagger-annotations-1.5.20.jar
swagger-bootstrap-ui-1.9.3.jar
swagger-models-1.5.20.jar
tephra-api-0.6.0.jar
threetenbp-1.3.6.jar
transaction-api-1.1.jar
validation-api-2.0.1.Final.jar
xercesImpl-2.9.1.jar
xml-apis-1.4.01.jar
xmlenc-0.52.jar
xz-1.0.jar
zookeeper-3.4.14.jar
guava-retrying-2.0.0.jar
presto-jdbc-0.238.1.jar
HikariCP-java6-2.3.13.jar
mchange-commons-java-0.2.11.jar
c3p0-0.9.5.2.jar
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,457 |
[bug] flink args build problem
|
**Describe the bug**
There is a bug in how DolphinScheduler 1.3.1 builds the Flink launch parameters.
**To Reproduce**
1. If the slot count and the taskManager count are set, the jar package cannot be found when the Flink task is executed.
2. Among the other parameters, specifying -yqu (the Flink on YARN queue name) or -ynm (the Flink application name) has no effect.
Setting additional launch parameters such as -yqu and -ynm scrambles the order in which the Flink launch arguments are emitted, as the log below shows (a sketch of the expected ordering follows the log):
2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
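The symptom visible in the log is that -yqu and -ynm end up after the application jar, where the Flink CLI treats them as program arguments rather than yarn-cluster options. The sketch below is not DolphinScheduler's actual argument-building code; it is only an illustration (the class, method, and placeholder names such as com.example.MyFlinkJob are made up) of assembling the `flink run` argument list so that every yarn option precedes the jar and user arguments come last.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative sketch: build "flink run" arguments so that all yarn-cluster
// options come before the main class and jar, and program arguments come last.
public class FlinkRunArgsSketch {

    public static List<String> buildArgs(int slots, int taskManagers, String jobManagerMemory,
                                         String taskManagerMemory, String queue, String appName,
                                         String mainClass, String jarPath, List<String> programArgs) {
        List<String> args = new ArrayList<>();
        args.add("run");
        args.add("-m");
        args.add("yarn-cluster");
        args.add("-ys");
        args.add(String.valueOf(slots));        // slots per TaskManager
        args.add("-yn");
        args.add(String.valueOf(taskManagers)); // TaskManager count (pre-Flink-1.10 style option)
        args.add("-yjm");
        args.add(jobManagerMemory);
        args.add("-ytm");
        args.add(taskManagerMemory);
        args.add("-yqu");
        args.add(queue);                        // YARN queue must precede the jar
        args.add("-ynm");
        args.add(appName);                      // YARN application name must precede the jar
        args.add("-d");
        args.add("-c");
        args.add(mainClass);
        args.add(jarPath);                      // everything after the jar is passed to the user program
        args.addAll(programArgs);
        return args;
    }

    public static void main(String[] args) {
        List<String> cmd = buildArgs(2, 2, "1G", "6G", "test", "MyFlinkJob",
                "com.example.MyFlinkJob", "my-flink-job.jar",
                Arrays.asList("--date", "2020-08-10"));
        System.out.println(String.join(" ", cmd));
    }
}
```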
|
https://github.com/apache/dolphinscheduler/issues/3457
|
https://github.com/apache/dolphinscheduler/pull/4166
|
68541f281d0b0908f605ad49847d3e7acdd5a302
|
cbc30b4900215424dcbbfb49539259d32273efc3
| 2020-08-10T12:56:16Z |
java
| 2020-12-10T14:37:21Z |
dolphinscheduler-api/src/main/resources/application-api.properties
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# server port
server.port=12345
# session config
server.servlet.session.timeout=7200
server.servlet.context-path=/dolphinscheduler/
# Set time zone
spring.jackson.time-zone=GMT+8
# file size limit for upload
spring.servlet.multipart.max-file-size=1024MB
spring.servlet.multipart.max-request-size=1024MB
# enable response compression
server.compression.enabled=true
server.compression.mime-types=text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json,application/xml
#post content
server.jetty.max-http-post-size=5000000
spring.messages.encoding=UTF-8
#i18n classpath folder, file prefix messages; if there are many files, use "," as the separator
spring.messages.basename=i18n/messages
# Authentication types (supported types: PASSWORD)
security.authentication.type=PASSWORD
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,457 |
[bug] flink args build problem
|
**Describe the bug**
There is a bug in how DolphinScheduler 1.3.1 builds the Flink launch parameters.
**To Reproduce**
1. If the slot count and the taskManager count are set, the jar package cannot be found when the Flink task is executed.
2. Among the other parameters, specifying -yqu (the Flink on YARN queue name) or -ynm (the Flink application name) has no effect.
Setting additional launch parameters such as -yqu and -ynm scrambles the order in which the Flink launch arguments are emitted, as the log below shows:
2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
|
https://github.com/apache/dolphinscheduler/issues/3457
|
https://github.com/apache/dolphinscheduler/pull/4166
|
68541f281d0b0908f605ad49847d3e7acdd5a302
|
cbc30b4900215424dcbbfb49539259d32273efc3
| 2020-08-10T12:56:16Z |
java
| 2020-12-10T14:37:21Z |
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import java.util.regex.Pattern;
/**
* Constants
*/
public final class Constants {
private Constants() {
throw new UnsupportedOperationException("Construct Constants");
}
/**
* quartz config
*/
public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName";
public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId";
public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon";
public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties";
public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class";
public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount";
public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons";
public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority";
public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class";
public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix";
public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered";
public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold";
public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval";
public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock";
public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource";
public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class";
/**
* quartz config default value
*/
public static final String QUARTZ_TABLE_PREFIX = "QRTZ_";
public static final String QUARTZ_MISFIRETHRESHOLD = "60000";
public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000";
public static final String QUARTZ_DATASOURCE = "myDs";
public static final String QUARTZ_THREADCOUNT = "25";
public static final String QUARTZ_THREADPRIORITY = "5";
public static final String QUARTZ_INSTANCENAME = "DolphinScheduler";
public static final String QUARTZ_INSTANCEID = "AUTO";
public static final String QUARTZ_ACQUIRETRIGGERSWITHINLOCK = "true";
/**
* common properties path
*/
public static final String COMMON_PROPERTIES_PATH = "/common.properties";
/**
* fs.defaultFS
*/
public static final String FS_DEFAULTFS = "fs.defaultFS";
/**
* fs s3a endpoint
*/
public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
/**
* fs s3a access key
*/
public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
/**
* fs s3a secret key
*/
public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";
/**
* yarn.resourcemanager.ha.rm.ids
*/
public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";
public static final String YARN_RESOURCEMANAGER_HA_XX = "xx";
/**
* yarn.application.status.address
*/
public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
/**
* yarn.job.history.status.address
*/
public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address";
/**
* hdfs configuration
* hdfs.root.user
*/
public static final String HDFS_ROOT_USER = "hdfs.root.user";
/**
* hdfs/s3 configuration
* resource.upload.path
*/
public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path";
/**
* data basedir path
*/
public static final String DATA_BASEDIR_PATH = "data.basedir.path";
/**
* dolphinscheduler.env.path
*/
public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";
/**
* environment properties default path
*/
public static final String ENV_PATH = "env/dolphinscheduler_env.sh";
/**
* python home
*/
public static final String PYTHON_HOME = "PYTHON_HOME";
/**
* resource.view.suffixs
*/
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties";
/**
* development.state
*/
public static final String DEVELOPMENT_STATE = "development.state";
public static final String DEVELOPMENT_STATE_DEFAULT_VALUE = "true";
/**
* string true
*/
public static final String STRING_TRUE = "true";
/**
* string false
*/
public static final String STRING_FALSE = "false";
/**
* resource storage type
*/
public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type";
/**
* MasterServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/nodes/master";
/**
* WorkerServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/nodes/worker";
/**
* all servers directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers";
/**
* MasterServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters";
/**
* MasterServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters";
/**
* WorkerServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers";
/**
* MasterServer startup failover running and fault tolerance process
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters";
/**
* comma ,
*/
public static final String COMMA = ",";
/**
* slash /
*/
public static final String SLASH = "/";
/**
* COLON :
*/
public static final String COLON = ":";
/**
* SPACE " "
*/
public static final String SPACE = " ";
/**
* SINGLE_SLASH /
*/
public static final String SINGLE_SLASH = "/";
/**
* DOUBLE_SLASH //
*/
public static final String DOUBLE_SLASH = "//";
/**
* SINGLE_QUOTES "'"
*/
public static final String SINGLE_QUOTES = "'";
/**
* DOUBLE_QUOTES "\""
*/
public static final String DOUBLE_QUOTES = "\"";
/**
* SEMICOLON ;
*/
public static final String SEMICOLON = ";";
/**
* EQUAL SIGN
*/
public static final String EQUAL_SIGN = "=";
/**
* AT SIGN
*/
public static final String AT_SIGN = "@";
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
public static final String WORKER_RESERVED_MEMORY = "worker.reserved.memory";
public static final String MASTER_MAX_CPULOAD_AVG = "master.max.cpuload.avg";
public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory";
/**
* date format of yyyy-MM-dd HH:mm:ss
*/
public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
/**
* date format of yyyyMMddHHmmss
*/
public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss";
/**
* date format of yyyyMMddHHmmssSSS
*/
public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS";
/**
* http connect time out
*/
public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000;
/**
* http connect request time out
*/
public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000;
/**
* httpclient socket time out
*/
public static final int SOCKET_TIMEOUT = 60 * 1000;
/**
* http header
*/
public static final String HTTP_HEADER_UNKNOWN = "unKnown";
/**
* http X-Forwarded-For
*/
public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For";
/**
* http X-Real-IP
*/
public static final String HTTP_X_REAL_IP = "X-Real-IP";
/**
* UTF-8
*/
public static final String UTF_8 = "UTF-8";
/**
* user name regex
*/
public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$");
/**
* email regex
*/
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");
/**
* read permission
*/
public static final int READ_PERMISSION = 2 * 1;
/**
* write permission
*/
public static final int WRITE_PERMISSION = 2 * 2;
/**
* execute permission
*/
public static final int EXECUTE_PERMISSION = 1;
/**
* default admin permission
*/
public static final int DEFAULT_ADMIN_PERMISSION = 7;
/**
* all permissions
*/
public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;
/**
* max task timeout
*/
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* master cpu load
*/
public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;
/**
* master reserved memory
*/
public static final double DEFAULT_MASTER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* worker cpu load
*/
public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2;
/**
* worker reserved memory
*/
public static final double DEFAULT_WORKER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
/**
* default log cache rows num, output when the number is reached
*/
public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16;
/**
* log flush interval, output when the interval is reached
*/
public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000;
/**
* time unit seconds to minutes
*/
public static final int SEC_2_MINUTES_TIME_UNIT = 60;
/**
* rpc port
*/
public static final int RPC_PORT = 50051;
/**
* forbid running task
*/
public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";
/**
* datasource configuration path
*/
public static final String DATASOURCE_PROPERTIES = "/datasource.properties";
public static final String TASK_RECORD_URL = "task.record.datasource.url";
public static final String TASK_RECORD_FLAG = "task.record.flag";
public static final String TASK_RECORD_USER = "task.record.datasource.username";
public static final String TASK_RECORD_PWD = "task.record.datasource.password";
public static final String DEFAULT = "Default";
public static final String USER = "user";
public static final String PASSWORD = "password";
public static final String XXXXXX = "******";
public static final String NULL = "NULL";
public static final String THREAD_NAME_MASTER_SERVER = "Master-Server";
public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server";
public static final String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd";
public static final String TASK_RECORD_TABLE_HISTORY_HIVE_LOG = "eamp_hive_hist_log_hd";
/**
* command parameter keys
*/
public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId";
public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList";
public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId";
public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId";
public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0";
public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId";
public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId";
public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList";
/**
* complement data start date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate";
/**
* complement data end date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate";
/**
* hadoop configuration
*/
public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE";
public static final String HADOOP_RM_STATE_STANDBY = "STANDBY";
public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port";
/**
* data source config
*/
public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";
public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";
public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username";
public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout";
public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize";
public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle";
public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive";
public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis";
public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery";
public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle";
public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow";
public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn";
public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements";
public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit";
public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive";
public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize";
public static final String DEVELOPMENT = "development";
public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties";
/**
* sleep time
*/
public static final int SLEEP_TIME_MILLIS = 1000;
/**
* heartbeat for zk info length
*/
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10;
/**
* hadoop params
* jar
*/
public static final String JAR = "jar";
/**
* hadoop
*/
public static final String HADOOP = "hadoop";
/**
* -D parameter
*/
public static final String D = "-D";
/**
* -D mapreduce.job.queuename=ququename
*/
public static final String MR_QUEUE = "mapreduce.job.queuename";
/**
* spark params constant
*/
public static final String MASTER = "--master";
public static final String DEPLOY_MODE = "--deploy-mode";
/**
* --class CLASS_NAME
*/
public static final String MAIN_CLASS = "--class";
/**
* --driver-cores NUM
*/
public static final String DRIVER_CORES = "--driver-cores";
/**
* --driver-memory MEM
*/
public static final String DRIVER_MEMORY = "--driver-memory";
/**
* --num-executors NUM
*/
public static final String NUM_EXECUTORS = "--num-executors";
/**
* --executor-cores NUM
*/
public static final String EXECUTOR_CORES = "--executor-cores";
/**
* --executor-memory MEM
*/
public static final String EXECUTOR_MEMORY = "--executor-memory";
/**
* --queue QUEUE
*/
public static final String SPARK_QUEUE = "--queue";
/**
* --queue --qu
*/
public static final String FLINK_QUEUE = "--qu";
/**
* exit code success
*/
public static final int EXIT_CODE_SUCCESS = 0;
/**
* exit code kill
*/
public static final int EXIT_CODE_KILL = 137;
/**
* exit code failure
*/
public static final int EXIT_CODE_FAILURE = -1;
/**
* date format of yyyyMMdd
*/
public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";
/**
* date format of yyyyMMddHHmmss
*/
public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";
/**
* system date(yyyyMMddHHmmss)
*/
public static final String PARAMETER_DATETIME = "system.datetime";
/**
* system date(yyyymmdd) today
*/
public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";
/**
* system date(yyyymmdd) yesterday
*/
public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";
/**
* ACCEPTED
*/
public static final String ACCEPTED = "ACCEPTED";
/**
* SUCCEEDED
*/
public static final String SUCCEEDED = "SUCCEEDED";
/**
* NEW
*/
public static final String NEW = "NEW";
/**
* NEW_SAVING
*/
public static final String NEW_SAVING = "NEW_SAVING";
/**
* SUBMITTED
*/
public static final String SUBMITTED = "SUBMITTED";
/**
* FAILED
*/
public static final String FAILED = "FAILED";
/**
* KILLED
*/
public static final String KILLED = "KILLED";
/**
* RUNNING
*/
public static final String RUNNING = "RUNNING";
/**
* underline "_"
*/
public static final String UNDERLINE = "_";
/**
* quartz job prefix
*/
public static final String QUARTZ_JOB_PRIFIX = "job";
/**
* quartz job group prefix
*/
public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";
/**
* projectId
*/
public static final String PROJECT_ID = "projectId";
/**
* processId
*/
public static final String SCHEDULE_ID = "scheduleId";
/**
* schedule
*/
public static final String SCHEDULE = "schedule";
/**
* application regex
*/
public static final String APPLICATION_REGEX = "application_\\d+_\\d+";
public static final String PID = OSUtils.isWindows() ? "handle" : "pid";
/**
* month_begin
*/
public static final String MONTH_BEGIN = "month_begin";
/**
* add_months
*/
public static final String ADD_MONTHS = "add_months";
/**
* month_end
*/
public static final String MONTH_END = "month_end";
/**
* week_begin
*/
public static final String WEEK_BEGIN = "week_begin";
/**
* week_end
*/
public static final String WEEK_END = "week_end";
/**
* timestamp
*/
public static final String TIMESTAMP = "timestamp";
public static final char SUBTRACT_CHAR = '-';
public static final char ADD_CHAR = '+';
public static final char MULTIPLY_CHAR = '*';
public static final char DIVISION_CHAR = '/';
public static final char LEFT_BRACE_CHAR = '(';
public static final char RIGHT_BRACE_CHAR = ')';
public static final String ADD_STRING = "+";
public static final String MULTIPLY_STRING = "*";
public static final String DIVISION_STRING = "/";
public static final String LEFT_BRACE_STRING = "(";
public static final char P = 'P';
public static final char N = 'N';
public static final String SUBTRACT_STRING = "-";
public static final String GLOBAL_PARAMS = "globalParams";
public static final String LOCAL_PARAMS = "localParams";
public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
public static final String TASK_LIST = "taskList";
public static final String RWXR_XR_X = "rwxr-xr-x";
/**
* master/worker server use for zk
*/
public static final String MASTER_PREFIX = "master";
public static final String WORKER_PREFIX = "worker";
public static final String DELETE_ZK_OP = "delete";
public static final String ADD_ZK_OP = "add";
public static final String ALIAS = "alias";
public static final String CONTENT = "content";
public static final String DEPENDENT_SPLIT = ":||";
public static final String DEPENDENT_ALL = "ALL";
/**
* preview schedule execute count
*/
public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;
/**
* kerberos
*/
public static final String KERBEROS = "kerberos";
/**
* kerberos expire time
*/
public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time";
/**
* java.security.krb5.conf
*/
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
/**
* java.security.krb5.conf.path
*/
public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/**
* com.amazonaws.services.s3.enableV4
*/
public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4";
/**
* loginUserFromKeytab user
*/
public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";
/**
* default worker group id
*/
public static final int DEFAULT_WORKER_ID = -1;
/**
* loginUserFromKeytab path
*/
public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";
/**
* task log info format
*/
public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s";
/**
* hive conf
*/
public static final String HIVE_CONF = "hiveconf:";
// flink params
public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
public static final String FLINK_RUN_MODE = "-m";
public static final String FLINK_YARN_SLOT = "-ys";
public static final String FLINK_APP_NAME = "-ynm";
public static final String FLINK_TASK_MANAGE = "-yn";
public static final String FLINK_JOB_MANAGE_MEM = "-yjm";
public static final String FLINK_TASK_MANAGE_MEM = "-ytm";
public static final String FLINK_DETACH = "-d";
public static final String FLINK_MAIN_CLASS = "-c";
public static final int[] NOT_TERMINATED_STATES = new int[] {
ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXECUTION.ordinal(),
ExecutionStatus.DELAY_EXECUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal(),
ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
ExecutionStatus.WAITTING_THREAD.ordinal(),
ExecutionStatus.WAITTING_DEPEND.ordinal()
};
/**
* status
*/
public static final String STATUS = "status";
/**
* message
*/
public static final String MSG = "msg";
/**
* data total
*/
public static final String COUNT = "count";
/**
* page size
*/
public static final String PAGE_SIZE = "pageSize";
/**
* current page no
*/
public static final String PAGE_NUMBER = "pageNo";
/**
* data list
*/
public static final String DATA_LIST = "data";
public static final String TOTAL_LIST = "totalList";
public static final String CURRENT_PAGE = "currentPage";
public static final String TOTAL_PAGE = "totalPage";
public static final String TOTAL = "total";
/**
* session user
*/
public static final String SESSION_USER = "session.user";
public static final String SESSION_ID = "sessionId";
public static final String PASSWORD_DEFAULT = "******";
/**
* driver
*/
public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver";
public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver";
public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver";
public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver";
public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver";
public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver";
/**
* database type
*/
public static final String MYSQL = "MYSQL";
public static final String POSTGRESQL = "POSTGRESQL";
public static final String HIVE = "HIVE";
public static final String SPARK = "SPARK";
public static final String CLICKHOUSE = "CLICKHOUSE";
public static final String ORACLE = "ORACLE";
public static final String SQLSERVER = "SQLSERVER";
public static final String DB2 = "DB2";
public static final String PRESTO = "PRESTO";
/**
* jdbc url
*/
public static final String JDBC_MYSQL = "jdbc:mysql://";
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";
public static final String JDBC_PRESTO = "jdbc:presto://";
public static final String ADDRESS = "address";
public static final String DATABASE = "database";
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String OTHER = "other";
public static final String ORACLE_DB_CONNECT_TYPE = "connectType";
/**
* session timeout
*/
public static final int SESSION_TIME_OUT = 7200;
public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024;
public static final String UDF = "UDF";
public static final String CLASS = "class";
public static final String RECEIVERS = "receivers";
public static final String RECEIVERS_CC = "receiversCc";
/**
* dataSource sensitive param
*/
public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";
/**
* default worker group
*/
public static final String DEFAULT_WORKER_GROUP = "default";
public static final Integer TASK_INFO_LENGTH = 5;
/**
* new
* schedule time
*/
public static final String PARAMETER_SHECDULE_TIME = "schedule.time";
/**
* authorize writable perm
*/
public static final int AUTHORIZE_WRITABLE_PERM = 7;
/**
* authorize readable perm
*/
public static final int AUTHORIZE_READABLE_PERM = 4;
/**
* plugin configurations
*/
public static final String PLUGIN_JAR_SUFFIX = ".jar";
public static final int NORAML_NODE_STATUS = 0;
public static final int ABNORMAL_NODE_STATUS = 1;
public static final String START_TIME = "start time";
public static final String END_TIME = "end time";
/**
* system line separator
*/
public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator");
/**
* net system properties
*/
public static final String DOLPHIN_SCHEDULER_PREFERRED_NETWORK_INTERFACE = "dolphin.scheduler.network.interface.preferred";
/**
* datasource encryption salt
*/
public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*";
public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable";
public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt";
/**
* Network IP selection priority strategy, options: default, inner, outer
*/
public static final String NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy";
/**
* exec shell scripts
*/
public static final String SH = "sh";
/**
* pstree, get pid and sub pid
*/
public static final String PSTREE = "pstree";
}
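A minimal usage sketch (hypothetical class, not part of the file above) showing the precompiled validation patterns from Constants in action.
import org.apache.dolphinscheduler.common.Constants;
// Hypothetical demo class; only the two Pattern constants above are exercised.
public class ConstantsUsageExample {
    public static void main(String[] args) {
        // "admin_01" satisfies ^[a-zA-Z0-9._-]{3,39}$
        boolean validUser = Constants.REGEX_USER_NAME.matcher("admin_01").matches();
        // a plain address such as dev@example.com satisfies REGEX_MAIL_NAME
        boolean validMail = Constants.REGEX_MAIL_NAME.matcher("dev@example.com").matches();
        System.out.println(validUser + " " + validMail); // prints: true true
    }
}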
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,457 |
[bug] flink args build problem
|
**Describe the bug**
There is a bug in DolphinScheduler 1.3.1 in the way the Flink launch parameters are built
**To Reproduce**
1. If the slot number and taskManager number are set, the jar package cannot be found when the Flink task is executed
2. Among the other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (the Flink appName) has no effect
!!!! Setting other boot parameters, such as -yqu and -ynm, can confuse the order in which the Flink boot parameters are set
2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
|
https://github.com/apache/dolphinscheduler/issues/3457
|
https://github.com/apache/dolphinscheduler/pull/4166
|
68541f281d0b0908f605ad49847d3e7acdd5a302
|
cbc30b4900215424dcbbfb49539259d32273efc3
| 2020-08-10T12:56:16Z |
java
| 2020-12-10T14:37:21Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtils.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import java.util.ArrayList;
import java.util.List;
/**
* flink args utils
*/
public class FlinkArgsUtils {
private static final String LOCAL_DEPLOY_MODE = "local";
private static final String FLINK_VERSION_BEFORE_1_10 = "<1.10";
/**
* build args
* @param param flink parameters
* @return argument list
*/
public static List<String> buildArgs(FlinkParameters param) {
List<String> args = new ArrayList<>();
String deployMode = "cluster";
String tmpDeployMode = param.getDeployMode();
if (StringUtils.isNotEmpty(tmpDeployMode)) {
deployMode = tmpDeployMode;
}
if (!LOCAL_DEPLOY_MODE.equals(deployMode)) {
args.add(Constants.FLINK_RUN_MODE); //-m
args.add(Constants.FLINK_YARN_CLUSTER); //yarn-cluster
int slot = param.getSlot();
if (slot != 0) {
args.add(Constants.FLINK_YARN_SLOT);
args.add(String.format("%d", slot)); //-ys
}
String appName = param.getAppName();
if (StringUtils.isNotEmpty(appName)) { //-ynm
args.add(Constants.FLINK_APP_NAME);
args.add(appName);
}
// judge flink version; since flink 1.10, the parameter -yn has been removed
String flinkVersion = param.getFlinkVersion();
if (FLINK_VERSION_BEFORE_1_10.equals(flinkVersion)) {
int taskManager = param.getTaskManager();
if (taskManager != 0) { //-yn
args.add(Constants.FLINK_TASK_MANAGE);
args.add(String.format("%d", taskManager));
}
}
String jobManagerMemory = param.getJobManagerMemory();
if (StringUtils.isNotEmpty(jobManagerMemory)) {
args.add(Constants.FLINK_JOB_MANAGE_MEM);
args.add(jobManagerMemory); //-yjm
}
String taskManagerMemory = param.getTaskManagerMemory();
if (StringUtils.isNotEmpty(taskManagerMemory)) { // -ytm
args.add(Constants.FLINK_TASK_MANAGE_MEM);
args.add(taskManagerMemory);
}
args.add(Constants.FLINK_DETACH); //-d
}
ProgramType programType = param.getProgramType();
String mainClass = param.getMainClass();
if (programType != null && programType != ProgramType.PYTHON && StringUtils.isNotEmpty(mainClass)) {
args.add(Constants.FLINK_MAIN_CLASS); //-c
args.add(param.getMainClass()); //main class
}
ResourceInfo mainJar = param.getMainJar();
if (mainJar != null) {
args.add(mainJar.getRes());
}
String mainArgs = param.getMainArgs();
if (StringUtils.isNotEmpty(mainArgs)) {
args.add(mainArgs);
}
// --files --conf --libjar ...
String others = param.getOthers();
String queue = param.getQueue();
if (StringUtils.isNotEmpty(others)) {
if (!others.contains(Constants.FLINK_QUEUE) && StringUtils.isNotEmpty(queue) && !deployMode.equals(LOCAL_DEPLOY_MODE)) {
args.add(Constants.FLINK_QUEUE);
args.add(param.getQueue());
}
args.add(others);
} else if (StringUtils.isNotEmpty(queue) && !deployMode.equals(LOCAL_DEPLOY_MODE)) {
args.add(Constants.FLINK_QUEUE);
args.add(param.getQueue());
}
return args;
}
}
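A usage sketch (hypothetical demo class, not from the repository) that reproduces the argument order described in the issue: with a queue plus user-supplied "-yqu"/"-ynm" options, buildArgs above appends "--qu <queue>" and the other options only after the main jar.
import java.util.List;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.server.utils.FlinkArgsUtils;
public class FlinkArgsOrderDemo {
    public static void main(String[] args) {
        FlinkParameters param = new FlinkParameters();
        param.setDeployMode("cluster");
        param.setProgramType(ProgramType.JAVA);
        param.setMainClass("cn.example.DurationAndMileage"); // hypothetical main class
        ResourceInfo jar = new ResourceInfo();
        jar.setRes("analysis-assembly-2.3.jar");
        param.setMainJar(jar);
        param.setQueue("default");
        param.setOthers("-yqu test -ynm DurationAndMileage"); // user-supplied "other parameters"
        List<String> built = FlinkArgsUtils.buildArgs(param);
        // With the implementation above this prints:
        // -m yarn-cluster -d -c cn.example.DurationAndMileage analysis-assembly-2.3.jar --qu default -yqu test -ynm DurationAndMileage
        System.out.println(String.join(" ", built));
    }
}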
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,457 |
[bug] flink args build problem
|
**Describe the bug**
There is a bug in DolphinScheduler 1.3.1 in the way the Flink launch parameters are built
**To Reproduce**
1. If the slot number and taskManager number are set, the jar package cannot be found when the Flink task is executed
2. Among the other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (the Flink appName) has no effect
!!!! Setting other boot parameters, such as -yqu and -ynm, can confuse the order in which the Flink boot parameters are set
2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
|
https://github.com/apache/dolphinscheduler/issues/3457
|
https://github.com/apache/dolphinscheduler/pull/4166
|
68541f281d0b0908f605ad49847d3e7acdd5a302
|
cbc30b4900215424dcbbfb49539259d32273efc3
| 2020-08-10T12:56:16Z |
java
| 2020-12-10T14:37:21Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.flink;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.FlinkArgsUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* flink task
*/
public class FlinkTask extends AbstractYarnTask {
/**
* flink command
*/
private static final String FLINK_COMMAND = "flink";
private static final String FLINK_RUN = "run";
/**
* flink parameters
*/
private FlinkParameters flinkParameters;
/**
* taskExecutionContext
*/
private TaskExecutionContext taskExecutionContext;
public FlinkTask(TaskExecutionContext taskExecutionContext, Logger logger) {
super(taskExecutionContext, logger);
this.taskExecutionContext = taskExecutionContext;
}
@Override
public void init() {
logger.info("flink task params {}", taskExecutionContext.getTaskParams());
flinkParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), FlinkParameters.class);
if (!flinkParameters.checkParameters()) {
throw new RuntimeException("flink task params is not valid");
}
flinkParameters.setQueue(taskExecutionContext.getQueue());
setMainJarName();
if (StringUtils.isNotEmpty(flinkParameters.getMainArgs())) {
String args = flinkParameters.getMainArgs();
// replace placeholder
Map<String, Property> paramsMap = ParamUtils.convert(ParamUtils.getUserDefParamsMap(taskExecutionContext.getDefinedParams()),
taskExecutionContext.getDefinedParams(),
flinkParameters.getLocalParametersMap(),
CommandType.of(taskExecutionContext.getCmdTypeIfComplement()),
taskExecutionContext.getScheduleTime());
logger.info("param Map : {}", paramsMap);
if (paramsMap != null) {
args = ParameterUtils.convertParameterPlaceholders(args, ParamUtils.convert(paramsMap));
logger.info("param args : {}", args);
}
flinkParameters.setMainArgs(args);
}
}
/**
* create command
* @return command
*/
@Override
protected String buildCommand() {
List<String> args = new ArrayList<>();
args.add(FLINK_COMMAND);
args.add(FLINK_RUN);
logger.info("flink task args : {}", args);
// other parameters
args.addAll(FlinkArgsUtils.buildArgs(flinkParameters));
String command = ParameterUtils
.convertParameterPlaceholders(String.join(" ", args), taskExecutionContext.getDefinedParams());
logger.info("flink task command : {}", command);
return command;
}
@Override
protected void setMainJarName() {
// main jar
ResourceInfo mainJar = flinkParameters.getMainJar();
if (mainJar != null) {
int resourceId = mainJar.getId();
String resourceName;
if (resourceId == 0) {
resourceName = mainJar.getRes();
} else {
Resource resource = processService.getResourceById(flinkParameters.getMainJar().getId());
if (resource == null) {
logger.error("resource id: {} not exist", resourceId);
throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
}
resourceName = resource.getFullName().replaceFirst("/", "");
}
mainJar.setRes(resourceName);
flinkParameters.setMainJar(mainJar);
}
}
@Override
public AbstractParameters getParameters() {
return flinkParameters;
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,457 |
[bug] flink args build problem
|
**Describe the bug**
There is a bug in DolphinScheduler 1.3.1 in the way the Flink launch parameters are built
**To Reproduce**
1. If the slot number and taskManager number are set, the jar package cannot be found when the Flink task is executed
2. Among the other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (the Flink appName) has no effect
!!!! Setting other boot parameters, such as -yqu and -ynm, can confuse the order in which the Flink boot parameters are set
2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
|
https://github.com/apache/dolphinscheduler/issues/3457
|
https://github.com/apache/dolphinscheduler/pull/4166
|
68541f281d0b0908f605ad49847d3e7acdd5a302
|
cbc30b4900215424dcbbfb49539259d32273efc3
| 2020-08-10T12:56:16Z |
java
| 2020-12-10T14:37:21Z |
dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/utils/FlinkArgsUtilsTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.utils;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
/**
* Test FlinkArgsUtils
*/
public class FlinkArgsUtilsTest {
private static final Logger logger = LoggerFactory.getLogger(FlinkArgsUtilsTest.class);
public String mode = "cluster";
public int slot = 2;
public String appName = "testFlink";
public int taskManager = 4;
public String taskManagerMemory = "2G";
public String jobManagerMemory = "4G";
public ProgramType programType = ProgramType.JAVA;
public String mainClass = "com.test";
public ResourceInfo mainJar = null;
public String mainArgs = "testArgs";
public String queue = "queue1";
public String others = "--input file:///home";
public String flinkVersion = "<1.10";
@Before
public void setUp() throws Exception {
ResourceInfo main = new ResourceInfo();
main.setRes("testflink-1.0.0-SNAPSHOT.jar");
mainJar = main;
}
/**
* Test buildArgs
*/
@Test
public void testBuildArgs() {
//Define params
FlinkParameters param = new FlinkParameters();
param.setDeployMode(mode);
param.setMainClass(mainClass);
param.setAppName(appName);
param.setSlot(slot);
param.setTaskManager(taskManager);
param.setJobManagerMemory(jobManagerMemory);
param.setTaskManagerMemory(taskManagerMemory);
param.setMainJar(mainJar);
param.setProgramType(programType);
param.setMainArgs(mainArgs);
param.setQueue(queue);
param.setOthers(others);
param.setFlinkVersion(flinkVersion);
//Invoke buildArgs
List<String> result = FlinkArgsUtils.buildArgs(param);
for (String s : result) {
logger.info(s);
}
//Expected values and order
assertEquals(20, result.size());
assertEquals("-m", result.get(0));
assertEquals("yarn-cluster", result.get(1));
assertEquals("-ys", result.get(2));
assertSame(Integer.valueOf(result.get(3)),slot);
assertEquals("-ynm",result.get(4));
assertEquals(result.get(5),appName);
assertEquals("-yn", result.get(6));
assertSame(Integer.valueOf(result.get(7)),taskManager);
assertEquals("-yjm", result.get(8));
assertEquals(result.get(9),jobManagerMemory);
assertEquals("-ytm", result.get(10));
assertEquals(result.get(11),taskManagerMemory);
assertEquals("-d", result.get(12));
assertEquals("-c", result.get(13));
assertEquals(result.get(14),mainClass);
assertEquals(result.get(15),mainJar.getRes());
assertEquals(result.get(16),mainArgs);
assertEquals("--qu", result.get(17));
assertEquals(result.get(18),queue);
assertEquals(result.get(19),others);
//Others param without --qu
FlinkParameters param1 = new FlinkParameters();
param1.setQueue(queue);
param1.setDeployMode(mode);
result = FlinkArgsUtils.buildArgs(param1);
assertEquals(5, result.size());
}
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,457 |
[bug] flink args build problem
|
**Describe the bug**
There is a bug in DolphinScheduler 1.3.1 in the way the Flink launch parameters are built
**To Reproduce**
1. If the slot number and taskManager number are set, the jar package cannot be found when the Flink task is executed
2. Among the other parameters, specifying -yqu (the queue name of Flink on Yarn) or -ynm (the Flink appName) has no effect
!!!! Setting other boot parameters, such as -yqu and -ynm, can confuse the order in which the Flink boot parameters are set
2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
|
https://github.com/apache/dolphinscheduler/issues/3457
|
https://github.com/apache/dolphinscheduler/pull/4166
|
68541f281d0b0908f605ad49847d3e7acdd5a302
|
cbc30b4900215424dcbbfb49539259d32273efc3
| 2020-08-10T12:56:16Z |
java
| 2020-12-10T14:37:21Z |
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/tasks/flink.vue
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="flink-model">
<m-list-box>
<div slot="text">{{$t('Program Type')}}</div>
<div slot="content">
<x-select
style="width: 130px;"
v-model="programType"
:disabled="isDetails">
<x-option
v-for="city in programTypeList"
:key="city.code"
:value="city.code"
:label="city.code">
</x-option>
</x-select>
</div>
</m-list-box>
<m-list-box v-if="programType !== 'PYTHON'">
<div slot="text">{{$t('Main class')}}</div>
<div slot="content">
<x-input
:disabled="isDetails"
type="input"
v-model="mainClass"
:placeholder="$t('Please enter main class')"
autocomplete="off">
</x-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Main jar package')}}</div>
<div slot="content">
<treeselect v-model="mainJar" maxHeight="200" :options="mainJarLists" :disable-branch-nodes="true" :normalizer="normalizer" :disabled="isDetails" :placeholder="$t('Please enter main jar package')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Deploy Mode')}}</div>
<div slot="content">
<x-radio-group v-model="deployMode">
<x-radio :label="'cluster'" :disabled="isDetails"></x-radio>
<x-radio :label="'local'" :disabled="isDetails"></x-radio>
</x-radio-group>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Flink Version')}}</div>
<div slot="content">
<x-select
style="width: 100px;"
v-model="flinkVersion"
:disabled="isDetails">
<x-option
v-for="version in flinkVersionList"
:key="version.code"
:value="version.code"
:label="version.code">
</x-option>
</x-select>
</div>
</m-list-box>
<div class="list-box-4p">
<div class="clearfix list">
<span class="sp1" style="word-break:break-all">{{$t('jobManagerMemory')}}</span>
<span class="sp2">
<x-input
:disabled="isDetails"
type="input"
v-model="jobManagerMemory"
:placeholder="$t('Please enter jobManager memory')"
style="width: 200px;"
autocomplete="off">
</x-input>
</span>
<span class="sp1 sp3">{{$t('taskManagerMemory')}}</span>
<span class="sp2">
<x-input
:disabled="isDetails"
type="input"
v-model="taskManagerMemory"
:placeholder="$t('Please enter the taskManager memory')"
style="width: 186px;"
autocomplete="off">
</x-input>
</span>
</div>
<div class="clearfix list">
<span class="sp1">{{$t('slot')}}</span>
<span class="sp2">
<x-input
:disabled="isDetails"
type="input"
v-model="slot"
:placeholder="$t('Please enter solt number')"
style="width: 200px;"
autocomplete="off">
</x-input>
</span>
<div v-if="flinkVersion !== '>=1.10'">
<span class="sp1 sp3">{{$t('taskManager')}}</span>
<span class="sp2">
<x-input
:disabled="isDetails"
type="input"
v-model="taskManager"
:placeholder="$t('Please enter taskManager number')"
style="width: 186px;"
autocomplete="off">
</x-input>
</span>
</div>
</div>
</div>
<m-list-box>
<div slot="text">{{$t('Command-line parameters')}}</div>
<div slot="content">
<x-input
:autosize="{minRows:2}"
:disabled="isDetails"
type="textarea"
v-model="mainArgs"
:placeholder="$t('Please enter Command-line parameters')"
autocomplete="off">
</x-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Other parameters')}}</div>
<div slot="content">
<x-input
:disabled="isDetails"
:autosize="{minRows:2}"
type="textarea"
v-model="others"
:placeholder="$t('Please enter other parameters')">
</x-input>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Resources')}}</div>
<div slot="content">
<treeselect v-model="resourceList" :multiple="true" maxHeight="200" :options="mainJarList" :normalizer="normalizer" :disabled="isDetails" :value-consists-of="valueConsistsOf" :placeholder="$t('Please select resources')">
<div slot="value-label" slot-scope="{ node }">{{ node.raw.fullName }}</div>
</treeselect>
</div>
</m-list-box>
<m-list-box>
<div slot="text">{{$t('Custom Parameters')}}</div>
<div slot="content">
<m-local-params
ref="refLocalParams"
@on-local-params="_onLocalParams"
:udp-list="localParams"
:hide="false">
</m-local-params>
</div>
</m-list-box>
</div>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import mLocalParams from './_source/localParams'
import mListBox from './_source/listBox'
import mResources from './_source/resources'
import Treeselect from '@riophae/vue-treeselect'
import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import disabledState from '@/module/mixin/disabledState'
export default {
name: 'flink',
data () {
return {
valueConsistsOf: 'LEAF_PRIORITY',
// Main function class
mainClass: '',
// Master jar package
mainJar: null,
// Master jar package(List)
mainJarLists: [],
mainJarList: [],
// Deployment method
deployMode: 'cluster',
// Resource(list)
resourceList: [],
// Cache ResourceList
cacheResourceList: [],
      // Custom parameters
      localParams: [],
      // Number of slots
      slot: 1,
      // Number of taskManagers
      taskManager: '2',
// jobManager Memory
jobManagerMemory: '1G',
// taskManager Memory
taskManagerMemory: '2G',
// Command line argument
mainArgs: '',
// Other parameters
others: '',
// Program type
programType: 'SCALA',
// Program type(List)
programTypeList: [{ code: 'JAVA' }, { code: 'SCALA' }, { code: 'PYTHON' }],
flinkVersion:'<1.10',
// Flink Versions(List)
flinkVersionList: [{ code: '<1.10' }, { code: '>=1.10' }],
normalizer(node) {
return {
label: node.name
}
},
allNoResources: [],
noRes: [],
}
},
props: {
backfillItem: Object
},
mixins: [disabledState],
methods: {
    /**
     * Resolve the main jar resource id from its full name via the store (getResourceId)
     */
marjarId(name) {
this.store.dispatch('dag/getResourceId',{
type: 'FILE',
fullName: '/'+name
}).then(res => {
this.mainJar = res.id
}).catch(e => {
this.$message.error(e.msg || '')
})
},
/**
* return localParams
*/
_onLocalParams (a) {
this.localParams = a
},
/**
* return resourceList
*/
_onResourcesData (a) {
this.resourceList = a
},
/**
* cache resourceList
*/
_onCacheResourcesData (a) {
this.cacheResourceList = a
},
/**
* verification
*/
_verification () {
if (this.programType !== 'PYTHON' && !this.mainClass) {
this.$message.warning(`${i18n.$t('Please enter main class')}`)
return false
}
if (!this.mainJar) {
this.$message.warning(`${i18n.$t('Please enter main jar package')}`)
return false
}
if (!this.jobManagerMemory) {
this.$message.warning(`${i18n.$t('Please enter jobManager memory')}`)
return false
}
if (!Number.isInteger(parseInt(this.jobManagerMemory))) {
this.$message.warning(`${i18n.$t('Memory should be a positive integer')}`)
return false
}
if (!this.taskManagerMemory) {
this.$message.warning(`${i18n.$t('Please enter the taskManager memory')}`)
return false
}
if (!_.isNumber(parseInt(this.taskManagerMemory))) {
this.$message.warning(`${i18n.$t('Memory should be a positive integer')}`)
return false
}
// noRes
if (this.noRes.length>0) {
this.$message.warning(`${i18n.$t('Please delete all non-existent resources')}`)
return false
}
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
// storage
this.$emit('on-params', {
mainClass: this.mainClass,
mainJar: {
id: this.mainJar
},
deployMode: this.deployMode,
resourceList: _.map(this.resourceList, v => {
return {id: v}
}),
localParams: this.localParams,
flinkVersion: this.flinkVersion,
slot: this.slot,
taskManager: this.taskManager,
jobManagerMemory: this.jobManagerMemory,
taskManagerMemory: this.taskManagerMemory,
mainArgs: this.mainArgs,
others: this.others,
programType: this.programType
})
return true
},
    diGuiTree(item) { // Recursively traverse the tree structure
item.forEach(item => {
item.children === '' || item.children === undefined || item.children === null || item.children.length === 0?
this.operationTree(item) : this.diGuiTree(item.children);
})
},
operationTree(item) {
if(item.dirctory) {
item.isDisabled =true
}
delete item.children
},
searchTree(element, id) {
      // Find the node by id
if (element.id == id) {
return element;
} else if (element.children != null) {
var i;
var result = null;
for (i = 0; result == null && i < element.children.length; i++) {
result = this.searchTree(element.children[i], id);
}
return result;
}
return null;
},
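    // Compare the backfilled resources with the current resource tree and group any
    // unauthorized or deleted resources under a dedicated tree node so the user can spot them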
dataProcess(backResource) {
let isResourceId = []
let resourceIdArr = []
if(this.resourceList.length>0) {
this.resourceList.forEach(v=>{
this.mainJarList.forEach(v1=>{
if(this.searchTree(v1,v)) {
isResourceId.push(this.searchTree(v1,v))
}
})
})
resourceIdArr = isResourceId.map(item=>{
return item.id
})
Array.prototype.diff = function(a) {
return this.filter(function(i) {return a.indexOf(i) < 0;});
};
let diffSet = this.resourceList.diff(resourceIdArr);
let optionsCmp = []
if(diffSet.length>0) {
diffSet.forEach(item=>{
backResource.forEach(item1=>{
if(item==item1.id || item==item1.res) {
optionsCmp.push(item1)
}
})
})
}
let noResources = [{
id: -1,
name: $t('Unauthorized or deleted resources'),
fullName: '/'+$t('Unauthorized or deleted resources'),
children: []
}]
if(optionsCmp.length>0) {
this.allNoResources = optionsCmp
optionsCmp = optionsCmp.map(item=>{
return {id: item.id,name: item.name,fullName: item.res}
})
optionsCmp.forEach(item=>{
item.isNew = true
})
noResources[0].children = optionsCmp
this.mainJarList = this.mainJarList.concat(noResources)
}
}
},
},
watch: {
// Listening type
programType (type) {
if (type === 'PYTHON') {
this.mainClass = ''
}
},
//Watch the cacheParams
cacheParams (val) {
this.$emit('on-cache-params', val);
}
},
computed: {
cacheParams () {
let isResourceId = []
let resourceIdArr = []
if(this.resourceList.length>0) {
this.resourceList.forEach(v=>{
this.mainJarList.forEach(v1=>{
if(this.searchTree(v1,v)) {
isResourceId.push(this.searchTree(v1,v))
}
})
})
resourceIdArr = isResourceId.map(item=>{
return {id: item.id,name: item.name,res: item.fullName}
})
}
let result = []
resourceIdArr.forEach(item=>{
this.allNoResources.forEach(item1=>{
if(item.id==item1.id) {
// resultBool = true
result.push(item1)
}
})
})
this.noRes = result
return {
mainClass: this.mainClass,
mainJar: {
id: this.mainJar
},
deployMode: this.deployMode,
resourceList: resourceIdArr,
localParams: this.localParams,
slot: this.slot,
taskManager: this.taskManager,
jobManagerMemory: this.jobManagerMemory,
taskManagerMemory: this.taskManagerMemory,
mainArgs: this.mainArgs,
others: this.others,
programType: this.programType
}
}
},
created () {
let item = this.store.state.dag.resourcesListS
let items = this.store.state.dag.resourcesListJar
this.diGuiTree(item)
this.diGuiTree(items)
this.mainJarList = item
this.mainJarLists = items
let o = this.backfillItem
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.mainClass = o.params.mainClass || ''
if(o.params.mainJar.res) {
this.marjarId(o.params.mainJar.res)
} else if(o.params.mainJar.res=='') {
this.mainJar = ''
} else {
this.mainJar = o.params.mainJar.id || ''
}
this.deployMode = o.params.deployMode || ''
this.flinkVersion = o.params.flinkVersion || '<1.10'
this.slot = o.params.slot || 1
this.taskManager = o.params.taskManager || '2'
this.jobManagerMemory = o.params.jobManagerMemory || '1G'
this.taskManagerMemory = o.params.taskManagerMemory || '2G'
this.mainArgs = o.params.mainArgs || ''
this.others = o.params.others
this.programType = o.params.programType || 'SCALA'
// backfill resourceList
let backResource = o.params.resourceList || []
let resourceList = o.params.resourceList || []
if (resourceList.length) {
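          // Resolve each backfilled resource to an id; if the lookup fails, keep the full name so dataProcess can flag it as unauthorized or deleted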
_.map(resourceList, v => {
if(!v.id) {
this.store.dispatch('dag/getResourceId',{
type: 'FILE',
fullName: '/'+v.res
}).then(res => {
this.resourceList.push(res.id)
this.dataProcess(backResource)
}).catch(e => {
this.resourceList.push(v.res)
this.dataProcess(backResource)
})
} else {
this.resourceList.push(v.id)
this.dataProcess(backResource)
}
})
this.cacheResourceList = resourceList
}
// backfill localParams
let localParams = o.params.localParams || []
if (localParams.length) {
this.localParams = localParams
}
}
},
mounted () {
},
components: { mLocalParams, mListBox, mResources, Treeselect }
}
</script>
<style lang="scss" rel="stylesheet/scss">
.flink-model {
.list-box-4p {
.list {
margin-bottom: 14px;
.sp1 {
float: left;
width: 112px;
text-align: right;
margin-right: 10px;
font-size: 14px;
color: #777;
display: inline-block;
padding-top: 6px;
}
.sp2 {
float: left;
margin-right: 4px;
}
.sp3 {
width: 176px;
}
}
}
}
.vue-treeselect--disabled {
.vue-treeselect__control {
background-color: #ecf3f8;
.vue-treeselect__single-value {
color: #6d859e;
}
}
}
</style>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,457 |
[bug] flink args build problem
|
**Describe the bug**
There is a bug in the way DolphinScheduler 1.3.1 builds the Flink launch parameters.
**To Reproduce**
1. If the slot number and taskManager number are set, the jar package cannot be found when the Flink task is executed
2. In the other parameters, specifying -yqu (the Flink on Yarn queue name) or -ynm (the Flink application name) has no effect
!!!! Setting other launch parameters such as -yqu and -ynm scrambles the order in which the Flink launch parameters are assembled (see the sketch after the log below)
2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
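
The log above shows the root cause: the generated queue flag (--qu default) and the user-supplied options (-yqu, -ynm) end up after the main jar, where the Flink CLI no longer treats them as yarn/flink options but as program arguments. Below is a minimal sketch of the intended ordering (a hypothetical helper, not DolphinScheduler's actual args builder; class, method, and parameter names are illustrative): every yarn/flink option, including the user's "other parameters", is appended before the main jar, and only program arguments follow it.

```java
import java.util.ArrayList;
import java.util.List;

// Sketch only: demonstrates the argument ordering that avoids the problem above.
public class FlinkArgsSketch {

    public static List<String> buildArgs(String mainClass, String mainJar,
                                         String jmMemory, String tmMemory, int slots,
                                         String otherOptions, String programArgs) {
        List<String> args = new ArrayList<>();
        args.add("-m");
        args.add("yarn-cluster");
        args.add("-yjm");
        args.add(jmMemory);
        args.add("-ytm");
        args.add(tmMemory);
        args.add("-ys");
        args.add(String.valueOf(slots));
        // user-supplied options such as "-yqu test -ynm MyJob" must come before the jar
        if (otherOptions != null && !otherOptions.trim().isEmpty()) {
            args.add(otherOptions); // appended as-is for the sketch
        }
        args.add("-d");
        args.add("-c");
        args.add(mainClass);
        args.add(mainJar);              // the main jar closes the option section
        if (programArgs != null && !programArgs.trim().isEmpty()) {
            args.add(programArgs);      // only program arguments may follow the jar
        }
        return args;
    }

    public static void main(String[] argv) {
        System.out.println(String.join(" ",
                buildArgs("cn.example.DurationAndMileage", "analysis-assembly-2.3.jar",
                        "1G", "6G", 1, "-yqu test -ynm DurationAndMileage", "")));
    }
}
```

With this ordering the -yqu and -ynm flags are parsed by the Flink CLI instead of being handed to the user program, and the jar path stays immediately after -c <mainClass>.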
|
https://github.com/apache/dolphinscheduler/issues/3457
|
https://github.com/apache/dolphinscheduler/pull/4166
|
68541f281d0b0908f605ad49847d3e7acdd5a302
|
cbc30b4900215424dcbbfb49539259d32273efc3
| 2020-08-10T12:56:16Z |
java
| 2020-12-10T14:37:21Z |
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export default {
'User Name': '用户名',
'Please enter user name': '请输入用户名',
Password: '密码',
'Please enter your password': '请输入密码',
'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间',
Login: '登录',
Home: '首页',
'Failed to create node to save': '未创建节点保存失败',
'Global parameters': '全局参数',
'Local parameters': '局部参数',
'Copy success': '复制成功',
'The browser does not support automatic copying': '该浏览器不支持自动复制',
'Whether to save the DAG graph': '是否保存DAG图',
'Current node settings': '当前节点设置',
'View history': '查看历史',
'View log': '查看日志',
'Force success': '强制成功',
'Enter this child node': '进入该子节点',
'Node name': '节点名称',
'Please enter name (required)': '请输入名称(必填)',
'Run flag': '运行标志',
Normal: '正常',
'Prohibition execution': '禁止执行',
'Please enter description': '请输入描述',
'Number of failed retries': '失败重试次数',
Times: '次',
'Failed retry interval': '失败重试间隔',
Minute: '分',
'Delay execution time': '延时执行时间',
Cancel: '取消',
'Confirm add': '确认添加',
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流',
'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流',
'Name already exists': '名称已存在请重新输入',
'Download Log': '下载日志',
'Refresh Log': '刷新日志',
'Enter full screen': '进入全屏',
'Cancel full screen': '取消全屏',
Close: '关闭',
'Update log success': '更新日志成功',
'No more logs': '暂无更多日志',
'No log': '暂无日志',
'Loading Log...': '正在努力请求日志中...',
'Set the DAG diagram name': '设置DAG图名称',
'Please enter description(optional)': '请输入描述(选填)',
'Set global': '设置全局',
'Whether to update the process definition': '是否更新流程定义',
Add: '添加',
'DAG graph name cannot be empty': 'DAG图名称不能为空',
'Create Datasource': '创建数据源',
'Project Home': '项目首页',
'Project Manage': '项目管理',
'Create Project': '创建项目',
'Cron Manage': '定时管理',
'Copy Workflow': '复制工作流',
'Tenant Manage': '租户管理',
'Create Tenant': '创建租户',
'User Manage': '用户管理',
'Create User': '创建用户',
'User Information': '用户信息',
'Edit Password': '密码修改',
success: '成功',
failed: '失败',
delete: '删除',
'Please choose': '请选择',
'Please enter a positive integer': '请输入正整数',
'Program Type': '程序类型',
'Main class': '主函数的class',
'Main jar package': '主jar包',
'Please enter main jar package': '请选择主jar包',
'Command-line parameters': '命令行参数',
'Please enter Command-line parameters': '请输入命令行参数',
'Other parameters': '其他参数',
'Please enter other parameters': '请输入其他参数',
Resources: '资源',
'Custom Parameters': '自定义参数',
'Custom template': '自定义模版',
'Please enter main class': '请填写主函数的class',
Datasource: '数据源',
methods: '方法',
'Please enter method(optional)': '请输入方法(选填)',
Script: '脚本',
'Please enter script(required)': '请输入脚本(必填)',
'Deploy Mode': '部署方式',
'Flink Version': 'Flink版本',
'Driver core number': 'Driver内核数',
'Please enter driver core number': '请输入Driver内核数',
'Driver memory use': 'Driver内存数',
'Please enter driver memory use': '请输入Driver内存数',
'Number of Executors': 'Executor数量',
'Please enter the number of Executor': '请输入Executor数量',
'Executor memory': 'Executor内存数',
'Please enter the Executor memory': '请输入Executor内存数',
'Executor core number': 'Executor内核数',
'Please enter Executor core number': '请输入Executor内核数',
'The number of Executors should be a positive integer': 'Executor数量为正整数',
'Memory should be a positive integer': '内存数为数字',
'Please enter ExecutorPlease enter Executor core number': '请填写Executor内核数',
'Core number should be positive integer': '内核数为正整数',
'Please enter jobManager memory': '请输入JobManager内存数',
'Please enter the taskManager memory': '请输入TaskManager内存数',
  'Please enter solt number': '请输入slot数量',
'Please enter taskManager number': '请输入taskManager数量',
'SQL Type': 'sql类型',
Title: '主题',
'Please enter the title of email': '请输入邮件主题',
Table: '表名',
TableMode: '表格',
Attachment: '附件',
'SQL Parameter': 'sql参数',
'SQL Statement': 'sql语句',
'UDF Function': 'UDF函数',
FILE: '文件',
UDF: 'UDF',
'File Subdirectory': '文件子目录',
'Please enter a SQL Statement(required)': '请输入sql语句(必填)',
'Please enter a JSON Statement(required)': '请输入json语句(必填)',
'One form or attachment must be selected': '表格、附件必须勾选一个',
'Recipient required': '收件人邮箱必填',
'Mail subject required': '邮件主题必填',
'Child Node': '子节点',
'Please select a sub-Process': '请选择子工作流',
Edit: '编辑',
'Switch To This Version': '切换到该版本',
'Datasource Name': '数据源名称',
'Please enter datasource name': '请输入数据源名称',
IP: 'IP主机名',
'Please enter IP': '请输入IP主机名',
Port: '端口',
'Please enter port': '请输入端口',
'Database Name': '数据库名',
'Please enter database name': '请输入数据库名',
'Oracle Connect Type': '服务名或SID',
'Oracle Service Name': '服务名',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc连接参数',
'Test Connect': '测试连接',
  'Please enter resource name': '请输入资源名称',
'Please enter resource folder name': '请输入资源文件夹名称',
'Please enter a non-query SQL statement': '请输入非查询sql语句',
'Please enter IP/hostname': '请输入IP/主机名',
'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式',
'#': '编号',
'Datasource Type': '数据源类型',
'Datasource Parameter': '数据源参数',
'Create Time': '创建时间',
'Update Time': '更新时间',
Operation: '操作',
'Current Version': '当前版本',
'Click to view': '点击查看',
'Delete?': '确定删除吗?',
'Switch Version Successfully': '切换版本成功',
'Confirm Switch To This Version?': '确定切换到该版本吗?',
Confirm: '确定',
'Task status statistics': '任务状态统计',
Number: '数量',
State: '状态',
'Process Status Statistics': '流程状态统计',
'Process Definition Statistics': '流程定义统计',
'Project Name': '项目名称',
'Please enter name': '请输入名称',
'Owned Users': '所属用户',
'Process Pid': '进程Pid',
'Zk registration directory': 'zk注册目录',
cpuUsage: 'cpuUsage',
memoryUsage: 'memoryUsage',
'Last heartbeat time': '最后心跳时间',
'Edit Tenant': '编辑租户',
'Tenant Code': '租户编码',
'Tenant Name': '租户名称',
Queue: '队列',
'Please enter the tenant code in English': '请输入租户编码只允许英文',
'Please enter tenant code in English': '请输入英文租户编码',
'Please enter tenant code': '请输入租户编码',
'Please enter tenant Name': '请输入租户名称',
'The tenant code. Only letters or a combination of letters and numbers are allowed': '租户编码只允许字母或字母与数字组合',
'Edit User': '编辑用户',
Tenant: '租户',
Email: '邮件',
Phone: '手机',
'User Type':'用户类型',
'Please enter phone number': '请输入手机',
'Please enter email': '请输入邮箱',
'Please enter the correct email format': '请输入正确的邮箱格式',
'Please enter the correct mobile phone format': '请输入正确的手机格式',
Project: '项目',
Authorize: '授权',
'File resources': '文件资源',
'UDF resources': 'UDF资源',
'UDF resources directory': 'UDF资源目录',
'Please select UDF resources directory': '请选择UDF资源目录',
'Edit alarm group': '编辑告警组',
'Create alarm group': '创建告警组',
'Group Name': '组名称',
'Please enter group name': '请输入组名称',
'Group Type': '组类型',
Remarks: '备注',
SMS: '短信',
'Managing Users': '管理用户',
Permission: '权限',
Administrator: '管理员',
'Confirm Password': '确认密码',
'Please enter confirm password': '请输入确认密码',
'Password cannot be in Chinese': '密码不能为中文',
'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码',
'Confirmation password cannot be in Chinese': '确认密码不能为中文',
'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码',
'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认',
'Please select the datasource': '请选择数据源',
'Please select resources': '请选择资源',
Query: '查询',
'Non Query': '非查询',
'prop(required)': 'prop(必填)',
'value(optional)': 'value(选填)',
'value(required)': 'value(必填)',
'prop is empty': 'prop不能为空',
'value is empty': 'value不能为空',
'prop is repeat': 'prop中有重复',
'Start Time': '开始时间',
'End Time': '结束时间',
crontab: 'crontab',
'Failure Strategy': '失败策略',
online: '上线',
offline: '下线',
'Task Status': '任务状态',
'Process Instance': '工作流实例',
'Task Instance': '任务实例',
'Select date range': '选择日期区间',
Date: '日期',
waiting: '等待',
execution: '执行中',
finish: '完成',
'Create File': '创建文件',
'Create folder': '创建文件夹',
'File Name': '文件名称',
'Folder Name': '文件夹名称',
'File Format': '文件格式',
'Folder Format': '文件夹格式',
'File Content': '文件内容',
'Upload File Size': '文件大小不能超过1G',
Create: '创建',
'Please enter the resource content': '请输入资源内容',
'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行',
'File Details': '文件详情',
'Download Details': '下载详情',
Return: '返回',
Save: '保存',
'File Manage': '文件管理',
'Upload Files': '上传文件',
'Create UDF Function': '创建UDF函数',
'Upload UDF Resources': '上传UDF资源',
'Service-Master': '服务管理-Master',
'Service-Worker': '服务管理-Worker',
'Process Name': '工作流名称',
Executor: '执行用户',
'Run Type': '运行类型',
'Scheduling Time': '调度时间',
'Run Times': '运行次数',
host: 'host',
'fault-tolerant sign': '容错标识',
Rerun: '重跑',
'Recovery Failed': '恢复失败',
Stop: '停止',
Pause: '暂停',
'Recovery Suspend': '恢复运行',
Gantt: '甘特图',
Name: '名称',
'Node Type': '节点类型',
'Submit Time': '提交时间',
Duration: '运行时长',
'Retry Count': '重试次数',
'Task Name': '任务名称',
'Task Date': '任务日期',
'Source Table': '源表',
'Record Number': '记录数',
'Target Table': '目标表',
'Online viewing type is not supported': '不支持在线查看类型',
Size: '大小',
Rename: '重命名',
Download: '下载',
Export: '导出',
'Version Info': '版本信息',
Submit: '提交',
'Edit UDF Function': '编辑UDF函数',
type: '类型',
'UDF Function Name': 'UDF函数名称',
'Please enter a function name': '请输入函数名',
'Package Name': '包名类名',
'Please enter a Package name': '请输入包名类名',
Parameter: '参数',
'Please enter a parameter': '请输入参数',
'UDF Resources': 'UDF资源',
'Upload Resources': '上传资源',
Instructions: '使用说明',
'Please enter a instructions': '请输入使用说明',
'Please enter a UDF function name': '请输入UDF函数名称',
'Select UDF Resources': '请选择UDF资源',
'Class Name': '类名',
'Jar Package': 'jar包',
'Library Name': '库名',
'UDF Resource Name': 'UDF资源名称',
'File Size': '文件大小',
Description: '描述',
'Drag Nodes and Selected Items': '拖动节点和选中项',
'Select Line Connection': '选择线条连接',
'Delete selected lines or nodes': '删除选中的线或节点',
'Full Screen': '全屏',
Unpublished: '未发布',
'Start Process': '启动工作流',
'Execute from the current node': '从当前节点开始执行',
'Recover tolerance fault process': '恢复被容错的工作流',
'Resume the suspension process': '恢复运行流程',
'Execute from the failed nodes': '从失败节点开始执行',
'Complement Data': '补数',
'Scheduling execution': '调度执行',
'Recovery waiting thread': '恢复等待线程',
'Submitted successfully': '提交成功',
Executing: '正在执行',
'Ready to pause': '准备暂停',
'Ready to stop': '准备停止',
'Need fault tolerance': '需要容错',
kill: 'kill',
'Waiting for thread': '等待线程',
'Waiting for dependence': '等待依赖',
Start: '运行',
Copy: '复制节点',
'Copy name': '复制名称',
Delete: '删除',
'Please enter keyword': '请输入关键词',
'File Upload': '文件上传',
'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!',
'Drag area upload': '拖动区域上传',
Upload: '上传',
'ReUpload File': '重新上传文件',
'Please enter file name': '请输入文件名',
'Please select the file to upload': '请选择要上传的文件',
'Resources manage': '资源中心',
Security: '安全中心',
Logout: '退出',
'No data': '查询无数据',
'Uploading...': '文件上传中',
'Loading...': '正在努力加载中...',
List: '列表',
'Unable to download without proper url': '无下载url无法下载',
Process: '工作流',
'Process definition': '工作流定义',
'Task record': '任务记录',
'Warning group manage': '告警组管理',
'Servers manage': '服务管理',
'UDF manage': 'UDF管理',
'Resource manage': '资源管理',
'Function manage': '函数管理',
'Edit password': '修改密码',
'Ordinary users': '普通用户',
'Create process': '创建工作流',
'Import process': '导入工作流',
'Timing state': '定时状态',
Timing: '定时',
TreeView: '树形图',
'Mailbox already exists! Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复',
'Mailbox input is illegal': '邮箱输入不合法',
'Please set the parameters before starting': '启动前请先设置参数',
Continue: '继续',
End: '结束',
'Node execution': '节点执行',
'Backward execution': '向后执行',
'Forward execution': '向前执行',
'Execute only the current node': '仅执行当前节点',
'Notification strategy': '通知策略',
'Notification group': '通知组',
'Please select a notification group': '请选择通知组',
Recipient: '收件人',
Cc: '抄送人',
'Whether it is a complement process?': '是否补数',
'Schedule date': '调度日期',
'Mode of execution': '执行方式',
'Serial execution': '串行执行',
'Parallel execution': '并行执行',
'Set parameters before timing': '定时前请先设置参数',
'Start and stop time': '起止时间',
'Please select time': '请选择时间',
'Please enter crontab': '请输入crontab',
none_1: '都不发',
success_1: '成功发',
failure_1: '失败发',
All_1: '成功或失败都发',
Toolbar: '工具栏',
'View variables': '查看变量',
'Format DAG': '格式化DAG',
'Refresh DAG status': '刷新DAG状态',
Return_1: '返回上一节点',
'Please enter format': '请输入格式为',
'connection parameter': '连接参数',
'Process definition details': '流程定义详情',
'Create process definition': '创建流程定义',
'Scheduled task list': '定时任务列表',
'Process instance details': '流程实例详情',
'Create Resource': '创建资源',
'User Center': '用户中心',
'Please enter method': '请输入方法',
none: '无',
name: '名称',
'Process priority': '流程优先级',
'Task priority': '任务优先级',
'Task timeout alarm': '任务超时告警',
'Timeout strategy': '超时策略',
'Timeout alarm': '超时告警',
'Timeout failure': '超时失败',
'Timeout period': '超时时长',
'Waiting Dependent complete': '等待依赖完成',
'Waiting Dependent start': '等待依赖启动',
'Check interval': '检查间隔',
'Timeout strategy must be selected': '超时策略必须选一个',
'Timeout must be a positive integer': '超时时长必须为正整数',
'Timeout must be longer than check interval': '超时时间必须比检查间隔长',
'Add dependency': '添加依赖',
and: '且',
or: '或',
month: '月',
week: '周',
day: '日',
hour: '时',
Running: '正在运行',
'Waiting for dependency to complete': '等待依赖完成',
'Delay execution': '延时执行',
  'Forced success': '强制成功',
Selected: '已选',
CurrentHour: '当前小时',
Last1Hour: '前1小时',
Last2Hours: '前2小时',
Last3Hours: '前3小时',
Last24Hours: '前24小时',
today: '今天',
Last1Days: '昨天',
Last2Days: '前两天',
Last3Days: '前三天',
Last7Days: '前七天',
ThisWeek: '本周',
LastWeek: '上周',
LastMonday: '上周一',
LastTuesday: '上周二',
LastWednesday: '上周三',
LastThursday: '上周四',
LastFriday: '上周五',
LastSaturday: '上周六',
LastSunday: '上周日',
ThisMonth: '本月',
LastMonth: '上月',
LastMonthBegin: '上月初',
LastMonthEnd: '上月末',
'Refresh status succeeded': '刷新状态成功',
'Queue manage': 'Yarn 队列管理',
'Create queue': '创建队列',
'Edit queue': '编辑队列',
'Datasource manage': '数据源中心',
'History task record': '历史任务记录',
'Please go online': '不要忘记上线',
'Queue value': '队列值',
'Please enter queue value': '请输入队列值',
'Worker group manage': 'Worker分组管理',
'Create worker group': '创建Worker分组',
'Edit worker group': '编辑Worker分组',
'Token manage': '令牌管理',
'Create token': '创建令牌',
'Edit token': '编辑令牌',
'Please enter the IP address separated by commas': '请输入IP地址多个用英文逗号隔开',
'Note: Multiple IP addresses have been comma separated': '注意:多个IP地址以英文逗号分割',
'Failure time': '失效时间',
'Expiration time': '失效时间',
User: '用户',
'Please enter token': '请输入令牌',
'Generate token': '生成令牌',
Monitor: '监控中心',
Group: '分组',
'Queue statistics': '队列统计',
'Command status statistics': '命令状态统计',
'Task kill': '等待kill任务',
'Task queue': '等待执行任务',
'Error command count': '错误指令数',
'Normal command count': '正确指令数',
Manage: '管理',
'Number of connections': '连接数',
Sent: '发送量',
Received: '接收量',
'Min latency': '最低延时',
'Avg latency': '平均延时',
'Max latency': '最大延时',
'Node count': '节点数',
'Query time': '当前查询时间',
'Node self-test status': '节点自检状态',
'Health status': '健康状态',
'Max connections': '最大连接数',
'Threads connections': '当前连接数',
'Max used connections': '同时使用连接最大数',
'Threads running connections': '数据库当前活跃连接数',
'Worker group': 'Worker分组',
'Please enter a positive integer greater than 0': '请输入大于 0 的正整数',
'Pre Statement': '前置sql',
'Post Statement': '后置sql',
'Statement cannot be empty': '语句不能为空',
'Process Define Count': '工作流定义数',
'Process Instance Running Count': '正在运行的流程数',
'Please select a queue': '默认为租户关联队列',
'command number of waiting for running': '待执行的命令数',
'failure command number': '执行失败的命令数',
'tasks number of waiting running': '待运行任务数',
'task number of ready to kill': '待杀死任务数',
'Statistics manage': '统计管理',
statistics: '统计',
'select tenant': '选择租户',
'Please enter Principal': '请输入Principal',
'The start time must not be the same as the end': '开始时间和结束时间不能相同',
'Startup parameter': '启动参数',
'Startup type': '启动类型',
'warning of timeout': '超时告警',
'Next five execution times': '接下来五次执行时间',
'Execute time': '执行时间',
'Complement range': '补数范围',
slot: 'slot数量',
  taskManager: 'taskManager数量',
jobManagerMemory: 'jobManager内存数',
taskManagerMemory: 'taskManager内存数',
'Http Url': '请求地址',
'Http Method': '请求类型',
'Http Parameters': '请求参数',
'Http Parameters Key': '参数名',
'Http Parameters Position': '参数位置',
'Http Parameters Value': '参数值',
'Http Check Condition': '校验条件',
'Http Condition': '校验内容',
'Please Enter Http Url': '请填写请求地址(必填)',
'Please Enter Http Condition': '请填写校验内容',
'There is no data for this period of time': '该时间段无数据',
'IP address cannot be empty': 'IP地址不能为空',
'Please enter the correct IP': '请输入正确的IP',
'Please generate token': '请生成Token',
'Spark Version': 'Spark版本',
TargetDataBase: '目标库',
TargetTable: '目标表',
'Please enter the table of target': '请输入目标表名',
'Please enter a Target Table(required)': '请输入目标表(必填)',
SpeedByte: '限流(字节数)',
SpeedRecord: '限流(记录数)',
'0 means unlimited by byte': 'KB,0代表不限制',
'0 means unlimited by count': '0代表不限制',
'Modify User': '修改用户',
'Whether directory': '是否文件夹',
Yes: '是',
No: '否',
'Hadoop Custom Params': 'Hadoop参数',
'Sqoop Advanced Parameters': 'Sqoop参数',
'Sqoop Job Name': '任务名称',
'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)',
'Please enter Mysql Table(required)': '请输入Mysql表名(必填)',
'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开',
'Please enter Target Dir(required)': '请输入目标路径(必填)',
'Please enter Export Dir(required)': '请输入数据源路径(必填)',
'Please enter Hive Database(required)': '请输入Hive数据库(必填)',
'Please enter Hive Table(required)': '请输入Hive表名(必填)',
'Please enter Hive Partition Keys': '请输入分区键',
'Please enter Hive Partition Values': '请输入分区值',
'Please enter Replace Delimiter': '请输入替换分隔符',
'Please enter Fields Terminated': '请输入列分隔符',
'Please enter Lines Terminated': '请输入行分隔符',
'Please enter Concurrency': '请输入并发度',
'Please enter Update Key': '请输入更新列',
'Please enter Job Name(required)': '请输入任务名称(必填)',
'Please enter Custom Shell(required)': '请输入自定义脚本',
Direct: '流向',
Type: '类型',
ModelType: '模式',
ColumnType: '列类型',
Database: '数据库',
Column: '列',
'Map Column Hive': 'Hive类型映射',
'Map Column Java': 'Java类型映射',
'Export Dir': '数据源路径',
'Hive partition Keys': 'Hive 分区键',
'Hive partition Values': 'Hive 分区值',
FieldsTerminated: '列分隔符',
LinesTerminated: '行分隔符',
IsUpdate: '是否更新',
UpdateKey: '更新列',
UpdateMode: '更新类型',
'Target Dir': '目标路径',
DeleteTargetDir: '是否删除目录',
FileType: '保存格式',
CompressionCodec: '压缩类型',
CreateHiveTable: '是否创建新表',
DropDelimiter: '是否删除分隔符',
OverWriteSrc: '是否覆盖数据源',
ReplaceDelimiter: '替换分隔符',
Concurrency: '并发度',
Form: '表单',
OnlyUpdate: '只更新',
AllowInsert: '无更新便插入',
'Data Source': '数据来源',
'Data Target': '数据目的',
'All Columns': '全表导入',
'Some Columns': '选择列',
'Branch flow': '分支流转',
'Custom Job': '自定义任务',
'Custom Script': '自定义脚本',
'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点',
'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填',
'No resources exist': '不存在资源',
'Please delete all non-existing resources': '请删除所有不存在资源',
'Unauthorized or deleted resources': '未授权或已删除资源',
'Please delete all non-existent resources': '请删除所有未授权或已删除资源',
'Kinship': '工作流关系',
'Reset': '重置',
'KinshipStateActive': '当前选择',
'KinshipState1': '已上线',
'KinshipState0': '工作流未上线',
'KinshipState10': '调度未上线',
'Dag label display control': 'Dag节点名称显隐',
'Enable': '启用',
'Disable': '停用',
'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!',
'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存',
'User name length is between 3 and 39': '用户名长度在3~39之间',
'Timeout Settings': '超时设置',
'Connect Timeout':'连接超时',
'Socket Timeout':'Socket超时',
'Connect timeout be a positive integer': '连接超时必须为数字',
'Socket Timeout be a positive integer': 'Socket超时必须为数字',
'ms':'毫秒',
'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077',
'Master': 'Master',
'Please select the waterdrop resources':'请选择waterdrop配置文件',
zkDirectory: 'zk注册目录',
'Directory detail': '查看目录详情',
'Connection name': '连线名',
'Current connection settings': '当前连线设置',
'Please save the DAG before formatting': '格式化前请先保存DAG',
'Batch copy': '批量复制',
'Related items': '关联项目',
'Project name is required': '项目名称必填',
'Batch move': '批量移动',
Version: '版本',
'Pre tasks': '前置任务',
'Running Memory':'运行内存',
'Max Memory':'最大内存',
'Min Memory':'最小内存',
'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建'
}
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 3,457 |
[bug] flink args build problem
|
**Describe the bug**
There is a bug in the way DolphinScheduler 1.3.1 builds the Flink launch parameters.
**To Reproduce**
1. If the slot number and taskManager number are set, the jar package cannot be found when the Flink task is executed
2. In the other parameters, specifying -yqu (the Flink on Yarn queue name) or -ynm (the Flink application name) has no effect
!!!! Setting other launch parameters such as -yqu and -ynm scrambles the order in which the Flink launch parameters are assembled
2020-08-10 21:03:31.400 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Hadoop version: 3.0.0-cdh6.3.2
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - JVM Options:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog.file=/opt/flink-1.10.1/log/flink-dscheduler-client-cdh-05.log
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlog4j.configuration=file:/opt/flink-1.10.1/conf/log4j-cli.properties
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -Dlogback.configurationFile=file:/opt/flink-1.10.1/conf/logback.xml
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - Program Arguments:
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - run
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -m
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - yarn-cluster
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yjm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 1G
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ytm
2020-08-10 21:03:31.401 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - 6G
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -d
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -c
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - cn.~~.analysis.DurationAndMileage
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - analysis-assembly-2.3.jar
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - --qu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - default
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -yqu
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - test
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - -ynm
2020-08-10 21:03:31.402 10.15.1.19 [main] INFO org.apache.flink.client.cli.CliFrontend - DurationAndMileage
|
https://github.com/apache/dolphinscheduler/issues/3457
|
https://github.com/apache/dolphinscheduler/pull/4166
|
68541f281d0b0908f605ad49847d3e7acdd5a302
|
cbc30b4900215424dcbbfb49539259d32273efc3
| 2020-08-10T12:56:16Z |
java
| 2020-12-10T14:37:21Z |
pom.xml
|
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.3.4-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>[email protected]</post>
<subscribe>[email protected]</subscribe>
<unsubscribe>[email protected]</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.18.RELEASE</spring.version>
<spring.boot.version>2.1.17.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.3.0</quartz.version>
<jackson.version>2.9.10</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<druid.version>1.1.22</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.11</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.7.0</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>20.0</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<presto.jdbc.version>0.238.1</presto.jdbc.version>
<jsp-2.1.version>6.1.14</jsp-2.1.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<apache.rat.version>0.13</apache.rat.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
<guava-retry.version>2.0.0</guava-retry.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-plugin-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.facebook.presto</groupId>
<artifactId>presto-jdbc</artifactId>
<version>${presto.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<!-- for api module -->
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>${jsp-2.1.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
<dependency>
<groupId>com.github.rholder</groupId>
<artifactId>guava-retrying</artifactId>
<version>${guava-retry.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<configuration>
<source>8</source>
<failOnError>false</failOnError>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/alert/utils/DingTalkUtilsTest.java</include>
<include>**/alert/template/AlertTemplateFactoryTest.java</include>
<include>**/alert/template/impl/DefaultHTMLTemplateTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/ExcelUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/utils/JSONUtilsTest.java</include>
<!--<include>**/alert/utils/MailUtilsTest.java</include>-->
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/controller/TenantControllerTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
                        <include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/PasswordAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessDefinitionVersionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/WorkFlowLineageServiceTest.java</include>
<include>**/api/controller/TaskInstanceControllerTest.java</include>
<include>**/api/controller/WorkFlowLineageControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/DataxParametersTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/HttpParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/process/ProcessBuilderForWin32Test.java</include>
<include>**/common/utils/process/ProcessEnvironmentForWin32Test.java</include>
<include>**/common/utils/process/ProcessImplForWin32Test.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/TimePlaceholderUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/VarPoolUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/utils/KerberosHttpClientTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/utils/RetryerUtilsTest.java</include>
<include>**/common/plugin/FilePluginManagerTest</include>
<include>**/common/plugin/PluginClassLoaderTest</include>
<include>**/common/enums/ExecutionStatusTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/entity/UdfFuncTest.java</include>
<include>**/remote/JsonSerializerTest.java</include>
<include>**/remote/RemoveTaskLogResponseCommandTest.java</include>
<include>**/remote/RemoveTaskLogRequestCommandTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/remote/NettyUtilTest.java</include>
<include>**/remote/ResponseFutureTest.java</include>
<include>**/server/log/LoggerServerTest.java</include>
<include>**/server/entity/SQLTaskExecutionContextTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>-->
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>-->
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/master/SubProcessTaskTest.java</include>
<include>**/server/master/processor/TaskAckProcessorTest.java</include>
<include>**/server/master/processor/TaskKillResponseProcessorTest.java</include>
<include>**/server/master/processor/queue/TaskResponseServiceTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/HostTest.java</include>
<!--<include>**/server/utils/FlinkArgsUtilsTest.java</include>-->
<include>**/server/utils/LogUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<!--<include>**/server/worker/task/datax/DataxTaskTest.java</include>-->
<!--<include>**/server/worker/task/http/HttpTaskTest.java</include>-->
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/worker/task/TaskManagerTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/server/worker/runner/TaskExecuteThreadTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/process/ProcessServiceTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/zk/CuratorZookeeperClientTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
                        <!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>-->
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionVersionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserAlertGroupMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/dao/datasource/OracleDataSourceTest.java</include>
<include>**/dao/datasource/HiveDataSourceTest.java</include>
<include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include>
<include>**/dao/upgrade/WokrerGrouopDaoTest.java</include>
<include>**/dao/upgrade/UpgradeDaoTest.java</include>
<include>**/plugin/model/AlertDataTest.java</include>
<include>**/plugin/model/AlertInfoTest.java</include>
<include>**/plugin/utils/PropertyUtilsTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<version>${apache.rat.version}</version>
<configuration>
<excludeSubProjects>false</excludeSubProjects>
<addDefaultLicenseMatchers>false</addDefaultLicenseMatchers>
<licenses>
<license implementation="org.apache.rat.analysis.license.SimplePatternBasedLicense">
<licenseFamilyCategory>AL20</licenseFamilyCategory>
<licenseFamilyName>Apache License, 2.0</licenseFamilyName>
<patterns>
<pattern>Licensed to the Apache Software Foundation (ASF)</pattern>
</patterns>
</license>
</licenses>
<licenseFamilies>
<licenseFamily implementation="org.apache.rat.license.SimpleLicenseFamily">
<familyName>Apache License, 2.0</familyName>
</licenseFamily>
</licenseFamilies>
<excludes>
<exclude>**/node_modules/**</exclude>
<exclude>**/node/**</exclude>
<exclude>**/dist/**</exclude>
<exclude>**/licenses/**</exclude>
<exclude>.github/**</exclude>
<exclude>**/sql/soft_version</exclude>
<exclude>**/common/utils/ScriptRunner.java</exclude>
<exclude>**/*.json</exclude>
<!-- document files -->
<exclude>**/*.md</exclude>
                        <exclude>**/*.MD</exclude>
<exclude>**/*.txt</exclude>
<exclude>**/docs/**</exclude>
<exclude>**/*.babelrc</exclude>
<exclude>**/*.eslintrc</exclude>
<exclude>**/.mvn/jvm.config</exclude>
<exclude>**/.mvn/wrapper/**</exclude>
</excludes>
<consoleOutput>true</consoleOutput>
</configuration>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-plugin-api</module>
<module>dolphinscheduler-microbench</module>
</modules>
</project>
|
closed
|
apache/dolphinscheduler
|
https://github.com/apache/dolphinscheduler
| 4,138 |
[Feature][Master] dispatch workgroup error add sleep time
|
If the worker group of a task cannot be found when the task is dispatched, a large number of error logs will be output.

It is suggested to add a sleep operation:
when there are tasks whose dispatch failed and the number of tasks in the current task queue [taskPriorityQueue] is less than 10, sleep for 1 second.
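
A minimal sketch of the suggested back-off, assuming it would be applied right after the failed tasks are put back into `taskPriorityQueue` inside `TaskPriorityQueueConsumer.run()`. The names `failedDispatchTasks`, `taskPriorityQueue` and `Constants.SLEEP_TIME_MILLIS` are taken from the existing loop; the 10-task threshold and the 1-second sleep come from the proposal above, and the exact condition used by the linked PR may differ:

```java
// Sketch only: end of one dispatch round inside TaskPriorityQueueConsumer.run().
for (String dispatchFailedTask : failedDispatchTasks) {
    taskPriorityQueue.put(dispatchFailedTask);
}

// If some tasks could not be dispatched and the queue holds fewer than 10 tasks,
// sleep briefly instead of spinning and flooding the log with dispatch errors.
if (!failedDispatchTasks.isEmpty() && taskPriorityQueue.size() < 10) {
    Thread.sleep(Constants.SLEEP_TIME_MILLIS); // 1 second; InterruptedException is caught by run()'s try/catch
}
```

Backing off only when dispatch keeps failing and the queue is nearly empty keeps normal dispatch throughput unchanged while preventing the tight retry loop that produces the log flood.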
|
https://github.com/apache/dolphinscheduler/issues/4138
|
https://github.com/apache/dolphinscheduler/pull/4139
|
0039b1bfcbf822cb851898a1ceb4844fd6943731
|
b3120a74d2656f7ad2054ba8245262551063b549
| 2020-12-01T11:08:26Z |
java
| 2020-12-11T07:35:40Z |
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.consumer;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.SqoopJobType;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.task.procedure.ProcedureParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.task.sqoop.SqoopParameters;
import org.apache.dolphinscheduler.common.task.sqoop.sources.SourceMysqlParameter;
import org.apache.dolphinscheduler.common.task.sqoop.targets.TargetMysqlParameter;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.EnumUtils;
import org.apache.dolphinscheduler.common.utils.FileUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.server.builder.TaskExecutionContextBuilder;
import org.apache.dolphinscheduler.server.entity.DataxTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.ProcedureTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.SqoopTaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.entity.TaskPriority;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType;
import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.annotation.PostConstruct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
/**
 * TaskPriorityQueue consumer
*/
@Component
public class TaskPriorityQueueConsumer extends Thread {
/**
     * logger of TaskPriorityQueueConsumer
*/
private static final Logger logger = LoggerFactory.getLogger(TaskPriorityQueueConsumer.class);
/**
     * task priority queue
*/
@Autowired
private TaskPriorityQueue taskPriorityQueue;
/**
* processService
*/
@Autowired
private ProcessService processService;
/**
* executor dispatcher
*/
@Autowired
private ExecutorDispatcher dispatcher;
/**
* master config
*/
@Autowired
private MasterConfig masterConfig;
@PostConstruct
public void init() {
super.setName("TaskUpdateQueueConsumerThread");
super.start();
}
@Override
public void run() {
List<String> failedDispatchTasks = new ArrayList<>();
while (Stopper.isRunning()) {
try {
int fetchTaskNum = masterConfig.getMasterDispatchTaskNumber();
failedDispatchTasks.clear();
for (int i = 0; i < fetchTaskNum; i++) {
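                    // the queue is empty at the moment: sleep briefly before polling again to avoid a busy loop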
if (taskPriorityQueue.size() <= 0) {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
continue;
}
                    // if there is no task, take() blocks here
String taskPriorityInfo = taskPriorityQueue.take();
TaskPriority taskPriority = TaskPriority.of(taskPriorityInfo);
boolean dispatchResult = dispatch(taskPriority.getTaskId());
if (!dispatchResult) {
failedDispatchTasks.add(taskPriorityInfo);
}
}
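                // re-queue the tasks that failed to dispatch so they are retried in the next round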
for (String dispatchFailedTask : failedDispatchTasks) {
taskPriorityQueue.put(dispatchFailedTask);
}
} catch (Exception e) {
logger.error("dispatcher task error", e);
}
}
}
/**
* dispatch task
*
* @param taskInstanceId taskInstanceId
* @return result
*/
private boolean dispatch(int taskInstanceId) {
boolean result = false;
try {
TaskExecutionContext context = getTaskExecutionContext(taskInstanceId);
ExecutionContext executionContext = new ExecutionContext(context.toCommand(), ExecutorType.WORKER, context.getWorkerGroup());
if (taskInstanceIsFinalState(taskInstanceId)) {
                // when the task is already finished, ignore it; there is no need to dispatch it anymore
return true;
} else {
result = dispatcher.dispatch(executionContext);
}
} catch (ExecuteException e) {
logger.error("dispatch error", e);
}
return result;
}
/**
* taskInstance is final state
     * success, failure, kill, stop, pause and thread waiting are final states
*
* @param taskInstanceId taskInstanceId
* @return taskInstance is final state
*/
public Boolean taskInstanceIsFinalState(int taskInstanceId) {
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId);
return taskInstance.getState().typeIsFinished();
}
/**
* get TaskExecutionContext
*
* @param taskInstanceId taskInstanceId
* @return TaskExecutionContext
*/
protected TaskExecutionContext getTaskExecutionContext(int taskInstanceId) {
TaskInstance taskInstance = processService.getTaskInstanceDetailByTaskId(taskInstanceId);
// task type
TaskType taskType = TaskType.valueOf(taskInstance.getTaskType());
// task node
TaskNode taskNode = JSONUtils.parseObject(taskInstance.getTaskJson(), TaskNode.class);
Integer userId = taskInstance.getProcessDefine() == null ? 0 : taskInstance.getProcessDefine().getUserId();
Tenant tenant = processService.getTenantForProcess(taskInstance.getProcessInstance().getTenantId(), userId);
// verify tenant is null
if (verifyTenantIsNull(tenant, taskInstance)) {
processService.changeTaskState(taskInstance, ExecutionStatus.FAILURE,
taskInstance.getStartTime(),
taskInstance.getHost(),
null,
null,
taskInstance.getId());
return null;
}
// set queue for process instance, user-specified queue takes precedence over tenant queue
String userQueue = processService.queryUserQueueByProcessInstanceId(taskInstance.getProcessInstanceId());
taskInstance.getProcessInstance().setQueue(StringUtils.isEmpty(userQueue) ? tenant.getQueue() : userQueue);
taskInstance.getProcessInstance().setTenantCode(tenant.getTenantCode());
taskInstance.setExecutePath(getExecLocalPath(taskInstance));
taskInstance.setResources(getResourceFullNames(taskNode));
SQLTaskExecutionContext sqlTaskExecutionContext = new SQLTaskExecutionContext();
DataxTaskExecutionContext dataxTaskExecutionContext = new DataxTaskExecutionContext();
ProcedureTaskExecutionContext procedureTaskExecutionContext = new ProcedureTaskExecutionContext();
SqoopTaskExecutionContext sqoopTaskExecutionContext = new SqoopTaskExecutionContext();
// SQL task
if (taskType == TaskType.SQL) {
setSQLTaskRelation(sqlTaskExecutionContext, taskNode);
}
// DATAX task
if (taskType == TaskType.DATAX) {
setDataxTaskRelation(dataxTaskExecutionContext, taskNode);
}
// procedure task
if (taskType == TaskType.PROCEDURE) {
setProcedureTaskRelation(procedureTaskExecutionContext, taskNode);
}
if (taskType == TaskType.SQOOP) {
setSqoopTaskRelation(sqoopTaskExecutionContext, taskNode);
}
return TaskExecutionContextBuilder.get()
.buildTaskInstanceRelatedInfo(taskInstance)
.buildProcessInstanceRelatedInfo(taskInstance.getProcessInstance())
.buildProcessDefinitionRelatedInfo(taskInstance.getProcessDefine())
.buildSQLTaskRelatedInfo(sqlTaskExecutionContext)
.buildDataxTaskRelatedInfo(dataxTaskExecutionContext)
.buildProcedureTaskRelatedInfo(procedureTaskExecutionContext)
.buildSqoopTaskRelatedInfo(sqoopTaskExecutionContext)
.create();
}
/**
* set procedure task relation
*
* @param procedureTaskExecutionContext procedureTaskExecutionContext
* @param taskNode taskNode
*/
private void setProcedureTaskRelation(ProcedureTaskExecutionContext procedureTaskExecutionContext, TaskNode taskNode) {
ProcedureParameters procedureParameters = JSONUtils.parseObject(taskNode.getParams(), ProcedureParameters.class);
int datasourceId = procedureParameters.getDatasource();
DataSource datasource = processService.findDataSourceById(datasourceId);
procedureTaskExecutionContext.setConnectionParams(datasource.getConnectionParams());
}
/**
* set datax task relation
*
* @param dataxTaskExecutionContext dataxTaskExecutionContext
* @param taskNode taskNode
*/
private void setDataxTaskRelation(DataxTaskExecutionContext dataxTaskExecutionContext, TaskNode taskNode) {
DataxParameters dataxParameters = JSONUtils.parseObject(taskNode.getParams(), DataxParameters.class);
DataSource dataSource = processService.findDataSourceById(dataxParameters.getDataSource());
DataSource dataTarget = processService.findDataSourceById(dataxParameters.getDataTarget());
if (dataSource != null) {
dataxTaskExecutionContext.setDataSourceId(dataxParameters.getDataSource());
dataxTaskExecutionContext.setSourcetype(dataSource.getType().getCode());
dataxTaskExecutionContext.setSourceConnectionParams(dataSource.getConnectionParams());
}
if (dataTarget != null) {
dataxTaskExecutionContext.setDataTargetId(dataxParameters.getDataTarget());
dataxTaskExecutionContext.setTargetType(dataTarget.getType().getCode());
dataxTaskExecutionContext.setTargetConnectionParams(dataTarget.getConnectionParams());
}
}
/**
* set sqoop task relation
*
* @param sqoopTaskExecutionContext sqoopTaskExecutionContext
* @param taskNode taskNode
*/
private void setSqoopTaskRelation(SqoopTaskExecutionContext sqoopTaskExecutionContext, TaskNode taskNode) {
SqoopParameters sqoopParameters = JSONUtils.parseObject(taskNode.getParams(), SqoopParameters.class);
        // when the sqoop job type is TEMPLATE, set the source/target data source relation
if (sqoopParameters.getJobType().equals(SqoopJobType.TEMPLATE.getDescp())) {
SourceMysqlParameter sourceMysqlParameter = JSONUtils.parseObject(sqoopParameters.getSourceParams(), SourceMysqlParameter.class);
TargetMysqlParameter targetMysqlParameter = JSONUtils.parseObject(sqoopParameters.getTargetParams(), TargetMysqlParameter.class);
DataSource dataSource = processService.findDataSourceById(sourceMysqlParameter.getSrcDatasource());
DataSource dataTarget = processService.findDataSourceById(targetMysqlParameter.getTargetDatasource());
if (dataSource != null) {
sqoopTaskExecutionContext.setDataSourceId(dataSource.getId());
sqoopTaskExecutionContext.setSourcetype(dataSource.getType().getCode());
sqoopTaskExecutionContext.setSourceConnectionParams(dataSource.getConnectionParams());
}
if (dataTarget != null) {
sqoopTaskExecutionContext.setDataTargetId(dataTarget.getId());
sqoopTaskExecutionContext.setTargetType(dataTarget.getType().getCode());
sqoopTaskExecutionContext.setTargetConnectionParams(dataTarget.getConnectionParams());
}
}
}
/**
* set SQL task relation
*
* @param sqlTaskExecutionContext sqlTaskExecutionContext
* @param taskNode taskNode
*/
private void setSQLTaskRelation(SQLTaskExecutionContext sqlTaskExecutionContext, TaskNode taskNode) {
SqlParameters sqlParameters = JSONUtils.parseObject(taskNode.getParams(), SqlParameters.class);
int datasourceId = sqlParameters.getDatasource();
DataSource datasource = processService.findDataSourceById(datasourceId);
sqlTaskExecutionContext.setConnectionParams(datasource.getConnectionParams());
// whether udf type
boolean udfTypeFlag = EnumUtils.isValidEnum(UdfType.class, sqlParameters.getType())
&& StringUtils.isNotEmpty(sqlParameters.getUdfs());
if (udfTypeFlag) {
String[] udfFunIds = sqlParameters.getUdfs().split(",");
int[] udfFunIdsArray = new int[udfFunIds.length];
for (int i = 0; i < udfFunIds.length; i++) {
udfFunIdsArray[i] = Integer.parseInt(udfFunIds[i]);
}
List<UdfFunc> udfFuncList = processService.queryUdfFunListByIds(udfFunIdsArray);
Map<UdfFunc, String> udfFuncMap = new HashMap<>();
for (UdfFunc udfFunc : udfFuncList) {
String tenantCode = processService.queryTenantCodeByResName(udfFunc.getResourceName(), ResourceType.UDF);
udfFuncMap.put(udfFunc, tenantCode);
}
sqlTaskExecutionContext.setUdfFuncTenantCodeMap(udfFuncMap);
}
}
/**
* get execute local path
*
* @return execute local path
*/
private String getExecLocalPath(TaskInstance taskInstance) {
return FileUtils.getProcessExecDir(taskInstance.getProcessDefine().getProjectId(),
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId());
}
/**
     * whether tenant is null
*
* @param tenant tenant
* @param taskInstance taskInstance
* @return result
*/
private boolean verifyTenantIsNull(Tenant tenant, TaskInstance taskInstance) {
if (tenant == null) {
logger.error("tenant not exists,process instance id : {},task instance id : {}",
taskInstance.getProcessInstance().getId(),
taskInstance.getId());
return true;
}
return false;
}
/**
     * get the resource map: key is the resource full name and value is the tenant code
*/
private Map<String, String> getResourceFullNames(TaskNode taskNode) {
Map<String, String> resourceMap = new HashMap<>();
AbstractParameters baseParam = TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams());
if (baseParam != null) {
List<ResourceInfo> projectResourceFiles = baseParam.getResourceFilesList();
if (CollectionUtils.isNotEmpty(projectResourceFiles)) {
                // filter the resources whose id equals 0 (old-version resources referenced by name)
Set<ResourceInfo> oldVersionResources = projectResourceFiles.stream().filter(t -> t.getId() == 0).collect(Collectors.toSet());
if (CollectionUtils.isNotEmpty(oldVersionResources)) {
oldVersionResources.forEach(
(t) -> resourceMap.put(t.getRes(), processService.queryTenantCodeByResName(t.getRes(), ResourceType.FILE))
);
}
// get the resource id in order to get the resource names in batch
Stream<Integer> resourceIdStream = projectResourceFiles.stream().map(resourceInfo -> resourceInfo.getId());
Set<Integer> resourceIdsSet = resourceIdStream.collect(Collectors.toSet());
if (CollectionUtils.isNotEmpty(resourceIdsSet)) {
Integer[] resourceIds = resourceIdsSet.toArray(new Integer[resourceIdsSet.size()]);
List<Resource> resources = processService.listResourceByIds(resourceIds);
resources.forEach(
(t) -> resourceMap.put(t.getFullName(), processService.queryTenantCodeByResName(t.getFullName(), ResourceType.FILE))
);
}
}
}
return resourceMap;
}
}
|