test
commit
c2cf8fb7b1
|
|
@ -15,12 +15,17 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
on: ["push", "pull_request"]
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
- refactor-worker
|
||||
env:
|
||||
DOCKER_DIR: ./docker
|
||||
LOG_DIR: /tmp/dolphinscheduler
|
||||
|
||||
name: Test Coveralls Parallel
|
||||
name: Unit Test
|
||||
|
||||
jobs:
|
||||
|
||||
|
|
@ -29,9 +34,13 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v1
|
||||
with:
|
||||
submodules: true
|
||||
- uses: actions/checkout@v2
|
||||
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
|
||||
- name: checkout submodules
|
||||
shell: bash
|
||||
run: |
|
||||
git submodule sync --recursive
|
||||
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
|
||||
- uses: actions/cache@v1
|
||||
with:
|
||||
path: ~/.m2/repository
|
||||
|
|
@ -44,10 +53,17 @@ jobs:
|
|||
uses: actions/setup-java@v1
|
||||
with:
|
||||
java-version: 1.8
|
||||
- name: Git fetch unshallow
|
||||
run: |
|
||||
git fetch --unshallow
|
||||
git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"
|
||||
git fetch origin
|
||||
- name: Compile
|
||||
run: |
|
||||
export MAVEN_OPTS='-Dmaven.repo.local=.m2/repository -XX:+TieredCompilation -XX:TieredStopAtLevel=1 -XX:+CMSClassUnloadingEnabled -XX:+UseConcMarkSweepGC -XX:-UseGCOverheadLimit -Xmx3g'
|
||||
mvn test -B -Dmaven.test.skip=false
|
||||
- name: Upload coverage report to codecov
|
||||
run: |
|
||||
CODECOV_TOKEN="09c2663f-b091-4258-8a47-c981827eb29a" bash <(curl -s https://codecov.io/bash)
|
||||
- name: Run SonarCloud Analysis
|
||||
run: >
|
||||
|
|
|
|||
|
|
@ -148,3 +148,4 @@ dolphinscheduler-ui/dist/lib/external/
|
|||
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/taskInstance/index.vue
|
||||
/dolphinscheduler-dao/src/main/resources/dao/data_source.properties
|
||||
|
||||
!/zookeeper_data/
|
||||
|
|
|
|||
|
|
@ -31,8 +31,8 @@
|
|||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<filter class="org.apache.dolphinscheduler.common.log.TaskLogFilter"></filter>
|
||||
<Discriminator class="org.apache.dolphinscheduler.common.log.TaskLogDiscriminator">
|
||||
<filter class="org.apache.dolphinscheduler.server.log.TaskLogFilter"></filter>
|
||||
<Discriminator class="org.apache.dolphinscheduler.server.log.TaskLogDiscriminator">
|
||||
<key>taskAppId</key>
|
||||
<logBase>${log.base}</logBase>
|
||||
</Discriminator>
|
||||
|
|
@ -52,7 +52,7 @@
|
|||
|
||||
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-worker.log</file>
|
||||
<filter class="org.apache.dolphinscheduler.common.log.WorkerLogFilter">
|
||||
<filter class="org.apache.dolphinscheduler.server.log.WorkerLogFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ public class MailUtils {
|
|||
|
||||
public static final Boolean mailUseSSL = PropertyUtils.getBoolean(Constants.MAIL_SMTP_SSL_ENABLE);
|
||||
|
||||
public static final String xlsFilePath = PropertyUtils.getString(Constants.XLS_FILE_PATH);
|
||||
public static final String xlsFilePath = PropertyUtils.getString(Constants.XLS_FILE_PATH,"/tmp/xls");
|
||||
|
||||
public static final String starttlsEnable = PropertyUtils.getString(Constants.MAIL_SMTP_STARTTLS_ENABLE);
|
||||
|
||||
|
|
|
|||
|
|
@ -79,6 +79,18 @@ public class PropertyUtils {
|
|||
return properties.getProperty(key.trim());
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
*
|
||||
* @param key property name
|
||||
* @param defaultVal default value
|
||||
* @return property value
|
||||
*/
|
||||
public static String getString(String key, String defaultVal) {
|
||||
String val = properties.getProperty(key.trim());
|
||||
return val == null ? defaultVal : val;
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
*
|
||||
|
|
|
|||
|
|
@ -36,18 +36,18 @@ mail.smtp.ssl.enable=false
|
|||
mail.smtp.ssl.trust=xxx.xxx.com
|
||||
|
||||
#xls file path,need create if not exist
|
||||
xls.file.path=/tmp/xls
|
||||
#xls.file.path=/tmp/xls
|
||||
|
||||
# Enterprise WeChat configuration
|
||||
enterprise.wechat.enable=false
|
||||
enterprise.wechat.corp.id=xxxxxxx
|
||||
enterprise.wechat.secret=xxxxxxx
|
||||
enterprise.wechat.agent.id=xxxxxxx
|
||||
enterprise.wechat.users=xxxxxxx
|
||||
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
|
||||
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
|
||||
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
|
||||
enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
|
||||
#enterprise.wechat.corp.id=xxxxxxx
|
||||
#enterprise.wechat.secret=xxxxxxx
|
||||
#enterprise.wechat.agent.id=xxxxxxx
|
||||
#enterprise.wechat.users=xxxxxxx
|
||||
#enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
|
||||
#enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
|
||||
#enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
|
||||
#enterprise.wechat.user.send.msg={\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,52 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
|
||||
<property name="log.base" value="logs"/>
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-alert.log</file>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>20</maxHistory>
|
||||
<maxFileSize>64MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT"/>
|
||||
<appender-ref ref="APILOGFILE"/>
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
|
|
@ -146,6 +146,12 @@
|
|||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-common</artifactId>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>javax.servlet</groupId>
|
||||
<artifactId>servlet-api</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
|
|
@ -156,11 +162,23 @@
|
|||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-hdfs</artifactId>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<artifactId>servlet-api</artifactId>
|
||||
<groupId>javax.servlet</groupId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-common</artifactId>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<artifactId>servlet-api</artifactId>
|
||||
<groupId>javax.servlet</groupId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
|
|
@ -168,14 +186,15 @@
|
|||
<artifactId>hadoop-aws</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>javax.servlet</groupId>
|
||||
<artifactId>servlet-api</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.mortbay.jetty</groupId>
|
||||
<artifactId>jsp-2.1</artifactId>
|
||||
<exclusions>
|
||||
<exclusion>
|
||||
<groupId>org.mortbay.jetty</groupId>
|
||||
<artifactId>servlet-api-2.5</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<!-- just for test -->
|
||||
|
|
|
|||
|
|
@ -21,11 +21,14 @@ import org.springframework.boot.autoconfigure.SpringBootApplication;
|
|||
import org.springframework.boot.web.servlet.ServletComponentScan;
|
||||
import org.springframework.boot.web.servlet.support.SpringBootServletInitializer;
|
||||
import org.springframework.context.annotation.ComponentScan;
|
||||
import org.springframework.context.annotation.FilterType;
|
||||
import springfox.documentation.swagger2.annotations.EnableSwagger2;
|
||||
|
||||
@SpringBootApplication
|
||||
@ServletComponentScan
|
||||
@ComponentScan("org.apache.dolphinscheduler")
|
||||
@ComponentScan(basePackages = {"org.apache.dolphinscheduler"},
|
||||
excludeFilters = @ComponentScan.Filter(type = FilterType.REGEX,
|
||||
pattern = "org.apache.dolphinscheduler.server.*"))
|
||||
public class ApiApplicationServer extends SpringBootServletInitializer {
|
||||
|
||||
public static void main(String[] args) {
|
||||
|
|
|
|||
|
|
@ -64,7 +64,7 @@ public class ExecutorController extends BaseController {
|
|||
* @param receiversCc receivers cc
|
||||
* @param runMode run mode
|
||||
* @param processInstancePriority process instance priority
|
||||
* @param workerGroupId worker group id
|
||||
* @param workerGroup worker group
|
||||
* @param timeout timeout
|
||||
* @return start process result code
|
||||
*/
|
||||
|
|
@ -82,7 +82,7 @@ public class ExecutorController extends BaseController {
|
|||
@ApiImplicitParam(name = "receiversCc", value = "RECEIVERS_CC",dataType ="String" ),
|
||||
@ApiImplicitParam(name = "runMode", value = "RUN_MODE",dataType ="RunMode" ),
|
||||
@ApiImplicitParam(name = "processInstancePriority", value = "PROCESS_INSTANCE_PRIORITY", required = true, dataType = "Priority" ),
|
||||
@ApiImplicitParam(name = "workerGroupId", value = "WORKER_GROUP_ID", dataType = "Int",example = "100"),
|
||||
@ApiImplicitParam(name = "workerGroup", value = "WORKER_GROUP", dataType = "String",example = "default"),
|
||||
@ApiImplicitParam(name = "timeout", value = "TIMEOUT", dataType = "Int",example = "100"),
|
||||
})
|
||||
@PostMapping(value = "start-process-instance")
|
||||
|
|
@ -101,15 +101,15 @@ public class ExecutorController extends BaseController {
|
|||
@RequestParam(value = "receiversCc", required = false) String receiversCc,
|
||||
@RequestParam(value = "runMode", required = false) RunMode runMode,
|
||||
@RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority,
|
||||
@RequestParam(value = "workerGroupId", required = false, defaultValue = "-1") int workerGroupId,
|
||||
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
|
||||
@RequestParam(value = "timeout", required = false) Integer timeout) {
|
||||
try {
|
||||
logger.info("login user {}, start process instance, project name: {}, process definition id: {}, schedule time: {}, "
|
||||
+ "failure policy: {}, node name: {}, node dep: {}, notify type: {}, "
|
||||
+ "notify group id: {},receivers:{},receiversCc:{}, run mode: {},process instance priority:{}, workerGroupId: {}, timeout: {}",
|
||||
+ "notify group id: {},receivers:{},receiversCc:{}, run mode: {},process instance priority:{}, workerGroup: {}, timeout: {}",
|
||||
loginUser.getUserName(), projectName, processDefinitionId, scheduleTime,
|
||||
failureStrategy, startNodeList, taskDependType, warningType, warningGroupId,receivers,receiversCc,runMode,processInstancePriority,
|
||||
workerGroupId, timeout);
|
||||
failureStrategy, startNodeList, taskDependType, warningType, workerGroup,receivers,receiversCc,runMode,processInstancePriority,
|
||||
workerGroup, timeout);
|
||||
|
||||
if (timeout == null) {
|
||||
timeout = Constants.MAX_TASK_TIMEOUT;
|
||||
|
|
@ -117,7 +117,7 @@ public class ExecutorController extends BaseController {
|
|||
|
||||
Map<String, Object> result = execService.execProcessInstance(loginUser, projectName, processDefinitionId, scheduleTime, execType, failureStrategy,
|
||||
startNodeList, taskDependType, warningType,
|
||||
warningGroupId,receivers,receiversCc, runMode,processInstancePriority, workerGroupId, timeout);
|
||||
warningGroupId,receivers,receiversCc, runMode,processInstancePriority, workerGroup, timeout);
|
||||
return returnDataList(result);
|
||||
} catch (Exception e) {
|
||||
logger.error(Status.START_PROCESS_INSTANCE_ERROR.getMsg(),e);
|
||||
|
|
|
|||
|
|
@ -60,14 +60,14 @@ public class LoggerController extends BaseController {
|
|||
*/
|
||||
@ApiOperation(value = "queryLog", notes= "QUERY_TASK_INSTANCE_LOG_NOTES")
|
||||
@ApiImplicitParams({
|
||||
@ApiImplicitParam(name = "taskInstId", value = "TASK_ID", dataType = "Int", example = "100"),
|
||||
@ApiImplicitParam(name = "taskInstanceId", value = "TASK_ID", dataType = "Int", example = "100"),
|
||||
@ApiImplicitParam(name = "skipLineNum", value = "SKIP_LINE_NUM", dataType ="Int", example = "100"),
|
||||
@ApiImplicitParam(name = "limit", value = "LIMIT", dataType ="Int", example = "100")
|
||||
})
|
||||
@GetMapping(value = "/detail")
|
||||
@ResponseStatus(HttpStatus.OK)
|
||||
public Result queryLog(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
|
||||
@RequestParam(value = "taskInstId") int taskInstanceId,
|
||||
@RequestParam(value = "taskInstanceId") int taskInstanceId,
|
||||
@RequestParam(value = "skipLineNum") int skipNum,
|
||||
@RequestParam(value = "limit") int limit) {
|
||||
try {
|
||||
|
|
@ -91,12 +91,12 @@ public class LoggerController extends BaseController {
|
|||
*/
|
||||
@ApiOperation(value = "downloadTaskLog", notes= "DOWNLOAD_TASK_INSTANCE_LOG_NOTES")
|
||||
@ApiImplicitParams({
|
||||
@ApiImplicitParam(name = "taskInstId", value = "TASK_ID",dataType = "Int", example = "100")
|
||||
@ApiImplicitParam(name = "taskInstanceId", value = "TASK_ID",dataType = "Int", example = "100")
|
||||
})
|
||||
@GetMapping(value = "/download-log")
|
||||
@ResponseBody
|
||||
public ResponseEntity downloadTaskLog(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
|
||||
@RequestParam(value = "taskInstId") int taskInstanceId) {
|
||||
@RequestParam(value = "taskInstanceId") int taskInstanceId) {
|
||||
try {
|
||||
byte[] logBytes = loggerService.getLogBytes(taskInstanceId);
|
||||
return ResponseEntity
|
||||
|
|
|
|||
|
|
@ -26,8 +26,6 @@ import org.apache.dolphinscheduler.common.utils.ParameterUtils;
|
|||
import org.apache.dolphinscheduler.common.utils.StringUtils;
|
||||
import org.apache.dolphinscheduler.dao.entity.User;
|
||||
import io.swagger.annotations.*;
|
||||
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
|
||||
import org.apache.dolphinscheduler.service.queue.TaskQueueFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
|
@ -240,8 +238,7 @@ public class ProcessInstanceController extends BaseController{
|
|||
logger.info("delete process instance by id, login user:{}, project name:{}, process instance id:{}",
|
||||
loginUser.getUserName(), projectName, processInstanceId);
|
||||
// task queue
|
||||
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
|
||||
Map<String, Object> result = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId,tasksQueue);
|
||||
Map<String, Object> result = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId);
|
||||
return returnDataList(result);
|
||||
}catch (Exception e){
|
||||
logger.error(DELETE_PROCESS_INSTANCE_BY_ID_ERROR.getMsg(),e);
|
||||
|
|
@ -370,7 +367,6 @@ public class ProcessInstanceController extends BaseController{
|
|||
logger.info("delete process instance by ids, login user:{}, project name:{}, process instance ids :{}",
|
||||
loginUser.getUserName(), projectName, processInstanceIds);
|
||||
// task queue
|
||||
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
|
||||
Map<String, Object> result = new HashMap<>(5);
|
||||
List<String> deleteFailedIdList = new ArrayList<>();
|
||||
if(StringUtils.isNotEmpty(processInstanceIds)){
|
||||
|
|
@ -379,7 +375,7 @@ public class ProcessInstanceController extends BaseController{
|
|||
for (String strProcessInstanceId:processInstanceIdArray) {
|
||||
int processInstanceId = Integer.parseInt(strProcessInstanceId);
|
||||
try {
|
||||
Map<String, Object> deleteResult = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId,tasksQueue);
|
||||
Map<String, Object> deleteResult = processInstanceService.deleteProcessInstanceById(loginUser, projectName, processInstanceId);
|
||||
if(!Status.SUCCESS.equals(deleteResult.get(Constants.STATUS))){
|
||||
deleteFailedIdList.add(strProcessInstanceId);
|
||||
logger.error((String)deleteResult.get(Constants.MSG));
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ public class SchedulerController extends BaseController {
|
|||
* @param processInstancePriority process instance priority
|
||||
* @param receivers receivers
|
||||
* @param receiversCc receivers cc
|
||||
* @param workerGroupId worker group id
|
||||
* @param workerGroup worker group
|
||||
* @return create result code
|
||||
*/
|
||||
@ApiOperation(value = "createSchedule", notes= "CREATE_SCHEDULE_NOTES")
|
||||
|
|
@ -96,15 +96,15 @@ public class SchedulerController extends BaseController {
|
|||
@RequestParam(value = "failureStrategy", required = false, defaultValue = DEFAULT_FAILURE_POLICY) FailureStrategy failureStrategy,
|
||||
@RequestParam(value = "receivers", required = false) String receivers,
|
||||
@RequestParam(value = "receiversCc", required = false) String receiversCc,
|
||||
@RequestParam(value = "workerGroupId", required = false, defaultValue = "-1") int workerGroupId,
|
||||
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
|
||||
@RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority) {
|
||||
logger.info("login user {}, project name: {}, process name: {}, create schedule: {}, warning type: {}, warning group id: {}," +
|
||||
"failure policy: {},receivers : {},receiversCc : {},processInstancePriority : {}, workGroupId:{}",
|
||||
loginUser.getUserName(), projectName, processDefinitionId, schedule, warningType, warningGroupId,
|
||||
failureStrategy, receivers, receiversCc, processInstancePriority, workerGroupId);
|
||||
failureStrategy, receivers, receiversCc, processInstancePriority, workerGroup);
|
||||
try {
|
||||
Map<String, Object> result = schedulerService.insertSchedule(loginUser, projectName, processDefinitionId, schedule,
|
||||
warningType, warningGroupId, failureStrategy, receivers, receiversCc, processInstancePriority, workerGroupId);
|
||||
warningType, warningGroupId, failureStrategy, receivers, receiversCc, processInstancePriority, workerGroup);
|
||||
|
||||
return returnDataList(result);
|
||||
} catch (Exception e) {
|
||||
|
|
@ -124,7 +124,7 @@ public class SchedulerController extends BaseController {
|
|||
* @param warningGroupId warning group id
|
||||
* @param failureStrategy failure strategy
|
||||
* @param receivers receivers
|
||||
* @param workerGroupId worker group id
|
||||
* @param workerGroup worker group
|
||||
* @param processInstancePriority process instance priority
|
||||
* @param receiversCc receivers cc
|
||||
* @return update result code
|
||||
|
|
@ -151,16 +151,16 @@ public class SchedulerController extends BaseController {
|
|||
@RequestParam(value = "failureStrategy", required = false, defaultValue = "END") FailureStrategy failureStrategy,
|
||||
@RequestParam(value = "receivers", required = false) String receivers,
|
||||
@RequestParam(value = "receiversCc", required = false) String receiversCc,
|
||||
@RequestParam(value = "workerGroupId", required = false, defaultValue = "-1") int workerGroupId,
|
||||
@RequestParam(value = "workerGroup", required = false, defaultValue = "default") String workerGroup,
|
||||
@RequestParam(value = "processInstancePriority", required = false) Priority processInstancePriority) {
|
||||
logger.info("login user {}, project name: {},id: {}, updateProcessInstance schedule: {}, notify type: {}, notify mails: {}, " +
|
||||
"failure policy: {},receivers : {},receiversCc : {},processInstancePriority : {},workerGroupId:{}",
|
||||
loginUser.getUserName(), projectName, id, schedule, warningType, warningGroupId, failureStrategy,
|
||||
receivers, receiversCc, processInstancePriority, workerGroupId);
|
||||
receivers, receiversCc, processInstancePriority, workerGroup);
|
||||
|
||||
try {
|
||||
Map<String, Object> result = schedulerService.updateSchedule(loginUser, projectName, id, schedule,
|
||||
warningType, warningGroupId, failureStrategy, receivers, receiversCc, null, processInstancePriority, workerGroupId);
|
||||
warningType, warningGroupId, failureStrategy, receivers, receiversCc, null, processInstancePriority, workerGroup);
|
||||
return returnDataList(result);
|
||||
|
||||
} catch (Exception e) {
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ import io.swagger.annotations.Api;
|
|||
import io.swagger.annotations.ApiImplicitParam;
|
||||
import io.swagger.annotations.ApiImplicitParams;
|
||||
import io.swagger.annotations.ApiOperation;
|
||||
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
|
@ -34,6 +35,7 @@ import org.springframework.http.HttpStatus;
|
|||
import org.springframework.web.bind.annotation.*;
|
||||
import springfox.documentation.annotations.ApiIgnore;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
|
|
@ -46,7 +48,6 @@ public class WorkerGroupController extends BaseController{
|
|||
|
||||
private static final Logger logger = LoggerFactory.getLogger(WorkerGroupController.class);
|
||||
|
||||
|
||||
@Autowired
|
||||
WorkerGroupService workerGroupService;
|
||||
|
||||
|
|
@ -135,6 +136,7 @@ public class WorkerGroupController extends BaseController{
|
|||
loginUser.getUserName() );
|
||||
|
||||
try {
|
||||
|
||||
Map<String, Object> result = workerGroupService.queryAllGroup();
|
||||
return returnDataList(result);
|
||||
}catch (Exception e){
|
||||
|
|
|
|||
|
|
@ -96,11 +96,6 @@ public class ProcessMeta {
|
|||
*/
|
||||
private String scheduleProcessInstancePriority;
|
||||
|
||||
/**
|
||||
* worker group id
|
||||
*/
|
||||
private Integer scheduleWorkerGroupId;
|
||||
|
||||
/**
|
||||
* worker group name
|
||||
*/
|
||||
|
|
@ -229,14 +224,6 @@ public class ProcessMeta {
|
|||
this.scheduleProcessInstancePriority = scheduleProcessInstancePriority;
|
||||
}
|
||||
|
||||
public Integer getScheduleWorkerGroupId() {
|
||||
return scheduleWorkerGroupId;
|
||||
}
|
||||
|
||||
public void setScheduleWorkerGroupId(int scheduleWorkerGroupId) {
|
||||
this.scheduleWorkerGroupId = scheduleWorkerGroupId;
|
||||
}
|
||||
|
||||
public String getScheduleWorkerGroupName() {
|
||||
return scheduleWorkerGroupName;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,8 +29,6 @@ import org.apache.dolphinscheduler.common.utils.StringUtils;
|
|||
import org.apache.dolphinscheduler.dao.entity.*;
|
||||
import org.apache.dolphinscheduler.dao.mapper.*;
|
||||
import org.apache.dolphinscheduler.service.process.ProcessService;
|
||||
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
|
||||
import org.apache.dolphinscheduler.service.queue.TaskQueueFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
|
@ -318,9 +316,8 @@ public class DataAnalysisService extends BaseService{
|
|||
return result;
|
||||
}
|
||||
|
||||
ITaskQueue tasksQueue = TaskQueueFactory.getTaskQueueInstance();
|
||||
List<String> tasksQueueList = tasksQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_QUEUE);
|
||||
List<String> tasksKillList = tasksQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_KILL);
|
||||
List<String> tasksQueueList = new ArrayList<>();
|
||||
List<String> tasksKillList = new ArrayList<>();
|
||||
|
||||
Map<String,Integer> dataMap = new HashMap<>();
|
||||
if (loginUser.getUserType() == UserType.ADMIN_USER){
|
||||
|
|
|
|||
|
|
@ -85,7 +85,7 @@ public class ExecutorService extends BaseService{
|
|||
* @param receivers receivers
|
||||
* @param receiversCc receivers cc
|
||||
* @param processInstancePriority process instance priority
|
||||
* @param workerGroupId worker group id
|
||||
* @param workerGroup worker group name
|
||||
* @param runMode run mode
|
||||
* @param timeout timeout
|
||||
* @return execute process instance code
|
||||
|
|
@ -96,7 +96,7 @@ public class ExecutorService extends BaseService{
|
|||
FailureStrategy failureStrategy, String startNodeList,
|
||||
TaskDependType taskDependType, WarningType warningType, int warningGroupId,
|
||||
String receivers, String receiversCc, RunMode runMode,
|
||||
Priority processInstancePriority, int workerGroupId, Integer timeout) throws ParseException {
|
||||
Priority processInstancePriority, String workerGroup, Integer timeout) throws ParseException {
|
||||
Map<String, Object> result = new HashMap<>(5);
|
||||
// timeout is valid
|
||||
if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
|
||||
|
|
@ -128,7 +128,7 @@ public class ExecutorService extends BaseService{
|
|||
*/
|
||||
int create = this.createCommand(commandType, processDefinitionId,
|
||||
taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(),
|
||||
warningGroupId, runMode,processInstancePriority, workerGroupId);
|
||||
warningGroupId, runMode,processInstancePriority, workerGroup);
|
||||
if(create > 0 ){
|
||||
/**
|
||||
* according to the process definition ID updateProcessInstance and CC recipient
|
||||
|
|
@ -452,11 +452,29 @@ public class ExecutorService extends BaseService{
|
|||
* @return
|
||||
* @throws ParseException
|
||||
*/
|
||||
|
||||
/**
|
||||
* create commonad
|
||||
* @param commandType command type
|
||||
* @param processDefineId process define id
|
||||
* @param nodeDep node dependency
|
||||
* @param failureStrategy failure strategy
|
||||
* @param startNodeList start node list
|
||||
* @param schedule schedule
|
||||
* @param warningType warning type
|
||||
* @param executorId executor id
|
||||
* @param warningGroupId warning group id
|
||||
* @param runMode run mode
|
||||
* @param processInstancePriority process instance priority
|
||||
* @param workerGroup worker group
|
||||
* @return create command result
|
||||
* @throws ParseException parse exception
|
||||
*/
|
||||
private int createCommand(CommandType commandType, int processDefineId,
|
||||
TaskDependType nodeDep, FailureStrategy failureStrategy,
|
||||
String startNodeList, String schedule, WarningType warningType,
|
||||
int excutorId, int warningGroupId,
|
||||
RunMode runMode,Priority processInstancePriority, int workerGroupId) throws ParseException {
|
||||
int executorId, int warningGroupId,
|
||||
RunMode runMode,Priority processInstancePriority, String workerGroup) throws ParseException {
|
||||
|
||||
/**
|
||||
* instantiate command schedule instance
|
||||
|
|
@ -484,10 +502,10 @@ public class ExecutorService extends BaseService{
|
|||
command.setWarningType(warningType);
|
||||
}
|
||||
command.setCommandParam(JSONUtils.toJson(cmdParam));
|
||||
command.setExecutorId(excutorId);
|
||||
command.setExecutorId(executorId);
|
||||
command.setWarningGroupId(warningGroupId);
|
||||
command.setProcessInstancePriority(processInstancePriority);
|
||||
command.setWorkerGroupId(workerGroupId);
|
||||
command.setWorkerGroup(workerGroup);
|
||||
|
||||
Date start = null;
|
||||
Date end = null;
|
||||
|
|
|
|||
|
|
@ -21,6 +21,7 @@ import org.apache.dolphinscheduler.api.utils.Result;
|
|||
import org.apache.dolphinscheduler.common.Constants;
|
||||
import org.apache.dolphinscheduler.common.utils.StringUtils;
|
||||
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
|
||||
import org.apache.dolphinscheduler.remote.utils.Host;
|
||||
import org.apache.dolphinscheduler.service.log.LogClientService;
|
||||
import org.apache.dolphinscheduler.service.process.ProcessService;
|
||||
import org.slf4j.Logger;
|
||||
|
|
@ -68,7 +69,7 @@ public class LoggerService {
|
|||
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
|
||||
}
|
||||
|
||||
String host = taskInstance.getHost();
|
||||
String host = Host.of(taskInstance.getHost()).getIp();
|
||||
if(StringUtils.isEmpty(host)){
|
||||
return new Result(Status.TASK_INSTANCE_NOT_FOUND.getCode(), Status.TASK_INSTANCE_NOT_FOUND.getMsg());
|
||||
}
|
||||
|
|
@ -94,7 +95,7 @@ public class LoggerService {
|
|||
if (taskInstance == null){
|
||||
throw new RuntimeException("task instance is null");
|
||||
}
|
||||
String host = taskInstance.getHost();
|
||||
String host = Host.of(taskInstance.getHost()).getIp();
|
||||
return logClient.getLogBytes(host, Constants.RPC_PORT, taskInstance.getLogPath());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -561,13 +561,13 @@ public class ProcessDefinitionService extends BaseDAGService {
|
|||
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
|
||||
if (!schedules.isEmpty()) {
|
||||
Schedule schedule = schedules.get(0);
|
||||
WorkerGroup workerGroup = workerGroupMapper.selectById(schedule.getWorkerGroupId());
|
||||
/*WorkerGroup workerGroup = workerGroupMapper.selectById(schedule.getWorkerGroupId());
|
||||
|
||||
if (null == workerGroup && schedule.getWorkerGroupId() == -1) {
|
||||
workerGroup = new WorkerGroup();
|
||||
workerGroup.setId(-1);
|
||||
workerGroup.setName("");
|
||||
}
|
||||
}*/
|
||||
|
||||
exportProcessMeta.setScheduleWarningType(schedule.getWarningType().toString());
|
||||
exportProcessMeta.setScheduleWarningGroupId(schedule.getWarningGroupId());
|
||||
|
|
@ -577,11 +577,7 @@ public class ProcessDefinitionService extends BaseDAGService {
|
|||
exportProcessMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy()));
|
||||
exportProcessMeta.setScheduleReleaseState(String.valueOf(ReleaseState.OFFLINE));
|
||||
exportProcessMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority()));
|
||||
|
||||
if (null != workerGroup) {
|
||||
exportProcessMeta.setScheduleWorkerGroupId(workerGroup.getId());
|
||||
exportProcessMeta.setScheduleWorkerGroupName(workerGroup.getName());
|
||||
}
|
||||
exportProcessMeta.setScheduleWorkerGroupName(schedule.getWorkerGroup());
|
||||
}
|
||||
//create workflow json file
|
||||
return JSONUtils.toJsonString(exportProcessMeta);
|
||||
|
|
@ -780,15 +776,9 @@ public class ProcessDefinitionService extends BaseDAGService {
|
|||
if (null != processMeta.getScheduleProcessInstancePriority()) {
|
||||
scheduleObj.setProcessInstancePriority(Priority.valueOf(processMeta.getScheduleProcessInstancePriority()));
|
||||
}
|
||||
if (null != processMeta.getScheduleWorkerGroupId()) {
|
||||
scheduleObj.setWorkerGroupId(processMeta.getScheduleWorkerGroupId());
|
||||
} else {
|
||||
if (null != processMeta.getScheduleWorkerGroupName()) {
|
||||
List<WorkerGroup> workerGroups = workerGroupMapper.queryWorkerGroupByName(processMeta.getScheduleWorkerGroupName());
|
||||
if(CollectionUtils.isNotEmpty(workerGroups)){
|
||||
scheduleObj.setWorkerGroupId(workerGroups.get(0).getId());
|
||||
}
|
||||
}
|
||||
|
||||
if (null != processMeta.getScheduleWorkerGroupName()) {
|
||||
scheduleObj.setWorkerGroup(processMeta.getScheduleWorkerGroupName());
|
||||
}
|
||||
|
||||
return scheduleMapper.insert(scheduleObj);
|
||||
|
|
|
|||
|
|
@ -38,7 +38,6 @@ import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
|
|||
import org.apache.dolphinscheduler.dao.entity.*;
|
||||
import org.apache.dolphinscheduler.dao.mapper.*;
|
||||
import org.apache.dolphinscheduler.service.process.ProcessService;
|
||||
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
|
|
@ -113,9 +112,9 @@ public class ProcessInstanceService extends BaseDAGService {
|
|||
return checkResult;
|
||||
}
|
||||
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);
|
||||
String workerGroupName = "";
|
||||
if(processInstance.getWorkerGroupId() == -1){
|
||||
workerGroupName = DEFAULT;
|
||||
/*String workerGroupName = "";
|
||||
if(StringUtils.isBlank(processInstance.getWorkerGroup())){
|
||||
workerGroupName = ;
|
||||
}else{
|
||||
WorkerGroup workerGroup = workerGroupMapper.selectById(processInstance.getWorkerGroupId());
|
||||
if(workerGroup != null){
|
||||
|
|
@ -124,7 +123,7 @@ public class ProcessInstanceService extends BaseDAGService {
|
|||
workerGroupName = DEFAULT;
|
||||
}
|
||||
}
|
||||
processInstance.setWorkerGroupName(workerGroupName);
|
||||
processInstance.setWorkerGroupName(workerGroupName);*/
|
||||
ProcessDefinition processDefinition = processService.findProcessDefineById(processInstance.getProcessDefinitionId());
|
||||
processInstance.setReceivers(processDefinition.getReceivers());
|
||||
processInstance.setReceiversCc(processDefinition.getReceiversCc());
|
||||
|
|
@ -404,8 +403,6 @@ public class ProcessInstanceService extends BaseDAGService {
|
|||
processInstance.setProcessInstanceJson(processInstanceJson);
|
||||
processInstance.setGlobalParams(globalParams);
|
||||
}
|
||||
// int update = processDao.updateProcessInstance(processInstanceId, processInstanceJson,
|
||||
// globalParams, schedule, flag, locations, connects);
|
||||
int update = processService.updateProcessInstance(processInstance);
|
||||
int updateDefine = 1;
|
||||
if (syncDefine && StringUtils.isNotEmpty(processInstanceJson)) {
|
||||
|
|
@ -472,11 +469,10 @@ public class ProcessInstanceService extends BaseDAGService {
|
|||
* @param loginUser login user
|
||||
* @param projectName project name
|
||||
* @param processInstanceId process instance id
|
||||
* @param tasksQueue task queue
|
||||
* @return delete result code
|
||||
*/
|
||||
@Transactional(rollbackFor = Exception.class)
|
||||
public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer processInstanceId, ITaskQueue tasksQueue) {
|
||||
public Map<String, Object> deleteProcessInstanceById(User loginUser, String projectName, Integer processInstanceId) {
|
||||
|
||||
Map<String, Object> result = new HashMap<>(5);
|
||||
Project project = projectMapper.queryByName(projectName);
|
||||
|
|
@ -494,52 +490,6 @@ public class ProcessInstanceService extends BaseDAGService {
|
|||
return result;
|
||||
}
|
||||
|
||||
//process instance priority
|
||||
int processInstancePriority = processInstance.getProcessInstancePriority().ordinal();
|
||||
// delete zk queue
|
||||
if (CollectionUtils.isNotEmpty(taskInstanceList)){
|
||||
for (TaskInstance taskInstance : taskInstanceList){
|
||||
// task instance priority
|
||||
int taskInstancePriority = taskInstance.getTaskInstancePriority().ordinal();
|
||||
|
||||
StringBuilder nodeValueSb = new StringBuilder(100);
|
||||
nodeValueSb.append(processInstancePriority)
|
||||
.append(UNDERLINE)
|
||||
.append(processInstanceId)
|
||||
.append(UNDERLINE)
|
||||
.append(taskInstancePriority)
|
||||
.append(UNDERLINE)
|
||||
.append(taskInstance.getId())
|
||||
.append(UNDERLINE);
|
||||
|
||||
int taskWorkerGroupId = processService.getTaskWorkerGroupId(taskInstance);
|
||||
WorkerGroup workerGroup = workerGroupMapper.selectById(taskWorkerGroupId);
|
||||
|
||||
if(workerGroup == null){
|
||||
nodeValueSb.append(DEFAULT_WORKER_ID);
|
||||
}else {
|
||||
|
||||
String ips = workerGroup.getIpList();
|
||||
StringBuilder ipSb = new StringBuilder(100);
|
||||
String[] ipArray = ips.split(COMMA);
|
||||
|
||||
for (String ip : ipArray) {
|
||||
long ipLong = IpUtils.ipToLong(ip);
|
||||
ipSb.append(ipLong).append(COMMA);
|
||||
}
|
||||
|
||||
if(ipSb.length() > 0) {
|
||||
ipSb.deleteCharAt(ipSb.length() - 1);
|
||||
}
|
||||
nodeValueSb.append(ipSb);
|
||||
}
|
||||
|
||||
logger.info("delete task queue node : {}",nodeValueSb.toString());
|
||||
tasksQueue.removeNode(org.apache.dolphinscheduler.common.Constants.DOLPHINSCHEDULER_TASKS_QUEUE, nodeValueSb.toString());
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// delete database cascade
|
||||
int delete = processService.deleteWorkProcessInstanceById(processInstanceId);
|
||||
processService.deleteAllSubWorkProcessByParentId(processInstanceId);
|
||||
|
|
|
|||
|
|
@ -92,7 +92,7 @@ public class SchedulerService extends BaseService {
|
|||
* @param processInstancePriority process instance priority
|
||||
* @param receivers receivers
|
||||
* @param receiversCc receivers cc
|
||||
* @param workerGroupId worker group id
|
||||
* @param workerGroup worker group
|
||||
* @return create result code
|
||||
* @throws IOException ioexception
|
||||
*/
|
||||
|
|
@ -106,7 +106,7 @@ public class SchedulerService extends BaseService {
|
|||
String receivers,
|
||||
String receiversCc,
|
||||
Priority processInstancePriority,
|
||||
int workerGroupId) throws IOException {
|
||||
String workerGroup) throws IOException {
|
||||
|
||||
Map<String, Object> result = new HashMap<String, Object>(5);
|
||||
|
||||
|
|
@ -156,7 +156,7 @@ public class SchedulerService extends BaseService {
|
|||
scheduleObj.setUserName(loginUser.getUserName());
|
||||
scheduleObj.setReleaseState(ReleaseState.OFFLINE);
|
||||
scheduleObj.setProcessInstancePriority(processInstancePriority);
|
||||
scheduleObj.setWorkerGroupId(workerGroupId);
|
||||
scheduleObj.setWorkerGroup(workerGroup);
|
||||
scheduleMapper.insert(scheduleObj);
|
||||
|
||||
/**
|
||||
|
|
@ -182,7 +182,7 @@ public class SchedulerService extends BaseService {
|
|||
* @param warningType warning type
|
||||
* @param warningGroupId warning group id
|
||||
* @param failureStrategy failure strategy
|
||||
* @param workerGroupId worker group id
|
||||
* @param workerGroup worker group
|
||||
* @param processInstancePriority process instance priority
|
||||
* @param receiversCc receiver cc
|
||||
* @param receivers receivers
|
||||
|
|
@ -202,7 +202,7 @@ public class SchedulerService extends BaseService {
|
|||
String receiversCc,
|
||||
ReleaseState scheduleStatus,
|
||||
Priority processInstancePriority,
|
||||
int workerGroupId) throws IOException {
|
||||
String workerGroup) throws IOException {
|
||||
Map<String, Object> result = new HashMap<String, Object>(5);
|
||||
|
||||
Project project = projectMapper.queryByName(projectName);
|
||||
|
|
@ -266,7 +266,7 @@ public class SchedulerService extends BaseService {
|
|||
if (scheduleStatus != null) {
|
||||
schedule.setReleaseState(scheduleStatus);
|
||||
}
|
||||
schedule.setWorkerGroupId(workerGroupId);
|
||||
schedule.setWorkerGroup(workerGroup);
|
||||
schedule.setUpdateTime(now);
|
||||
schedule.setProcessInstancePriority(processInstancePriority);
|
||||
scheduleMapper.updateById(schedule);
|
||||
|
|
|
|||
|
|
@ -28,14 +28,12 @@ import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
|
|||
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;
|
||||
import com.baomidou.mybatisplus.core.metadata.IPage;
|
||||
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
|
||||
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.*;
|
||||
|
||||
/**
|
||||
* work group service
|
||||
|
|
@ -50,6 +48,9 @@ public class WorkerGroupService extends BaseService {
|
|||
@Autowired
|
||||
ProcessInstanceMapper processInstanceMapper;
|
||||
|
||||
@Autowired
|
||||
protected ZookeeperCachedOperator zookeeperCachedOperator;
|
||||
|
||||
/**
|
||||
* create or update a worker group
|
||||
*
|
||||
|
|
@ -180,9 +181,22 @@ public class WorkerGroupService extends BaseService {
|
|||
* @return all worker group list
|
||||
*/
|
||||
public Map<String,Object> queryAllGroup() {
|
||||
Map<String, Object> result = new HashMap<>(5);
|
||||
List<WorkerGroup> workerGroupList = workerGroupMapper.queryAllWorkerGroup();
|
||||
result.put(Constants.DATA_LIST, workerGroupList);
|
||||
Map<String, Object> result = new HashMap<>();
|
||||
String workerPath = zookeeperCachedOperator.getZookeeperConfig().getDsRoot()+"/nodes" +"/worker";
|
||||
List<String> workerGroupList = zookeeperCachedOperator.getChildrenKeys(workerPath);
|
||||
|
||||
// available workerGroup list
|
||||
List<String> availableWorkerGroupList = new ArrayList<>();
|
||||
|
||||
for (String workerGroup : workerGroupList){
|
||||
String workerGroupPath= workerPath + "/" + workerGroup;
|
||||
List<String> childrenNodes = zookeeperCachedOperator.getChildrenKeys(workerGroupPath);
|
||||
if (CollectionUtils.isNotEmpty(childrenNodes)){
|
||||
availableWorkerGroupList.add(workerGroup);
|
||||
}
|
||||
}
|
||||
|
||||
result.put(Constants.DATA_LIST, availableWorkerGroupList);
|
||||
putMsg(result, Status.SUCCESS);
|
||||
return result;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,62 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
|
||||
<property name="log.base" value="logs"/>
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<!-- api server logback config start -->
|
||||
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-api-server.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>168</maxHistory>
|
||||
<maxFileSize>64MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
<!-- api server logback config end -->
|
||||
|
||||
<logger name="org.apache.zookeeper" level="WARN"/>
|
||||
<logger name="org.apache.hbase" level="WARN"/>
|
||||
<logger name="org.apache.hadoop" level="WARN"/>
|
||||
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT"/>
|
||||
<appender-ref ref="APILOGFILE"/>
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
|
|
@ -28,8 +28,6 @@ import org.apache.dolphinscheduler.dao.entity.Project;
|
|||
import org.apache.dolphinscheduler.dao.entity.User;
|
||||
import org.apache.dolphinscheduler.dao.mapper.*;
|
||||
import org.apache.dolphinscheduler.service.process.ProcessService;
|
||||
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
|
||||
import org.apache.dolphinscheduler.service.queue.TaskQueueFactory;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
|
|
@ -47,7 +45,6 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
@RunWith(PowerMockRunner.class)
|
||||
@PrepareForTest({TaskQueueFactory.class})
|
||||
public class DataAnalysisServiceTest {
|
||||
|
||||
@InjectMocks
|
||||
|
|
@ -74,8 +71,7 @@ public class DataAnalysisServiceTest {
|
|||
@Mock
|
||||
TaskInstanceMapper taskInstanceMapper;
|
||||
|
||||
@Mock
|
||||
ITaskQueue taskQueue;
|
||||
|
||||
|
||||
@Mock
|
||||
ProcessService processService;
|
||||
|
|
@ -183,30 +179,6 @@ public class DataAnalysisServiceTest {
|
|||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCountQueueState(){
|
||||
|
||||
PowerMockito.mockStatic(TaskQueueFactory.class);
|
||||
List<String> taskQueueList = new ArrayList<>(1);
|
||||
taskQueueList.add("1_0_1_1_-1");
|
||||
List<String> taskKillList = new ArrayList<>(1);
|
||||
taskKillList.add("1-0");
|
||||
PowerMockito.when(taskQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_QUEUE)).thenReturn(taskQueueList);
|
||||
PowerMockito.when(taskQueue.getAllTasks(Constants.DOLPHINSCHEDULER_TASKS_KILL)).thenReturn(taskKillList);
|
||||
PowerMockito.when(TaskQueueFactory.getTaskQueueInstance()).thenReturn(taskQueue);
|
||||
//checkProject false
|
||||
Map<String, Object> result = dataAnalysisService.countQueueState(user,2);
|
||||
Assert.assertTrue(result.isEmpty());
|
||||
|
||||
result = dataAnalysisService.countQueueState(user,1);
|
||||
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
|
||||
//admin
|
||||
user.setUserType(UserType.ADMIN_USER);
|
||||
result = dataAnalysisService.countQueueState(user,1);
|
||||
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* get list
|
||||
* @return
|
||||
|
|
|
|||
|
|
@ -117,7 +117,7 @@ public class ExecutorService2Test {
|
|||
null, null,
|
||||
null, null, 0,
|
||||
"", "", RunMode.RUN_MODE_SERIAL,
|
||||
Priority.LOW, 0, 110);
|
||||
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
|
||||
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
|
||||
verify(processService, times(1)).createCommand(any(Command.class));
|
||||
}catch (Exception e){
|
||||
|
|
@ -138,7 +138,7 @@ public class ExecutorService2Test {
|
|||
null, null,
|
||||
null, null, 0,
|
||||
"", "", RunMode.RUN_MODE_SERIAL,
|
||||
Priority.LOW, 0, 110);
|
||||
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
|
||||
Assert.assertEquals(Status.START_PROCESS_INSTANCE_ERROR, result.get(Constants.STATUS));
|
||||
verify(processService, times(0)).createCommand(any(Command.class));
|
||||
}catch (Exception e){
|
||||
|
|
@ -159,7 +159,7 @@ public class ExecutorService2Test {
|
|||
null, null,
|
||||
null, null, 0,
|
||||
"", "", RunMode.RUN_MODE_SERIAL,
|
||||
Priority.LOW, 0, 110);
|
||||
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
|
||||
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
|
||||
verify(processService, times(1)).createCommand(any(Command.class));
|
||||
}catch (Exception e){
|
||||
|
|
@ -180,7 +180,7 @@ public class ExecutorService2Test {
|
|||
null, null,
|
||||
null, null, 0,
|
||||
"", "", RunMode.RUN_MODE_PARALLEL,
|
||||
Priority.LOW, 0, 110);
|
||||
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
|
||||
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
|
||||
verify(processService, times(31)).createCommand(any(Command.class));
|
||||
}catch (Exception e){
|
||||
|
|
@ -201,7 +201,7 @@ public class ExecutorService2Test {
|
|||
null, null,
|
||||
null, null, 0,
|
||||
"", "", RunMode.RUN_MODE_PARALLEL,
|
||||
Priority.LOW, 0, 110);
|
||||
Priority.LOW, Constants.DEFAULT_WORKER_GROUP, 110);
|
||||
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
|
||||
verify(processService, times(15)).createCommand(any(Command.class));
|
||||
}catch (Exception e){
|
||||
|
|
|
|||
|
|
@ -52,12 +52,17 @@ public class LoggerServiceTest {
|
|||
//TASK_INSTANCE_NOT_FOUND
|
||||
Assert.assertEquals(Status.TASK_INSTANCE_NOT_FOUND.getCode(),result.getCode().intValue());
|
||||
|
||||
//HOST NOT FOUND
|
||||
result = loggerService.queryLog(1,1,1);
|
||||
try {
|
||||
//HOST NOT FOUND OR ILLEGAL
|
||||
result = loggerService.queryLog(1, 1, 1);
|
||||
} catch (RuntimeException e) {
|
||||
Assert.assertTrue(true);
|
||||
logger.error("testQueryDataSourceList error {}", e.getMessage());
|
||||
}
|
||||
Assert.assertEquals(Status.TASK_INSTANCE_NOT_FOUND.getCode(),result.getCode().intValue());
|
||||
|
||||
//SUCCESS
|
||||
taskInstance.setHost("127.0.0.1");
|
||||
taskInstance.setHost("127.0.0.1:8080");
|
||||
taskInstance.setLogPath("/temp/log");
|
||||
Mockito.when(processService.findTaskInstanceById(1)).thenReturn(taskInstance);
|
||||
result = loggerService.queryLog(1,1,1);
|
||||
|
|
@ -87,7 +92,7 @@ public class LoggerServiceTest {
|
|||
}
|
||||
|
||||
//success
|
||||
taskInstance.setHost("127.0.0.1");
|
||||
taskInstance.setHost("127.0.0.1:8080");
|
||||
taskInstance.setLogPath("/temp/log");
|
||||
//if use @RunWith(PowerMockRunner.class) mock object,sonarcloud will not calculate the coverage,
|
||||
// so no assert will be added here
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@
|
|||
*/
|
||||
package org.apache.dolphinscheduler.api.service;
|
||||
|
||||
import com.alibaba.druid.pool.DruidDataSource;
|
||||
import com.alibaba.fastjson.JSONArray;
|
||||
import com.alibaba.fastjson.JSONObject;
|
||||
import org.apache.dolphinscheduler.api.ApiApplicationServer;
|
||||
|
|
@ -28,7 +29,9 @@ import org.apache.dolphinscheduler.common.utils.FileUtils;
|
|||
import org.apache.dolphinscheduler.common.utils.JSONUtils;
|
||||
import org.apache.dolphinscheduler.dao.entity.*;
|
||||
import org.apache.dolphinscheduler.dao.mapper.*;
|
||||
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
|
||||
import org.apache.dolphinscheduler.service.process.ProcessService;
|
||||
import org.apache.dolphinscheduler.service.quartz.QuartzExecutors;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.json.JSONException;
|
||||
import org.junit.Assert;
|
||||
|
|
@ -38,10 +41,12 @@ import org.mockito.InjectMocks;
|
|||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.quartz.Scheduler;
|
||||
import org.skyscreamer.jsonassert.JSONAssert;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.boot.test.context.SpringBootTest;
|
||||
import org.springframework.context.ApplicationContext;
|
||||
import org.springframework.mock.web.MockMultipartFile;
|
||||
import org.springframework.web.multipart.MultipartFile;
|
||||
|
||||
|
|
@ -274,6 +279,7 @@ public class ProcessDefinitionServiceTest {
|
|||
|
||||
@Test
|
||||
public void testReleaseProcessDefinition() {
|
||||
|
||||
String projectName = "project_test1";
|
||||
Mockito.when(projectMapper.queryByName(projectName)).thenReturn(getProject(projectName));
|
||||
|
||||
|
|
@ -298,20 +304,21 @@ public class ProcessDefinitionServiceTest {
|
|||
46, ReleaseState.ONLINE.getCode());
|
||||
Assert.assertEquals(Status.SUCCESS, onlineRes.get(Constants.STATUS));
|
||||
|
||||
//process definition offline
|
||||
List<Schedule> schedules = new ArrayList<>();
|
||||
Schedule schedule = getSchedule();
|
||||
schedules.add(schedule);
|
||||
Mockito.when(scheduleMapper.selectAllByProcessDefineArray(new int[]{46})).thenReturn(schedules);
|
||||
Mockito.when(scheduleMapper.updateById(schedule)).thenReturn(1);
|
||||
Map<String, Object> offlineRes = processDefinitionService.releaseProcessDefinition(loginUser, "project_test1",
|
||||
46, ReleaseState.OFFLINE.getCode());
|
||||
Assert.assertEquals(Status.SUCCESS, offlineRes.get(Constants.STATUS));
|
||||
|
||||
//release error code
|
||||
Map<String, Object> failRes = processDefinitionService.releaseProcessDefinition(loginUser, "project_test1",
|
||||
46, 2);
|
||||
46, 2);
|
||||
Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, failRes.get(Constants.STATUS));
|
||||
|
||||
//FIXME has function exit code 1 when exception
|
||||
//process definition offline
|
||||
// List<Schedule> schedules = new ArrayList<>();
|
||||
// Schedule schedule = getSchedule();
|
||||
// schedules.add(schedule);
|
||||
// Mockito.when(scheduleMapper.selectAllByProcessDefineArray(new int[]{46})).thenReturn(schedules);
|
||||
// Mockito.when(scheduleMapper.updateById(schedule)).thenReturn(1);
|
||||
// Map<String, Object> offlineRes = processDefinitionService.releaseProcessDefinition(loginUser, "project_test1",
|
||||
// 46, ReleaseState.OFFLINE.getCode());
|
||||
// Assert.assertEquals(Status.SUCCESS, offlineRes.get(Constants.STATUS));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
@ -803,7 +810,7 @@ public class ProcessDefinitionServiceTest {
|
|||
schedule.setProcessInstancePriority(Priority.MEDIUM);
|
||||
schedule.setWarningType(WarningType.NONE);
|
||||
schedule.setWarningGroupId(1);
|
||||
schedule.setWorkerGroupId(-1);
|
||||
schedule.setWorkerGroup(Constants.DEFAULT_WORKER_GROUP);
|
||||
return schedule;
|
||||
}
|
||||
|
||||
|
|
@ -822,7 +829,6 @@ public class ProcessDefinitionServiceTest {
|
|||
processMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy()));
|
||||
processMeta.setScheduleReleaseState(String.valueOf(schedule.getReleaseState()));
|
||||
processMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority()));
|
||||
processMeta.setScheduleWorkerGroupId(schedule.getWorkerGroupId());
|
||||
processMeta.setScheduleWorkerGroupName("workgroup1");
|
||||
return processMeta;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,12 +27,15 @@ import org.apache.dolphinscheduler.dao.entity.User;
|
|||
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
|
||||
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
|
||||
import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper;
|
||||
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
|
||||
import org.apache.dolphinscheduler.service.zk.ZookeeperConfig;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
import org.mockito.InjectMocks;
|
||||
import org.mockito.Mock;
|
||||
import org.mockito.Mockito;
|
||||
import org.mockito.internal.matchers.Any;
|
||||
import org.mockito.junit.MockitoJUnitRunner;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
|
@ -52,11 +55,11 @@ public class WorkerGroupServiceTest {
|
|||
private WorkerGroupMapper workerGroupMapper;
|
||||
@Mock
|
||||
private ProcessInstanceMapper processInstanceMapper;
|
||||
|
||||
@Mock
|
||||
private ZookeeperCachedOperator zookeeperCachedOperator;
|
||||
|
||||
private String groupName="groupName000001";
|
||||
|
||||
|
||||
/**
|
||||
* create or update a worker group
|
||||
*/
|
||||
|
|
@ -129,8 +132,14 @@ public class WorkerGroupServiceTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testQueryAllGroup(){
|
||||
Mockito.when(workerGroupMapper.queryAllWorkerGroup()).thenReturn(getList());
|
||||
public void testQueryAllGroup() throws Exception {
|
||||
ZookeeperConfig zookeeperConfig = new ZookeeperConfig();
|
||||
zookeeperConfig.setDsRoot("/ds");
|
||||
Mockito.when(zookeeperCachedOperator.getZookeeperConfig()).thenReturn(zookeeperConfig);
|
||||
List<String> workerGroupStrList = new ArrayList<>();
|
||||
workerGroupStrList.add("workerGroup1");
|
||||
Mockito.when(zookeeperCachedOperator.getChildrenKeys(Mockito.anyString())).thenReturn(workerGroupStrList);
|
||||
|
||||
Map<String, Object> result = workerGroupService.queryAllGroup();
|
||||
logger.info(result.toString());
|
||||
Assert.assertEquals(Status.SUCCESS.getMsg(),(String)result.get(Constants.MSG));
|
||||
|
|
|
|||
|
|
@ -246,11 +246,6 @@
|
|||
</exclusions>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>javax.servlet</groupId>
|
||||
<artifactId>javax.servlet-api</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-hdfs</artifactId>
|
||||
|
|
|
|||
|
|
@ -25,9 +25,43 @@ import java.util.regex.Pattern;
|
|||
* Constants
|
||||
*/
|
||||
public final class Constants {
|
||||
|
||||
private Constants() {
|
||||
throw new IllegalStateException("Constants class");
|
||||
}
|
||||
|
||||
/**
|
||||
* quartz config
|
||||
*/
|
||||
public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass";
|
||||
public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName";
|
||||
public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId";
|
||||
public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon";
|
||||
public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties";
|
||||
public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class";
|
||||
public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount";
|
||||
public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons";
|
||||
public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority";
|
||||
public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class";
|
||||
public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix";
|
||||
public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered";
|
||||
public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold";
|
||||
public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval";
|
||||
public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource";
|
||||
public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class";
|
||||
|
||||
/**
|
||||
* quartz config default value
|
||||
*/
|
||||
public static final String QUARTZ_TABLE_PREFIX = "QRTZ_";
|
||||
public static final String QUARTZ_MISFIRETHRESHOLD = "60000";
|
||||
public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000";
|
||||
public static final String QUARTZ_DATASOURCE = "myDs";
|
||||
public static final String QUARTZ_THREADCOUNT = "25";
|
||||
public static final String QUARTZ_THREADPRIORITY = "5";
|
||||
public static final String QUARTZ_INSTANCENAME = "DolphinScheduler";
|
||||
public static final String QUARTZ_INSTANCEID = "AUTO";
|
||||
|
||||
/**
|
||||
* common properties path
|
||||
*/
|
||||
|
|
@ -56,9 +90,11 @@ public final class Constants {
|
|||
|
||||
|
||||
/**
|
||||
* yarn.resourcemanager.ha.rm.idsfs.defaultFS
|
||||
* yarn.resourcemanager.ha.rm.ids
|
||||
*/
|
||||
public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";
|
||||
public static final String YARN_RESOURCEMANAGER_HA_XX = "xx";
|
||||
|
||||
|
||||
/**
|
||||
* yarn.application.status.address
|
||||
|
|
@ -72,31 +108,25 @@ public final class Constants {
|
|||
public static final String HDFS_ROOT_USER = "hdfs.root.user";
|
||||
|
||||
/**
|
||||
* hdfs configuration
|
||||
* data.store2hdfs.basepath
|
||||
* hdfs/s3 configuration
|
||||
* resource.upload.path
|
||||
*/
|
||||
public static final String DATA_STORE_2_HDFS_BASEPATH = "data.store2hdfs.basepath";
|
||||
public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path";
|
||||
|
||||
/**
|
||||
* data.basedir.path
|
||||
* data basedir path
|
||||
*/
|
||||
public static final String DATA_BASEDIR_PATH = "data.basedir.path";
|
||||
|
||||
/**
|
||||
* data.download.basedir.path
|
||||
*/
|
||||
public static final String DATA_DOWNLOAD_BASEDIR_PATH = "data.download.basedir.path";
|
||||
|
||||
/**
|
||||
* process.exec.basepath
|
||||
*/
|
||||
public static final String PROCESS_EXEC_BASEPATH = "process.exec.basepath";
|
||||
|
||||
/**
|
||||
* dolphinscheduler.env.path
|
||||
*/
|
||||
public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";
|
||||
|
||||
/**
|
||||
* environment properties default path
|
||||
*/
|
||||
public static final String ENV_PATH = "env/dolphinscheduler_env.sh";
|
||||
|
||||
/**
|
||||
* python home
|
||||
|
|
@ -108,30 +138,33 @@ public final class Constants {
|
|||
*/
|
||||
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
|
||||
|
||||
public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties";
|
||||
|
||||
/**
|
||||
* development.state
|
||||
*/
|
||||
public static final String DEVELOPMENT_STATE = "development.state";
|
||||
public static final String DEVELOPMENT_STATE_DEFAULT_VALUE = "true";
|
||||
|
||||
/**
|
||||
* res.upload.startup.type
|
||||
* string true
|
||||
*/
|
||||
public static final String RES_UPLOAD_STARTUP_TYPE = "res.upload.startup.type";
|
||||
public static final String STRING_TRUE = "true";
|
||||
|
||||
/**
|
||||
* zookeeper quorum
|
||||
* resource storage type
|
||||
*/
|
||||
public static final String ZOOKEEPER_QUORUM = "zookeeper.quorum";
|
||||
public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type";
|
||||
|
||||
/**
|
||||
* MasterServer directory registered in zookeeper
|
||||
*/
|
||||
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/masters";
|
||||
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "/nodes/master";
|
||||
|
||||
/**
|
||||
* WorkerServer directory registered in zookeeper
|
||||
*/
|
||||
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/workers";
|
||||
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "/nodes/worker";
|
||||
|
||||
/**
|
||||
* all servers directory registered in zookeeper
|
||||
|
|
@ -143,10 +176,6 @@ public final class Constants {
|
|||
*/
|
||||
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters";
|
||||
|
||||
/**
|
||||
* WorkerServer lock directory registered in zookeeper
|
||||
*/
|
||||
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_WORKERS = "/lock/workers";
|
||||
|
||||
/**
|
||||
* MasterServer failover directory registered in zookeeper
|
||||
|
|
@ -163,16 +192,17 @@ public final class Constants {
|
|||
*/
|
||||
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters";
|
||||
|
||||
/**
|
||||
* need send warn times when master server or worker server failover
|
||||
*/
|
||||
public static final int DOLPHINSCHEDULER_WARN_TIMES_FAILOVER = 3;
|
||||
|
||||
/**
|
||||
* comma ,
|
||||
*/
|
||||
public static final String COMMA = ",";
|
||||
|
||||
/**
|
||||
* slash /
|
||||
*/
|
||||
public static final String SLASH = "/";
|
||||
|
||||
/**
|
||||
* COLON :
|
||||
*/
|
||||
|
|
@ -198,37 +228,6 @@ public final class Constants {
|
|||
*/
|
||||
public static final String EQUAL_SIGN = "=";
|
||||
|
||||
/**
|
||||
* ZOOKEEPER_SESSION_TIMEOUT
|
||||
*/
|
||||
public static final String ZOOKEEPER_SESSION_TIMEOUT = "zookeeper.session.timeout";
|
||||
|
||||
public static final String ZOOKEEPER_CONNECTION_TIMEOUT = "zookeeper.connection.timeout";
|
||||
|
||||
public static final String ZOOKEEPER_RETRY_SLEEP = "zookeeper.retry.sleep";
|
||||
public static final String ZOOKEEPER_RETRY_BASE_SLEEP = "zookeeper.retry.base.sleep";
|
||||
public static final String ZOOKEEPER_RETRY_MAX_SLEEP = "zookeeper.retry.max.sleep";
|
||||
|
||||
public static final String ZOOKEEPER_RETRY_MAXTIME = "zookeeper.retry.maxtime";
|
||||
|
||||
|
||||
public static final String MASTER_HEARTBEAT_INTERVAL = "master.heartbeat.interval";
|
||||
|
||||
public static final String MASTER_EXEC_THREADS = "master.exec.threads";
|
||||
|
||||
public static final String MASTER_EXEC_TASK_THREADS = "master.exec.task.number";
|
||||
|
||||
|
||||
public static final String MASTER_COMMIT_RETRY_TIMES = "master.task.commit.retryTimes";
|
||||
|
||||
public static final String MASTER_COMMIT_RETRY_INTERVAL = "master.task.commit.interval";
|
||||
|
||||
|
||||
public static final String WORKER_EXEC_THREADS = "worker.exec.threads";
|
||||
|
||||
public static final String WORKER_HEARTBEAT_INTERVAL = "worker.heartbeat.interval";
|
||||
|
||||
public static final String WORKER_FETCH_TASK_NUM = "worker.fetch.task.num";
|
||||
|
||||
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
|
||||
|
||||
|
|
@ -239,21 +238,6 @@ public final class Constants {
|
|||
public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory";
|
||||
|
||||
|
||||
/**
|
||||
* dolphinscheduler tasks queue
|
||||
*/
|
||||
public static final String DOLPHINSCHEDULER_TASKS_QUEUE = "tasks_queue";
|
||||
|
||||
/**
|
||||
* dolphinscheduler need kill tasks queue
|
||||
*/
|
||||
public static final String DOLPHINSCHEDULER_TASKS_KILL = "tasks_kill";
|
||||
|
||||
public static final String ZOOKEEPER_DOLPHINSCHEDULER_ROOT = "zookeeper.dolphinscheduler.root";
|
||||
|
||||
public static final String SCHEDULER_QUEUE_IMPL = "dolphinscheduler.queue.impl";
|
||||
|
||||
|
||||
/**
|
||||
* date format of yyyy-MM-dd HH:mm:ss
|
||||
*/
|
||||
|
|
@ -345,26 +329,6 @@ public final class Constants {
|
|||
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
|
||||
|
||||
|
||||
/**
|
||||
* heartbeat threads number
|
||||
*/
|
||||
public static final int DEFAUL_WORKER_HEARTBEAT_THREAD_NUM = 1;
|
||||
|
||||
/**
|
||||
* heartbeat interval
|
||||
*/
|
||||
public static final int DEFAULT_WORKER_HEARTBEAT_INTERVAL = 60;
|
||||
|
||||
/**
|
||||
* worker fetch task number
|
||||
*/
|
||||
public static final int DEFAULT_WORKER_FETCH_TASK_NUM = 1;
|
||||
|
||||
/**
|
||||
* worker execute threads number
|
||||
*/
|
||||
public static final int DEFAULT_WORKER_EXEC_THREAD_NUM = 10;
|
||||
|
||||
/**
|
||||
* master cpu load
|
||||
*/
|
||||
|
|
@ -386,16 +350,6 @@ public final class Constants {
|
|||
public static final double DEFAULT_WORKER_RESERVED_MEMORY = OSUtils.totalMemorySize() / 10;
|
||||
|
||||
|
||||
/**
|
||||
* master execute threads number
|
||||
*/
|
||||
public static final int DEFAULT_MASTER_EXEC_THREAD_NUM = 100;
|
||||
|
||||
|
||||
/**
|
||||
* default master concurrent task execute num
|
||||
*/
|
||||
public static final int DEFAULT_MASTER_TASK_EXEC_NUM = 20;
|
||||
|
||||
/**
|
||||
* default log cache rows num,output when reach the number
|
||||
|
|
@ -403,33 +357,11 @@ public final class Constants {
|
|||
public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16;
|
||||
|
||||
/**
|
||||
* log flush interval,output when reach the interval
|
||||
* log flush interval?output when reach the interval
|
||||
*/
|
||||
public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000;
|
||||
|
||||
|
||||
/**
|
||||
* default master heartbeat thread number
|
||||
*/
|
||||
public static final int DEFAULT_MASTER_HEARTBEAT_THREAD_NUM = 1;
|
||||
|
||||
|
||||
/**
|
||||
* default master heartbeat interval
|
||||
*/
|
||||
public static final int DEFAULT_MASTER_HEARTBEAT_INTERVAL = 60;
|
||||
|
||||
/**
|
||||
* default master commit retry times
|
||||
*/
|
||||
public static final int DEFAULT_MASTER_COMMIT_RETRY_TIMES = 5;
|
||||
|
||||
|
||||
/**
|
||||
* default master commit retry interval
|
||||
*/
|
||||
public static final int DEFAULT_MASTER_COMMIT_RETRY_INTERVAL = 3000;
|
||||
|
||||
/**
|
||||
* time unit secong to minutes
|
||||
*/
|
||||
|
|
@ -448,9 +380,9 @@ public final class Constants {
|
|||
public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";
|
||||
|
||||
/**
|
||||
* task record configuration path
|
||||
* datasource configuration path
|
||||
*/
|
||||
public static final String APPLICATION_PROPERTIES = "application.properties";
|
||||
public static final String DATASOURCE_PROPERTIES = "/datasource.properties";
|
||||
|
||||
public static final String TASK_RECORD_URL = "task.record.datasource.url";
|
||||
|
||||
|
|
@ -568,7 +500,7 @@ public final class Constants {
|
|||
/**
|
||||
* heartbeat for zk info length
|
||||
*/
|
||||
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 7;
|
||||
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 5;
|
||||
|
||||
|
||||
/**
|
||||
|
|
@ -800,7 +732,6 @@ public final class Constants {
|
|||
public static final String ALIAS = "alias";
|
||||
public static final String CONTENT = "content";
|
||||
public static final String DEPENDENT_SPLIT = ":||";
|
||||
public static final String DEPENDENT_ALL = "ALL";
|
||||
|
||||
|
||||
/**
|
||||
|
|
@ -859,7 +790,7 @@ public final class Constants {
|
|||
*/
|
||||
public static final String HIVE_CONF = "hiveconf:";
|
||||
|
||||
//flink 任务
|
||||
//flink ??
|
||||
public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
|
||||
public static final String FLINK_RUN_MODE = "-m";
|
||||
public static final String FLINK_YARN_SLOT = "-ys";
|
||||
|
|
@ -894,26 +825,20 @@ public final class Constants {
|
|||
|
||||
/**
|
||||
* data total
|
||||
* 数据总数
|
||||
*/
|
||||
public static final String COUNT = "count";
|
||||
|
||||
/**
|
||||
* page size
|
||||
* 每页数据条数
|
||||
*/
|
||||
public static final String PAGE_SIZE = "pageSize";
|
||||
|
||||
/**
|
||||
* current page no
|
||||
* 当前页码
|
||||
*/
|
||||
public static final String PAGE_NUMBER = "pageNo";
|
||||
|
||||
/**
|
||||
* result
|
||||
*/
|
||||
public static final String RESULT = "result";
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
|
|
@ -994,4 +919,11 @@ public final class Constants {
|
|||
* dataSource sensitive param
|
||||
*/
|
||||
public static final String DATASOURCE_PASSWORD_REGEX = "(?<=(\"password\":\")).*?(?=(\"))";
|
||||
|
||||
/**
|
||||
* default worker group
|
||||
*/
|
||||
public static final String DEFAULT_WORKER_GROUP = "default";
|
||||
|
||||
public static final Integer TASK_INFO_LENGTH = 5;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -65,4 +65,13 @@ public enum CommandType {
|
|||
public String getDescp() {
|
||||
return descp;
|
||||
}
|
||||
|
||||
public static CommandType of(Integer status){
|
||||
for(CommandType cmdType : values()){
|
||||
if(cmdType.getCode() == status){
|
||||
return cmdType;
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("invalid status : " + status);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -57,4 +57,14 @@ public enum DbType {
|
|||
public String getDescp() {
|
||||
return descp;
|
||||
}
|
||||
|
||||
|
||||
public static DbType of(int type){
|
||||
for(DbType ty : values()){
|
||||
if(ty.getCode() == type){
|
||||
return ty;
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("invalid type : " + type);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -128,4 +128,13 @@ public enum ExecutionStatus {
|
|||
public String getDescp() {
|
||||
return descp;
|
||||
}
|
||||
|
||||
public static ExecutionStatus of(int status){
|
||||
for(ExecutionStatus es : values()){
|
||||
if(es.getCode() == status){
|
||||
return es;
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("invalid status : " + status);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -16,14 +16,45 @@
|
|||
*/
|
||||
package org.apache.dolphinscheduler.common.enums;
|
||||
|
||||
import com.baomidou.mybatisplus.annotation.EnumValue;
|
||||
|
||||
/**
|
||||
* task timeout strategy
|
||||
*/
|
||||
public enum TaskTimeoutStrategy {
|
||||
public enum TaskTimeoutStrategy {
|
||||
/**
|
||||
* 0 warn
|
||||
* 1 failed
|
||||
* 2 warn+failed
|
||||
*/
|
||||
WARN, FAILED, WARNFAILED
|
||||
WARN(0, "warn"),
|
||||
FAILED(1,"failed"),
|
||||
WARNFAILED(2,"warnfailed");
|
||||
|
||||
|
||||
TaskTimeoutStrategy(int code, String descp){
|
||||
this.code = code;
|
||||
this.descp = descp;
|
||||
}
|
||||
|
||||
@EnumValue
|
||||
private final int code;
|
||||
private final String descp;
|
||||
|
||||
public int getCode() {
|
||||
return code;
|
||||
}
|
||||
|
||||
public String getDescp() {
|
||||
return descp;
|
||||
}
|
||||
|
||||
public static TaskTimeoutStrategy of(int status){
|
||||
for(TaskTimeoutStrategy es : values()){
|
||||
if(es.getCode() == status){
|
||||
return es;
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("invalid status : " + status);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -44,4 +44,15 @@ public enum UdfType {
|
|||
public String getDescp() {
|
||||
return descp;
|
||||
}
|
||||
|
||||
public static UdfType of(int type){
|
||||
for(UdfType ut : values()){
|
||||
if(ut.getCode() == type){
|
||||
return ut;
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("invalid type : " + type);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -114,9 +114,9 @@ public class TaskNode {
|
|||
private Priority taskInstancePriority;
|
||||
|
||||
/**
|
||||
* worker group id
|
||||
* worker group
|
||||
*/
|
||||
private int workerGroupId;
|
||||
private String workerGroup;
|
||||
|
||||
|
||||
/**
|
||||
|
|
@ -230,7 +230,7 @@ public class TaskNode {
|
|||
Objects.equals(extras, taskNode.extras) &&
|
||||
Objects.equals(runFlag, taskNode.runFlag) &&
|
||||
Objects.equals(dependence, taskNode.dependence) &&
|
||||
Objects.equals(workerGroupId, taskNode.workerGroupId) &&
|
||||
Objects.equals(workerGroup, taskNode.workerGroup) &&
|
||||
CollectionUtils.equalLists(depList, taskNode.depList);
|
||||
}
|
||||
|
||||
|
|
@ -281,7 +281,7 @@ public class TaskNode {
|
|||
|
||||
/**
|
||||
* get task time out parameter
|
||||
* @return
|
||||
* @return task time out parameter
|
||||
*/
|
||||
public TaskTimeoutParameter getTaskTimeoutParameter() {
|
||||
if(StringUtils.isNotEmpty(this.getTimeout())){
|
||||
|
|
@ -310,15 +310,15 @@ public class TaskNode {
|
|||
", dependence='" + dependence + '\'' +
|
||||
", taskInstancePriority=" + taskInstancePriority +
|
||||
", timeout='" + timeout + '\'' +
|
||||
", workerGroupId='" + workerGroupId + '\'' +
|
||||
", workerGroup='" + workerGroup + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public int getWorkerGroupId() {
|
||||
return workerGroupId;
|
||||
public String getWorkerGroup() {
|
||||
return workerGroup;
|
||||
}
|
||||
|
||||
public void setWorkerGroupId(int workerGroupId) {
|
||||
this.workerGroupId = workerGroupId;
|
||||
public void setWorkerGroup(String workerGroup) {
|
||||
this.workerGroup = workerGroup;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,9 +20,10 @@ package org.apache.dolphinscheduler.common.process;
|
|||
import org.apache.dolphinscheduler.common.enums.DataType;
|
||||
import org.apache.dolphinscheduler.common.enums.Direct;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Objects;
|
||||
|
||||
public class Property {
|
||||
public class Property implements Serializable {
|
||||
/**
|
||||
* key
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -40,7 +40,7 @@ public abstract class AbstractParameters implements IParameters {
|
|||
|
||||
/**
|
||||
* get local parameters list
|
||||
* @return
|
||||
* @return Property list
|
||||
*/
|
||||
public List<Property> getLocalParams() {
|
||||
return localParams;
|
||||
|
|
@ -52,7 +52,7 @@ public abstract class AbstractParameters implements IParameters {
|
|||
|
||||
/**
|
||||
* get local parameters map
|
||||
* @return
|
||||
* @return parameters map
|
||||
*/
|
||||
public Map<String,Property> getLocalParametersMap() {
|
||||
if (localParams != null) {
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ public interface IParameters {
|
|||
/**
|
||||
* check parameters is valid
|
||||
*
|
||||
* @return
|
||||
* @return result
|
||||
*/
|
||||
boolean checkParameters();
|
||||
|
||||
|
|
|
|||
|
|
@ -34,6 +34,6 @@ public class Stopper {
|
|||
}
|
||||
|
||||
public static final void stop(){
|
||||
signal.getAndSet(true);
|
||||
signal.set(true);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -71,7 +71,7 @@ public class ThreadPoolExecutors {
|
|||
* Executes the given task sometime in the future. The task may execute in a new thread or in an existing pooled thread.
|
||||
* If the task cannot be submitted for execution, either because this executor has been shutdown or because its capacity has been reached,
|
||||
* the task is handled by the current RejectedExecutionHandler.
|
||||
* @param event
|
||||
* @param event event
|
||||
*/
|
||||
public void execute(final Runnable event) {
|
||||
Executor executor = getExecutor();
|
||||
|
|
|
|||
|
|
@ -33,10 +33,11 @@ public class ThreadUtils {
|
|||
private static final int STACK_DEPTH = 20;
|
||||
|
||||
/**
|
||||
Wrapper over newCachedThreadPool. Thread names are formatted as prefix-ID, where ID is a
|
||||
* Wrapper over newCachedThreadPool. Thread names are formatted as prefix-ID, where ID is a
|
||||
* unique, sequentially assigned integer.
|
||||
* @param prefix
|
||||
* @return
|
||||
*
|
||||
* @param prefix prefix
|
||||
* @return ThreadPoolExecutor
|
||||
*/
|
||||
public static ThreadPoolExecutor newDaemonCachedThreadPool(String prefix){
|
||||
ThreadFactory threadFactory = namedThreadFactory(prefix);
|
||||
|
|
@ -45,8 +46,8 @@ public class ThreadUtils {
|
|||
|
||||
/**
|
||||
* Create a thread factory that names threads with a prefix and also sets the threads to daemon.
|
||||
* @param prefix
|
||||
* @return
|
||||
* @param prefix prefix
|
||||
* @return ThreadFactory
|
||||
*/
|
||||
private static ThreadFactory namedThreadFactory(String prefix) {
|
||||
return new ThreadFactoryBuilder().setDaemon(true).setNameFormat(prefix + "-%d").build();
|
||||
|
|
@ -56,10 +57,10 @@ public class ThreadUtils {
|
|||
/**
|
||||
* Create a cached thread pool whose max number of threads is `maxThreadNumber`. Thread names
|
||||
* are formatted as prefix-ID, where ID is a unique, sequentially assigned integer.
|
||||
* @param prefix
|
||||
* @param maxThreadNumber
|
||||
* @param keepAliveSeconds
|
||||
* @return
|
||||
* @param prefix prefix
|
||||
* @param maxThreadNumber maxThreadNumber
|
||||
* @param keepAliveSeconds keepAliveSeconds
|
||||
* @return ThreadPoolExecutor
|
||||
*/
|
||||
public static ThreadPoolExecutor newDaemonCachedThreadPool(String prefix ,
|
||||
int maxThreadNumber,
|
||||
|
|
@ -82,9 +83,9 @@ public class ThreadUtils {
|
|||
/**
|
||||
* Wrapper over newFixedThreadPool. Thread names are formatted as prefix-ID, where ID is a
|
||||
* unique, sequentially assigned integer.
|
||||
* @param nThreads
|
||||
* @param prefix
|
||||
* @return
|
||||
* @param nThreads nThreads
|
||||
* @param prefix prefix
|
||||
* @return ThreadPoolExecutor
|
||||
*/
|
||||
public static ThreadPoolExecutor newDaemonFixedThreadPool(int nThreads , String prefix){
|
||||
ThreadFactory threadFactory = namedThreadFactory(prefix);
|
||||
|
|
@ -93,8 +94,8 @@ public class ThreadUtils {
|
|||
|
||||
/**
|
||||
* Wrapper over newSingleThreadExecutor.
|
||||
* @param threadName
|
||||
* @return
|
||||
* @param threadName threadName
|
||||
* @return ExecutorService
|
||||
*/
|
||||
public static ExecutorService newDaemonSingleThreadExecutor(String threadName){
|
||||
ThreadFactory threadFactory = new ThreadFactoryBuilder()
|
||||
|
|
@ -106,22 +107,23 @@ public class ThreadUtils {
|
|||
|
||||
/**
|
||||
* Wrapper over newDaemonFixedThreadExecutor.
|
||||
* @param threadName
|
||||
* @param threadsNum
|
||||
* @return
|
||||
* @param threadName threadName
|
||||
* @param threadsNum threadsNum
|
||||
* @return ExecutorService
|
||||
*/
|
||||
public static ExecutorService newDaemonFixedThreadExecutor(String threadName,int threadsNum){
|
||||
ThreadFactory threadFactory = new ThreadFactoryBuilder()
|
||||
.setDaemon(true)
|
||||
.setNameFormat(threadName)
|
||||
.build();
|
||||
return Executors.newFixedThreadPool(threadsNum,threadFactory);
|
||||
return Executors.newFixedThreadPool(threadsNum, threadFactory);
|
||||
}
|
||||
|
||||
/**
|
||||
* Wrapper over ScheduledThreadPoolExecutor
|
||||
* @param corePoolSize
|
||||
* @return
|
||||
* @param threadName threadName
|
||||
* @param corePoolSize corePoolSize
|
||||
* @return ScheduledExecutorService
|
||||
*/
|
||||
public static ScheduledExecutorService newDaemonThreadScheduledExecutor(String threadName,int corePoolSize) {
|
||||
ThreadFactory threadFactory = new ThreadFactoryBuilder()
|
||||
|
|
@ -136,6 +138,11 @@ public class ThreadUtils {
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* get thread info
|
||||
* @param t t
|
||||
* @return ThreadInfo
|
||||
*/
|
||||
public static ThreadInfo getThreadInfo(Thread t) {
|
||||
long tid = t.getId();
|
||||
return threadBean.getThreadInfo(tid, STACK_DEPTH);
|
||||
|
|
@ -144,7 +151,9 @@ public class ThreadUtils {
|
|||
|
||||
/**
|
||||
* Format the given ThreadInfo object as a String.
|
||||
* @param indent a prefix for each line, used for nested indentation
|
||||
* @param threadInfo threadInfo
|
||||
* @param indent indent
|
||||
* @return threadInfo
|
||||
*/
|
||||
public static String formatThreadInfo(ThreadInfo threadInfo, String indent) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
|
|
@ -156,9 +165,9 @@ public class ThreadUtils {
|
|||
/**
|
||||
* Print all of the thread's information and stack traces.
|
||||
*
|
||||
* @param sb
|
||||
* @param info
|
||||
* @param indent
|
||||
* @param sb StringBuilder
|
||||
* @param info ThreadInfo
|
||||
* @param indent indent
|
||||
*/
|
||||
public static void appendThreadInfo(StringBuilder sb,
|
||||
ThreadInfo info,
|
||||
|
|
@ -193,6 +202,12 @@ public class ThreadUtils {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* getTaskName
|
||||
* @param id id
|
||||
* @param name name
|
||||
* @return task name
|
||||
*/
|
||||
private static String getTaskName(long id, String name) {
|
||||
if (name == null) {
|
||||
return Long.toString(id);
|
||||
|
|
|
|||
|
|
@ -20,13 +20,18 @@ import org.apache.dolphinscheduler.common.Constants;
|
|||
import org.apache.dolphinscheduler.common.enums.ResUploadType;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.net.URL;
|
||||
|
||||
/**
|
||||
* common utils
|
||||
*/
|
||||
public class CommonUtils {
|
||||
public class CommonUtils {
|
||||
private static final Logger logger = LoggerFactory.getLogger(CommonUtils.class);
|
||||
|
||||
private CommonUtils() {
|
||||
throw new IllegalStateException("CommonUtils class");
|
||||
}
|
||||
|
|
@ -37,25 +42,25 @@ public class CommonUtils {
|
|||
public static String getSystemEnvPath() {
|
||||
String envPath = PropertyUtils.getString(Constants.DOLPHINSCHEDULER_ENV_PATH);
|
||||
if (StringUtils.isEmpty(envPath)) {
|
||||
envPath = System.getProperty("user.home") + File.separator + ".bash_profile";
|
||||
URL envDefaultPath = CommonUtils.class.getClassLoader().getResource(Constants.ENV_PATH);
|
||||
|
||||
if (envDefaultPath != null){
|
||||
envPath = envDefaultPath.getPath();
|
||||
logger.debug("env path :{}", envPath);
|
||||
}else{
|
||||
envPath = System.getProperty("user.home") + File.separator + ".bash_profile";
|
||||
}
|
||||
}
|
||||
|
||||
return envPath;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return get queue implementation name
|
||||
*/
|
||||
public static String getQueueImplValue(){
|
||||
return PropertyUtils.getString(Constants.SCHEDULER_QUEUE_IMPL);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @return is develop mode
|
||||
*/
|
||||
public static boolean isDevelopMode() {
|
||||
return PropertyUtils.getBoolean(Constants.DEVELOPMENT_STATE);
|
||||
return PropertyUtils.getBoolean(Constants.DEVELOPMENT_STATE, true);
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -65,9 +70,9 @@ public class CommonUtils {
|
|||
* @return true if upload resource is HDFS and kerberos startup
|
||||
*/
|
||||
public static boolean getKerberosStartupState(){
|
||||
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
|
||||
String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
|
||||
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
|
||||
Boolean kerberosStartupState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE);
|
||||
Boolean kerberosStartupState = PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false);
|
||||
return resUploadType == ResUploadType.HDFS && kerberosStartupState;
|
||||
}
|
||||
|
||||
|
|
|
|||
File diff suppressed because one or more lines are too long
|
|
@ -34,6 +34,8 @@ import static org.apache.dolphinscheduler.common.Constants.*;
|
|||
public class FileUtils {
|
||||
public static final Logger logger = LoggerFactory.getLogger(FileUtils.class);
|
||||
|
||||
public static final String DATA_BASEDIR = PropertyUtils.getString(DATA_BASEDIR_PATH,"/tmp/dolphinscheduler");
|
||||
|
||||
/**
|
||||
* get file suffix
|
||||
*
|
||||
|
|
@ -59,7 +61,14 @@ public class FileUtils {
|
|||
* @return download file name
|
||||
*/
|
||||
public static String getDownloadFilename(String filename) {
|
||||
return String.format("%s/%s/%s", PropertyUtils.getString(DATA_DOWNLOAD_BASEDIR_PATH), DateUtils.getCurrentTime(YYYYMMDDHHMMSS), filename);
|
||||
String fileName = String.format("%s/download/%s/%s", DATA_BASEDIR, DateUtils.getCurrentTime(YYYYMMDDHHMMSS), filename);
|
||||
|
||||
File file = new File(fileName);
|
||||
if (!file.getParentFile().exists()){
|
||||
file.getParentFile().mkdirs();
|
||||
}
|
||||
|
||||
return fileName;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -70,7 +79,13 @@ public class FileUtils {
|
|||
* @return local file path
|
||||
*/
|
||||
public static String getUploadFilename(String tenantCode, String filename) {
|
||||
return String.format("%s/%s/resources/%s", PropertyUtils.getString(DATA_BASEDIR_PATH), tenantCode, filename);
|
||||
String fileName = String.format("%s/%s/resources/%s", DATA_BASEDIR, tenantCode, filename);
|
||||
File file = new File(fileName);
|
||||
if (!file.getParentFile().exists()){
|
||||
file.getParentFile().mkdirs();
|
||||
}
|
||||
|
||||
return fileName;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -82,9 +97,14 @@ public class FileUtils {
|
|||
* @return directory of process execution
|
||||
*/
|
||||
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId, int taskInstanceId) {
|
||||
|
||||
return String.format("%s/process/%s/%s/%s/%s", PropertyUtils.getString(PROCESS_EXEC_BASEPATH), Integer.toString(projectId),
|
||||
String fileName = String.format("%s/exec/process/%s/%s/%s/%s", DATA_BASEDIR, Integer.toString(projectId),
|
||||
Integer.toString(processDefineId), Integer.toString(processInstanceId),Integer.toString(taskInstanceId));
|
||||
File file = new File(fileName);
|
||||
if (!file.getParentFile().exists()){
|
||||
file.getParentFile().mkdirs();
|
||||
}
|
||||
|
||||
return fileName;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -95,15 +115,21 @@ public class FileUtils {
|
|||
* @return directory of process instances
|
||||
*/
|
||||
public static String getProcessExecDir(int projectId, int processDefineId, int processInstanceId) {
|
||||
return String.format("%s/process/%s/%s/%s", PropertyUtils.getString(PROCESS_EXEC_BASEPATH), Integer.toString(projectId),
|
||||
String fileName = String.format("%s/exec/process/%s/%s/%s", DATA_BASEDIR, Integer.toString(projectId),
|
||||
Integer.toString(processDefineId), Integer.toString(processInstanceId));
|
||||
File file = new File(fileName);
|
||||
if (!file.getParentFile().exists()){
|
||||
file.getParentFile().mkdirs();
|
||||
}
|
||||
|
||||
return fileName;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return get suffixes for resource files that support online viewing
|
||||
*/
|
||||
public static String getResourceViewSuffixs() {
|
||||
return PropertyUtils.getString(RESOURCE_VIEW_SUFFIXS);
|
||||
return PropertyUtils.getString(RESOURCE_VIEW_SUFFIXS, RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -38,6 +38,8 @@ import java.util.Map;
|
|||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.apache.dolphinscheduler.common.Constants.RESOURCE_UPLOAD_PATH;
|
||||
|
||||
/**
|
||||
* hadoop utils
|
||||
* single instance
|
||||
|
|
@ -47,8 +49,11 @@ public class HadoopUtils implements Closeable {
|
|||
private static final Logger logger = LoggerFactory.getLogger(HadoopUtils.class);
|
||||
|
||||
private static String hdfsUser = PropertyUtils.getString(Constants.HDFS_ROOT_USER);
|
||||
public static final String resourceUploadPath = PropertyUtils.getString(RESOURCE_UPLOAD_PATH, "/dolphinscheduler");
|
||||
|
||||
private static volatile HadoopUtils instance = new HadoopUtils();
|
||||
private static volatile Configuration configuration;
|
||||
private static volatile boolean yarnEnabled = false;
|
||||
private static FileSystem fs;
|
||||
|
||||
|
||||
|
|
@ -72,8 +77,7 @@ public class HadoopUtils implements Closeable {
|
|||
* init dolphinscheduler root path in hdfs
|
||||
*/
|
||||
private void initHdfsPath(){
|
||||
String hdfsPath = PropertyUtils.getString(Constants.DATA_STORE_2_HDFS_BASEPATH);
|
||||
Path path = new Path(hdfsPath);
|
||||
Path path = new Path(resourceUploadPath);
|
||||
|
||||
try {
|
||||
if (!fs.exists(path)) {
|
||||
|
|
@ -95,11 +99,11 @@ public class HadoopUtils implements Closeable {
|
|||
try {
|
||||
configuration = new Configuration();
|
||||
|
||||
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
|
||||
String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
|
||||
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
|
||||
|
||||
if (resUploadType == ResUploadType.HDFS){
|
||||
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE)){
|
||||
if (PropertyUtils.getBoolean(Constants.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE,false)){
|
||||
System.setProperty(Constants.JAVA_SECURITY_KRB5_CONF,
|
||||
PropertyUtils.getString(Constants.JAVA_SECURITY_KRB5_CONF_PATH));
|
||||
configuration.set(Constants.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
|
||||
|
|
@ -151,14 +155,28 @@ public class HadoopUtils implements Closeable {
|
|||
fs = FileSystem.get(configuration);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* if rmHaIds includes xx, it signs not use resourcemanager
|
||||
* otherwise:
|
||||
* if rmHaIds is empty, single resourcemanager enabled
|
||||
* if rmHaIds not empty: resourcemanager HA enabled
|
||||
*/
|
||||
String rmHaIds = PropertyUtils.getString(Constants.YARN_RESOURCEMANAGER_HA_RM_IDS);
|
||||
String appAddress = PropertyUtils.getString(Constants.YARN_APPLICATION_STATUS_ADDRESS);
|
||||
if (!StringUtils.isEmpty(rmHaIds)) {
|
||||
//not use resourcemanager
|
||||
if (rmHaIds.contains(Constants.YARN_RESOURCEMANAGER_HA_XX)){
|
||||
yarnEnabled = false;
|
||||
} else if (!StringUtils.isEmpty(rmHaIds)) {
|
||||
//resourcemanager HA enabled
|
||||
appAddress = getAppAddress(appAddress, rmHaIds);
|
||||
yarnEnabled = true;
|
||||
logger.info("appAddress : {}", appAddress);
|
||||
} else {
|
||||
//single resourcemanager enabled
|
||||
yarnEnabled = true;
|
||||
}
|
||||
configuration.set(Constants.YARN_APPLICATION_STATUS_ADDRESS, appAddress);
|
||||
|
||||
} catch (Exception e) {
|
||||
logger.error(e.getMessage(), e);
|
||||
}
|
||||
|
|
@ -361,6 +379,13 @@ public class HadoopUtils implements Closeable {
|
|||
return fs.rename(new Path(src), new Path(dst));
|
||||
}
|
||||
|
||||
/**
|
||||
* hadoop resourcemanager enabled or not
|
||||
* @return result
|
||||
*/
|
||||
public boolean isYarnEnabled() {
|
||||
return yarnEnabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* get the state of an application
|
||||
|
|
@ -401,16 +426,15 @@ public class HadoopUtils implements Closeable {
|
|||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* get data hdfs path
|
||||
* @return data hdfs path
|
||||
*/
|
||||
public static String getHdfsDataBasePath() {
|
||||
String basePath = PropertyUtils.getString(Constants.DATA_STORE_2_HDFS_BASEPATH);
|
||||
if ("/".equals(basePath)) {
|
||||
if ("/".equals(resourceUploadPath)) {
|
||||
// if basepath is configured to /, the generated url may be //default/resources (with extra leading /)
|
||||
return "";
|
||||
} else {
|
||||
return basePath;
|
||||
return resourceUploadPath;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -416,6 +416,8 @@ public class OSUtils {
|
|||
|
||||
/**
|
||||
* check memory and cpu usage
|
||||
* @param systemCpuLoad systemCpuLoad
|
||||
* @param systemReservedMemory systemReservedMemory
|
||||
* @return check memory and cpu usage
|
||||
*/
|
||||
public static Boolean checkResource(double systemCpuLoad, double systemReservedMemory){
|
||||
|
|
|
|||
|
|
@ -34,10 +34,9 @@ public final class Preconditions {
|
|||
* Ensures that the given object reference is not null.
|
||||
* Upon violation, a {@code NullPointerException} with no message is thrown.
|
||||
*
|
||||
* @param reference The object reference
|
||||
* @return The object reference itself (generically typed).
|
||||
*
|
||||
* @throws NullPointerException Thrown, if the passed reference was null.
|
||||
* @param reference reference
|
||||
* @param <T> T
|
||||
* @return T
|
||||
*/
|
||||
public static <T> T checkNotNull(T reference) {
|
||||
if (reference == null) {
|
||||
|
|
@ -49,12 +48,10 @@ public final class Preconditions {
|
|||
/**
|
||||
* Ensures that the given object reference is not null.
|
||||
* Upon violation, a {@code NullPointerException} with the given message is thrown.
|
||||
*
|
||||
* @param reference The object reference
|
||||
* @param errorMessage The message for the {@code NullPointerException} that is thrown if the check fails.
|
||||
* @return The object reference itself (generically typed).
|
||||
*
|
||||
* @throws NullPointerException Thrown, if the passed reference was null.
|
||||
* @param reference reference
|
||||
* @param errorMessage errorMessage
|
||||
* @param <T> T
|
||||
* @return T
|
||||
*/
|
||||
public static <T> T checkNotNull(T reference, String errorMessage) {
|
||||
if (reference == null) {
|
||||
|
|
@ -78,9 +75,8 @@ public final class Preconditions {
|
|||
* @param errorMessageArgs The arguments for the error message, to be inserted into the
|
||||
* message template for the {@code %s} placeholders.
|
||||
*
|
||||
* @param <T>
|
||||
* @return The object reference itself (generically typed).
|
||||
*
|
||||
* @throws NullPointerException Thrown, if the passed reference was null.
|
||||
*/
|
||||
public static <T> T checkNotNull(T reference,
|
||||
String errorMessageTemplate,
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ public class PropertyUtils {
|
|||
* @return judge whether resource upload startup
|
||||
*/
|
||||
public static Boolean getResUploadStartupState(){
|
||||
String resUploadStartupType = PropertyUtils.getString(Constants.RES_UPLOAD_STARTUP_TYPE);
|
||||
String resUploadStartupType = PropertyUtils.getString(Constants.RESOURCE_STORAGE_TYPE);
|
||||
ResUploadType resUploadType = ResUploadType.valueOf(resUploadStartupType);
|
||||
return resUploadType == ResUploadType.HDFS || resUploadType == ResUploadType.S3;
|
||||
}
|
||||
|
|
@ -89,6 +89,18 @@ public class PropertyUtils {
|
|||
return properties.getProperty(key.trim());
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
*
|
||||
* @param key property name
|
||||
* @param defaultVal default value
|
||||
* @return property value
|
||||
*/
|
||||
public static String getString(String key, String defaultVal) {
|
||||
String val = properties.getProperty(key.trim());
|
||||
return val == null ? defaultVal : val;
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
*
|
||||
|
|
@ -134,6 +146,22 @@ public class PropertyUtils {
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
*
|
||||
* @param key property name
|
||||
* @param defaultValue default value
|
||||
* @return property value
|
||||
*/
|
||||
public static Boolean getBoolean(String key, boolean defaultValue) {
|
||||
String value = properties.getProperty(key.trim());
|
||||
if(null != value){
|
||||
return Boolean.parseBoolean(value);
|
||||
}
|
||||
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* get property long value
|
||||
* @param key key
|
||||
|
|
|
|||
|
|
@ -89,45 +89,6 @@ public class ResInfo {
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* get heart beat info
|
||||
* @param now now
|
||||
* @return heart beat info
|
||||
*/
|
||||
public static String getHeartBeatInfo(Date now){
|
||||
return buildHeartbeatForZKInfo(OSUtils.getHost(),
|
||||
OSUtils.getProcessID(),
|
||||
OSUtils.cpuUsage(),
|
||||
OSUtils.memoryUsage(),
|
||||
OSUtils.loadAverage(),
|
||||
DateUtils.dateToString(now),
|
||||
DateUtils.dateToString(now));
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* build heartbeat info for zk
|
||||
* @param host host
|
||||
* @param port port
|
||||
* @param cpuUsage cpu usage
|
||||
* @param memoryUsage memory usage
|
||||
* @param loadAverage load average
|
||||
* @param createTime create time
|
||||
* @param lastHeartbeatTime last heartbeat time
|
||||
* @return heartbeat info
|
||||
*/
|
||||
public static String buildHeartbeatForZKInfo(String host , int port ,
|
||||
double cpuUsage , double memoryUsage,double loadAverage,
|
||||
String createTime,String lastHeartbeatTime){
|
||||
|
||||
return host + Constants.COMMA + port + Constants.COMMA
|
||||
+ cpuUsage + Constants.COMMA
|
||||
+ memoryUsage + Constants.COMMA
|
||||
+ loadAverage + Constants.COMMA
|
||||
+ createTime + Constants.COMMA
|
||||
+ lastHeartbeatTime;
|
||||
}
|
||||
|
||||
/**
|
||||
* parse heartbeat info for zk
|
||||
* @param heartBeatInfo heartbeat info
|
||||
|
|
@ -143,13 +104,11 @@ public class ResInfo {
|
|||
|
||||
}
|
||||
Server masterServer = new Server();
|
||||
masterServer.setHost(masterArray[0]);
|
||||
masterServer.setPort(Integer.parseInt(masterArray[1]));
|
||||
masterServer.setResInfo(getResInfoJson(Double.parseDouble(masterArray[2]),
|
||||
Double.parseDouble(masterArray[3]),
|
||||
Double.parseDouble(masterArray[4])));
|
||||
masterServer.setCreateTime(DateUtils.stringToDate(masterArray[5]));
|
||||
masterServer.setLastHeartbeatTime(DateUtils.stringToDate(masterArray[6]));
|
||||
masterServer.setResInfo(getResInfoJson(Double.parseDouble(masterArray[0]),
|
||||
Double.parseDouble(masterArray[1]),
|
||||
Double.parseDouble(masterArray[2])));
|
||||
masterServer.setCreateTime(DateUtils.stringToDate(masterArray[3]));
|
||||
masterServer.setLastHeartbeatTime(DateUtils.stringToDate(masterArray[4]));
|
||||
return masterServer;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -27,9 +27,9 @@ public class DependentDateUtils {
|
|||
|
||||
/**
|
||||
* get last day interval list
|
||||
* @param businessDate
|
||||
* @param hourNumber
|
||||
* @return
|
||||
* @param businessDate businessDate
|
||||
* @param hourNumber hourNumber
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getLastHoursInterval(Date businessDate, int hourNumber){
|
||||
List<DateInterval> dateIntervals = new ArrayList<>();
|
||||
|
|
@ -44,8 +44,8 @@ public class DependentDateUtils {
|
|||
|
||||
/**
|
||||
* get today day interval list
|
||||
* @param businessDate
|
||||
* @return
|
||||
* @param businessDate businessDate
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getTodayInterval(Date businessDate){
|
||||
|
||||
|
|
@ -59,9 +59,9 @@ public class DependentDateUtils {
|
|||
|
||||
/**
|
||||
* get last day interval list
|
||||
* @param businessDate
|
||||
* @param someDay
|
||||
* @return
|
||||
* @param businessDate businessDate
|
||||
* @param someDay someDay
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getLastDayInterval(Date businessDate, int someDay){
|
||||
|
||||
|
|
@ -78,8 +78,8 @@ public class DependentDateUtils {
|
|||
|
||||
/**
|
||||
* get interval between this month first day and businessDate
|
||||
* @param businessDate
|
||||
* @return
|
||||
* @param businessDate businessDate
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getThisMonthInterval(Date businessDate) {
|
||||
Date firstDay = DateUtils.getFirstDayOfMonth(businessDate);
|
||||
|
|
@ -88,8 +88,8 @@ public class DependentDateUtils {
|
|||
|
||||
/**
|
||||
* get interval between last month first day and last day
|
||||
* @param businessDate
|
||||
* @return
|
||||
* @param businessDate businessDate
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getLastMonthInterval(Date businessDate) {
|
||||
|
||||
|
|
@ -102,11 +102,12 @@ public class DependentDateUtils {
|
|||
|
||||
/**
|
||||
* get interval on first/last day of the last month
|
||||
* @param businessDate
|
||||
* @param isBeginDay
|
||||
* @return
|
||||
* @param businessDate businessDate
|
||||
* @param isBeginDay isBeginDay
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getLastMonthBeginInterval(Date businessDate, boolean isBeginDay) {
|
||||
public static List<DateInterval> getLastMonthBeginInterval(Date businessDate,
|
||||
boolean isBeginDay) {
|
||||
|
||||
Date firstDayThisMonth = DateUtils.getFirstDayOfMonth(businessDate);
|
||||
Date lastDay = DateUtils.getSomeDay(firstDayThisMonth, -1);
|
||||
|
|
@ -120,8 +121,8 @@ public class DependentDateUtils {
|
|||
|
||||
/**
|
||||
* get interval between monday to businessDate of this week
|
||||
* @param businessDate
|
||||
* @return
|
||||
* @param businessDate businessDate
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getThisWeekInterval(Date businessDate) {
|
||||
Date mondayThisWeek = DateUtils.getMonday(businessDate);
|
||||
|
|
@ -131,8 +132,8 @@ public class DependentDateUtils {
|
|||
/**
|
||||
* get interval between monday to sunday of last week
|
||||
* default set monday the first day of week
|
||||
* @param businessDate
|
||||
* @return
|
||||
* @param businessDate businessDate
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getLastWeekInterval(Date businessDate) {
|
||||
Date mondayThisWeek = DateUtils.getMonday(businessDate);
|
||||
|
|
@ -144,9 +145,9 @@ public class DependentDateUtils {
|
|||
/**
|
||||
* get interval on the day of last week
|
||||
* default set monday the first day of week
|
||||
* @param businessDate
|
||||
* @param businessDate businessDate
|
||||
* @param dayOfWeek monday:1,tuesday:2,wednesday:3,thursday:4,friday:5,saturday:6,sunday:7
|
||||
* @return
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getLastWeekOneDayInterval(Date businessDate, int dayOfWeek) {
|
||||
Date mondayThisWeek = DateUtils.getMonday(businessDate);
|
||||
|
|
@ -156,6 +157,12 @@ public class DependentDateUtils {
|
|||
return getDateIntervalListBetweenTwoDates(destDay, destDay);
|
||||
}
|
||||
|
||||
/**
|
||||
* get date interval list between two dates
|
||||
* @param firstDay firstDay
|
||||
* @param lastDay lastDay
|
||||
* @return DateInterval list
|
||||
*/
|
||||
public static List<DateInterval> getDateIntervalListBetweenTwoDates(Date firstDay, Date lastDay) {
|
||||
List<DateInterval> dateIntervals = new ArrayList<>();
|
||||
while(!firstDay.after(lastDay)){
|
||||
|
|
|
|||
|
|
@ -37,17 +37,20 @@ public class PlaceholderUtils {
|
|||
* The suffix of the position to be replaced
|
||||
*/
|
||||
public static final String placeholderSuffix = "}";
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Replaces all placeholders of format {@code ${name}} with the value returned
|
||||
* from the supplied {@link PropertyPlaceholderHelper.PlaceholderResolver}.
|
||||
*
|
||||
* @param value the value containing the placeholders to be replaced
|
||||
* @param paramsMap placeholder data dictionary
|
||||
* @param value the value containing the placeholders to be replaced
|
||||
* @param paramsMap placeholder data dictionary
|
||||
* @param ignoreUnresolvablePlaceholders ignoreUnresolvablePlaceholders
|
||||
* @return the supplied value with placeholders replaced inline
|
||||
*/
|
||||
public static String replacePlaceholders(String value, Map<String, String> paramsMap, boolean ignoreUnresolvablePlaceholders) {
|
||||
public static String replacePlaceholders(String value,
|
||||
Map<String, String> paramsMap,
|
||||
boolean ignoreUnresolvablePlaceholders) {
|
||||
//replacement tool, parameter key will be replaced by value,if can't match , will throw an exception
|
||||
PropertyPlaceholderHelper strictHelper = getPropertyPlaceholderHelper(false);
|
||||
|
||||
|
|
|
|||
|
|
@ -15,80 +15,53 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
#task queue implementation, default "zookeeper"
|
||||
# task queue implementation, default "zookeeper" TODO
|
||||
dolphinscheduler.queue.impl=zookeeper
|
||||
|
||||
#zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
|
||||
zookeeper.quorum=localhost:2181
|
||||
# resource storage type : HDFS,S3,NONE
|
||||
resource.storage.type=HDFS
|
||||
|
||||
#dolphinscheduler root directory
|
||||
zookeeper.dolphinscheduler.root=/dolphinscheduler
|
||||
|
||||
#dolphinscheduler failover directory
|
||||
zookeeper.session.timeout=300
|
||||
zookeeper.connection.timeout=300
|
||||
zookeeper.retry.base.sleep=100
|
||||
zookeeper.retry.max.sleep=30000
|
||||
zookeeper.retry.maxtime=5
|
||||
|
||||
# resource upload startup type : HDFS,S3,NONE
|
||||
res.upload.startup.type=NONE
|
||||
|
||||
# Users who have permission to create directories under the HDFS root path
|
||||
hdfs.root.user=hdfs
|
||||
|
||||
# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
|
||||
data.store2hdfs.basepath=/dolphinscheduler
|
||||
|
||||
# user data directory path, self configuration, please make sure the directory exists and have read write permissions
|
||||
data.basedir.path=/tmp/dolphinscheduler
|
||||
|
||||
# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
|
||||
data.download.basedir.path=/tmp/dolphinscheduler/download
|
||||
|
||||
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
|
||||
process.exec.basepath=/tmp/dolphinscheduler/exec
|
||||
# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
|
||||
#resource.upload.path=/dolphinscheduler
|
||||
|
||||
# user data local directory path, please make sure the directory exists and have read write permissions
|
||||
#data.basedir.path=/tmp/dolphinscheduler
|
||||
|
||||
# whether kerberos starts
|
||||
hadoop.security.authentication.startup.state=false
|
||||
#hadoop.security.authentication.startup.state=false
|
||||
|
||||
# java.security.krb5.conf path
|
||||
java.security.krb5.conf.path=/opt/krb5.conf
|
||||
#java.security.krb5.conf.path=/opt/krb5.conf
|
||||
|
||||
# loginUserFromKeytab user
|
||||
login.user.keytab.username=hdfs-mycluster@ESZ.COM
|
||||
#login.user.keytab.username=hdfs-mycluster@ESZ.COM
|
||||
|
||||
# loginUserFromKeytab path
|
||||
login.user.keytab.path=/opt/hdfs.headless.keytab
|
||||
|
||||
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
|
||||
dolphinscheduler.env.path=/opt/dolphinscheduler_env.sh
|
||||
#login.user.keytab.path=/opt/hdfs.headless.keytab
|
||||
|
||||
#resource.view.suffixs
|
||||
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
|
||||
#resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
|
||||
|
||||
# is development state? default "false"
|
||||
development.state=true
|
||||
# if resource.storage.type=HDFS, the user need to have permission to create directories under the HDFS root path
|
||||
hdfs.root.user=hdfs
|
||||
|
||||
# if resource.storage.type=S3,the value like: s3a://dolphinscheduler ; if resource.storage.type=HDFS, When namenode HA is enabled, you need to copy core-site.xml and hdfs-site.xml to conf dir
|
||||
fs.defaultFS=hdfs://l:8020
|
||||
|
||||
# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
|
||||
# to the conf directory,support s3,for example : s3a://dolphinscheduler
|
||||
fs.defaultFS=hdfs://mycluster:8020
|
||||
# if resource.storage.type=S3,s3 endpoint
|
||||
#fs.s3a.endpoint=http://192.168.199.91:9010
|
||||
|
||||
# s3 need,s3 endpoint
|
||||
fs.s3a.endpoint=http://192.168.199.91:9010
|
||||
# if resource.storage.type=S3,s3 access key
|
||||
#fs.s3a.access.key=A3DXS30FO22544RE
|
||||
|
||||
# s3 need,s3 access key
|
||||
fs.s3a.access.key=A3DXS30FO22544RE
|
||||
# if resource.storage.type=S3,s3 secret key
|
||||
#fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
|
||||
|
||||
# s3 need,s3 secret key
|
||||
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
|
||||
|
||||
#resourcemanager ha note this need ips , this empty if single
|
||||
# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty TODO
|
||||
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
|
||||
|
||||
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
|
||||
# If resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
|
||||
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
|
||||
|
||||
|
||||
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions, TODO
|
||||
#dolphinscheduler.env.path=env/dolphinscheduler_env.sh
|
||||
|
|
|
|||
|
|
@ -1,169 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
|
||||
<property name="log.base" value="logs"/>
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
|
||||
<!-- master server logback config start -->
|
||||
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-master.log</file>
|
||||
<!--<filter class="org.apache.dolphinscheduler.common.log.MasterLogFilter">
|
||||
<level>INFO</level>
|
||||
</filter>-->
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>168</maxHistory>
|
||||
<maxFileSize>200MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
<!-- master server logback config end -->
|
||||
|
||||
|
||||
<!-- worker server logback config start -->
|
||||
<conversionRule conversionWord="messsage"
|
||||
converterClass="org.apache.dolphinscheduler.common.log.SensitiveDataConverter"/>
|
||||
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<filter class="org.apache.dolphinscheduler.common.log.TaskLogFilter"/>
|
||||
<Discriminator class="org.apache.dolphinscheduler.common.log.TaskLogDiscriminator">
|
||||
<key>taskAppId</key>
|
||||
<logBase>${log.base}</logBase>
|
||||
</Discriminator>
|
||||
<sift>
|
||||
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
|
||||
<file>${log.base}/${taskAppId}.log</file>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
<append>true</append>
|
||||
</appender>
|
||||
</sift>
|
||||
</appender>
|
||||
<appender name="WORKERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-worker.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<filter class="org.apache.dolphinscheduler.common.log.WorkerLogFilter"/>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-worker.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>168</maxHistory>
|
||||
<maxFileSize>200MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %messsage%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
<!-- worker server logback config end -->
|
||||
|
||||
|
||||
<!-- alert server logback config start -->
|
||||
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-alert.log</file>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>20</maxHistory>
|
||||
<maxFileSize>64MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
<!-- alert server logback config end -->
|
||||
|
||||
|
||||
<!-- api server logback config start -->
|
||||
<appender name="APILOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-api-server.log</file>
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>168</maxHistory>
|
||||
<maxFileSize>64MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
<!-- api server logback config end -->
|
||||
|
||||
<logger name="org.apache.zookeeper" level="WARN"/>
|
||||
<logger name="org.apache.hbase" level="WARN"/>
|
||||
<logger name="org.apache.hadoop" level="WARN"/>
|
||||
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT"/>
|
||||
|
||||
<if condition='p("server").contains("master-server")'>
|
||||
<then>
|
||||
<appender-ref ref="MASTERLOGFILE"/>
|
||||
</then>
|
||||
</if>
|
||||
<if condition='p("server").contains("worker-server")'>
|
||||
<then>
|
||||
<appender-ref ref="TASKLOGFILE"/>
|
||||
<appender-ref ref="WORKERLOGFILE"/>
|
||||
</then>
|
||||
</if>
|
||||
<if condition='p("server").contains("alert-server")'>
|
||||
<then>
|
||||
<appender-ref ref="ALERTLOGFILE"/>
|
||||
</then>
|
||||
</if>
|
||||
<if condition='p("server").contains("api-server")'>
|
||||
<then>
|
||||
<appender-ref ref="APILOGFILE"/>
|
||||
</then>
|
||||
</if>
|
||||
</root>
|
||||
|
||||
</configuration>
|
||||
|
|
@ -35,11 +35,6 @@ public class CommonUtilsTest {
|
|||
Assert.assertTrue(true);
|
||||
}
|
||||
@Test
|
||||
public void getQueueImplValue(){
|
||||
logger.info(CommonUtils.getQueueImplValue());
|
||||
Assert.assertTrue(true);
|
||||
}
|
||||
@Test
|
||||
public void isDevelopMode() {
|
||||
logger.info("develop mode: {}",CommonUtils.isDevelopMode());
|
||||
Assert.assertTrue(true);
|
||||
|
|
|
|||
|
|
@ -22,9 +22,7 @@ import org.apache.dolphinscheduler.common.utils.CollectionUtils;
|
|||
import org.apache.dolphinscheduler.common.utils.DateUtils;
|
||||
import org.apache.dolphinscheduler.common.utils.StringUtils;
|
||||
import org.apache.dolphinscheduler.dao.entity.TaskRecord;
|
||||
import org.apache.commons.configuration.Configuration;
|
||||
import org.apache.commons.configuration.ConfigurationException;
|
||||
import org.apache.commons.configuration.PropertiesConfiguration;
|
||||
import org.apache.dolphinscheduler.dao.utils.PropertyUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
|
@ -42,26 +40,12 @@ public class TaskRecordDao {
|
|||
|
||||
private static Logger logger = LoggerFactory.getLogger(TaskRecordDao.class.getName());
|
||||
|
||||
/**
|
||||
* load conf
|
||||
*/
|
||||
private static Configuration conf;
|
||||
|
||||
static {
|
||||
try {
|
||||
conf = new PropertiesConfiguration(Constants.APPLICATION_PROPERTIES);
|
||||
}catch (ConfigurationException e){
|
||||
logger.error("load configuration exception",e);
|
||||
System.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* get task record flag
|
||||
* @return whether startup taskrecord
|
||||
*/
|
||||
public static boolean getTaskRecordFlag(){
|
||||
return conf.getBoolean(Constants.TASK_RECORD_FLAG);
|
||||
return PropertyUtils.getBoolean(Constants.TASK_RECORD_FLAG,false);
|
||||
}
|
||||
/**
|
||||
* create connection
|
||||
|
|
@ -72,9 +56,9 @@ public class TaskRecordDao {
|
|||
return null;
|
||||
}
|
||||
String driver = "com.mysql.jdbc.Driver";
|
||||
String url = conf.getString(Constants.TASK_RECORD_URL);
|
||||
String username = conf.getString(Constants.TASK_RECORD_USER);
|
||||
String password = conf.getString(Constants.TASK_RECORD_PWD);
|
||||
String url = PropertyUtils.getString(Constants.TASK_RECORD_URL);
|
||||
String username = PropertyUtils.getString(Constants.TASK_RECORD_USER);
|
||||
String password = PropertyUtils.getString(Constants.TASK_RECORD_PWD);
|
||||
Connection conn = null;
|
||||
try {
|
||||
//classLoader,load driver
|
||||
|
|
|
|||
|
|
@ -57,6 +57,7 @@ public abstract class BaseDataSource {
|
|||
public void setPrincipal(String principal) {
|
||||
this.principal = principal;
|
||||
}
|
||||
|
||||
/**
|
||||
* test whether the data source can be connected successfully
|
||||
* @throws Exception
|
||||
|
|
|
|||
|
|
@ -58,32 +58,7 @@ public class ConnectionFactory extends SpringConnectionFactory{
|
|||
*/
|
||||
public static DruidDataSource getDataSource() {
|
||||
|
||||
DruidDataSource druidDataSource = new DruidDataSource();
|
||||
|
||||
druidDataSource.setDriverClassName(conf.getString(Constants.SPRING_DATASOURCE_DRIVER_CLASS_NAME));
|
||||
druidDataSource.setUrl(conf.getString(Constants.SPRING_DATASOURCE_URL));
|
||||
druidDataSource.setUsername(conf.getString(Constants.SPRING_DATASOURCE_USERNAME));
|
||||
druidDataSource.setPassword(conf.getString(Constants.SPRING_DATASOURCE_PASSWORD));
|
||||
druidDataSource.setValidationQuery(conf.getString(Constants.SPRING_DATASOURCE_VALIDATION_QUERY));
|
||||
|
||||
druidDataSource.setPoolPreparedStatements(conf.getBoolean(Constants.SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS));
|
||||
druidDataSource.setTestWhileIdle(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_WHILE_IDLE));
|
||||
druidDataSource.setTestOnBorrow(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_BORROW));
|
||||
druidDataSource.setTestOnReturn(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_RETURN));
|
||||
druidDataSource.setKeepAlive(conf.getBoolean(Constants.SPRING_DATASOURCE_KEEP_ALIVE));
|
||||
|
||||
druidDataSource.setMinIdle(conf.getInt(Constants.SPRING_DATASOURCE_MIN_IDLE));
|
||||
druidDataSource.setMaxActive(conf.getInt(Constants.SPRING_DATASOURCE_MAX_ACTIVE));
|
||||
druidDataSource.setMaxWait(conf.getInt(Constants.SPRING_DATASOURCE_MAX_WAIT));
|
||||
druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(conf.getInt(Constants.SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE));
|
||||
druidDataSource.setInitialSize(conf.getInt(Constants.SPRING_DATASOURCE_INITIAL_SIZE));
|
||||
druidDataSource.setTimeBetweenEvictionRunsMillis(conf.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS));
|
||||
druidDataSource.setTimeBetweenConnectErrorMillis(conf.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS));
|
||||
druidDataSource.setMinEvictableIdleTimeMillis(conf.getLong(Constants.SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS));
|
||||
druidDataSource.setValidationQueryTimeout(conf.getInt(Constants.SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT));
|
||||
//auto commit
|
||||
druidDataSource.setDefaultAutoCommit(conf.getBoolean(Constants.SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT));
|
||||
|
||||
DruidDataSource druidDataSource = dataSource();
|
||||
return druidDataSource;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ public class DB2ServerDataSource extends BaseDataSource {
|
|||
|
||||
/**
|
||||
* gets the JDBC url for the data source connection
|
||||
* @return
|
||||
* @return jdbc url
|
||||
*/
|
||||
@Override
|
||||
public String getJdbcUrl() {
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ public class HiveDataSource extends BaseDataSource {
|
|||
|
||||
/**
|
||||
* gets the JDBC url for the data source connection
|
||||
* @return
|
||||
* @return jdbc url
|
||||
*/
|
||||
@Override
|
||||
public String getJdbcUrl() {
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ public class MySQLDataSource extends BaseDataSource {
|
|||
|
||||
/**
|
||||
* gets the JDBC url for the data source connection
|
||||
* @return
|
||||
* @return jdbc url
|
||||
*/
|
||||
@Override
|
||||
public String getJdbcUrl() {
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ public class OracleDataSource extends BaseDataSource {
|
|||
|
||||
/**
|
||||
* gets the JDBC url for the data source connection
|
||||
* @return
|
||||
* @return jdbc url
|
||||
*/
|
||||
@Override
|
||||
public String getJdbcUrl() {
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ public class PostgreDataSource extends BaseDataSource {
|
|||
|
||||
/**
|
||||
* gets the JDBC url for the data source connection
|
||||
* @return
|
||||
* @return jdbc url
|
||||
*/
|
||||
@Override
|
||||
public String getJdbcUrl() {
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ public class SQLServerDataSource extends BaseDataSource {
|
|||
|
||||
/**
|
||||
* gets the JDBC url for the data source connection
|
||||
* @return
|
||||
* @return jdbc url
|
||||
*/
|
||||
@Override
|
||||
public String getJdbcUrl() {
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ public class SparkDataSource extends BaseDataSource {
|
|||
|
||||
/**
|
||||
* gets the JDBC url for the data source connection
|
||||
* @return
|
||||
* @return jdbc url
|
||||
*/
|
||||
@Override
|
||||
public String getJdbcUrl() {
|
||||
|
|
|
|||
|
|
@ -17,20 +17,26 @@
|
|||
package org.apache.dolphinscheduler.dao.datasource;
|
||||
|
||||
import com.alibaba.druid.pool.DruidDataSource;
|
||||
import com.baomidou.mybatisplus.annotation.IdType;
|
||||
import com.baomidou.mybatisplus.core.MybatisConfiguration;
|
||||
import com.baomidou.mybatisplus.core.config.GlobalConfig;
|
||||
import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
|
||||
import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
|
||||
import org.apache.commons.configuration.ConfigurationException;
|
||||
import org.apache.commons.configuration.PropertiesConfiguration;
|
||||
import org.apache.dolphinscheduler.common.Constants;
|
||||
import org.apache.dolphinscheduler.dao.utils.PropertyUtils;
|
||||
import org.apache.ibatis.session.SqlSession;
|
||||
import org.apache.ibatis.session.SqlSessionFactory;
|
||||
import org.apache.ibatis.type.JdbcType;
|
||||
import org.mybatis.spring.SqlSessionTemplate;
|
||||
import org.mybatis.spring.annotation.MapperScan;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
|
||||
import org.springframework.core.io.support.ResourcePatternResolver;
|
||||
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
|
||||
|
||||
|
||||
|
|
@ -43,19 +49,6 @@ public class SpringConnectionFactory {
|
|||
|
||||
private static final Logger logger = LoggerFactory.getLogger(SpringConnectionFactory.class);
|
||||
|
||||
/**
|
||||
* Load configuration file
|
||||
*/
|
||||
protected static org.apache.commons.configuration.Configuration conf;
|
||||
|
||||
static {
|
||||
try {
|
||||
conf = new PropertiesConfiguration(Constants.APPLICATION_PROPERTIES);
|
||||
} catch (ConfigurationException e) {
|
||||
logger.error("load configuration exception", e);
|
||||
System.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* pagination interceptor
|
||||
|
|
@ -70,35 +63,34 @@ public class SpringConnectionFactory {
|
|||
* get the data source
|
||||
* @return druid dataSource
|
||||
*/
|
||||
@Bean
|
||||
public DruidDataSource dataSource() {
|
||||
@Bean(destroyMethod="")
|
||||
public static DruidDataSource dataSource() {
|
||||
|
||||
DruidDataSource druidDataSource = new DruidDataSource();
|
||||
|
||||
druidDataSource.setDriverClassName(conf.getString(Constants.SPRING_DATASOURCE_DRIVER_CLASS_NAME));
|
||||
druidDataSource.setUrl(conf.getString(Constants.SPRING_DATASOURCE_URL));
|
||||
druidDataSource.setUsername(conf.getString(Constants.SPRING_DATASOURCE_USERNAME));
|
||||
druidDataSource.setPassword(conf.getString(Constants.SPRING_DATASOURCE_PASSWORD));
|
||||
druidDataSource.setValidationQuery(conf.getString(Constants.SPRING_DATASOURCE_VALIDATION_QUERY));
|
||||
druidDataSource.setDriverClassName(PropertyUtils.getString(Constants.SPRING_DATASOURCE_DRIVER_CLASS_NAME));
|
||||
druidDataSource.setUrl(PropertyUtils.getString(Constants.SPRING_DATASOURCE_URL));
|
||||
druidDataSource.setUsername(PropertyUtils.getString(Constants.SPRING_DATASOURCE_USERNAME));
|
||||
druidDataSource.setPassword(PropertyUtils.getString(Constants.SPRING_DATASOURCE_PASSWORD));
|
||||
druidDataSource.setValidationQuery(PropertyUtils.getString(Constants.SPRING_DATASOURCE_VALIDATION_QUERY,"SELECT 1"));
|
||||
|
||||
druidDataSource.setPoolPreparedStatements(conf.getBoolean(Constants.SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS));
|
||||
druidDataSource.setTestWhileIdle(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_WHILE_IDLE));
|
||||
druidDataSource.setTestOnBorrow(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_BORROW));
|
||||
druidDataSource.setTestOnReturn(conf.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_RETURN));
|
||||
druidDataSource.setKeepAlive(conf.getBoolean(Constants.SPRING_DATASOURCE_KEEP_ALIVE));
|
||||
druidDataSource.setPoolPreparedStatements(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS,true));
|
||||
druidDataSource.setTestWhileIdle(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_TEST_WHILE_IDLE,true));
|
||||
druidDataSource.setTestOnBorrow(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_BORROW,true));
|
||||
druidDataSource.setTestOnReturn(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_TEST_ON_RETURN,true));
|
||||
druidDataSource.setKeepAlive(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_KEEP_ALIVE,true));
|
||||
|
||||
druidDataSource.setMinIdle(conf.getInt(Constants.SPRING_DATASOURCE_MIN_IDLE));
|
||||
druidDataSource.setMaxActive(conf.getInt(Constants.SPRING_DATASOURCE_MAX_ACTIVE));
|
||||
druidDataSource.setMaxWait(conf.getInt(Constants.SPRING_DATASOURCE_MAX_WAIT));
|
||||
druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(conf.getInt(Constants.SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE));
|
||||
druidDataSource.setInitialSize(conf.getInt(Constants.SPRING_DATASOURCE_INITIAL_SIZE));
|
||||
druidDataSource.setTimeBetweenEvictionRunsMillis(conf.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS));
|
||||
druidDataSource.setTimeBetweenConnectErrorMillis(conf.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS));
|
||||
druidDataSource.setMinEvictableIdleTimeMillis(conf.getLong(Constants.SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS));
|
||||
druidDataSource.setValidationQueryTimeout(conf.getInt(Constants.SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT));
|
||||
druidDataSource.setMinIdle(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_MIN_IDLE,5));
|
||||
druidDataSource.setMaxActive(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_MAX_ACTIVE,50));
|
||||
druidDataSource.setMaxWait(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_MAX_WAIT,60000));
|
||||
druidDataSource.setMaxPoolPreparedStatementPerConnectionSize(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE,20));
|
||||
druidDataSource.setInitialSize(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_INITIAL_SIZE,5));
|
||||
druidDataSource.setTimeBetweenEvictionRunsMillis(PropertyUtils.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS,60000));
|
||||
druidDataSource.setTimeBetweenConnectErrorMillis(PropertyUtils.getLong(Constants.SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS,60000));
|
||||
druidDataSource.setMinEvictableIdleTimeMillis(PropertyUtils.getLong(Constants.SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS,300000));
|
||||
druidDataSource.setValidationQueryTimeout(PropertyUtils.getInt(Constants.SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT,3));
|
||||
//auto commit
|
||||
druidDataSource.setDefaultAutoCommit(conf.getBoolean(Constants.SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT));
|
||||
|
||||
druidDataSource.setDefaultAutoCommit(PropertyUtils.getBoolean(Constants.SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT,true));
|
||||
return druidDataSource;
|
||||
}
|
||||
|
||||
|
|
@ -119,13 +111,23 @@ public class SpringConnectionFactory {
|
|||
@Bean
|
||||
public SqlSessionFactory sqlSessionFactory() throws Exception {
|
||||
MybatisConfiguration configuration = new MybatisConfiguration();
|
||||
configuration.addMappers("org.apache.dolphinscheduler.dao.mapper");
|
||||
configuration.setMapUnderscoreToCamelCase(true);
|
||||
configuration.setCacheEnabled(false);
|
||||
configuration.setCallSettersOnNulls(true);
|
||||
configuration.setJdbcTypeForNull(JdbcType.NULL);
|
||||
configuration.addInterceptor(paginationInterceptor());
|
||||
|
||||
MybatisSqlSessionFactoryBean sqlSessionFactoryBean = new MybatisSqlSessionFactoryBean();
|
||||
sqlSessionFactoryBean.setConfiguration(configuration);
|
||||
sqlSessionFactoryBean.setDataSource(dataSource());
|
||||
|
||||
GlobalConfig.DbConfig dbConfig = new GlobalConfig.DbConfig();
|
||||
dbConfig.setIdType(IdType.AUTO);
|
||||
GlobalConfig globalConfig = new GlobalConfig();
|
||||
globalConfig.setDbConfig(dbConfig);
|
||||
sqlSessionFactoryBean.setGlobalConfig(globalConfig);
|
||||
sqlSessionFactoryBean.setTypeAliasesPackage("org.apache.dolphinscheduler.dao.entity");
|
||||
ResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
|
||||
sqlSessionFactoryBean.setMapperLocations(resolver.getResources("org/apache/dolphinscheduler/dao/mapper/*Mapper.xml"));
|
||||
sqlSessionFactoryBean.setTypeEnumsPackage("org.apache.dolphinscheduler.*.enums");
|
||||
return sqlSessionFactoryBean.getObject();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -108,13 +108,11 @@ public class Command {
|
|||
@TableField("update_time")
|
||||
private Date updateTime;
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* worker group
|
||||
*/
|
||||
@TableField("worker_group_id")
|
||||
private int workerGroupId;
|
||||
|
||||
@TableField(exist = false)
|
||||
private String workerGroup;
|
||||
|
||||
public Command() {
|
||||
this.taskDependType = TaskDependType.TASK_POST;
|
||||
|
|
@ -254,13 +252,12 @@ public class Command {
|
|||
this.updateTime = updateTime;
|
||||
}
|
||||
|
||||
|
||||
public int getWorkerGroupId() {
|
||||
return workerGroupId;
|
||||
public String getWorkerGroup() {
|
||||
return workerGroup;
|
||||
}
|
||||
|
||||
public void setWorkerGroupId(int workerGroupId) {
|
||||
this.workerGroupId = workerGroupId;
|
||||
public void setWorkerGroup(String workerGroup) {
|
||||
this.workerGroup = workerGroup;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -283,7 +280,7 @@ public class Command {
|
|||
if (executorId != command.executorId) {
|
||||
return false;
|
||||
}
|
||||
if (workerGroupId != command.workerGroupId) {
|
||||
if (workerGroup != null ? workerGroup.equals(command.workerGroup) : command.workerGroup == null) {
|
||||
return false;
|
||||
}
|
||||
if (commandType != command.commandType) {
|
||||
|
|
@ -332,10 +329,9 @@ public class Command {
|
|||
result = 31 * result + (startTime != null ? startTime.hashCode() : 0);
|
||||
result = 31 * result + (processInstancePriority != null ? processInstancePriority.hashCode() : 0);
|
||||
result = 31 * result + (updateTime != null ? updateTime.hashCode() : 0);
|
||||
result = 31 * result + workerGroupId;
|
||||
result = 31 * result + (workerGroup != null ? workerGroup.hashCode() : 0);
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Command{" +
|
||||
|
|
@ -352,7 +348,7 @@ public class Command {
|
|||
", startTime=" + startTime +
|
||||
", processInstancePriority=" + processInstancePriority +
|
||||
", updateTime=" + updateTime +
|
||||
", workerGroupId=" + workerGroupId +
|
||||
", workerGroup='" + workerGroup + '\'' +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -189,9 +189,9 @@ public class ProcessInstance {
|
|||
private Priority processInstancePriority;
|
||||
|
||||
/**
|
||||
* worker group id
|
||||
* worker group
|
||||
*/
|
||||
private int workerGroupId;
|
||||
private String workerGroup;
|
||||
|
||||
/**
|
||||
* process timeout for warning
|
||||
|
|
@ -203,12 +203,6 @@ public class ProcessInstance {
|
|||
*/
|
||||
private int tenantId;
|
||||
|
||||
/**
|
||||
* worker group name. for api.
|
||||
*/
|
||||
@TableField(exist = false)
|
||||
private String workerGroupName;
|
||||
|
||||
/**
|
||||
* receivers for api
|
||||
*/
|
||||
|
|
@ -493,7 +487,7 @@ public class ProcessInstance {
|
|||
* @return whether complement data
|
||||
*/
|
||||
public Boolean isComplementData(){
|
||||
if(!StringUtils.isNotEmpty(this.historyCmd)){
|
||||
if(StringUtils.isEmpty(this.historyCmd)){
|
||||
return false;
|
||||
}
|
||||
return historyCmd.startsWith(CommandType.COMPLEMENT_DATA.toString());
|
||||
|
|
@ -527,12 +521,12 @@ public class ProcessInstance {
|
|||
this.duration = duration;
|
||||
}
|
||||
|
||||
public int getWorkerGroupId() {
|
||||
return workerGroupId;
|
||||
public String getWorkerGroup() {
|
||||
return workerGroup;
|
||||
}
|
||||
|
||||
public void setWorkerGroupId(int workerGroupId) {
|
||||
this.workerGroupId = workerGroupId;
|
||||
public void setWorkerGroup(String workerGroup) {
|
||||
this.workerGroup = workerGroup;
|
||||
}
|
||||
|
||||
public int getTimeout() {
|
||||
|
|
@ -552,14 +546,6 @@ public class ProcessInstance {
|
|||
return this.tenantId ;
|
||||
}
|
||||
|
||||
public String getWorkerGroupName() {
|
||||
return workerGroupName;
|
||||
}
|
||||
|
||||
public void setWorkerGroupName(String workerGroupName) {
|
||||
this.workerGroupName = workerGroupName;
|
||||
}
|
||||
|
||||
public String getReceivers() {
|
||||
return receivers;
|
||||
}
|
||||
|
|
@ -610,10 +596,9 @@ public class ProcessInstance {
|
|||
", dependenceScheduleTimes='" + dependenceScheduleTimes + '\'' +
|
||||
", duration=" + duration +
|
||||
", processInstancePriority=" + processInstancePriority +
|
||||
", workerGroupId=" + workerGroupId +
|
||||
", workerGroup='" + workerGroup + '\'' +
|
||||
", timeout=" + timeout +
|
||||
", tenantId=" + tenantId +
|
||||
", workerGroupName='" + workerGroupName + '\'' +
|
||||
", receivers='" + receivers + '\'' +
|
||||
", receiversCc='" + receiversCc + '\'' +
|
||||
'}';
|
||||
|
|
|
|||
|
|
@ -122,9 +122,9 @@ public class Schedule {
|
|||
private Priority processInstancePriority;
|
||||
|
||||
/**
|
||||
* worker group id
|
||||
* worker group
|
||||
*/
|
||||
private int workerGroupId;
|
||||
private String workerGroup;
|
||||
|
||||
public int getWarningGroupId() {
|
||||
return warningGroupId;
|
||||
|
|
@ -265,13 +265,12 @@ public class Schedule {
|
|||
this.processInstancePriority = processInstancePriority;
|
||||
}
|
||||
|
||||
|
||||
public int getWorkerGroupId() {
|
||||
return workerGroupId;
|
||||
public String getWorkerGroup() {
|
||||
return workerGroup;
|
||||
}
|
||||
|
||||
public void setWorkerGroupId(int workerGroupId) {
|
||||
this.workerGroupId = workerGroupId;
|
||||
public void setWorkerGroup(String workerGroup) {
|
||||
this.workerGroup = workerGroup;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -294,7 +293,7 @@ public class Schedule {
|
|||
", releaseState=" + releaseState +
|
||||
", warningGroupId=" + warningGroupId +
|
||||
", processInstancePriority=" + processInstancePriority +
|
||||
", workerGroupId=" + workerGroupId +
|
||||
", workerGroup='" + workerGroup + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -27,13 +27,14 @@ import com.baomidou.mybatisplus.annotation.IdType;
|
|||
import com.baomidou.mybatisplus.annotation.TableId;
|
||||
import com.baomidou.mybatisplus.annotation.TableName;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* task instance
|
||||
*/
|
||||
@TableName("t_ds_task_instance")
|
||||
public class TaskInstance {
|
||||
public class TaskInstance implements Serializable {
|
||||
|
||||
/**
|
||||
* id
|
||||
|
|
@ -154,20 +155,17 @@ public class TaskInstance {
|
|||
|
||||
/**
|
||||
* duration
|
||||
* @return
|
||||
*/
|
||||
@TableField(exist = false)
|
||||
private Long duration;
|
||||
|
||||
/**
|
||||
* max retry times
|
||||
* @return
|
||||
*/
|
||||
private int maxRetryTimes;
|
||||
|
||||
/**
|
||||
* task retry interval, unit: minute
|
||||
* @return
|
||||
*/
|
||||
private int retryInterval;
|
||||
|
||||
|
|
@ -184,26 +182,15 @@ public class TaskInstance {
|
|||
|
||||
/**
|
||||
* dependent state
|
||||
* @return
|
||||
*/
|
||||
@TableField(exist = false)
|
||||
private String dependentResult;
|
||||
|
||||
|
||||
/**
|
||||
* worker group id
|
||||
* @return
|
||||
* workerGroup
|
||||
*/
|
||||
private int workerGroupId;
|
||||
|
||||
|
||||
|
||||
public void init(String host,Date startTime,String executePath){
|
||||
this.host = host;
|
||||
this.startTime = startTime;
|
||||
this.executePath = executePath;
|
||||
}
|
||||
|
||||
private String workerGroup;
|
||||
|
||||
public ProcessInstance getProcessInstance() {
|
||||
return processInstance;
|
||||
|
|
@ -458,12 +445,20 @@ public class TaskInstance {
|
|||
this.processInstancePriority = processInstancePriority;
|
||||
}
|
||||
|
||||
public int getWorkerGroupId() {
|
||||
return workerGroupId;
|
||||
public String getDependentResult() {
|
||||
return dependentResult;
|
||||
}
|
||||
|
||||
public void setWorkerGroupId(int workerGroupId) {
|
||||
this.workerGroupId = workerGroupId;
|
||||
public void setDependentResult(String dependentResult) {
|
||||
this.dependentResult = dependentResult;
|
||||
}
|
||||
|
||||
public String getWorkerGroup() {
|
||||
return workerGroup;
|
||||
}
|
||||
|
||||
public void setWorkerGroup(String workerGroup) {
|
||||
this.workerGroup = workerGroup;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
@ -485,27 +480,19 @@ public class TaskInstance {
|
|||
", logPath='" + logPath + '\'' +
|
||||
", retryTimes=" + retryTimes +
|
||||
", alertFlag=" + alertFlag +
|
||||
", flag=" + flag +
|
||||
", processInstance=" + processInstance +
|
||||
", processDefine=" + processDefine +
|
||||
", pid=" + pid +
|
||||
", appLink='" + appLink + '\'' +
|
||||
", flag=" + flag +
|
||||
", dependency=" + dependency +
|
||||
", dependency='" + dependency + '\'' +
|
||||
", duration=" + duration +
|
||||
", maxRetryTimes=" + maxRetryTimes +
|
||||
", retryInterval=" + retryInterval +
|
||||
", taskInstancePriority=" + taskInstancePriority +
|
||||
", processInstancePriority=" + processInstancePriority +
|
||||
", workGroupId=" + workerGroupId +
|
||||
", dependentResult='" + dependentResult + '\'' +
|
||||
", workerGroup='" + workerGroup + '\'' +
|
||||
'}';
|
||||
}
|
||||
|
||||
public String getDependentResult() {
|
||||
return dependentResult;
|
||||
}
|
||||
|
||||
public void setDependentResult(String dependentResult) {
|
||||
this.dependentResult = dependentResult;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -186,24 +186,6 @@ public class UdfFunc {
|
|||
this.updateTime = updateTime;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "UdfFunc{" +
|
||||
"id=" + id +
|
||||
", userId=" + userId +
|
||||
", funcName='" + funcName + '\'' +
|
||||
", className='" + className + '\'' +
|
||||
", argTypes='" + argTypes + '\'' +
|
||||
", database='" + database + '\'' +
|
||||
", description='" + description + '\'' +
|
||||
", resourceId=" + resourceId +
|
||||
", resourceName='" + resourceName + '\'' +
|
||||
", type=" + type +
|
||||
", createTime=" + createTime +
|
||||
", updateTime=" + updateTime +
|
||||
'}';
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
|
|
@ -228,4 +210,22 @@ public class UdfFunc {
|
|||
result = 31 * result + (funcName != null ? funcName.hashCode() : 0);
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "UdfFunc{" +
|
||||
"id=" + id +
|
||||
", userId=" + userId +
|
||||
", funcName='" + funcName + '\'' +
|
||||
", className='" + className + '\'' +
|
||||
", argTypes='" + argTypes + '\'' +
|
||||
", database='" + database + '\'' +
|
||||
", description='" + description + '\'' +
|
||||
", resourceId=" + resourceId +
|
||||
", resourceName='" + resourceName + '\'' +
|
||||
", type=" + type +
|
||||
", createTime=" + createTime +
|
||||
", updateTime=" + updateTime +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -79,8 +79,10 @@ public interface DataSourceMapper extends BaseMapper<DataSource> {
|
|||
|
||||
/**
|
||||
* list authorized UDF function
|
||||
*
|
||||
* @param userId userId
|
||||
* @param dataSourceIds data source id array
|
||||
* @param <T> T
|
||||
* @return UDF function list
|
||||
*/
|
||||
<T> List<DataSource> listAuthorizedDataSource(@Param("userId") int userId,@Param("dataSourceIds")T[] dataSourceIds);
|
||||
|
|
|
|||
|
|
@ -87,7 +87,8 @@ public interface ResourceMapper extends BaseMapper<Resource> {
|
|||
/**
|
||||
* list authorized resource
|
||||
* @param userId userId
|
||||
* @param resNames resource names
|
||||
* @param resNames resNames
|
||||
* @param <T> T
|
||||
* @return resource list
|
||||
*/
|
||||
<T> List<Resource> listAuthorizedResource(@Param("userId") int userId,@Param("resNames")T[] resNames);
|
||||
|
|
|
|||
|
|
@ -62,8 +62,8 @@ public interface ScheduleMapper extends BaseMapper<Schedule> {
|
|||
|
||||
/**
|
||||
* query schedule list by process definition id
|
||||
* @param processDefinitionId
|
||||
* @return
|
||||
* @param processDefinitionId processDefinitionId
|
||||
* @return schedule list
|
||||
*/
|
||||
List<Schedule> queryReleaseSchedulerListByProcessDefinitionId(@Param("processDefinitionId") int processDefinitionId);
|
||||
|
||||
|
|
|
|||
|
|
@ -81,8 +81,9 @@ public interface UdfFuncMapper extends BaseMapper<UdfFunc> {
|
|||
/**
|
||||
* list authorized UDF function
|
||||
* @param userId userId
|
||||
* @param udfIds UDF function id array
|
||||
* @return UDF function list
|
||||
* @param udfIds udfIds
|
||||
* @param <T> T
|
||||
* @return Udf function list
|
||||
*/
|
||||
<T> List<UdfFunc> listAuthorizedUdfFunc (@Param("userId") int userId,@Param("udfIds")T[] udfIds);
|
||||
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ public class PropertyUtils {
|
|||
* init
|
||||
*/
|
||||
private void init(){
|
||||
String[] propertyFiles = new String[]{Constants.APPLICATION_PROPERTIES};
|
||||
String[] propertyFiles = new String[]{Constants.DATASOURCE_PROPERTIES};
|
||||
for (String fileName : propertyFiles) {
|
||||
InputStream fis = null;
|
||||
try {
|
||||
|
|
@ -77,6 +77,17 @@ public class PropertyUtils {
|
|||
return properties.getProperty(key);
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
*
|
||||
* @param key property name
|
||||
* @param defaultVal default value
|
||||
* @return property value
|
||||
*/
|
||||
public static String getString(String key, String defaultVal) {
|
||||
String val = properties.getProperty(key.trim());
|
||||
return val == null ? defaultVal : val;
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
|
|
@ -106,4 +117,46 @@ public class PropertyUtils {
|
|||
}
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
*
|
||||
* @param key property name
|
||||
* @return property value
|
||||
*/
|
||||
public static Boolean getBoolean(String key) {
|
||||
String value = properties.getProperty(key.trim());
|
||||
if(null != value){
|
||||
return Boolean.parseBoolean(value);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* get property value
|
||||
*
|
||||
* @param key property name
|
||||
* @param defaultValue default value
|
||||
* @return property value
|
||||
*/
|
||||
public static Boolean getBoolean(String key, boolean defaultValue) {
|
||||
String value = properties.getProperty(key.trim());
|
||||
if(null != value){
|
||||
return Boolean.parseBoolean(value);
|
||||
}
|
||||
|
||||
return defaultValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* get property long value
|
||||
* @param key key
|
||||
* @param defaultVal default value
|
||||
* @return property value
|
||||
*/
|
||||
public static long getLong(String key, long defaultVal) {
|
||||
String val = getString(key);
|
||||
return val == null ? defaultVal : Long.parseLong(val);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,145 +0,0 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# base spring data source configuration
|
||||
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
|
||||
# postgre
|
||||
spring.datasource.driver-class-name=org.postgresql.Driver
|
||||
spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler
|
||||
# mysql
|
||||
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
|
||||
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
|
||||
spring.datasource.username=test
|
||||
spring.datasource.password=test
|
||||
|
||||
# connection configuration
|
||||
spring.datasource.initialSize=5
|
||||
# min connection number
|
||||
spring.datasource.minIdle=5
|
||||
# max connection number
|
||||
spring.datasource.maxActive=50
|
||||
|
||||
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
|
||||
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
|
||||
spring.datasource.maxWait=60000
|
||||
|
||||
# milliseconds for check to close free connections
|
||||
spring.datasource.timeBetweenEvictionRunsMillis=60000
|
||||
|
||||
# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
|
||||
spring.datasource.timeBetweenConnectErrorMillis=60000
|
||||
|
||||
# the longest time a connection remains idle without being evicted, in milliseconds
|
||||
spring.datasource.minEvictableIdleTimeMillis=300000
|
||||
|
||||
#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
|
||||
spring.datasource.validationQuery=SELECT 1
|
||||
|
||||
#check whether the connection is valid for timeout, in seconds
|
||||
spring.datasource.validationQueryTimeout=3
|
||||
|
||||
# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
|
||||
# validation Query is performed to check whether the connection is valid
|
||||
spring.datasource.testWhileIdle=true
|
||||
|
||||
#execute validation to check if the connection is valid when applying for a connection
|
||||
spring.datasource.testOnBorrow=true
|
||||
#execute validation to check if the connection is valid when the connection is returned
|
||||
spring.datasource.testOnReturn=false
|
||||
spring.datasource.defaultAutoCommit=true
|
||||
spring.datasource.keepAlive=true
|
||||
|
||||
# open PSCache, specify count PSCache for every connection
|
||||
spring.datasource.poolPreparedStatements=true
|
||||
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
|
||||
|
||||
spring.datasource.spring.datasource.filters=stat,wall,log4j
|
||||
spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
|
||||
|
||||
#mybatis
|
||||
mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml
|
||||
|
||||
mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums
|
||||
|
||||
#Entity scan, where multiple packages are separated by a comma or semicolon
|
||||
mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity
|
||||
|
||||
#Primary key type AUTO:" database ID AUTO ", INPUT:" user INPUT ID", ID_WORKER:" global unique ID (numeric type unique ID)", UUID:" global unique ID UUID";
|
||||
mybatis-plus.global-config.db-config.id-type=AUTO
|
||||
|
||||
#Field policy IGNORED:" ignore judgment ",NOT_NULL:" not NULL judgment "),NOT_EMPTY:" not NULL judgment"
|
||||
mybatis-plus.global-config.db-config.field-strategy=NOT_NULL
|
||||
|
||||
#The hump underline is converted
|
||||
mybatis-plus.global-config.db-config.column-underline=true
|
||||
mybatis-plus.global-config.db-config.logic-delete-value=-1
|
||||
mybatis-plus.global-config.db-config.logic-not-delete-value=0
|
||||
mybatis-plus.global-config.db-config.banner=false
|
||||
#The original configuration
|
||||
mybatis-plus.configuration.map-underscore-to-camel-case=true
|
||||
mybatis-plus.configuration.cache-enabled=false
|
||||
mybatis-plus.configuration.call-setters-on-nulls=true
|
||||
mybatis-plus.configuration.jdbc-type-for-null=null
|
||||
|
||||
# master settings
|
||||
# master execute thread num
|
||||
master.exec.threads=100
|
||||
|
||||
# master execute task number in parallel
|
||||
master.exec.task.num=20
|
||||
|
||||
# master heartbeat interval
|
||||
master.heartbeat.interval=10
|
||||
|
||||
# master commit task retry times
|
||||
master.task.commit.retryTimes=5
|
||||
|
||||
# master commit task interval
|
||||
master.task.commit.interval=1000
|
||||
|
||||
|
||||
# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
|
||||
master.max.cpuload.avg=100
|
||||
|
||||
# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
|
||||
master.reserved.memory=0.1
|
||||
|
||||
# worker settings
|
||||
# worker execute thread num
|
||||
worker.exec.threads=100
|
||||
|
||||
# worker heartbeat interval
|
||||
worker.heartbeat.interval=10
|
||||
|
||||
# submit the number of tasks at a time
|
||||
worker.fetch.task.num = 3
|
||||
|
||||
# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
|
||||
worker.max.cpuload.avg=100
|
||||
|
||||
# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
|
||||
worker.reserved.memory=0.1
|
||||
|
||||
# data quality analysis is not currently in use. please ignore the following configuration
|
||||
# task record
|
||||
task.record.flag=false
|
||||
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
|
||||
task.record.datasource.username=xx
|
||||
task.record.datasource.password=xx
|
||||
|
||||
# Logger Config
|
||||
#logging.level.org.apache.dolphinscheduler.dao=debug
|
||||
|
|
@ -0,0 +1,75 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
# postgre
|
||||
#spring.datasource.driver-class-name=org.postgresql.Driver
|
||||
#spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler
|
||||
# mysql
|
||||
spring.datasource.driver-class-name=com.mysql.jdbc.Driver
|
||||
spring.datasource.url=jdbc:mysql://localhost:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
|
||||
spring.datasource.username=root
|
||||
spring.datasource.password=root@123
|
||||
|
||||
## base spring data source configuration todo need to remove
|
||||
#spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
|
||||
|
||||
# connection configuration
|
||||
#spring.datasource.initialSize=5
|
||||
# min connection number
|
||||
#spring.datasource.minIdle=5
|
||||
# max connection number
|
||||
#spring.datasource.maxActive=50
|
||||
|
||||
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
|
||||
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
|
||||
#spring.datasource.maxWait=60000
|
||||
|
||||
# milliseconds for check to close free connections
|
||||
#spring.datasource.timeBetweenEvictionRunsMillis=60000
|
||||
|
||||
# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
|
||||
#spring.datasource.timeBetweenConnectErrorMillis=60000
|
||||
|
||||
# the longest time a connection remains idle without being evicted, in milliseconds
|
||||
#spring.datasource.minEvictableIdleTimeMillis=300000
|
||||
|
||||
# the SQL used to check whether the connection is valid requires a query statement. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
|
||||
#spring.datasource.validationQuery=SELECT 1
|
||||
|
||||
#check whether the connection is valid for timeout, in seconds
|
||||
#spring.datasource.validationQueryTimeout=3
|
||||
|
||||
# when applying for a connection, if it is detected that the connection is idle longer than timeBetweenEvictionRunsMillis,
|
||||
# validation Query is performed to check whether the connection is valid
|
||||
#spring.datasource.testWhileIdle=true
|
||||
|
||||
#execute validation to check if the connection is valid when applying for a connection
|
||||
#spring.datasource.testOnBorrow=true
|
||||
#execute validation to check if the connection is valid when the connection is returned
|
||||
#spring.datasource.testOnReturn=false
|
||||
#spring.datasource.defaultAutoCommit=true
|
||||
#spring.datasource.keepAlive=true
|
||||
|
||||
# open PSCache, specify count PSCache for every connection
|
||||
#spring.datasource.poolPreparedStatements=true
|
||||
#spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
|
||||
|
||||
#spring.datasource.filters=stat,wall,log4j
|
||||
#spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
|
||||
|
||||
|
||||
|
|
@ -16,6 +16,7 @@
|
|||
*/
|
||||
package org.apache.dolphinscheduler.dao.mapper;
|
||||
|
||||
import org.apache.dolphinscheduler.common.Constants;
|
||||
import org.apache.dolphinscheduler.common.utils.DateUtils;
|
||||
import org.apache.dolphinscheduler.dao.entity.Command;
|
||||
import org.apache.dolphinscheduler.dao.entity.CommandCount;
|
||||
|
|
@ -265,7 +266,7 @@ public class CommandMapperTest {
|
|||
command.setProcessInstancePriority(Priority.MEDIUM);
|
||||
command.setStartTime(DateUtils.stringToDate("2019-12-29 10:10:00"));
|
||||
command.setUpdateTime(DateUtils.stringToDate("2019-12-29 10:10:00"));
|
||||
command.setWorkerGroupId(-1);
|
||||
command.setWorkerGroup(Constants.DEFAULT_WORKER_GROUP);
|
||||
commandMapper.insert(command);
|
||||
|
||||
return command;
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@
|
|||
<artifactId>dolphinscheduler-remote</artifactId>
|
||||
|
||||
<name>dolphinscheduler-remote</name>
|
||||
<!-- FIXME change it to the project's website -->
|
||||
<url>http://www.example.com</url>
|
||||
|
||||
<properties>
|
||||
|
|
|
|||
|
|
@ -25,6 +25,7 @@ import io.netty.channel.socket.nio.NioSocketChannel;
|
|||
import org.apache.dolphinscheduler.remote.codec.NettyDecoder;
|
||||
import org.apache.dolphinscheduler.remote.codec.NettyEncoder;
|
||||
import org.apache.dolphinscheduler.remote.command.Command;
|
||||
import org.apache.dolphinscheduler.remote.command.CommandType;
|
||||
import org.apache.dolphinscheduler.remote.config.NettyClientConfig;
|
||||
import org.apache.dolphinscheduler.remote.exceptions.RemotingException;
|
||||
import org.apache.dolphinscheduler.remote.exceptions.RemotingTimeoutException;
|
||||
|
|
@ -33,7 +34,8 @@ import org.apache.dolphinscheduler.remote.future.InvokeCallback;
|
|||
import org.apache.dolphinscheduler.remote.future.ReleaseSemaphore;
|
||||
import org.apache.dolphinscheduler.remote.future.ResponseFuture;
|
||||
import org.apache.dolphinscheduler.remote.handler.NettyClientHandler;
|
||||
import org.apache.dolphinscheduler.remote.utils.Address;
|
||||
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
|
||||
import org.apache.dolphinscheduler.remote.utils.Host;
|
||||
import org.apache.dolphinscheduler.remote.utils.CallerThreadExecutePolicy;
|
||||
import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory;
|
||||
import org.slf4j.Logger;
|
||||
|
|
@ -64,7 +66,7 @@ public class NettyRemotingClient {
|
|||
/**
|
||||
* channels
|
||||
*/
|
||||
private final ConcurrentHashMap<Address, Channel> channels = new ConcurrentHashMap(128);
|
||||
private final ConcurrentHashMap<Host, Channel> channels = new ConcurrentHashMap(128);
|
||||
|
||||
/**
|
||||
* started flag
|
||||
|
|
@ -96,6 +98,11 @@ public class NettyRemotingClient {
|
|||
*/
|
||||
private final NettyClientHandler clientHandler;
|
||||
|
||||
/**
|
||||
* response future executor
|
||||
*/
|
||||
private final ScheduledExecutorService responseFutureExecutor;
|
||||
|
||||
/**
|
||||
* client init
|
||||
* @param clientConfig client config
|
||||
|
|
@ -115,6 +122,8 @@ public class NettyRemotingClient {
|
|||
new CallerThreadExecutePolicy());
|
||||
this.clientHandler = new NettyClientHandler(this, callbackExecutor);
|
||||
|
||||
this.responseFutureExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("ResponseFutureExecutor"));
|
||||
|
||||
this.start();
|
||||
}
|
||||
|
||||
|
|
@ -139,23 +148,29 @@ public class NettyRemotingClient {
|
|||
encoder);
|
||||
}
|
||||
});
|
||||
this.responseFutureExecutor.scheduleAtFixedRate(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
ResponseFuture.scanFutureTable();
|
||||
}
|
||||
}, 5000, 1000, TimeUnit.MILLISECONDS);
|
||||
//
|
||||
isStarted.compareAndSet(false, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* async send
|
||||
* @param address address
|
||||
* @param host host
|
||||
* @param command command
|
||||
* @param timeoutMillis timeoutMillis
|
||||
* @param invokeCallback callback function
|
||||
* @throws InterruptedException
|
||||
* @throws RemotingException
|
||||
*/
|
||||
public void sendAsync(final Address address, final Command command,
|
||||
public void sendAsync(final Host host, final Command command,
|
||||
final long timeoutMillis,
|
||||
final InvokeCallback invokeCallback) throws InterruptedException, RemotingException {
|
||||
final Channel channel = getChannel(address);
|
||||
final Channel channel = getChannel(host);
|
||||
if (channel == null) {
|
||||
throw new RemotingException("network error");
|
||||
}
|
||||
|
|
@ -201,7 +216,7 @@ public class NettyRemotingClient {
|
|||
});
|
||||
} catch (Throwable ex){
|
||||
responseFuture.release();
|
||||
throw new RemotingException(String.format("send command to address: %s failed", address), ex);
|
||||
throw new RemotingException(String.format("send command to host: %s failed", host), ex);
|
||||
}
|
||||
} else{
|
||||
String message = String.format("try to acquire async semaphore timeout: %d, waiting thread num: %d, total permits: %d",
|
||||
|
|
@ -212,17 +227,17 @@ public class NettyRemotingClient {
|
|||
|
||||
/**
|
||||
* sync send
|
||||
* @param address address
|
||||
* @param host host
|
||||
* @param command command
|
||||
* @param timeoutMillis timeoutMillis
|
||||
* @return command
|
||||
* @throws InterruptedException
|
||||
* @throws RemotingException
|
||||
*/
|
||||
public Command sendSync(final Address address, final Command command, final long timeoutMillis) throws InterruptedException, RemotingException {
|
||||
final Channel channel = getChannel(address);
|
||||
public Command sendSync(final Host host, final Command command, final long timeoutMillis) throws InterruptedException, RemotingException {
|
||||
final Channel channel = getChannel(host);
|
||||
if (channel == null) {
|
||||
throw new RemotingException(String.format("connect to : %s fail", address));
|
||||
throw new RemotingException(String.format("connect to : %s fail", host));
|
||||
}
|
||||
final long opaque = command.getOpaque();
|
||||
final ResponseFuture responseFuture = new ResponseFuture(opaque, timeoutMillis, null, null);
|
||||
|
|
@ -237,7 +252,7 @@ public class NettyRemotingClient {
|
|||
}
|
||||
responseFuture.setCause(future.cause());
|
||||
responseFuture.putResponse(null);
|
||||
logger.error("send command {} to address {} failed", command, address);
|
||||
logger.error("send command {} to host {} failed", command, host);
|
||||
}
|
||||
});
|
||||
/**
|
||||
|
|
@ -246,49 +261,95 @@ public class NettyRemotingClient {
|
|||
Command result = responseFuture.waitResponse();
|
||||
if(result == null){
|
||||
if(responseFuture.isSendOK()){
|
||||
throw new RemotingTimeoutException(address.toString(), timeoutMillis, responseFuture.getCause());
|
||||
throw new RemotingTimeoutException(host.toString(), timeoutMillis, responseFuture.getCause());
|
||||
} else{
|
||||
throw new RemotingException(address.toString(), responseFuture.getCause());
|
||||
throw new RemotingException(host.toString(), responseFuture.getCause());
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* send task
|
||||
* @param host host
|
||||
* @param command command
|
||||
* @throws RemotingException
|
||||
*/
|
||||
public void send(final Host host, final Command command) throws RemotingException {
|
||||
Channel channel = getChannel(host);
|
||||
if (channel == null) {
|
||||
throw new RemotingException(String.format("connect to : %s fail", host));
|
||||
}
|
||||
try {
|
||||
ChannelFuture future = channel.writeAndFlush(command).await();
|
||||
if (future.isSuccess()) {
|
||||
logger.debug("send command : {} , to : {} successfully.", command, host.getAddress());
|
||||
} else {
|
||||
String msg = String.format("send command : %s , to :%s failed", command, host.getAddress());
|
||||
logger.error(msg, future.cause());
|
||||
throw new RemotingException(msg);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Send command {} to address {} encounter error.", command, host.getAddress());
|
||||
throw new RemotingException(String.format("Send command : %s , to :%s encounter error", command, host.getAddress()), e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* register processor
|
||||
* @param commandType command type
|
||||
* @param processor processor
|
||||
*/
|
||||
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor) {
|
||||
this.registerProcessor(commandType, processor, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* register processor
|
||||
*
|
||||
* @param commandType command type
|
||||
* @param processor processor
|
||||
* @param executor thread executor
|
||||
*/
|
||||
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor, final ExecutorService executor) {
|
||||
this.clientHandler.registerProcessor(commandType, processor, executor);
|
||||
}
|
||||
|
||||
/**
|
||||
* get channel
|
||||
* @param address
|
||||
* @param host
|
||||
* @return
|
||||
*/
|
||||
public Channel getChannel(Address address) {
|
||||
Channel channel = channels.get(address);
|
||||
public Channel getChannel(Host host) {
|
||||
Channel channel = channels.get(host);
|
||||
if(channel != null && channel.isActive()){
|
||||
return channel;
|
||||
}
|
||||
return createChannel(address, true);
|
||||
return createChannel(host, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* create channel
|
||||
* @param address address
|
||||
* @param host host
|
||||
* @param isSync sync flag
|
||||
* @return channel
|
||||
*/
|
||||
public Channel createChannel(Address address, boolean isSync) {
|
||||
public Channel createChannel(Host host, boolean isSync) {
|
||||
ChannelFuture future;
|
||||
try {
|
||||
synchronized (bootstrap){
|
||||
future = bootstrap.connect(new InetSocketAddress(address.getHost(), address.getPort()));
|
||||
future = bootstrap.connect(new InetSocketAddress(host.getIp(), host.getPort()));
|
||||
}
|
||||
if(isSync){
|
||||
future.sync();
|
||||
}
|
||||
if (future.isSuccess()) {
|
||||
Channel channel = future.channel();
|
||||
channels.put(address, channel);
|
||||
channels.put(host, channel);
|
||||
return channel;
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
logger.info("connect to {} error {}", address, ex);
|
||||
logger.info("connect to {} error {}", host, ex);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
|
@ -306,6 +367,9 @@ public class NettyRemotingClient {
|
|||
if(callbackExecutor != null){
|
||||
this.callbackExecutor.shutdownNow();
|
||||
}
|
||||
if(this.responseFutureExecutor != null){
|
||||
this.responseFutureExecutor.shutdownNow();
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
logger.error("netty client close exception", ex);
|
||||
}
|
||||
|
|
@ -325,10 +389,10 @@ public class NettyRemotingClient {
|
|||
|
||||
/**
|
||||
* close channel
|
||||
* @param address address
|
||||
* @param host host
|
||||
*/
|
||||
public void closeChannel(Address address){
|
||||
Channel channel = this.channels.remove(address);
|
||||
public void closeChannel(Host host){
|
||||
Channel channel = this.channels.remove(host);
|
||||
if(channel != null){
|
||||
channel.close();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
/**
 * types of commands exchanged over the remote (netty) transport;
 * request/response pairs plus keep-alive ping/pong
 */
public enum CommandType {

    /**
     * roll view log request
     */
    ROLL_VIEW_LOG_REQUEST,

    /**
     * roll view log response
     */
    ROLL_VIEW_LOG_RESPONSE,

    /**
     * view whole log request
     */
    VIEW_WHOLE_LOG_REQUEST,

    /**
     * view whole log response
     */
    VIEW_WHOLE_LOG_RESPONSE,

    /**
     * get log bytes request
     */
    GET_LOG_BYTES_REQUEST,

    /**
     * get log bytes response
     */
    GET_LOG_BYTES_RESPONSE,

    /**
     * worker request — NOTE(review): undocumented in the original; meaning
     * inferred from the name only, confirm against the handlers that use it
     */
    WORKER_REQUEST,

    /**
     * master response — NOTE(review): undocumented in the original; meaning
     * inferred from the name only, confirm against the handlers that use it
     */
    MASTER_RESPONSE,

    /**
     * execute task request
     */
    EXECUTE_TASK_REQUEST,

    /**
     * execute task response
     */
    EXECUTE_TASK_RESPONSE,

    /**
     * ping
     */
    PING,

    /**
     * pong
     */
    PONG;
}
|
||||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
/**
 * types of commands exchanged over the remote (netty) transport;
 * request/ack/response triples plus keep-alive ping/pong
 */
public enum CommandType {

    /**
     * roll view log request
     */
    ROLL_VIEW_LOG_REQUEST,

    /**
     * roll view log response
     */
    ROLL_VIEW_LOG_RESPONSE,

    /**
     * view whole log request
     */
    VIEW_WHOLE_LOG_REQUEST,

    /**
     * view whole log response
     */
    VIEW_WHOLE_LOG_RESPONSE,

    /**
     * get log bytes request
     */
    GET_LOG_BYTES_REQUEST,

    /**
     * get log bytes response
     */
    GET_LOG_BYTES_RESPONSE,

    /**
     * worker request — NOTE(review): undocumented in the original; meaning
     * inferred from the name only, confirm against the handlers that use it
     */
    WORKER_REQUEST,

    /**
     * master response — NOTE(review): undocumented in the original; meaning
     * inferred from the name only, confirm against the handlers that use it
     */
    MASTER_RESPONSE,

    /**
     * execute task request
     */
    TASK_EXECUTE_REQUEST,

    /**
     * execute task ack
     */
    TASK_EXECUTE_ACK,

    /**
     * execute task response
     */
    TASK_EXECUTE_RESPONSE,

    /**
     * kill task
     */
    TASK_KILL_REQUEST,

    /**
     * kill task response
     */
    TASK_KILL_RESPONSE,

    /**
     * ping
     */
    PING,

    /**
     * pong
     */
    PONG;
}
|
||||
|
|
@ -1 +0,0 @@
|
|||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.remote.utils.FastJsonSerializer;
import java.io.Serializable;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
/**
 * execute task request command: a serializable payload describing the task
 * to run, wrapped into a transport {@code Command} by {@link #convert2Command()}.
 */
public class ExecuteTaskRequestCommand implements Serializable {

    /**
     * task id
     */
    private String taskId;

    /**
     * attempt id
     */
    private String attemptId;

    /**
     * application name
     */
    private String applicationName;

    /**
     * group name
     */
    private String groupName;

    /**
     * task name
     */
    private String taskName;

    /**
     * connector port
     */
    private int connectorPort;

    /**
     * description info
     */
    private String description;

    /**
     * class name
     */
    private String className;

    /**
     * method name
     */
    private String methodName;

    /**
     * parameters
     */
    private String params;

    /**
     * shard items
     */
    private List<Integer> shardItems;

    public List<Integer> getShardItems() {
        return shardItems;
    }

    public void setShardItems(List<Integer> shardItems) {
        this.shardItems = shardItems;
    }

    public String getParams() {
        return params;
    }

    public void setParams(String params) {
        this.params = params;
    }

    public String getTaskId() {
        return taskId;
    }

    public void setTaskId(String taskId) {
        this.taskId = taskId;
    }

    // Fix: attemptId was a private field with no accessors at all — it could
    // never be set or read by callers (the sibling response command exposes it).
    public String getAttemptId() {
        return attemptId;
    }

    public void setAttemptId(String attemptId) {
        this.attemptId = attemptId;
    }

    public String getApplicationName() {
        return applicationName;
    }

    public void setApplicationName(String applicationName) {
        this.applicationName = applicationName;
    }

    public String getGroupName() {
        return groupName;
    }

    public void setGroupName(String groupName) {
        this.groupName = groupName;
    }

    public String getTaskName() {
        return taskName;
    }

    public void setTaskName(String taskName) {
        this.taskName = taskName;
    }

    public int getConnectorPort() {
        return connectorPort;
    }

    public void setConnectorPort(int connectorPort) {
        this.connectorPort = connectorPort;
    }

    public String getDescription() {
        return description;
    }

    public void setDescription(String description) {
        this.description = description;
    }

    public String getClassName() {
        return className;
    }

    public void setClassName(String className) {
        this.className = className;
    }

    public String getMethodName() {
        return methodName;
    }

    public void setMethodName(String methodName) {
        this.methodName = methodName;
    }

    /**
     * package request command
     *
     * @return command of type EXECUTE_TASK_REQUEST whose body is the
     *         serialized form of this object
     */
    public Command convert2Command(){
        Command command = new Command();
        command.setType(CommandType.EXECUTE_TASK_REQUEST);
        byte[] body = FastJsonSerializer.serialize(this);
        command.setBody(body);
        return command;
    }
}
|
||||
|
|
@ -1 +0,0 @@
|
|||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.remote.utils.FastJsonSerializer;
import java.io.Serializable;
import java.util.concurrent.atomic.AtomicLong;
/**
 * execute task response command: a serializable payload carrying the result
 * of a task execution, wrapped into a transport {@code Command}.
 */
public class ExecuteTaskResponseCommand implements Serializable {

    /**
     * task id
     */
    private String taskId;

    /**
     * attempt id
     */
    private String attemptId;

    /**
     * return result
     */
    private Object result;

    /**
     * received time
     */
    private long receivedTime;

    /**
     * execute count
     */
    private int executeCount;

    /**
     * execute time
     */
    private long executeTime;

    public String getAttemptId() {
        return attemptId;
    }

    public void setAttemptId(String attemptId) {
        this.attemptId = attemptId;
    }

    public String getTaskId() {
        return taskId;
    }

    public void setTaskId(String taskId) {
        this.taskId = taskId;
    }

    public Object getResult() {
        return result;
    }

    public void setResult(Object result) {
        this.result = result;
    }

    public long getReceivedTime() {
        return receivedTime;
    }

    public void setReceivedTime(long receivedTime) {
        this.receivedTime = receivedTime;
    }

    public int getExecuteCount() {
        return executeCount;
    }

    public void setExecuteCount(int executeCount) {
        this.executeCount = executeCount;
    }

    public long getExecuteTime() {
        return executeTime;
    }

    public void setExecuteTime(long executeTime) {
        this.executeTime = executeTime;
    }

    /**
     * package response command
     *
     * NOTE(review): the {@code opaque} parameter is never used in this body —
     * presumably it should be carried on the returned command to correlate
     * request and response; confirm against the Command API.
     *
     * @param opaque request/response correlation id (currently ignored)
     * @return command of type EXECUTE_TASK_RESPONSE whose body is the
     *         serialized form of this object
     */
    public Command convert2Command(long opaque){
        Command command = new Command();
        command.setType(CommandType.EXECUTE_TASK_RESPONSE);
        byte[] body = FastJsonSerializer.serialize(this);
        command.setBody(body);
        return command;
    }
}
|
||||
|
|
@ -0,0 +1 @@
|
|||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.remote.utils.FastJsonSerializer;
import java.io.Serializable;
import java.util.Date;
/**
 * task execute ack command: acknowledges that a task has been accepted for
 * execution, carrying its start time, host, status and paths.
 * (Original header wrongly said "execute task request command".)
 */
public class TaskExecuteAckCommand implements Serializable {

    /**
     * taskInstanceId
     */
    private int taskInstanceId;

    /**
     * startTime
     */
    private Date startTime;

    /**
     * host
     */
    private String host;

    /**
     * status
     */
    private int status;

    /**
     * logPath
     */
    private String logPath;

    /**
     * executePath
     */
    private String executePath;

    public Date getStartTime() {
        return startTime;
    }

    public void setStartTime(Date startTime) {
        this.startTime = startTime;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public int getStatus() {
        return status;
    }

    public void setStatus(int status) {
        this.status = status;
    }

    public int getTaskInstanceId() {
        return taskInstanceId;
    }

    public void setTaskInstanceId(int taskInstanceId) {
        this.taskInstanceId = taskInstanceId;
    }

    public String getLogPath() {
        return logPath;
    }

    public void setLogPath(String logPath) {
        this.logPath = logPath;
    }

    public String getExecutePath() {
        return executePath;
    }

    public void setExecutePath(String executePath) {
        this.executePath = executePath;
    }

    /**
     * package ack command
     *
     * @return command of type TASK_EXECUTE_ACK whose body is the serialized
     *         form of this object
     */
    public Command convert2Command(){
        Command command = new Command();
        command.setType(CommandType.TASK_EXECUTE_ACK);
        byte[] body = FastJsonSerializer.serialize(this);
        command.setBody(body);
        return command;
    }

    @Override
    public String toString() {
        return "TaskExecuteAckCommand{" +
                "taskInstanceId=" + taskInstanceId +
                ", startTime=" + startTime +
                ", host='" + host + '\'' +
                ", status=" + status +
                ", logPath='" + logPath + '\'' +
                ", executePath='" + executePath + '\'' +
                '}';
    }
}
|
||||
|
|
@ -0,0 +1 @@
|
|||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.remote.utils.FastJsonSerializer;
import java.io.Serializable;
/**
 * execute task request command: carries a serialized task execution context
 * and can wrap itself into a transport {@code Command}.
 */
public class TaskExecuteRequestCommand implements Serializable {

    /**
     * task execution context (serialized)
     */
    private String taskExecutionContext;

    public TaskExecuteRequestCommand() {
    }

    public TaskExecuteRequestCommand(String taskExecutionContext) {
        this.taskExecutionContext = taskExecutionContext;
    }

    public String getTaskExecutionContext() {
        return taskExecutionContext;
    }

    public void setTaskExecutionContext(String taskExecutionContext) {
        this.taskExecutionContext = taskExecutionContext;
    }

    /**
     * package request command
     *
     * @return command of type TASK_EXECUTE_REQUEST whose body is the
     *         serialized form of this object
     */
    public Command convert2Command(){
        Command cmd = new Command();
        cmd.setType(CommandType.TASK_EXECUTE_REQUEST);
        cmd.setBody(FastJsonSerializer.serialize(this));
        return cmd;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("TaskExecuteRequestCommand{");
        sb.append("taskExecutionContext='").append(taskExecutionContext).append('\'');
        sb.append('}');
        return sb.toString();
    }
}
|
||||
|
|
@ -0,0 +1 @@
|
|||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.remote.utils.FastJsonSerializer;
import java.io.Serializable;
import java.util.Date;
/**
 * execute task response command: carries the outcome of a task execution
 * (status, end time, process id, application ids) and can wrap itself into
 * a transport {@code Command}.
 */
public class TaskExecuteResponseCommand implements Serializable {

    /**
     * task instance id
     */
    private int taskInstanceId;

    /**
     * status
     */
    private int status;

    /**
     * end time
     */
    private Date endTime;

    /**
     * processId
     */
    private int processId;

    /**
     * appIds
     */
    private String appIds;

    public TaskExecuteResponseCommand() {
    }

    public TaskExecuteResponseCommand(int taskInstanceId) {
        this.taskInstanceId = taskInstanceId;
    }

    public int getTaskInstanceId() {
        return taskInstanceId;
    }

    public void setTaskInstanceId(int taskInstanceId) {
        this.taskInstanceId = taskInstanceId;
    }

    public int getStatus() {
        return status;
    }

    public void setStatus(int status) {
        this.status = status;
    }

    public Date getEndTime() {
        return endTime;
    }

    public void setEndTime(Date endTime) {
        this.endTime = endTime;
    }

    public int getProcessId() {
        return processId;
    }

    public void setProcessId(int processId) {
        this.processId = processId;
    }

    public String getAppIds() {
        return appIds;
    }

    public void setAppIds(String appIds) {
        this.appIds = appIds;
    }

    /**
     * package response command
     *
     * @return command of type TASK_EXECUTE_RESPONSE whose body is the
     *         serialized form of this object
     */
    public Command convert2Command(){
        Command cmd = new Command();
        cmd.setType(CommandType.TASK_EXECUTE_RESPONSE);
        cmd.setBody(FastJsonSerializer.serialize(this));
        return cmd;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("TaskExecuteResponseCommand{");
        sb.append("taskInstanceId=").append(taskInstanceId);
        sb.append(", status=").append(status);
        sb.append(", endTime=").append(endTime);
        sb.append(", processId=").append(processId);
        sb.append(", appIds='").append(appIds).append('\'');
        sb.append('}');
        return sb.toString();
    }
}
|
||||
|
|
@ -0,0 +1,250 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.remote.command;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* master/worker task transport
|
||||
*/
|
||||
public class TaskInfo implements Serializable{
|
||||
|
||||
/**
|
||||
* task instance id
|
||||
*/
|
||||
private Integer taskId;
|
||||
|
||||
|
||||
/**
|
||||
* task name
|
||||
*/
|
||||
private String taskName;
|
||||
|
||||
/**
|
||||
* task start time
|
||||
*/
|
||||
private Date startTime;
|
||||
|
||||
/**
|
||||
* task type
|
||||
*/
|
||||
private String taskType;
|
||||
|
||||
/**
|
||||
* task execute path
|
||||
*/
|
||||
private String executePath;
|
||||
|
||||
/**
|
||||
* task json
|
||||
*/
|
||||
private String taskJson;
|
||||
|
||||
|
||||
/**
|
||||
* process instance id
|
||||
*/
|
||||
private Integer processInstanceId;
|
||||
|
||||
|
||||
/**
|
||||
* process instance schedule time
|
||||
*/
|
||||
private Date scheduleTime;
|
||||
|
||||
/**
|
||||
* process instance global parameters
|
||||
*/
|
||||
private String globalParams;
|
||||
|
||||
|
||||
/**
|
||||
* execute user id
|
||||
*/
|
||||
private Integer executorId;
|
||||
|
||||
|
||||
/**
|
||||
* command type if complement
|
||||
*/
|
||||
private Integer cmdTypeIfComplement;
|
||||
|
||||
|
||||
/**
|
||||
* tenant code
|
||||
*/
|
||||
private String tenantCode;
|
||||
|
||||
/**
|
||||
* task queue
|
||||
*/
|
||||
private String queue;
|
||||
|
||||
|
||||
/**
|
||||
* process define id
|
||||
*/
|
||||
private Integer processDefineId;
|
||||
|
||||
/**
|
||||
* project id
|
||||
*/
|
||||
private Integer projectId;
|
||||
|
||||
public Integer getTaskId() {
|
||||
return taskId;
|
||||
}
|
||||
|
||||
public void setTaskId(Integer taskId) {
|
||||
this.taskId = taskId;
|
||||
}
|
||||
|
||||
public String getTaskName() {
|
||||
return taskName;
|
||||
}
|
||||
|
||||
public void setTaskName(String taskName) {
|
||||
this.taskName = taskName;
|
||||
}
|
||||
|
||||
public Date getStartTime() {
|
||||
return startTime;
|
||||
}
|
||||
|
||||
public void setStartTime(Date startTime) {
|
||||
this.startTime = startTime;
|
||||
}
|
||||
|
||||
public String getTaskType() {
|
||||
return taskType;
|
||||
}
|
||||
|
||||
public void setTaskType(String taskType) {
|
||||
this.taskType = taskType;
|
||||
}
|
||||
|
||||
public String getExecutePath() {
|
||||
return executePath;
|
||||
}
|
||||
|
||||
public void setExecutePath(String executePath) {
|
||||
this.executePath = executePath;
|
||||
}
|
||||
|
||||
public String getTaskJson() {
|
||||
return taskJson;
|
||||
}
|
||||
|
||||
public void setTaskJson(String taskJson) {
|
||||
this.taskJson = taskJson;
|
||||
}
|
||||
|
||||
public Integer getProcessInstanceId() {
|
||||
return processInstanceId;
|
||||
}
|
||||
|
||||
public void setProcessInstanceId(Integer processInstanceId) {
|
||||
this.processInstanceId = processInstanceId;
|
||||
}
|
||||
|
||||
public Date getScheduleTime() {
|
||||
return scheduleTime;
|
||||
}
|
||||
|
||||
public void setScheduleTime(Date scheduleTime) {
|
||||
this.scheduleTime = scheduleTime;
|
||||
}
|
||||
|
||||
public String getGlobalParams() {
|
||||
return globalParams;
|
||||
}
|
||||
|
||||
public void setGlobalParams(String globalParams) {
|
||||
this.globalParams = globalParams;
|
||||
}
|
||||
|
||||
public String getTenantCode() {
|
||||
return tenantCode;
|
||||
}
|
||||
|
||||
public void setTenantCode(String tenantCode) {
|
||||
this.tenantCode = tenantCode;
|
||||
}
|
||||
|
||||
public String getQueue() {
|
||||
return queue;
|
||||
}
|
||||
|
||||
public void setQueue(String queue) {
|
||||
this.queue = queue;
|
||||
}
|
||||
|
||||
public Integer getProcessDefineId() {
|
||||
return processDefineId;
|
||||
}
|
||||
|
||||
public void setProcessDefineId(Integer processDefineId) {
|
||||
this.processDefineId = processDefineId;
|
||||
}
|
||||
|
||||
public Integer getProjectId() {
|
||||
return projectId;
|
||||
}
|
||||
|
||||
public void setProjectId(Integer projectId) {
|
||||
this.projectId = projectId;
|
||||
}
|
||||
|
||||
public Integer getExecutorId() {
|
||||
return executorId;
|
||||
}
|
||||
|
||||
public void setExecutorId(Integer executorId) {
|
||||
this.executorId = executorId;
|
||||
}
|
||||
|
||||
public Integer getCmdTypeIfComplement() {
|
||||
return cmdTypeIfComplement;
|
||||
}
|
||||
|
||||
public void setCmdTypeIfComplement(Integer cmdTypeIfComplement) {
|
||||
this.cmdTypeIfComplement = cmdTypeIfComplement;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "TaskInfo{" +
|
||||
"taskId=" + taskId +
|
||||
", taskName='" + taskName + '\'' +
|
||||
", startTime=" + startTime +
|
||||
", taskType='" + taskType + '\'' +
|
||||
", executePath='" + executePath + '\'' +
|
||||
", taskJson='" + taskJson + '\'' +
|
||||
", processInstanceId=" + processInstanceId +
|
||||
", scheduleTime=" + scheduleTime +
|
||||
", globalParams='" + globalParams + '\'' +
|
||||
", executorId=" + executorId +
|
||||
", cmdTypeIfComplement=" + cmdTypeIfComplement +
|
||||
", tenantCode='" + tenantCode + '\'' +
|
||||
", queue='" + queue + '\'' +
|
||||
", processDefineId=" + processDefineId +
|
||||
", projectId=" + projectId +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1 @@
|
|||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.remote.utils.FastJsonSerializer;
import java.io.Serializable;
/**
 * Request command asking a worker to kill a running task instance.
 */
public class TaskKillRequestCommand implements Serializable {

    /**
     * id of the task instance to be killed
     */
    private int taskInstanceId;

    public int getTaskInstanceId() {
        return taskInstanceId;
    }

    public void setTaskInstanceId(int taskInstanceId) {
        this.taskInstanceId = taskInstanceId;
    }

    /**
     * Wrap this request into a transport {@link Command}.
     *
     * @return command carrying the serialized request body
     */
    public Command convert2Command() {
        Command cmd = new Command();
        cmd.setType(CommandType.TASK_KILL_REQUEST);
        cmd.setBody(FastJsonSerializer.serialize(this));
        return cmd;
    }

    @Override
    public String toString() {
        return "TaskKillRequestCommand{" + "taskInstanceId=" + taskInstanceId + '}';
    }
}
|
||||
|
|
@ -0,0 +1 @@
|
|||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.remote.command;
import org.apache.dolphinscheduler.remote.utils.FastJsonSerializer;
import java.io.Serializable;
import java.util.Date;
import java.util.List;
/**
 * Response command reporting the outcome of a task kill request.
 */
public class TaskKillResponseCommand implements Serializable {

    /**
     * id of the task instance that was killed
     */
    private int taskInstanceId;

    /**
     * host on which the task instance was running
     */
    private String host;

    /**
     * execution status after the kill attempt
     */
    private int status;

    /**
     * operating system process id of the task
     */
    private int processId;

    /**
     * ids in external resource managers, for example YARN application ids
     */
    protected List<String> appIds;

    public int getTaskInstanceId() {
        return taskInstanceId;
    }

    public void setTaskInstanceId(int taskInstanceId) {
        this.taskInstanceId = taskInstanceId;
    }

    public String getHost() {
        return host;
    }

    public void setHost(String host) {
        this.host = host;
    }

    public int getStatus() {
        return status;
    }

    public void setStatus(int status) {
        this.status = status;
    }

    public int getProcessId() {
        return processId;
    }

    public void setProcessId(int processId) {
        this.processId = processId;
    }

    public List<String> getAppIds() {
        return appIds;
    }

    public void setAppIds(List<String> appIds) {
        this.appIds = appIds;
    }

    /**
     * Wrap this response into a transport {@link Command}.
     *
     * @return command carrying the serialized response body
     */
    public Command convert2Command() {
        Command cmd = new Command();
        cmd.setType(CommandType.TASK_KILL_RESPONSE);
        cmd.setBody(FastJsonSerializer.serialize(this));
        return cmd;
    }

    @Override
    public String toString() {
        return "TaskKillResponseCommand{" +
                "taskInstanceId=" + taskInstanceId +
                ", host='" + host + '\'' +
                ", status=" + status +
                ", processId=" + processId +
                ", appIds=" + appIds +
                '}';
    }
}
|
||||
|
|
@ -18,7 +18,13 @@
|
|||
package org.apache.dolphinscheduler.remote.future;
|
||||
|
||||
import org.apache.dolphinscheduler.remote.command.Command;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.*;
|
||||
|
||||
/**
|
||||
|
|
@ -26,6 +32,8 @@ import java.util.concurrent.*;
|
|||
*/
|
||||
public class ResponseFuture {
|
||||
|
||||
private final static Logger LOGGER = LoggerFactory.getLogger(ResponseFuture.class);
|
||||
|
||||
private final static ConcurrentHashMap<Long,ResponseFuture> FUTURE_TABLE = new ConcurrentHashMap<>(256);
|
||||
|
||||
/**
|
||||
|
|
@ -161,4 +169,44 @@ public class ResponseFuture {
|
|||
this.releaseSemaphore.release();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ResponseFuture{" +
|
||||
"opaque=" + opaque +
|
||||
", timeoutMillis=" + timeoutMillis +
|
||||
", invokeCallback=" + invokeCallback +
|
||||
", releaseSemaphore=" + releaseSemaphore +
|
||||
", latch=" + latch +
|
||||
", beginTimestamp=" + beginTimestamp +
|
||||
", responseCommand=" + responseCommand +
|
||||
", sendOk=" + sendOk +
|
||||
", cause=" + cause +
|
||||
'}';
|
||||
}
|
||||
|
||||
/**
|
||||
* scan future table
|
||||
*/
|
||||
public static void scanFutureTable(){
|
||||
final List<ResponseFuture> futureList = new LinkedList<>();
|
||||
Iterator<Map.Entry<Long, ResponseFuture>> it = FUTURE_TABLE.entrySet().iterator();
|
||||
while (it.hasNext()) {
|
||||
Map.Entry<Long, ResponseFuture> next = it.next();
|
||||
ResponseFuture future = next.getValue();
|
||||
if ((future.getBeginTimestamp() + future.getTimeoutMillis() + 1000) <= System.currentTimeMillis()) {
|
||||
futureList.add(future);
|
||||
it.remove();
|
||||
LOGGER.warn("remove timeout request : {}", future);
|
||||
}
|
||||
}
|
||||
for (ResponseFuture future : futureList) {
|
||||
try {
|
||||
future.release();
|
||||
future.executeInvokeCallback();
|
||||
} catch (Throwable ex) {
|
||||
LOGGER.warn("scanFutureTable, execute callback error", ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -19,12 +19,19 @@ package org.apache.dolphinscheduler.remote.handler;
|
|||
import io.netty.channel.*;
|
||||
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
|
||||
import org.apache.dolphinscheduler.remote.command.Command;
|
||||
import org.apache.dolphinscheduler.remote.command.CommandType;
|
||||
import org.apache.dolphinscheduler.remote.future.ResponseFuture;
|
||||
import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor;
|
||||
import org.apache.dolphinscheduler.remote.utils.ChannelUtils;
|
||||
import org.apache.dolphinscheduler.remote.utils.Constants;
|
||||
import org.apache.dolphinscheduler.remote.utils.Pair;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.RejectedExecutionException;
|
||||
|
||||
/**
|
||||
* netty client request handler
|
||||
|
|
@ -44,9 +51,20 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
|
|||
*/
|
||||
private final ExecutorService callbackExecutor;
|
||||
|
||||
/**
|
||||
* processors
|
||||
*/
|
||||
private final ConcurrentHashMap<CommandType, Pair<NettyRequestProcessor, ExecutorService>> processors;
|
||||
|
||||
/**
|
||||
* default executor
|
||||
*/
|
||||
private final ExecutorService defaultExecutor = Executors.newFixedThreadPool(Constants.CPUS);
|
||||
|
||||
public NettyClientHandler(NettyRemotingClient nettyRemotingClient, ExecutorService callbackExecutor){
|
||||
this.nettyRemotingClient = nettyRemotingClient;
|
||||
this.callbackExecutor = callbackExecutor;
|
||||
this.processors = new ConcurrentHashMap();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -71,18 +89,43 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
|
|||
*/
|
||||
@Override
|
||||
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
|
||||
processReceived((Command)msg);
|
||||
processReceived(ctx.channel(), (Command)msg);
|
||||
}
|
||||
|
||||
/**
|
||||
* register processor
|
||||
*
|
||||
* @param commandType command type
|
||||
* @param processor processor
|
||||
*/
|
||||
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor) {
|
||||
this.registerProcessor(commandType, processor, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* register processor
|
||||
*
|
||||
* @param commandType command type
|
||||
* @param processor processor
|
||||
* @param executor thread executor
|
||||
*/
|
||||
public void registerProcessor(final CommandType commandType, final NettyRequestProcessor processor, final ExecutorService executor) {
|
||||
ExecutorService executorRef = executor;
|
||||
if(executorRef == null){
|
||||
executorRef = defaultExecutor;
|
||||
}
|
||||
this.processors.putIfAbsent(commandType, new Pair<>(processor, executorRef));
|
||||
}
|
||||
|
||||
/**
|
||||
* process received logic
|
||||
*
|
||||
* @param responseCommand responseCommand
|
||||
* @param command command
|
||||
*/
|
||||
private void processReceived(final Command responseCommand) {
|
||||
ResponseFuture future = ResponseFuture.getFuture(responseCommand.getOpaque());
|
||||
private void processReceived(final Channel channel, final Command command) {
|
||||
ResponseFuture future = ResponseFuture.getFuture(command.getOpaque());
|
||||
if(future != null){
|
||||
future.setResponseCommand(responseCommand);
|
||||
future.setResponseCommand(command);
|
||||
future.release();
|
||||
if(future.getInvokeCallback() != null){
|
||||
this.callbackExecutor.submit(new Runnable() {
|
||||
|
|
@ -92,10 +135,30 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
|
|||
}
|
||||
});
|
||||
} else{
|
||||
future.putResponse(responseCommand);
|
||||
future.putResponse(command);
|
||||
}
|
||||
} else{
|
||||
logger.warn("receive response {}, but not matched any request ", responseCommand);
|
||||
processByCommandType(channel, command);
|
||||
}
|
||||
}
|
||||
|
||||
public void processByCommandType(final Channel channel, final Command command) {
|
||||
final Pair<NettyRequestProcessor, ExecutorService> pair = processors.get(command.getType());
|
||||
if (pair != null) {
|
||||
Runnable run = () -> {
|
||||
try {
|
||||
pair.getLeft().process(channel, command);
|
||||
} catch (Throwable e) {
|
||||
logger.error(String.format("process command %s exception", command), e);
|
||||
}
|
||||
};
|
||||
try {
|
||||
pair.getRight().submit(run);
|
||||
} catch (RejectedExecutionException e) {
|
||||
logger.warn("thread pool is full, discard command {} from {}", command, ChannelUtils.getRemoteAddress(channel));
|
||||
}
|
||||
} else {
|
||||
logger.warn("receive response {}, but not matched any request ", command);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -112,30 +175,4 @@ public class NettyClientHandler extends ChannelInboundHandlerAdapter {
|
|||
ctx.channel().close();
|
||||
}
|
||||
|
||||
/**
|
||||
* channel write changed
|
||||
*
|
||||
* @param ctx channel handler context
|
||||
* @throws Exception
|
||||
*/
|
||||
@Override
|
||||
public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception {
|
||||
Channel ch = ctx.channel();
|
||||
ChannelConfig config = ch.config();
|
||||
|
||||
if (!ch.isWritable()) {
|
||||
if (logger.isWarnEnabled()) {
|
||||
logger.warn("{} is not writable, over high water level : {}",
|
||||
new Object[]{ch, config.getWriteBufferHighWaterMark()});
|
||||
}
|
||||
|
||||
config.setAutoRead(false);
|
||||
} else {
|
||||
if (logger.isWarnEnabled()) {
|
||||
logger.warn("{} is writable, to low water : {}",
|
||||
new Object[]{ch, config.getWriteBufferLowWaterMark()});
|
||||
}
|
||||
config.setAutoRead(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -98,7 +98,7 @@ public class NettyServerHandler extends ChannelInboundHandlerAdapter {
|
|||
if(executorRef == null){
|
||||
executorRef = nettyRemotingServer.getDefaultExecutor();
|
||||
}
|
||||
this.processors.putIfAbsent(commandType, new Pair<NettyRequestProcessor, ExecutorService>(processor, executorRef));
|
||||
this.processors.putIfAbsent(commandType, new Pair<>(processor, executorRef));
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -1,96 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.dolphinscheduler.remote.utils;
|
||||
|
||||
import java.io.Serializable;
|
||||
|
||||
/**
|
||||
* server address
|
||||
*/
|
||||
public class Address implements Serializable {
|
||||
|
||||
/**
|
||||
* host
|
||||
*/
|
||||
private String host;
|
||||
|
||||
/**
|
||||
* port
|
||||
*/
|
||||
private int port;
|
||||
|
||||
public Address(){
|
||||
//NOP
|
||||
}
|
||||
|
||||
public Address(String host, int port){
|
||||
this.host = host;
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
public String getHost() {
|
||||
return host;
|
||||
}
|
||||
|
||||
public void setHost(String host) {
|
||||
this.host = host;
|
||||
}
|
||||
|
||||
public int getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
public void setPort(int port) {
|
||||
this.port = port;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
final int prime = 31;
|
||||
int result = 1;
|
||||
result = prime * result + ((host == null) ? 0 : host.hashCode());
|
||||
result = prime * result + port;
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj) {
|
||||
return true;
|
||||
}
|
||||
if (obj == null) {
|
||||
return false;
|
||||
}
|
||||
if (getClass() != obj.getClass()) {
|
||||
return false;
|
||||
}
|
||||
Address other = (Address) obj;
|
||||
if (host == null) {
|
||||
if (other.host != null) {
|
||||
return false;
|
||||
}
|
||||
} else if (!host.equals(other.host)) {
|
||||
return false;
|
||||
}
|
||||
return port == other.port;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Address [host=" + host + ", port=" + port + "]";
|
||||
}
|
||||
}
|
||||
|
|
@ -49,9 +49,9 @@ public class ChannelUtils {
|
|||
* @param channel channel
|
||||
* @return address
|
||||
*/
|
||||
public static Address toAddress(Channel channel){
|
||||
public static Host toAddress(Channel channel){
|
||||
InetSocketAddress socketAddress = ((InetSocketAddress)channel.remoteAddress());
|
||||
return new Address(socketAddress.getAddress().getHostAddress(), socketAddress.getPort());
|
||||
return new Host(socketAddress.getAddress().getHostAddress(), socketAddress.getPort());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -38,4 +38,7 @@ public class Constants {
|
|||
*/
|
||||
public static final int CPUS = Runtime.getRuntime().availableProcessors();
|
||||
|
||||
|
||||
public static final String LOCAL_ADDRESS = IPUtils.getFirstNoLoopbackIP4Address();
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ public class FastJsonSerializer {
|
|||
* @return deserialize type
|
||||
*/
|
||||
public static <T> T deserialize(byte[] src, Class<T> clazz) {
|
||||
return JSON.parseObject(new String(src, Constants.UTF8), clazz);
|
||||
return JSON.parseObject(src, clazz);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.dolphinscheduler.remote.utils;
|
||||
|
||||
import java.io.Serializable;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* server address
|
||||
*/
|
||||
/**
 * Server endpoint holder in "ip:port" form.
 */
public class Host implements Serializable {

    /**
     * cached "ip:port" string, kept in sync by the ip/port setters
     */
    private String address;

    /**
     * ip part
     */
    private String ip;

    /**
     * port part
     */
    private int port;

    public Host() {
    }

    public Host(String ip, int port) {
        this.ip = ip;
        this.port = port;
        this.address = ip + ":" + port;
    }

    public String getAddress() {
        return address;
    }

    public void setAddress(String address) {
        this.address = address;
    }

    public String getIp() {
        return ip;
    }

    public void setIp(String ip) {
        this.ip = ip;
        this.address = ip + ":" + port;
    }

    public int getPort() {
        return port;
    }

    public void setPort(int port) {
        this.port = port;
        this.address = ip + ":" + port;
    }

    /**
     * Parse an "ip:port" string into a {@link Host}.
     *
     * @param address address string, must contain exactly one ':'
     * @return parsed host
     * @throws IllegalArgumentException when the address is null or malformed
     */
    public static Host of(String address) {
        if (address == null) {
            throw new IllegalArgumentException("Host : address is null.");
        }
        String[] segments = address.split(":");
        if (segments.length != 2) {
            throw new IllegalArgumentException(String.format("Host : %s illegal.", address));
        }
        return new Host(segments[0], Integer.parseInt(segments[1]));
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        return Objects.equals(address, ((Host) o).address);
    }

    @Override
    public int hashCode() {
        return Objects.hash(address);
    }

    @Override
    public String toString() {
        return "Host{" + "address='" + address + '\'' + '}';
    }
}
|
||||
|
|
@ -0,0 +1,142 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.dolphinscheduler.remote.utils;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.net.*;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Enumeration;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
public class IPUtils {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(IPUtils.class);
|
||||
|
||||
private static String IP_REGEX = "([1-9]|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])(\\.(\\d|[1-9]\\d|1\\d{2}|2[0-4]\\d|25[0-5])){3}";
|
||||
|
||||
private static String LOCAL_HOST = "unknown";
|
||||
|
||||
static {
|
||||
String host = System.getenv("HOSTNAME");
|
||||
if (isNotEmpty(host)) {
|
||||
LOCAL_HOST = host;
|
||||
} else {
|
||||
|
||||
try {
|
||||
String hostName = InetAddress.getLocalHost().getHostName();
|
||||
if (isNotEmpty(hostName)) {
|
||||
LOCAL_HOST = hostName;
|
||||
}
|
||||
} catch (UnknownHostException e) {
|
||||
logger.error("get hostName error!", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static String getLocalHost() {
|
||||
return LOCAL_HOST;
|
||||
}
|
||||
|
||||
|
||||
public static String getFirstNoLoopbackIP4Address() {
|
||||
Collection<String> allNoLoopbackIP4Addresses = getNoLoopbackIP4Addresses();
|
||||
if (allNoLoopbackIP4Addresses.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
return allNoLoopbackIP4Addresses.iterator().next();
|
||||
}
|
||||
|
||||
public static Collection<String> getNoLoopbackIP4Addresses() {
|
||||
Collection<String> noLoopbackIP4Addresses = new ArrayList<>();
|
||||
Collection<InetAddress> allInetAddresses = getAllHostAddress();
|
||||
|
||||
for (InetAddress address : allInetAddresses) {
|
||||
if (!address.isLoopbackAddress() && !address.isSiteLocalAddress()
|
||||
&& !Inet6Address.class.isInstance(address)) {
|
||||
noLoopbackIP4Addresses.add(address.getHostAddress());
|
||||
}
|
||||
}
|
||||
if (noLoopbackIP4Addresses.isEmpty()) {
|
||||
for (InetAddress address : allInetAddresses) {
|
||||
if (!address.isLoopbackAddress() && !Inet6Address.class.isInstance(address)) {
|
||||
noLoopbackIP4Addresses.add(address.getHostAddress());
|
||||
}
|
||||
}
|
||||
}
|
||||
return noLoopbackIP4Addresses;
|
||||
}
|
||||
|
||||
public static Collection<InetAddress> getAllHostAddress() {
|
||||
try {
|
||||
Enumeration<NetworkInterface> networkInterfaces = NetworkInterface.getNetworkInterfaces();
|
||||
Collection<InetAddress> addresses = new ArrayList<>();
|
||||
|
||||
while (networkInterfaces.hasMoreElements()) {
|
||||
NetworkInterface networkInterface = networkInterfaces.nextElement();
|
||||
Enumeration<InetAddress> inetAddresses = networkInterface.getInetAddresses();
|
||||
while (inetAddresses.hasMoreElements()) {
|
||||
InetAddress inetAddress = inetAddresses.nextElement();
|
||||
addresses.add(inetAddress);
|
||||
}
|
||||
}
|
||||
|
||||
return addresses;
|
||||
} catch (SocketException e) {
|
||||
throw new RuntimeException(e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
public static String getIpByHostName(String host) {
|
||||
InetAddress address = null;
|
||||
try {
|
||||
address = InetAddress.getByName(host);
|
||||
} catch (UnknownHostException e) {
|
||||
logger.error("get IP error", e);
|
||||
}
|
||||
if (address == null) {
|
||||
return "";
|
||||
}
|
||||
return address.getHostAddress();
|
||||
|
||||
}
|
||||
|
||||
private static boolean isEmpty(final CharSequence cs) {
|
||||
return cs == null || cs.length() == 0;
|
||||
}
|
||||
|
||||
private static boolean isNotEmpty(final CharSequence cs) {
|
||||
return !isEmpty(cs);
|
||||
}
|
||||
|
||||
public static boolean isIp(String addr) {
|
||||
if (addr.length() < 7 || addr.length() > 15 || "".equals(addr)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
Pattern pat = Pattern.compile(IP_REGEX);
|
||||
|
||||
Matcher mat = pat.matcher(addr);
|
||||
|
||||
boolean ipAddress = mat.find();
|
||||
|
||||
return ipAddress;
|
||||
}
|
||||
}
|
||||
|
|
@ -50,4 +50,8 @@ public class Pair<L, R> {
|
|||
public void setRight(R right) {
|
||||
this.right = right;
|
||||
}
|
||||
|
||||
public static <L, R> Pair of(L left, R right){
|
||||
return new Pair(left, right);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff. Show More
Loading…
Reference in New Issue