test
commit
152c814126
|
|
@ -45,17 +45,29 @@ jobs:
|
|||
Compile-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/checkout@v2
|
||||
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
|
||||
- name: checkout submodules
|
||||
shell: bash
|
||||
run: |
|
||||
git submodule sync --recursive
|
||||
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
|
||||
- name: Set up JDK 1.8
|
||||
uses: actions/setup-java@v1
|
||||
with:
|
||||
java-version: 1.8
|
||||
- name: Compile
|
||||
run: mvn -U -B -T 1C clean install -Prelease -Dmaven.compile.fork=true -Dmaven.test.skip=true
|
||||
run: mvn -B clean compile package -Prelease -Dmaven.test.skip=true
|
||||
License-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/checkout@v2
|
||||
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
|
||||
- name: checkout submodules
|
||||
shell: bash
|
||||
run: |
|
||||
git submodule sync --recursive
|
||||
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
|
||||
- name: Set up JDK 1.8
|
||||
uses: actions/setup-java@v1
|
||||
with:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,75 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
on: ["pull_request"]
|
||||
env:
|
||||
DOCKER_DIR: ./docker
|
||||
LOG_DIR: /tmp/dolphinscheduler
|
||||
|
||||
name: e2e Test
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
|
||||
- name: checkout submodules
|
||||
shell: bash
|
||||
run: |
|
||||
git submodule sync --recursive
|
||||
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
|
||||
- uses: actions/cache@v1
|
||||
with:
|
||||
path: ~/.m2/repository
|
||||
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-maven-
|
||||
- name: Build Image
|
||||
run: |
|
||||
export VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
|
||||
sh ./dockerfile/hooks/build
|
||||
- name: Docker Run
|
||||
run: |
|
||||
VERSION=`cat $(pwd)/pom.xml| grep "SNAPSHOT</version>" | awk -F "-SNAPSHOT" '{print $1}' | awk -F ">" '{print $2}'`
|
||||
mkdir -p /tmp/logs
|
||||
docker run -dit -e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -v /tmp/logs:/opt/dolphinscheduler/logs -p 8888:8888 dolphinscheduler:$VERSION all
|
||||
- name: Check Server Status
|
||||
run: sh ./dockerfile/hooks/check
|
||||
- name: Prepare e2e env
|
||||
run: |
|
||||
sudo apt-get install -y libxss1 libappindicator1 libindicator7 xvfb unzip
|
||||
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
|
||||
sudo dpkg -i google-chrome*.deb
|
||||
sudo apt-get install -f -y
|
||||
wget -N https://chromedriver.storage.googleapis.com/80.0.3987.106/chromedriver_linux64.zip
|
||||
unzip chromedriver_linux64.zip
|
||||
sudo mv -f chromedriver /usr/local/share/chromedriver
|
||||
sudo ln -s /usr/local/share/chromedriver /usr/local/bin/chromedriver
|
||||
- name: Run e2e Test
|
||||
run: cd ./e2e && mvn -B clean test
|
||||
- name: Collect logs
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: dslogs
|
||||
path: /tmp/logs
|
||||
|
||||
|
||||
|
|
@ -34,7 +34,13 @@ jobs:
|
|||
matrix:
|
||||
os: [ubuntu-latest, macos-latest]
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/checkout@v2
|
||||
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
|
||||
- name: checkout submodules
|
||||
shell: bash
|
||||
run: |
|
||||
git submodule sync --recursive
|
||||
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v1
|
||||
with:
|
||||
|
|
@ -49,7 +55,13 @@ jobs:
|
|||
License-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- uses: actions/checkout@v2
|
||||
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
|
||||
- name: checkout submodules
|
||||
shell: bash
|
||||
run: |
|
||||
git submodule sync --recursive
|
||||
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
|
||||
- name: Set up JDK 1.8
|
||||
uses: actions/setup-java@v1
|
||||
with:
|
||||
|
|
|
|||
|
|
@ -20,7 +20,6 @@ on:
|
|||
push:
|
||||
branches:
|
||||
- dev
|
||||
- refactor-worker
|
||||
env:
|
||||
DOCKER_DIR: ./docker
|
||||
LOG_DIR: /tmp/dolphinscheduler
|
||||
|
|
@ -84,4 +83,4 @@ jobs:
|
|||
mkdir -p ${LOG_DIR}
|
||||
cd ${DOCKER_DIR}
|
||||
docker-compose logs db > ${LOG_DIR}/db.txt
|
||||
continue-on-error: true
|
||||
continue-on-error: true
|
||||
110
CONTRIBUTING.md
110
CONTRIBUTING.md
|
|
@ -1,35 +1,53 @@
|
|||
* First from the remote repository *https://github.com/apache/incubator-dolphinscheduler.git* fork code to your own repository
|
||||
|
||||
* there are three branches in the remote repository currently:
|
||||
* master normal delivery branch
|
||||
After the stable version is released, the code for the stable version branch is merged into the master branch.
|
||||
# Development
|
||||
|
||||
* dev daily development branch
|
||||
The daily development branch, the newly submitted code can pull requests to this branch.
|
||||
Start by forking the dolphinscheduler GitHub repository, make changes in a branch and then send a pull request.
|
||||
|
||||
## Set up your dolphinscheduler GitHub Repository
|
||||
|
||||
* Clone your own warehouse to your local
|
||||
There are three branches in the remote repository currently:
|
||||
- `master` : normal delivery branch. After the stable version is released, the code for the stable version branch is merged into the master branch.
|
||||
|
||||
- `dev` : daily development branch. The daily development branch, the newly submitted code can pull requests to this branch.
|
||||
|
||||
- `x.x.x-release` : the stable release version.
|
||||
|
||||
`git clone https://github.com/apache/incubator-dolphinscheduler.git`
|
||||
So, you should fork the `dev` branch.
|
||||
|
||||
* Add remote repository address, named upstream
|
||||
|
||||
`git remote add upstream https://github.com/apache/incubator-dolphinscheduler.git`
|
||||
|
||||
* View repository:
|
||||
|
||||
`git remote -v`
|
||||
|
||||
> There will be two repositories at this time: origin (your own warehouse) and upstream (remote repository)
|
||||
|
||||
* Get/update remote repository code (already the latest code, skip it)
|
||||
|
||||
`git fetch upstream`
|
||||
|
||||
|
||||
* Synchronize remote repository code to local repository
|
||||
After forking the [dolphinscheduler upstream source repository](https://github.com/apache/incubator-dolphinscheduler/fork) to your personal repository, you can set your personal development environment.
|
||||
|
||||
```sh
|
||||
$ cd <your work direcotry>
|
||||
$ git clone < your personal forked dolphinscheduler repo>
|
||||
$ cd incubator-dolphinscheduler
|
||||
```
|
||||
|
||||
## Set git remote as ``upstream``
|
||||
|
||||
Add remote repository address, named upstream
|
||||
|
||||
```sh
|
||||
git remote add upstream https://github.com/apache/incubator-dolphinscheduler.git
|
||||
```
|
||||
|
||||
View repository:
|
||||
|
||||
```sh
|
||||
git remote -v
|
||||
```
|
||||
|
||||
There will be two repositories at this time: origin (your own warehouse) and upstream (remote repository)
|
||||
|
||||
Get/update remote repository code (already the latest code, skip it).
|
||||
|
||||
|
||||
```sh
|
||||
git fetch upstream
|
||||
```
|
||||
|
||||
Synchronize remote repository code to local repository
|
||||
|
||||
```sh
|
||||
git checkout origin/dev
|
||||
git merge --no-ff upstream/dev
|
||||
```
|
||||
|
|
@ -41,24 +59,46 @@ git checkout -b dev-1.0 upstream/dev-1.0
|
|||
git push --set-upstream origin dev1.0
|
||||
```
|
||||
|
||||
* After modifying the code locally, submit it to your own repository:
|
||||
## Create your feature branch
|
||||
Before making code changes, make sure you create a separate branch for them.
|
||||
|
||||
`git commit -m 'test commit'`
|
||||
`git push`
|
||||
```sh
|
||||
$ git checkout -b <your-feature>
|
||||
```
|
||||
|
||||
* Submit changes to the remote repository
|
||||
## Commit changes
|
||||
After modifying the code locally, submit it to your own repository:
|
||||
|
||||
```sh
|
||||
|
||||
git commit -m 'information about your feature'
|
||||
```
|
||||
|
||||
## Push to the branch
|
||||
|
||||
|
||||
Push your locally committed changes to the remote origin (your fork).
|
||||
|
||||
```
|
||||
$ git push origin <your-feature>
|
||||
```
|
||||
|
||||
## Create a pull request
|
||||
|
||||
After submitting changes to your remote repository, you should click on the new pull request On the following github page.
|
||||
|
||||
* On the github page, click on the new pull request.
|
||||
<p align = "center">
|
||||
<img src = "http://geek.analysys.cn/static/upload/221/2019-04-02/90f3abbf-70ef-4334-b8d6-9014c9cf4c7f.png"width ="60%"/>
|
||||
</ p>
|
||||
<img src = "http://geek.analysys.cn/static/upload/221/2019-04-02/90f3abbf-70ef-4334-b8d6-9014c9cf4c7f.png" width ="60%"/>
|
||||
</p>
|
||||
|
||||
|
||||
Select the modified local branch and the branch to merge past to create a pull request.
|
||||
|
||||
* Select the modified local branch and the branch to merge past to create a pull request.
|
||||
<p align = "center">
|
||||
<img src = "http://geek.analysys.cn/static/upload/221/2019-04-02/fe7eecfe-2720-4736-951b-b3387cf1ae41.png"width ="60%"/>
|
||||
</ p>
|
||||
<img src = "http://geek.analysys.cn/static/upload/221/2019-04-02/fe7eecfe-2720-4736-951b-b3387cf1ae41.png" width ="60%"/>
|
||||
</p>
|
||||
|
||||
* Next, the administrator is responsible for **merging** to complete the pull request
|
||||
Next, the administrator is responsible for **merging** to complete the pull request.
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
28
README.md
28
README.md
|
|
@ -17,7 +17,7 @@ Dolphin Scheduler Official Website
|
|||
|
||||
### Design features:
|
||||
|
||||
A distributed and easy-to-expand visual DAG workflow scheduling system. Dedicated to solving the complex dependencies in data processing, making the scheduling system `out of the box` for data processing.
|
||||
A distributed and easy-to-extend visual DAG workflow scheduling system. Dedicated to solving the complex dependencies in data processing, making the scheduling system `out of the box` for data processing.
|
||||
Its main objectives are as follows:
|
||||
|
||||
- Associate the Tasks according to the dependencies of the tasks in a DAG graph, which can visualize the running state of task in real time.
|
||||
|
|
@ -45,17 +45,16 @@ HA is supported by itself | All process definition operations are visualized, dr
|
|||
Overload processing: Task queue mechanism, the number of schedulable tasks on a single machine can be flexibly configured, when too many tasks will be cached in the task queue, will not cause machine jam. | One-click deployment | Supports traditional shell tasks, and also support big data platform task scheduling: MR, Spark, SQL (mysql, postgresql, hive, sparksql), Python, Procedure, Sub_Process | |
|
||||
|
||||
|
||||
|
||||
|
||||
### System partial screenshot
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
### Document
|
||||
|
||||
- <a href="https://dolphinscheduler.apache.org/en-us/docs/1.2.0/user_doc/backend-deployment.html" target="_blank">Backend deployment documentation</a>
|
||||
|
|
@ -100,16 +99,9 @@ It is because of the shoulders of these open source projects that the birth of t
|
|||
### Get Help
|
||||
1. Submit an issue
|
||||
1. Subscribe the mail list : https://dolphinscheduler.apache.org/en-us/docs/development/subscribe.html. then send mail to dev@dolphinscheduler.apache.org
|
||||
1. Contact WeChat group manager, ID 510570367. This is for Mandarin(CN) discussion.
|
||||
1. Contact WeChat(dailidong66). This is just for Mandarin(CN) discussion.
|
||||
|
||||
### License
|
||||
Please refer to [LICENSE](https://github.com/apache/incubator-dolphinscheduler/blob/dev/LICENSE) file.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -36,11 +36,19 @@ Dolphin Scheduler Official Website
|
|||
|
||||
### 系统部分截图
|
||||
|
||||

|
||||

|
||||
|
||||

|
||||

|
||||
|
||||

|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
### 文档
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,158 @@
|
|||
{
|
||||
"DOLPHIN": {
|
||||
"service": [],
|
||||
"DOLPHIN_API": [
|
||||
{
|
||||
"name": "dolphin_api_port_check",
|
||||
"label": "dolphin_api_port_check",
|
||||
"description": "dolphin_api_port_check.",
|
||||
"interval": 10,
|
||||
"scope": "ANY",
|
||||
"source": {
|
||||
"type": "PORT",
|
||||
"uri": "{{dolphin-application-api/server.port}}",
|
||||
"default_port": 12345,
|
||||
"reporting": {
|
||||
"ok": {
|
||||
"text": "TCP OK - {0:.3f}s response on port {1}"
|
||||
},
|
||||
"warning": {
|
||||
"text": "TCP OK - {0:.3f}s response on port {1}",
|
||||
"value": 1.5
|
||||
},
|
||||
"critical": {
|
||||
"text": "Connection failed: {0} to {1}:{2}",
|
||||
"value": 5.0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"DOLPHIN_LOGGER": [
|
||||
{
|
||||
"name": "dolphin_logger_port_check",
|
||||
"label": "dolphin_logger_port_check",
|
||||
"description": "dolphin_logger_port_check.",
|
||||
"interval": 10,
|
||||
"scope": "ANY",
|
||||
"source": {
|
||||
"type": "PORT",
|
||||
"uri": "{{dolphin-common/loggerserver.rpc.port}}",
|
||||
"default_port": 50051,
|
||||
"reporting": {
|
||||
"ok": {
|
||||
"text": "TCP OK - {0:.3f}s response on port {1}"
|
||||
},
|
||||
"warning": {
|
||||
"text": "TCP OK - {0:.3f}s response on port {1}",
|
||||
"value": 1.5
|
||||
},
|
||||
"critical": {
|
||||
"text": "Connection failed: {0} to {1}:{2}",
|
||||
"value": 5.0
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"DOLPHIN_MASTER": [
|
||||
{
|
||||
"name": "DOLPHIN_MASTER_CHECK",
|
||||
"label": "check dolphin scheduler master status",
|
||||
"description": "",
|
||||
"interval":10,
|
||||
"scope": "HOST",
|
||||
"enabled": true,
|
||||
"source": {
|
||||
"type": "SCRIPT",
|
||||
"path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py",
|
||||
"parameters": [
|
||||
|
||||
{
|
||||
"name": "connection.timeout",
|
||||
"display_name": "Connection Timeout",
|
||||
"value": 5.0,
|
||||
"type": "NUMERIC",
|
||||
"description": "The maximum time before this alert is considered to be CRITICAL",
|
||||
"units": "seconds",
|
||||
"threshold": "CRITICAL"
|
||||
},
|
||||
{
|
||||
"name": "alertName",
|
||||
"display_name": "alertName",
|
||||
"value": "DOLPHIN_MASTER",
|
||||
"type": "STRING",
|
||||
"description": "alert name"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"DOLPHIN_WORKER": [
|
||||
{
|
||||
"name": "DOLPHIN_WORKER_CHECK",
|
||||
"label": "check dolphin scheduler worker status",
|
||||
"description": "",
|
||||
"interval":10,
|
||||
"scope": "HOST",
|
||||
"enabled": true,
|
||||
"source": {
|
||||
"type": "SCRIPT",
|
||||
"path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py",
|
||||
"parameters": [
|
||||
|
||||
{
|
||||
"name": "connection.timeout",
|
||||
"display_name": "Connection Timeout",
|
||||
"value": 5.0,
|
||||
"type": "NUMERIC",
|
||||
"description": "The maximum time before this alert is considered to be CRITICAL",
|
||||
"units": "seconds",
|
||||
"threshold": "CRITICAL"
|
||||
},
|
||||
{
|
||||
"name": "alertName",
|
||||
"display_name": "alertName",
|
||||
"value": "DOLPHIN_WORKER",
|
||||
"type": "STRING",
|
||||
"description": "alert name"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"DOLPHIN_ALERT": [
|
||||
{
|
||||
"name": "DOLPHIN_DOLPHIN_ALERT_CHECK",
|
||||
"label": "check dolphin scheduler alert status",
|
||||
"description": "",
|
||||
"interval":10,
|
||||
"scope": "HOST",
|
||||
"enabled": true,
|
||||
"source": {
|
||||
"type": "SCRIPT",
|
||||
"path": "DOLPHIN/1.2.1/package/alerts/alert_dolphin_scheduler_status.py",
|
||||
"parameters": [
|
||||
|
||||
{
|
||||
"name": "connection.timeout",
|
||||
"display_name": "Connection Timeout",
|
||||
"value": 5.0,
|
||||
"type": "NUMERIC",
|
||||
"description": "The maximum time before this alert is considered to be CRITICAL",
|
||||
"units": "seconds",
|
||||
"threshold": "CRITICAL"
|
||||
},
|
||||
{
|
||||
"name": "alertName",
|
||||
"display_name": "alertName",
|
||||
"value": "DOLPHIN_ALERT",
|
||||
"type": "STRING",
|
||||
"description": "alert name"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,144 @@
|
|||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<configuration>
|
||||
<property>
|
||||
<name>alert.type</name>
|
||||
<value>EMAIL</value>
|
||||
<description>alert type is EMAIL/SMS</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mail.protocol</name>
|
||||
<value>SMTP</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mail.server.host</name>
|
||||
<value>xxx.xxx.com</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mail.server.port</name>
|
||||
<value>25</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mail.sender</name>
|
||||
<value>admin</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mail.user</name>
|
||||
<value>admin</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mail.passwd</name>
|
||||
<value>000000</value>
|
||||
<description></description>
|
||||
<property-type>PASSWORD</property-type>
|
||||
<value-attributes>
|
||||
<type>password</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mail.smtp.starttls.enable</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mail.smtp.ssl.enable</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mail.smtp.ssl.trust</name>
|
||||
<value>xxx.xxx.com</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>xls.file.path</name>
|
||||
<value>/tmp/xls</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>enterprise.wechat.enable</name>
|
||||
<value>false</value>
|
||||
<description></description>
|
||||
<value-attributes>
|
||||
<type>value-list</type>
|
||||
<entries>
|
||||
<entry>
|
||||
<value>true</value>
|
||||
<label>Enabled</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>false</value>
|
||||
<label>Disabled</label>
|
||||
</entry>
|
||||
</entries>
|
||||
<selection-cardinality>1</selection-cardinality>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>enterprise.wechat.corp.id</name>
|
||||
<value>wechatId</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>enterprise.wechat.secret</name>
|
||||
<value>secret</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>enterprise.wechat.agent.id</name>
|
||||
<value>agentId</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>enterprise.wechat.users</name>
|
||||
<value>wechatUsers</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
</configuration>
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<configuration>
|
||||
<property>
|
||||
<name>server.port</name>
|
||||
<value>12345</value>
|
||||
<description>
|
||||
server port
|
||||
</description>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
</property>
|
||||
<property>
|
||||
<name>server.servlet.session.timeout</name>
|
||||
<value>7200</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.servlet.multipart.max-file-size</name>
|
||||
<value>1024</value>
|
||||
<value-attributes>
|
||||
<unit>MB</unit>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.servlet.multipart.max-request-size</name>
|
||||
<value>1024</value>
|
||||
<value-attributes>
|
||||
<unit>MB</unit>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>server.jetty.max-http-post-size</name>
|
||||
<value>5000000</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.messages.encoding</name>
|
||||
<value>UTF-8</value>
|
||||
<description></description>
|
||||
</property>
|
||||
</configuration>
|
||||
|
|
@ -0,0 +1,467 @@
|
|||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<configuration>
|
||||
<property>
|
||||
<name>spring.datasource.initialSize</name>
|
||||
<value>5</value>
|
||||
<description>
|
||||
Init connection number
|
||||
</description>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.minIdle</name>
|
||||
<value>5</value>
|
||||
<description>
|
||||
Min connection number
|
||||
</description>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.maxActive</name>
|
||||
<value>50</value>
|
||||
<description>
|
||||
Max connection number
|
||||
</description>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.maxWait</name>
|
||||
<value>60000</value>
|
||||
<description>
|
||||
Max wait time for get a connection in milliseconds.
|
||||
If configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
|
||||
If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
|
||||
</description>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.timeBetweenEvictionRunsMillis</name>
|
||||
<value>60000</value>
|
||||
<description>
|
||||
Milliseconds for check to close free connections
|
||||
</description>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.timeBetweenConnectErrorMillis</name>
|
||||
<value>60000</value>
|
||||
<description>
|
||||
The Destroy thread detects the connection interval and closes the physical connection in milliseconds
|
||||
if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
|
||||
</description>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.minEvictableIdleTimeMillis</name>
|
||||
<value>300000</value>
|
||||
<description>
|
||||
The longest time a connection remains idle without being evicted, in milliseconds
|
||||
</description>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.validationQuery</name>
|
||||
<value>SELECT 1</value>
|
||||
<description>
|
||||
The SQL used to check whether the connection is valid requires a query statement.
|
||||
If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.validationQueryTimeout</name>
|
||||
<value>3</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
Check whether the connection is valid for timeout, in seconds
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.testWhileIdle</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
When applying for a connection,
|
||||
if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
|
||||
validation Query is performed to check whether the connection is valid
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.testOnBorrow</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
Execute validation to check if the connection is valid when applying for a connection
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.testOnReturn</name>
|
||||
<value>false</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
Execute validation to check if the connection is valid when the connection is returned
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.defaultAutoCommit</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.keepAlive</name>
|
||||
<value>false</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>spring.datasource.poolPreparedStatements</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
Open PSCache, specify count PSCache for every connection
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.maxPoolPreparedStatementPerConnectionSize</name>
|
||||
<value>20</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.spring.datasource.filters</name>
|
||||
<value>stat,wall,log4j</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>spring.datasource.connectionProperties</name>
|
||||
<value>druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mybatis-plus.mapper-locations</name>
|
||||
<value>classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.typeEnumsPackage</name>
|
||||
<value>org.apache.dolphinscheduler.*.enums</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.typeAliasesPackage</name>
|
||||
<value>org.apache.dolphinscheduler.dao.entity</value>
|
||||
<description>
|
||||
Entity scan, where multiple packages are separated by a comma or semicolon
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.global-config.db-config.id-type</name>
|
||||
<value>AUTO</value>
|
||||
<value-attributes>
|
||||
<type>value-list</type>
|
||||
<entries>
|
||||
<entry>
|
||||
<value>AUTO</value>
|
||||
<label>AUTO</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>INPUT</value>
|
||||
<label>INPUT</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>ID_WORKER</value>
|
||||
<label>ID_WORKER</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>UUID</value>
|
||||
<label>UUID</label>
|
||||
</entry>
|
||||
</entries>
|
||||
<selection-cardinality>1</selection-cardinality>
|
||||
</value-attributes>
|
||||
<description>
|
||||
Primary key type AUTO:" database ID AUTO ",
|
||||
INPUT:" user INPUT ID",
|
||||
ID_WORKER:" global unique ID (numeric type unique ID)",
|
||||
UUID:" global unique ID UUID";
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.global-config.db-config.field-strategy</name>
|
||||
<value>NOT_NULL</value>
|
||||
<value-attributes>
|
||||
<type>value-list</type>
|
||||
<entries>
|
||||
<entry>
|
||||
<value>IGNORED</value>
|
||||
<label>IGNORED</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>NOT_NULL</value>
|
||||
<label>NOT_NULL</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>NOT_EMPTY</value>
|
||||
<label>NOT_EMPTY</label>
|
||||
</entry>
|
||||
</entries>
|
||||
<selection-cardinality>1</selection-cardinality>
|
||||
</value-attributes>
|
||||
<description>
|
||||
Field policy IGNORED:" ignore judgment ",
|
||||
NOT_NULL:" not NULL judgment "),
|
||||
NOT_EMPTY:" not NULL judgment"
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.global-config.db-config.column-underline</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.global-config.db-config.logic-delete-value</name>
|
||||
<value>1</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.global-config.db-config.logic-not-delete-value</name>
|
||||
<value>0</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.global-config.db-config.banner</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mybatis-plus.configuration.map-underscore-to-camel-case</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.configuration.cache-enabled</name>
|
||||
<value>false</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.configuration.call-setters-on-nulls</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>mybatis-plus.configuration.jdbc-type-for-null</name>
|
||||
<value>null</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>master.exec.threads</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>master.exec.task.num</name>
|
||||
<value>20</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>master.heartbeat.interval</name>
|
||||
<value>10</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>master.task.commit.retryTimes</name>
|
||||
<value>5</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>master.task.commit.interval</name>
|
||||
<value>1000</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>master.max.cpuload.avg</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>master.reserved.memory</name>
|
||||
<value>0.1</value>
|
||||
<value-attributes>
|
||||
<type>float</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.exec.threads</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.heartbeat.interval</name>
|
||||
<value>10</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.fetch.task.num</name>
|
||||
<value>3</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.max.cpuload.avg</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>worker.reserved.memory</name>
|
||||
<value>0.1</value>
|
||||
<value-attributes>
|
||||
<type>float</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
</configuration>
|
||||
|
|
@ -0,0 +1,232 @@
|
|||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<configuration>
|
||||
<property>
|
||||
<name>dolphinscheduler.queue.impl</name>
|
||||
<value>zookeeper</value>
|
||||
<description>
|
||||
Task queue implementation, default "zookeeper"
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>zookeeper.dolphinscheduler.root</name>
|
||||
<value>/dolphinscheduler</value>
|
||||
<description>
|
||||
dolphinscheduler root directory
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>zookeeper.session.timeout</name>
|
||||
<value>300</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>zookeeper.connection.timeout</name>
|
||||
<value>300</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>zookeeper.retry.base.sleep</name>
|
||||
<value>100</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>zookeeper.retry.max.sleep</name>
|
||||
<value>30000</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>zookeeper.retry.maxtime</name>
|
||||
<value>5</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>res.upload.startup.type</name>
|
||||
<display-name>Choose Resource Upload Startup Type</display-name>
|
||||
<description>
|
||||
Resource upload startup type : HDFS,S3,NONE
|
||||
</description>
|
||||
<value>NONE</value>
|
||||
<value-attributes>
|
||||
<type>value-list</type>
|
||||
<entries>
|
||||
<entry>
|
||||
<value>HDFS</value>
|
||||
<label>HDFS</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>S3</value>
|
||||
<label>S3</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>NONE</value>
|
||||
<label>NONE</label>
|
||||
</entry>
|
||||
</entries>
|
||||
<selection-cardinality>1</selection-cardinality>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>hdfs.root.user</name>
|
||||
<value>hdfs</value>
|
||||
<description>
|
||||
Users who have permission to create directories under the HDFS root path
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>data.store2hdfs.basepath</name>
|
||||
<value>/dolphinscheduler</value>
|
||||
<description>
|
||||
Data base dir, resource file will store to this hadoop hdfs path, self configuration,
|
||||
please make sure the directory exists on hdfs and have read write permissions。
|
||||
"/dolphinscheduler" is recommended
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>data.basedir.path</name>
|
||||
<value>/tmp/dolphinscheduler</value>
|
||||
<description>
|
||||
User data directory path, self configuration,
|
||||
please make sure the directory exists and have read write permissions
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>hadoop.security.authentication.startup.state</name>
|
||||
<value>false</value>
|
||||
<value-attributes>
|
||||
<type>value-list</type>
|
||||
<entries>
|
||||
<entry>
|
||||
<value>true</value>
|
||||
<label>Enabled</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>false</value>
|
||||
<label>Disabled</label>
|
||||
</entry>
|
||||
</entries>
|
||||
<selection-cardinality>1</selection-cardinality>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>java.security.krb5.conf.path</name>
|
||||
<value>/opt/krb5.conf</value>
|
||||
<description>
|
||||
java.security.krb5.conf path
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>login.user.keytab.username</name>
|
||||
<value>hdfs-mycluster@ESZ.COM</value>
|
||||
<description>
|
||||
LoginUserFromKeytab user
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>login.user.keytab.path</name>
|
||||
<value>/opt/hdfs.headless.keytab</value>
|
||||
<description>
|
||||
LoginUserFromKeytab path
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>resource.view.suffixs</name>
|
||||
<value>txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties</value>
|
||||
<description></description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.defaultFS</name>
|
||||
<value>hdfs://mycluster:8020</value>
|
||||
<description>
|
||||
HA or single namenode,
|
||||
If namenode ha needs to copy core-site.xml and hdfs-site.xml to the conf directory,
|
||||
support s3,for example : s3a://dolphinscheduler
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3a.endpoint</name>
|
||||
<value>http://host:9010</value>
|
||||
<description>
|
||||
s3 need,s3 endpoint
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3a.access.key</name>
|
||||
<value>A3DXS30FO22544RE</value>
|
||||
<description>
|
||||
s3 need,s3 access key
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>fs.s3a.secret.key</name>
|
||||
<value>OloCLq3n+8+sdPHUhJ21XrSxTC+JK</value>
|
||||
<description>
|
||||
s3 need,s3 secret key
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>loggerserver.rpc.port</name>
|
||||
<value>50051</value>
|
||||
<value-attributes>
|
||||
<type>int</type>F
|
||||
</value-attributes>
|
||||
<description>
|
||||
</description>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
</configuration>
|
||||
|
|
@ -0,0 +1,123 @@
|
|||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<configuration>
|
||||
<property>
|
||||
<name>dolphin.database.type</name>
|
||||
<value>mysql</value>
|
||||
<description>Dolphin Scheduler DataBase Type Which Is Select</description>
|
||||
<display-name>Dolphin Database Type</display-name>
|
||||
<value-attributes>
|
||||
<type>value-list</type>
|
||||
<entries>
|
||||
<entry>
|
||||
<value>mysql</value>
|
||||
<label>Mysql</label>
|
||||
</entry>
|
||||
<entry>
|
||||
<value>postgresql</value>
|
||||
<label>Postgresql</label>
|
||||
</entry>
|
||||
</entries>
|
||||
<selection-cardinality>1</selection-cardinality>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dolphin.database.host</name>
|
||||
<value></value>
|
||||
<display-name>Dolphin Database Host</display-name>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dolphin.database.port</name>
|
||||
<value></value>
|
||||
<display-name>Dolphin Database Port</display-name>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dolphin.database.username</name>
|
||||
<value></value>
|
||||
<display-name>Dolphin Database Username</display-name>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dolphin.database.password</name>
|
||||
<value></value>
|
||||
<display-name>Dolphin Database Password</display-name>
|
||||
<property-type>PASSWORD</property-type>
|
||||
<value-attributes>
|
||||
<type>password</type>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dolphin.user</name>
|
||||
<value></value>
|
||||
<description>Which user to install and admin dolphin scheduler</description>
|
||||
<display-name>Deploy User</display-name>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
<property>
|
||||
<name>dolphin.group</name>
|
||||
<value></value>
|
||||
<description>Which user to install and admin dolphin scheduler</description>
|
||||
<display-name>Deploy Group</display-name>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dolphinscheduler-env-content</name>
|
||||
<display-name>Dolphinscheduler Env template</display-name>
|
||||
<description>This is the jinja template for dolphinscheduler.env.sh file</description>
|
||||
<value>#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
export HADOOP_HOME=/opt/soft/hadoop
|
||||
export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
|
||||
export SPARK_HOME1=/opt/soft/spark1
|
||||
export SPARK_HOME2=/opt/soft/spark2
|
||||
export PYTHON_HOME=/opt/soft/python
|
||||
export JAVA_HOME=/opt/soft/java
|
||||
export HIVE_HOME=/opt/soft/hive
|
||||
export FLINK_HOME=/opt/soft/flink</value>
|
||||
<value-attributes>
|
||||
<type>content</type>
|
||||
<empty-value-valid>false</empty-value-valid>
|
||||
<show-property-name>false</show-property-name>
|
||||
</value-attributes>
|
||||
<on-ambari-upgrade add="true"/>
|
||||
</property>
|
||||
</configuration>
|
||||
|
|
@ -0,0 +1,131 @@
|
|||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<configuration>
|
||||
<property>
|
||||
<name>org.quartz.scheduler.instanceName</name>
|
||||
<value>DolphinScheduler</value>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<!-- 列举枚举值 -->
|
||||
<name>org.quartz.scheduler.instanceId</name>
|
||||
<value>AUTO</value>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.scheduler.makeSchedulerThreadDaemon</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.jobStore.useProperties</name>
|
||||
<value>false</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.threadPool.class</name>
|
||||
<value>org.quartz.simpl.SimpleThreadPool</value>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.threadPool.makeThreadsDaemons</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.threadPool.threadCount</name>
|
||||
<value>25</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.threadPool.threadPriority</name>
|
||||
<value>5</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.jobStore.class</name>
|
||||
<value>org.quartz.impl.jdbcjobstore.JobStoreTX</value>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.jobStore.tablePrefix</name>
|
||||
<value>QRTZ_</value>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.jobStore.isClustered</name>
|
||||
<value>true</value>
|
||||
<value-attributes>
|
||||
<type>boolean</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.jobStore.misfireThreshold</name>
|
||||
<value>60000</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.jobStore.clusterCheckinInterval</name>
|
||||
<value>5000</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.jobStore.dataSource</name>
|
||||
<value>myDs</value>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.dataSource.myDs.connectionProvider.class</name>
|
||||
<value>org.apache.dolphinscheduler.server.quartz.DruidConnectionProvider</value>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.dataSource.myDs.maxConnections</name>
|
||||
<value>10</value>
|
||||
<value-attributes>
|
||||
<type>int</type>
|
||||
</value-attributes>
|
||||
<description></description>
|
||||
</property>
|
||||
<property>
|
||||
<name>org.quartz.dataSource.myDs.validationQuery</name>
|
||||
<value>select 1</value>
|
||||
<description></description>
|
||||
</property>
|
||||
</configuration>
|
||||
|
|
@ -0,0 +1,137 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<metainfo>
|
||||
<schemaVersion>2.0</schemaVersion>
|
||||
<services>
|
||||
<service>
|
||||
<name>DOLPHIN</name>
|
||||
<displayName>Dolphin Scheduler</displayName>
|
||||
<comment>分布式易扩展的可视化DAG工作流任务调度系统</comment>
|
||||
<version>1.2.1</version>
|
||||
<components>
|
||||
<component>
|
||||
<name>DOLPHIN_MASTER</name>
|
||||
<displayName>DS Master</displayName>
|
||||
<category>MASTER</category>
|
||||
<cardinality>1+</cardinality>
|
||||
<commandScript>
|
||||
<script>scripts/dolphin_master_service.py</script>
|
||||
<scriptType>PYTHON</scriptType>
|
||||
<timeout>600</timeout>
|
||||
</commandScript>
|
||||
</component>
|
||||
|
||||
<component>
|
||||
<name>DOLPHIN_LOGGER</name>
|
||||
<displayName>DS Logger</displayName>
|
||||
<category>SLAVE</category>
|
||||
<cardinality>1+</cardinality>
|
||||
<commandScript>
|
||||
<script>scripts/dolphin_logger_service.py</script>
|
||||
<scriptType>PYTHON</scriptType>
|
||||
<timeout>600</timeout>
|
||||
</commandScript>
|
||||
</component>
|
||||
|
||||
<component>
|
||||
<name>DOLPHIN_WORKER</name>
|
||||
<displayName>DS Worker</displayName>
|
||||
<category>SLAVE</category>
|
||||
<cardinality>1+</cardinality>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<name>DOLPHIN/DOLPHIN_LOGGER</name>
|
||||
<scope>host</scope>
|
||||
<auto-deploy>
|
||||
<enabled>true</enabled>
|
||||
</auto-deploy>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<commandScript>
|
||||
<script>scripts/dolphin_worker_service.py</script>
|
||||
<scriptType>PYTHON</scriptType>
|
||||
<timeout>600</timeout>
|
||||
</commandScript>
|
||||
</component>
|
||||
|
||||
<component>
|
||||
<name>DOLPHIN_ALERT</name>
|
||||
<displayName>DS Alert</displayName>
|
||||
<category>SLAVE</category>
|
||||
<cardinality>1</cardinality>
|
||||
<commandScript>
|
||||
<script>scripts/dolphin_alert_service.py</script>
|
||||
<scriptType>PYTHON</scriptType>
|
||||
<timeout>600</timeout>
|
||||
</commandScript>
|
||||
</component>
|
||||
|
||||
<component>
|
||||
<name>DOLPHIN_API</name>
|
||||
<displayName>DS_Api</displayName>
|
||||
<category>SLAVE</category>
|
||||
<cardinality>1</cardinality>
|
||||
<commandScript>
|
||||
<script>scripts/dolphin_api_service.py</script>
|
||||
<scriptType>PYTHON</scriptType>
|
||||
<timeout>600</timeout>
|
||||
</commandScript>
|
||||
</component>
|
||||
</components>
|
||||
|
||||
<requiredServices>
|
||||
<service>ZOOKEEPER</service>
|
||||
</requiredServices>
|
||||
|
||||
<osSpecifics>
|
||||
<osSpecific>
|
||||
<osFamily>any</osFamily>
|
||||
<packages>
|
||||
<package>
|
||||
<name>apache-dolphinscheduler-incubating-1.2.1*</name>
|
||||
</package>
|
||||
</packages>
|
||||
</osSpecific>
|
||||
</osSpecifics>
|
||||
|
||||
<configuration-dependencies>
|
||||
<config-type>dolphin-alert</config-type>
|
||||
<config-type>dolphin-app-api</config-type>
|
||||
<config-type>dolphin-app-dao</config-type>
|
||||
<config-type>dolphin-common</config-type>
|
||||
<config-type>dolphin-env</config-type>
|
||||
<config-type>dolphin-quartz</config-type>
|
||||
</configuration-dependencies>
|
||||
|
||||
<themes>
|
||||
<theme>
|
||||
<fileName>theme.json</fileName>
|
||||
<default>true</default>
|
||||
</theme>
|
||||
</themes>
|
||||
|
||||
<quickLinksConfigurations-dir>quicklinks</quickLinksConfigurations-dir>
|
||||
<quickLinksConfigurations>
|
||||
<quickLinksConfiguration>
|
||||
<fileName>quicklinks.json</fileName>
|
||||
<default>true</default>
|
||||
</quickLinksConfiguration>
|
||||
</quickLinksConfigurations>
|
||||
</service>
|
||||
</services>
|
||||
</metainfo>
|
||||
|
|
@ -0,0 +1,124 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
import socket
|
||||
import urllib2
|
||||
import os
|
||||
import logging
|
||||
import ambari_simplejson as json
|
||||
from resource_management.libraries.script.script import Script
|
||||
import sys
|
||||
reload(sys)
|
||||
sys.setdefaultencoding('utf-8')
|
||||
|
||||
logger = logging.getLogger('ambari_alerts')
|
||||
|
||||
config = Script.get_config()
|
||||
|
||||
|
||||
def get_tokens():
|
||||
"""
|
||||
Returns a tuple of tokens in the format {{site/property}} that will be used
|
||||
to build the dictionary passed into execute
|
||||
|
||||
:rtype tuple
|
||||
"""
|
||||
|
||||
def get_info(url, connection_timeout):
|
||||
response = None
|
||||
|
||||
try:
|
||||
response = urllib2.urlopen(url, timeout=connection_timeout)
|
||||
json_data = response.read()
|
||||
return json_data
|
||||
finally:
|
||||
if response is not None:
|
||||
try:
|
||||
response.close()
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
def execute(configurations={}, parameters={}, host_name=None):
  """
  Returns a tuple containing the result code and a pre-formatted result label

  Keyword arguments:
  configurations : a mapping of configuration key to value
  parameters : a mapping of script parameter key to value
  host_name : the name of this host where the alert is running

  :type configurations dict
  :type parameters dict
  :type host_name str
  """
  from resource_management.core import sudo

  alert_name = parameters['alertName']

  # Directory where dolphinscheduler-daemon.sh writes its pid files.
  dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"

  # Map the alert definition name onto the pid file of the daemon it watches.
  pid_file_names = {
    'DOLPHIN_MASTER': "master-server.pid",
    'DOLPHIN_WORKER': "worker-server.pid",
    'DOLPHIN_ALERT': "alert-server.pid",
    'DOLPHIN_LOGGER': "logger-server.pid",
    'DOLPHIN_API': "api-server.pid",
  }

  pid_file_name = pid_file_names.get(alert_name)
  pid_file_path = (dolphin_pidfile_dir + "/" + pid_file_name) if pid_file_name else ""

  is_running = True
  if not pid_file_path or not os.path.isfile(pid_file_path):
    is_running = False

  # Fix: previously the pid was initialised to the *string* "0" and
  # sudo.kill() was attempted even when the pid file was missing or
  # unreadable; the resulting TypeError is not an OSError, so the alert
  # crashed instead of reporting CRITICAL. Only read/probe when sensible.
  pid = None
  if is_running:
    try:
      pid = int(sudo.read_file(pid_file_path))
    except:
      is_running = False

  if pid is not None:
    try:
      # Kill will not actually kill the process.
      # From the doc:
      # If sig is 0, then no signal is sent, but error checking is still
      # performed; this can be used to check for the existence of a
      # process ID or process group ID.
      sudo.kill(pid, 0)
    except OSError:
      is_running = False

  if host_name is None:
    host_name = socket.getfqdn()

  result_code = "OK" if is_running else "CRITICAL"

  # Fix: label used to read "The comment ..." -- the daemon is a component.
  label = "The component {0} of DOLPHIN_SCHEDULER on {1} is {2}".format(alert_name, host_name, result_code)

  return ((result_code, [label]))
|
||||
|
||||
if __name__ == "__main__":
|
||||
pass
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
import time
|
||||
from resource_management import *
|
||||
|
||||
from dolphin_env import dolphin_env
|
||||
|
||||
|
||||
class DolphinAlertService(Script):
  """Ambari lifecycle handler for the DolphinScheduler alert-server daemon."""

  def install(self, env):
    """Install packages and open up the install tree for the service user."""
    import params
    env.set_params(params)
    self.install_packages(env)
    # NOTE(review): 777 on the whole install tree is very permissive --
    # presumably required so every daemon can write there; confirm before
    # tightening.
    Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

  def configure(self, env):
    """Render the DolphinScheduler directories and configuration files."""
    import params
    params.pika_slave = True
    env.set_params(params)

    dolphin_env()

  def start(self, env):
    """Start alert-server unless a live process already owns its pid file."""
    import params
    env.set_params(params)
    self.configure(env)
    # Guard command: succeeds (skipping the start) when the pid file exists
    # and the recorded pid is still present in the process table.
    no_op_test = format("ls {dolphin_pidfile_dir}/alert-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/alert-server.pid` | grep `cat {dolphin_pidfile_dir}/alert-server.pid` >/dev/null 2>&1")

    start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start alert-server")
    Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

  def stop(self, env):
    """Stop alert-server and give the JVM a moment to exit."""
    import params
    env.set_params(params)
    stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop alert-server")
    Execute(stop_cmd, user=params.dolphin_user)
    # Allow shutdown to complete before Ambari re-checks the status.
    time.sleep(5)

  def status(self, env):
    """Report liveness based on the daemon's pid file."""
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.dolphin_run_dir + "alert-server.pid")


if __name__ == "__main__":
  DolphinAlertService().execute()
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
import time
|
||||
from resource_management import *
|
||||
|
||||
from dolphin_env import dolphin_env
|
||||
|
||||
|
||||
class DolphinApiService(Script):
  """Ambari lifecycle handler for the DolphinScheduler api-server daemon."""

  def install(self, env):
    """Install packages and open up the install tree for the service user."""
    import params
    env.set_params(params)
    self.install_packages(env)
    # NOTE(review): 777 on the whole install tree is very permissive --
    # presumably required so every daemon can write there; confirm before
    # tightening.
    Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

  def configure(self, env):
    """Render the DolphinScheduler directories and configuration files."""
    import params
    params.pika_slave = True
    env.set_params(params)

    dolphin_env()

  def start(self, env):
    """Initialise/upgrade the metadata database, then start api-server."""
    import params
    env.set_params(params)
    self.configure(env)

    # init: create the DolphinScheduler database schema (runs every start;
    # the script itself is expected to be idempotent).
    init_cmd=format("sh " + params.dolphin_home + "/script/create-dolphinscheduler.sh")
    Execute(init_cmd, user=params.dolphin_user)

    # upgrade: apply any pending schema migrations.
    upgrade_cmd=format("sh " + params.dolphin_home + "/script/upgrade-dolphinscheduler.sh")
    Execute(upgrade_cmd, user=params.dolphin_user)

    # Guard command: succeeds (skipping the start) when the pid file exists
    # and the recorded pid is still present in the process table.
    no_op_test = format("ls {dolphin_pidfile_dir}/api-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/api-server.pid` | grep `cat {dolphin_pidfile_dir}/api-server.pid` >/dev/null 2>&1")

    start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start api-server")
    Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

  def stop(self, env):
    """Stop api-server and give the JVM a moment to exit."""
    import params
    env.set_params(params)
    stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop api-server")
    Execute(stop_cmd, user=params.dolphin_user)
    # Allow shutdown to complete before Ambari re-checks the status.
    time.sleep(5)

  def status(self, env):
    """Report liveness based on the daemon's pid file."""
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.dolphin_run_dir + "api-server.pid")


if __name__ == "__main__":
  DolphinApiService().execute()
|
||||
|
|
@ -0,0 +1,121 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
"""
|
||||
|
||||
from resource_management import *
|
||||
|
||||
|
||||
def dolphin_env():
|
||||
import params
|
||||
|
||||
Directory(params.dolphin_pidfile_dir,
|
||||
mode=0777,
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group,
|
||||
create_parents=True
|
||||
)
|
||||
Directory(params.dolphin_log_dir,
|
||||
mode=0777,
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group,
|
||||
create_parents=True
|
||||
)
|
||||
Directory(params.dolphin_conf_dir,
|
||||
mode=0777,
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group,
|
||||
create_parents=True
|
||||
)
|
||||
|
||||
|
||||
Directory(params.dolphin_alert_map['xls.file.path'],
|
||||
mode=0777,
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group,
|
||||
create_parents=True
|
||||
)
|
||||
Directory(params.dolphin_common_map['data.basedir.path'],
|
||||
mode=0777,
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group,
|
||||
create_parents=True
|
||||
)
|
||||
Directory(params.dolphin_common_map['data.download.basedir.path'],
|
||||
mode=0777,
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group,
|
||||
create_parents=True
|
||||
)
|
||||
Directory(params.dolphin_common_map['process.exec.basepath'],
|
||||
mode=0777,
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group,
|
||||
create_parents=True
|
||||
)
|
||||
|
||||
|
||||
File(format(params.dolphin_env_path),
|
||||
mode=0777,
|
||||
content=InlineTemplate(params.dolphin_env_content),
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group
|
||||
)
|
||||
|
||||
|
||||
File(format(params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh"),
|
||||
mode=0755,
|
||||
content=Template("dolphin-daemon.j2"),
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group
|
||||
)
|
||||
|
||||
|
||||
File(format(params.dolphin_conf_dir + "/alert.properties"),
|
||||
mode=0755,
|
||||
content=Template("alert.properties.j2"),
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group
|
||||
)
|
||||
|
||||
File(format(params.dolphin_conf_dir + "/application.properties"),
|
||||
mode=0755,
|
||||
content=Template("application.properties.j2"),
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group
|
||||
)
|
||||
|
||||
File(format(params.dolphin_conf_dir + "/application-api.properties"),
|
||||
mode=0755,
|
||||
content=Template("application-api.properties.j2"),
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group
|
||||
)
|
||||
|
||||
File(format(params.dolphin_conf_dir + "/common.properties"),
|
||||
mode=0755,
|
||||
content=Template("common.properties.j2"),
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group
|
||||
)
|
||||
|
||||
File(format(params.dolphin_conf_dir + "/quartz.properties"),
|
||||
mode=0755,
|
||||
content=Template("quartz.properties.j2"),
|
||||
owner=params.dolphin_user,
|
||||
group=params.dolphin_group
|
||||
)
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
import time
|
||||
from resource_management import *
|
||||
|
||||
from dolphin_env import dolphin_env
|
||||
|
||||
|
||||
class DolphinLoggerService(Script):
  """Ambari lifecycle handler for the DolphinScheduler logger-server daemon."""

  def install(self, env):
    """Install packages and open up the install tree for the service user."""
    import params
    env.set_params(params)
    self.install_packages(env)
    # NOTE(review): 777 on the whole install tree is very permissive --
    # presumably required so every daemon can write there; confirm before
    # tightening.
    Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

  def configure(self, env):
    """Render the DolphinScheduler directories and configuration files."""
    import params
    params.pika_slave = True
    env.set_params(params)

    dolphin_env()

  def start(self, env):
    """Start logger-server unless a live process already owns its pid file."""
    import params
    env.set_params(params)
    self.configure(env)
    # Guard command: succeeds (skipping the start) when the pid file exists
    # and the recorded pid is still present in the process table.
    no_op_test = format("ls {dolphin_pidfile_dir}/logger-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/logger-server.pid` | grep `cat {dolphin_pidfile_dir}/logger-server.pid` >/dev/null 2>&1")

    start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start logger-server")
    Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

  def stop(self, env):
    """Stop logger-server and give the JVM a moment to exit."""
    import params
    env.set_params(params)
    stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop logger-server")
    Execute(stop_cmd, user=params.dolphin_user)
    # Allow shutdown to complete before Ambari re-checks the status.
    time.sleep(5)

  def status(self, env):
    """Report liveness based on the daemon's pid file."""
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.dolphin_run_dir + "logger-server.pid")


if __name__ == "__main__":
  DolphinLoggerService().execute()
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
import time
|
||||
from resource_management import *
|
||||
|
||||
from dolphin_env import dolphin_env
|
||||
|
||||
|
||||
class DolphinMasterService(Script):
  """Ambari lifecycle handler for the DolphinScheduler master-server daemon."""

  def install(self, env):
    """Install packages and open up the install tree for the service user."""
    import params
    env.set_params(params)
    self.install_packages(env)
    # NOTE(review): 777 on the whole install tree is very permissive --
    # presumably required so every daemon can write there; confirm before
    # tightening.
    Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

  def configure(self, env):
    """Render the DolphinScheduler directories and configuration files."""
    import params
    params.pika_slave = True
    env.set_params(params)

    dolphin_env()

  def start(self, env):
    """Start master-server unless a live process already owns its pid file."""
    import params
    env.set_params(params)
    self.configure(env)
    # Guard command: succeeds (skipping the start) when the pid file exists
    # and the recorded pid is still present in the process table.
    no_op_test = format("ls {dolphin_pidfile_dir}/master-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/master-server.pid` | grep `cat {dolphin_pidfile_dir}/master-server.pid` >/dev/null 2>&1")
    start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start master-server")
    Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

  def stop(self, env):
    """Stop master-server and give the JVM a moment to exit."""
    import params
    env.set_params(params)
    stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop master-server")
    Execute(stop_cmd, user=params.dolphin_user)
    # Allow shutdown to complete before Ambari re-checks the status.
    time.sleep(5)

  def status(self, env):
    """Report liveness based on the daemon's pid file."""
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.dolphin_run_dir + "master-server.pid")


if __name__ == "__main__":
  DolphinMasterService().execute()
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
import time
|
||||
from resource_management import *
|
||||
|
||||
from dolphin_env import dolphin_env
|
||||
|
||||
|
||||
class DolphinWorkerService(Script):
  """Ambari lifecycle handler for the DolphinScheduler worker-server daemon."""

  def install(self, env):
    """Install packages and open up the install tree for the service user."""
    import params
    env.set_params(params)
    self.install_packages(env)
    # NOTE(review): 777 on the whole install tree is very permissive --
    # presumably required so every daemon can write there; confirm before
    # tightening.
    Execute(('chmod', '-R', '777', params.dolphin_home), user=params.dolphin_user, sudo=True)

  def configure(self, env):
    """Render the DolphinScheduler directories and configuration files."""
    import params
    params.pika_slave = True
    env.set_params(params)

    dolphin_env()

  def start(self, env):
    """Start worker-server unless a live process already owns its pid file."""
    import params
    env.set_params(params)
    self.configure(env)
    # Guard command: succeeds (skipping the start) when the pid file exists
    # and the recorded pid is still present in the process table.
    no_op_test = format("ls {dolphin_pidfile_dir}/worker-server.pid >/dev/null 2>&1 && ps `cat {dolphin_pidfile_dir}/worker-server.pid` | grep `cat {dolphin_pidfile_dir}/worker-server.pid` >/dev/null 2>&1")
    start_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh start worker-server")
    Execute(start_cmd, user=params.dolphin_user, not_if=no_op_test)

  def stop(self, env):
    """Stop worker-server and give the JVM a moment to exit."""
    import params
    env.set_params(params)
    stop_cmd = format("sh " + params.dolphin_bin_dir + "/dolphinscheduler-daemon.sh stop worker-server")
    Execute(stop_cmd, user=params.dolphin_user)
    # Allow shutdown to complete before Ambari re-checks the status.
    time.sleep(5)

  def status(self, env):
    """Report liveness based on the daemon's pid file."""
    import status_params
    env.set_params(status_params)
    check_process_status(status_params.dolphin_run_dir + "worker-server.pid")


if __name__ == "__main__":
  DolphinWorkerService().execute()
|
||||
|
|
@ -0,0 +1,150 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
|
||||
import sys
|
||||
from resource_management import *
|
||||
from resource_management.core.logger import Logger
|
||||
from resource_management.libraries.functions import default
|
||||
|
||||
Logger.initialize_logger()
|
||||
reload(sys)
|
||||
sys.setdefaultencoding('utf-8')
|
||||
|
||||
# server configurations
|
||||
config = Script.get_config()
|
||||
|
||||
# conf_dir = "/etc/"
|
||||
dolphin_home = "/opt/soft/dolphinscheduler"
|
||||
dolphin_conf_dir = dolphin_home + "/conf"
|
||||
dolphin_log_dir = dolphin_home + "/logs"
|
||||
dolphin_bin_dir = dolphin_home + "/bin"
|
||||
dolphin_lib_jars = dolphin_home + "/lib/*"
|
||||
dolphin_pidfile_dir = "/opt/soft/run/dolphinscheduler"
|
||||
|
||||
rmHosts = default("/clusterHostInfo/rm_host", [])
|
||||
|
||||
# dolphin-env
|
||||
dolphin_env_map = {}
|
||||
dolphin_env_map.update(config['configurations']['dolphin-env'])
|
||||
|
||||
# which user to install and admin dolphin scheduler
|
||||
dolphin_user = dolphin_env_map['dolphin.user']
|
||||
dolphin_group = dolphin_env_map['dolphin.group']
|
||||
|
||||
# .dolphinscheduler_env.sh
|
||||
dolphin_env_path = dolphin_conf_dir + '/env/dolphinscheduler_env.sh'
|
||||
dolphin_env_content = dolphin_env_map['dolphinscheduler-env-content']
|
||||
|
||||
# database config
|
||||
dolphin_database_config = {}
|
||||
dolphin_database_config['dolphin_database_type'] = dolphin_env_map['dolphin.database.type']
|
||||
dolphin_database_config['dolphin_database_host'] = dolphin_env_map['dolphin.database.host']
|
||||
dolphin_database_config['dolphin_database_port'] = dolphin_env_map['dolphin.database.port']
|
||||
dolphin_database_config['dolphin_database_username'] = dolphin_env_map['dolphin.database.username']
|
||||
dolphin_database_config['dolphin_database_password'] = dolphin_env_map['dolphin.database.password']
|
||||
|
||||
if 'mysql' == dolphin_database_config['dolphin_database_type']:
|
||||
dolphin_database_config['dolphin_database_driver'] = 'com.mysql.jdbc.Driver'
|
||||
dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.StdJDBCDelegate'
|
||||
dolphin_database_config['dolphin_database_url'] = 'jdbc:mysql://' + dolphin_env_map['dolphin.database.host'] \
|
||||
+ ':' + dolphin_env_map['dolphin.database.port'] \
|
||||
+ '/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8'
|
||||
else:
|
||||
dolphin_database_config['dolphin_database_driver'] = 'org.postgresql.Driver'
|
||||
dolphin_database_config['driverDelegateClass'] = 'org.quartz.impl.jdbcjobstore.PostgreSQLDelegate'
|
||||
dolphin_database_config['dolphin_database_url'] = 'jdbc:postgresql://' + dolphin_env_map['dolphin.database.host'] \
|
||||
+ ':' + dolphin_env_map['dolphin.database.port'] \
|
||||
+ '/dolphinscheduler'
|
||||
|
||||
# application-alert.properties
|
||||
# application-alert.properties
# Defaults for the enterprise WeChat notification endpoints; anything set in
# the Ambari "dolphin-alert" configuration overrides these (update() below).
dolphin_alert_map = {}
wechat_push_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token'
wechat_token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret'
wechat_team_send_msg = '{\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}'
wechat_user_send_msg = '{\"touser\":\"$toUser\",\"agentid\":\"$agentId\",\"msgtype\":\"markdown\",\"markdown\":{\"content\":\"$msg\"}}'

# NOTE(review): 'enterprise.wechat.push.ur' looks like a typo for
# '...push.url', but some DolphinScheduler releases read exactly this
# property name -- confirm against the alert server's Constants before
# renaming, or the rendered alert.properties key will stop matching.
dolphin_alert_map['enterprise.wechat.push.ur'] = wechat_push_url
dolphin_alert_map['enterprise.wechat.token.url'] = wechat_token_url
dolphin_alert_map['enterprise.wechat.team.send.msg'] = wechat_team_send_msg
dolphin_alert_map['enterprise.wechat.user.send.msg'] = wechat_user_send_msg
dolphin_alert_map.update(config['configurations']['dolphin-alert'])
|
||||
|
||||
# application-api.properties
|
||||
dolphin_app_api_map = {}
|
||||
dolphin_app_api_map['logging.config'] = 'classpath:apiserver_logback.xml'
|
||||
dolphin_app_api_map['spring.messages.basename'] = 'i18n/messages'
|
||||
dolphin_app_api_map['server.servlet.context-path'] = '/dolphinscheduler/'
|
||||
dolphin_app_api_map.update(config['configurations']['dolphin-application-api'])
|
||||
|
||||
# application-dao.properties
|
||||
dolphin_application_map = {}
|
||||
dolphin_application_map['spring.datasource.type'] = 'com.alibaba.druid.pool.DruidDataSource'
|
||||
dolphin_application_map['spring.datasource.driver-class-name'] = dolphin_database_config['dolphin_database_driver']
|
||||
dolphin_application_map['spring.datasource.url'] = dolphin_database_config['dolphin_database_url']
|
||||
dolphin_application_map['spring.datasource.username'] = dolphin_database_config['dolphin_database_username']
|
||||
dolphin_application_map['spring.datasource.password'] = dolphin_database_config['dolphin_database_password']
|
||||
dolphin_application_map.update(config['configurations']['dolphin-application'])
|
||||
|
||||
# common.properties
|
||||
dolphin_common_map = {}
|
||||
|
||||
if 'yarn-site' in config['configurations'] and \
|
||||
'yarn.resourcemanager.webapp.address' in config['configurations']['yarn-site']:
|
||||
yarn_resourcemanager_webapp_address = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address']
|
||||
yarn_application_status_address = 'http://' + yarn_resourcemanager_webapp_address + '/ws/v1/cluster/apps/%s'
|
||||
dolphin_common_map['yarn.application.status.address'] = yarn_application_status_address
|
||||
|
||||
rmHosts = default("/clusterHostInfo/rm_host", [])
|
||||
if len(rmHosts) > 1:
|
||||
dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ','.join(rmHosts)
|
||||
else:
|
||||
dolphin_common_map['yarn.resourcemanager.ha.rm.ids'] = ''
|
||||
|
||||
dolphin_common_map_tmp = config['configurations']['dolphin-common']
|
||||
data_basedir_path = dolphin_common_map_tmp['data.basedir.path']
|
||||
process_exec_basepath = data_basedir_path + '/exec'
|
||||
data_download_basedir_path = data_basedir_path + '/download'
|
||||
dolphin_common_map['process.exec.basepath'] = process_exec_basepath
|
||||
dolphin_common_map['data.download.basedir.path'] = data_download_basedir_path
|
||||
dolphin_common_map['dolphinscheduler.env.path'] = dolphin_env_path
|
||||
|
||||
zookeeperHosts = default("/clusterHostInfo/zookeeper_hosts", [])
|
||||
if len(zookeeperHosts) > 0 and "clientPort" in config['configurations']['zoo.cfg']:
|
||||
clientPort = config['configurations']['zoo.cfg']['clientPort']
|
||||
zookeeperPort = ":" + clientPort + ","
|
||||
dolphin_common_map['zookeeper.quorum'] = zookeeperPort.join(zookeeperHosts) + ":" + clientPort
|
||||
|
||||
dolphin_common_map.update(config['configurations']['dolphin-common'])
|
||||
|
||||
# quartz.properties
|
||||
dolphin_quartz_map = {}
|
||||
dolphin_quartz_map['org.quartz.jobStore.driverDelegateClass'] = dolphin_database_config['driverDelegateClass']
|
||||
dolphin_quartz_map['org.quartz.dataSource.myDs.driver'] = dolphin_database_config['dolphin_database_driver']
|
||||
dolphin_quartz_map['org.quartz.dataSource.myDs.URL'] = dolphin_database_config['dolphin_database_url']
|
||||
dolphin_quartz_map['org.quartz.dataSource.myDs.user'] = dolphin_database_config['dolphin_database_username']
|
||||
dolphin_quartz_map['org.quartz.dataSource.myDs.password'] = dolphin_database_config['dolphin_database_password']
|
||||
dolphin_quartz_map.update(config['configurations']['dolphin-quartz'])
|
||||
|
||||
# if 'ganglia_server_host' in config['clusterHostInfo'] and \
|
||||
# len(config['clusterHostInfo']['ganglia_server_host'])>0:
|
||||
# ganglia_installed = True
|
||||
# ganglia_server = config['clusterHostInfo']['ganglia_server_host'][0]
|
||||
# ganglia_report_interval = 60
|
||||
# else:
|
||||
# ganglia_installed = False
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
"""
|
||||
|
||||
from resource_management import *
|
||||
from resource_management.libraries.functions import get_unique_id_and_date
|
||||
|
||||
class ServiceCheck(Script):
  """Ambari smoke-test hook for DolphinScheduler (currently a no-op)."""

  def service_check(self, env):
    """Placeholder service check; the real probing is commented out below."""
    import params
    #env.set_params(params)

    # Execute(format("which pika_server"))


if __name__ == "__main__":
  ServiceCheck().execute()
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
"""
|
||||
Licensed to the Apache Software Foundation (ASF) under one
|
||||
or more contributor license agreements. See the NOTICE file
|
||||
distributed with this work for additional information
|
||||
regarding copyright ownership. The ASF licenses this file
|
||||
to you under the Apache License, Version 2.0 (the
|
||||
"License"); you may not use this file except in compliance
|
||||
with the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
"""
|
||||
|
||||
from resource_management import *
|
||||
|
||||
# Cluster configuration published by Ambari for this host (unused here but
# conventionally exposed by every *_params module).
config = Script.get_config()

# Directory holding the <daemon>-server.pid files. The trailing slash is
# required: status() callers concatenate the pid file name directly.
dolphin_run_dir = "/opt/soft/run/dolphinscheduler/"
|
||||
|
|
@ -15,7 +15,6 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
masters=ark0,ark1
|
||||
workers=ark2,ark3,ark4
|
||||
alertServer=ark3
|
||||
apiServers=ark1
|
||||
{% for key, value in dolphin_alert_map.iteritems() -%}
|
||||
{{key}}={{value}}
|
||||
{% endfor %}
|
||||
|
|
@ -15,6 +15,6 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
installPath=/data1_1T/dolphinscheduler
|
||||
deployUser=dolphinscheduler
|
||||
ips=ark0,ark1,ark2,ark3,ark4
|
||||
{% for key, value in dolphin_app_api_map.iteritems() -%}
|
||||
{{key}}={{value}}
|
||||
{% endfor %}
|
||||
|
|
@ -15,6 +15,6 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
export PYTHON_HOME=/usr/bin/python
|
||||
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
|
||||
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
|
||||
{% for key, value in dolphin_application_map.iteritems() -%}
|
||||
{{key}}={{value}}
|
||||
{% endfor %}
|
||||
|
|
@ -15,7 +15,6 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
export PYTHON_HOME=/usr/bin/python
|
||||
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
|
||||
export PATH=$PYTHON_HOME:$JAVA_HOME/bin:$PATH
|
||||
export DATAX_HOME=/opt/datax/bin/datax.py
|
||||
{% for key, value in dolphin_common_map.iteritems() -%}
|
||||
{{key}}={{value}}
|
||||
{% endfor %}
|
||||
|
|
@ -0,0 +1,119 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
usage="Usage: dolphinscheduler-daemon.sh (start|stop) <command> "
|
||||
|
||||
# if no args specified, show usage
|
||||
if [ $# -le 1 ]; then
|
||||
echo $usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
startStop=$1
|
||||
shift
|
||||
command=$1
|
||||
shift
|
||||
|
||||
echo "Begin $startStop $command......"
|
||||
|
||||
BIN_DIR=`dirname $0`
|
||||
BIN_DIR=`cd "$BIN_DIR"; pwd`
|
||||
DOLPHINSCHEDULER_HOME=$BIN_DIR/..
|
||||
|
||||
export HOSTNAME=`hostname`
|
||||
|
||||
DOLPHINSCHEDULER_LIB_JARS={{dolphin_lib_jars}}
|
||||
|
||||
DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms4g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
|
||||
STOP_TIMEOUT=5
|
||||
|
||||
log={{dolphin_log_dir}}/dolphinscheduler-$command-$HOSTNAME.out
|
||||
pid={{dolphin_pidfile_dir}}/$command.pid
|
||||
|
||||
cd $DOLPHINSCHEDULER_HOME
|
||||
|
||||
if [ "$command" = "api-server" ]; then
|
||||
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/apiserver_logback.xml -Dspring.profiles.active=api"
|
||||
CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer
|
||||
elif [ "$command" = "master-server" ]; then
|
||||
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/master_logback.xml -Ddruid.mysql.usePingMethod=false"
|
||||
CLASS=org.apache.dolphinscheduler.server.master.MasterServer
|
||||
elif [ "$command" = "worker-server" ]; then
|
||||
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/worker_logback.xml -Ddruid.mysql.usePingMethod=false"
|
||||
CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer
|
||||
elif [ "$command" = "alert-server" ]; then
|
||||
LOG_FILE="-Dlogback.configurationFile={{dolphin_conf_dir}}/alert_logback.xml"
|
||||
CLASS=org.apache.dolphinscheduler.alert.AlertServer
|
||||
elif [ "$command" = "logger-server" ]; then
|
||||
CLASS=org.apache.dolphinscheduler.server.rpc.LoggerServer
|
||||
elif [ "$command" = "combined-server" ]; then
|
||||
LOG_FILE="-Dlogging.config={{dolphin_conf_dir}}/combined_logback.xml -Dspring.profiles.active=api -Dserver.is-combined-server=true"
|
||||
CLASS=org.apache.dolphinscheduler.api.CombinedApplicationServer
|
||||
else
|
||||
echo "Error: No command named \`$command' was found."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
case $startStop in
|
||||
(start)
|
||||
|
||||
if [ -f $pid ]; then
|
||||
if kill -0 `cat $pid` > /dev/null 2>&1; then
|
||||
echo $command running as process `cat $pid`. Stop it first.
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo starting $command, logging to $log
|
||||
|
||||
exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath {{dolphin_conf_dir}}:{{dolphin_lib_jars}} $CLASS"
|
||||
|
||||
echo "nohup java $exec_command > $log 2>&1 < /dev/null &"
|
||||
nohup java $exec_command > $log 2>&1 < /dev/null &
|
||||
echo $! > $pid
|
||||
;;
|
||||
|
||||
(stop)
|
||||
|
||||
if [ -f $pid ]; then
|
||||
TARGET_PID=`cat $pid`
|
||||
if kill -0 $TARGET_PID > /dev/null 2>&1; then
|
||||
echo stopping $command
|
||||
kill $TARGET_PID
|
||||
sleep $STOP_TIMEOUT
|
||||
if kill -0 $TARGET_PID > /dev/null 2>&1; then
|
||||
echo "$command did not stop gracefully after $STOP_TIMEOUT seconds: killing with kill -9"
|
||||
kill -9 $TARGET_PID
|
||||
fi
|
||||
else
|
||||
echo no $command to stop
|
||||
fi
|
||||
rm -f $pid
|
||||
else
|
||||
echo no $command to stop
|
||||
fi
|
||||
;;
|
||||
|
||||
(*)
|
||||
echo $usage
|
||||
exit 1
|
||||
;;
|
||||
|
||||
esac
|
||||
|
||||
echo "End $startStop $command."
|
||||
|
|
@ -0,0 +1,20 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
{% for key, value in dolphin_quartz_map.iteritems() -%}
|
||||
{{key}}={{value}}
|
||||
{% endfor %}
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
{
|
||||
"name": "default",
|
||||
"description": "default quick links configuration",
|
||||
"configuration": {
|
||||
"protocol":
|
||||
{
|
||||
"type":"http"
|
||||
},
|
||||
|
||||
"links": [
|
||||
{
|
||||
"name": "dolphin-application-ui",
|
||||
"label": "DolphinApplication UI",
|
||||
"requires_user_name": "false",
|
||||
"component_name": "DOLPHIN_API",
|
||||
"url": "%@://%@:%@/dolphinscheduler/ui/view/login/index.html",
|
||||
"port":{
|
||||
"http_property": "server.port",
|
||||
"http_default_port": "12345",
|
||||
"regex": "^(\\d+)$",
|
||||
"site": "dolphin-application-api"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,605 @@
|
|||
{
|
||||
"name": "default",
|
||||
"description": "Default theme for Dolphin Scheduler service",
|
||||
"configuration": {
|
||||
"layouts": [
|
||||
{
|
||||
"name": "default",
|
||||
"tabs": [
|
||||
{
|
||||
"name": "settings",
|
||||
"display-name": "Settings",
|
||||
"layout": {
|
||||
"tab-rows": "3",
|
||||
"tab-columns": "3",
|
||||
"sections": [
|
||||
{
|
||||
"name": "dolphin-env-config",
|
||||
"display-name": "Dolphin Env Config",
|
||||
"row-index": "0",
|
||||
"column-index": "0",
|
||||
"row-span": "1",
|
||||
"column-span": "2",
|
||||
"section-rows": "1",
|
||||
"section-columns": "2",
|
||||
"subsections": [
|
||||
{
|
||||
"name": "env-row1-col1",
|
||||
"display-name": "Deploy User Info",
|
||||
"row-index": "0",
|
||||
"column-index": "0",
|
||||
"row-span": "1",
|
||||
"column-span": "1"
|
||||
},
|
||||
{
|
||||
"name": "env-row1-col2",
|
||||
"display-name": "System Env Optimization",
|
||||
"row-index": "0",
|
||||
"column-index": "1",
|
||||
"row-span": "1",
|
||||
"column-span": "1"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "dolphin-database-config",
|
||||
"display-name": "Database Config",
|
||||
"row-index": "1",
|
||||
"column-index": "0",
|
||||
"row-span": "1",
|
||||
"column-span": "2",
|
||||
"section-rows": "1",
|
||||
"section-columns": "3",
|
||||
"subsections": [
|
||||
{
|
||||
"name": "database-row1-col1",
|
||||
"row-index": "0",
|
||||
"column-index": "0",
|
||||
"row-span": "1",
|
||||
"column-span": "1"
|
||||
},
|
||||
{
|
||||
"name": "database-row1-col2",
|
||||
"row-index": "0",
|
||||
"column-index": "1",
|
||||
"row-span": "1",
|
||||
"column-span": "1"
|
||||
},
|
||||
{
|
||||
"name": "database-row1-col3",
|
||||
"row-index": "0",
|
||||
"column-index": "2",
|
||||
"row-span": "1",
|
||||
"column-span": "1"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "dynamic-config",
|
||||
"row-index": "2",
|
||||
"column-index": "0",
|
||||
"row-span": "1",
|
||||
"column-span": "2",
|
||||
"section-rows": "1",
|
||||
"section-columns": "3",
|
||||
"subsections": [
|
||||
{
|
||||
"name": "dynamic-row1-col1",
|
||||
"display-name": "Resource FS Config",
|
||||
"row-index": "0",
|
||||
"column-index": "0",
|
||||
"row-span": "1",
|
||||
"column-span": "1"
|
||||
},
|
||||
{
|
||||
"name": "dynamic-row1-col2",
|
||||
"display-name": "Kerberos Info",
|
||||
"row-index": "0",
|
||||
"column-index": "1",
|
||||
"row-span": "1",
|
||||
"column-span": "1"
|
||||
},
|
||||
{
|
||||
"name": "dynamic-row1-col3",
|
||||
"display-name": "Wechat Info",
|
||||
"row-index": "0",
|
||||
"column-index": "1",
|
||||
"row-span": "1",
|
||||
"column-span": "1"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"placement": {
|
||||
"configuration-layout": "default",
|
||||
"configs": [
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.type",
|
||||
"subsection-name": "database-row1-col1"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.host",
|
||||
"subsection-name": "database-row1-col2"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.port",
|
||||
"subsection-name": "database-row1-col2"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.username",
|
||||
"subsection-name": "database-row1-col3"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.password",
|
||||
"subsection-name": "database-row1-col3"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.user",
|
||||
"subsection-name": "env-row1-col1"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.group",
|
||||
"subsection-name": "env-row1-col1"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphinscheduler-env-content",
|
||||
"subsection-name": "env-row1-col2"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/res.upload.startup.type",
|
||||
"subsection-name": "dynamic-row1-col1"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/hdfs.root.user",
|
||||
"subsection-name": "dynamic-row1-col1",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/res.upload.startup.type"
|
||||
],
|
||||
"if": "${dolphin-common/res.upload.startup.type} === HDFS",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/data.store2hdfs.basepath",
|
||||
"subsection-name": "dynamic-row1-col1",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/res.upload.startup.type"
|
||||
],
|
||||
"if": "${dolphin-common/res.upload.startup.type} === HDFS",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/fs.defaultFS",
|
||||
"subsection-name": "dynamic-row1-col1",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/res.upload.startup.type"
|
||||
],
|
||||
"if": "${dolphin-common/res.upload.startup.type} === HDFS",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/fs.s3a.endpoint",
|
||||
"subsection-name": "dynamic-row1-col1",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/res.upload.startup.type"
|
||||
],
|
||||
"if": "${dolphin-common/res.upload.startup.type} === S3",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/fs.s3a.access.key",
|
||||
"subsection-name": "dynamic-row1-col1",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/res.upload.startup.type"
|
||||
],
|
||||
"if": "${dolphin-common/res.upload.startup.type} === S3",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/fs.s3a.secret.key",
|
||||
"subsection-name": "dynamic-row1-col1",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/res.upload.startup.type"
|
||||
],
|
||||
"if": "${dolphin-common/res.upload.startup.type} === S3",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/hadoop.security.authentication.startup.state",
|
||||
"subsection-name": "dynamic-row1-col2"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/java.security.krb5.conf.path",
|
||||
"subsection-name": "dynamic-row1-col2",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/hadoop.security.authentication.startup.state"
|
||||
],
|
||||
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/login.user.keytab.username",
|
||||
"subsection-name": "dynamic-row1-col2",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/hadoop.security.authentication.startup.state"
|
||||
],
|
||||
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/login.user.keytab.path",
|
||||
"subsection-name": "dynamic-row1-col2",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-common/hadoop.security.authentication.startup.state"
|
||||
],
|
||||
"if": "${dolphin-common/hadoop.security.authentication.startup.state}",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.enable",
|
||||
"subsection-name": "dynamic-row1-col3"
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.corp.id",
|
||||
"subsection-name": "dynamic-row1-col3",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-alert/enterprise.wechat.enable"
|
||||
],
|
||||
"if": "${dolphin-alert/enterprise.wechat.enable}",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.secret",
|
||||
"subsection-name": "dynamic-row1-col3",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-alert/enterprise.wechat.enable"
|
||||
],
|
||||
"if": "${dolphin-alert/enterprise.wechat.enable}",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.agent.id",
|
||||
"subsection-name": "dynamic-row1-col3",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-alert/enterprise.wechat.enable"
|
||||
],
|
||||
"if": "${dolphin-alert/enterprise.wechat.enable}",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.users",
|
||||
"subsection-name": "dynamic-row1-col3",
|
||||
"depends-on": [
|
||||
{
|
||||
"configs":[
|
||||
"dolphin-alert/enterprise.wechat.enable"
|
||||
],
|
||||
"if": "${dolphin-alert/enterprise.wechat.enable}",
|
||||
"then": {
|
||||
"property_value_attributes": {
|
||||
"visible": true
|
||||
}
|
||||
},
|
||||
"else": {
|
||||
"property_value_attributes": {
|
||||
"visible": false
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"widgets": [
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.type",
|
||||
"widget": {
|
||||
"type": "combo"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.host",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.port",
|
||||
"widget": {
|
||||
"type": "text-field",
|
||||
"units": [
|
||||
{
|
||||
"unit-name": "int"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.username",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.database.password",
|
||||
"widget": {
|
||||
"type": "password"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.user",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphin.group",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-env/dolphinscheduler-env-content",
|
||||
"widget": {
|
||||
"type": "text-area"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/res.upload.startup.type",
|
||||
"widget": {
|
||||
"type": "combo"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/hdfs.root.user",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/data.store2hdfs.basepath",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/fs.defaultFS",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/fs.s3a.endpoint",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/fs.s3a.access.key",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/fs.s3a.secret.key",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/hadoop.security.authentication.startup.state",
|
||||
"widget": {
|
||||
"type": "toggle"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/java.security.krb5.conf.path",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/login.user.keytab.username",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-common/login.user.keytab.path",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.enable",
|
||||
"widget": {
|
||||
"type": "toggle"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.corp.id",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.secret",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.agent.id",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
},
|
||||
{
|
||||
"config": "dolphin-alert/enterprise.wechat.users",
|
||||
"widget": {
|
||||
"type": "text-field"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
Binary file not shown.
|
|
@ -0,0 +1,26 @@
|
|||
<?xml version="1.0"?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<metainfo>
|
||||
<schemaVersion>2.0</schemaVersion>
|
||||
<services>
|
||||
<service>
|
||||
<name>DOLPHIN</name>
|
||||
<extends>common-services/DOLPHIN/1.2.1</extends>
|
||||
</service>
|
||||
</services>
|
||||
</metainfo>
|
||||
|
|
@ -0,0 +1,226 @@
|
|||
# Dolphin Scheduler
|
||||
|
||||
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
|
||||
|
||||
## Introduction
|
||||
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Kubernetes 1.10+
|
||||
- PV provisioner support in the underlying infrastructure
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
|
||||
$ cd incubator-dolphinscheduler
|
||||
$ helm install --name dolphinscheduler .
|
||||
```
|
||||
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
> **Tip**: List all releases using `helm list`
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `dolphinscheduler` deployment:
|
||||
|
||||
```bash
|
||||
$ helm delete --purge dolphinscheduler
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following tables lists the configurable parameters of the Dolphins Scheduler chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
|
||||
| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` |
|
||||
| `image.registry` | Docker image registry for the Dolphins Scheduler | `docker.io` |
|
||||
| `image.repository` | Docker image repository for the Dolphins Scheduler | `dolphinscheduler` |
|
||||
| `image.tag` | Docker image version for the Dolphins Scheduler | `1.2.1` |
|
||||
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
|
||||
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
|
||||
| | | |
|
||||
| `postgresql.enabled` | If not exists external PostgreSQL, by default, the Dolphins Scheduler will use a internal PostgreSQL | `true` |
|
||||
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
|
||||
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
|
||||
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
|
||||
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
|
||||
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `externalDatabase.host` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database host will use it. | `localhost` |
|
||||
| `externalDatabase.port` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database port will use it. | `5432` |
|
||||
| `externalDatabase.username` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database username will use it. | `root` |
|
||||
| `externalDatabase.password` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database password will use it. | `root` |
|
||||
| `externalDatabase.database` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database database will use it. | `dolphinscheduler` |
|
||||
| | | |
|
||||
| `zookeeper.enabled` | If not exists external Zookeeper, by default, the Dolphin Scheduler will use a internal Zookeeper | `true` |
|
||||
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
|
||||
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
|
||||
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `externalZookeeper.taskQueue` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify task queue for `master` and `worker` | `zookeeper` |
|
||||
| `externalZookeeper.zookeeperQuorum` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify Zookeeper quorum | `127.0.0.1:2181` |
|
||||
| | | |
|
||||
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
|
||||
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
|
||||
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
|
||||
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
|
||||
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
|
||||
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
|
||||
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
|
||||
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | Only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 | `100` |
|
||||
| `master.configmap.MASTER_RESERVED_MEMORY` | Only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G | `0.1` |
|
||||
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `master.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
|
||||
| `master.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
|
||||
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `master.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
|
||||
| `master.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
|
||||
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
|
||||
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
|
||||
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
|
||||
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
|
||||
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
|
||||
| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
|
||||
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | Only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 | `100` |
|
||||
| `worker.configmap.WORKER_RESERVED_MEMORY` | Only larger than reserved memory, worker server can work. default value : physical memory * 1/10, unit is G | `0.1` |
|
||||
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration, please make sure the directory exists and have read write permissions | `/tmp/dolphinscheduler` |
|
||||
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration, please read `values.yaml` | `[]` |
|
||||
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `worker.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
|
||||
| `worker.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
|
||||
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `worker.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
|
||||
| `worker.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
|
||||
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
|
||||
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
|
||||
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
|
||||
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
|
||||
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
|
||||
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
|
||||
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
|
||||
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
|
||||
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST ` | `nil` |
|
||||
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
|
||||
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
|
||||
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
|
||||
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
|
||||
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
|
||||
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
|
||||
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
|
||||
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
|
||||
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
|
||||
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
|
||||
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
|
||||
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
|
||||
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
|
||||
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
|
||||
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
|
||||
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
|
||||
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
|
||||
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
|
||||
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `ingress.enabled` | Enable ingress | `false` |
|
||||
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.tls.enabled` | Enable ingress tls | `false` |
|
||||
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
|
||||
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
|
||||
|
||||
For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
apiVersion: v2
|
||||
name: dolphinscheduler
|
||||
description: Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
|
||||
home: https://dolphinscheduler.apache.org
|
||||
icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
|
||||
keywords:
|
||||
- dolphinscheduler
|
||||
- Scheduler
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application.
|
||||
appVersion: 1.2.1
|
||||
|
||||
dependencies:
|
||||
- name: postgresql
|
||||
version: 8.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
condition: postgresql.enabled
|
||||
- name: zookeeper
|
||||
version: 5.x.x
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
  condition: zookeeper.enabled
|
||||
|
|
@ -0,0 +1,226 @@
|
|||
# Dolphin Scheduler
|
||||
|
||||
[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
|
||||
|
||||
## Introduction
|
||||
This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Kubernetes 1.10+
|
||||
- PV provisioner support in the underlying infrastructure
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/apache/incubator-dolphinscheduler.git
|
||||
$ cd incubator-dolphinscheduler
|
||||
$ helm install --name dolphinscheduler .
|
||||
```
|
||||
These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
> **Tip**: List all releases using `helm list`
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `dolphinscheduler` deployment:
|
||||
|
||||
```bash
|
||||
$ helm delete --purge dolphinscheduler
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||
## Configuration
|
||||
|
||||
The following tables lists the configurable parameters of the Dolphins Scheduler chart and their default values.
|
||||
|
||||
| Parameter | Description | Default |
|
||||
| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
|
||||
| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` |
|
||||
| `image.registry` | Docker image registry for the Dolphins Scheduler | `docker.io` |
|
||||
| `image.repository` | Docker image repository for the Dolphins Scheduler | `dolphinscheduler` |
|
||||
| `image.tag` | Docker image version for the Dolphins Scheduler | `1.2.1` |
|
||||
| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
|
||||
| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
|
||||
| | | |
|
||||
| `postgresql.enabled` | If not exists external PostgreSQL, by default, the Dolphins Scheduler will use an internal PostgreSQL | `true` |
|
||||
| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
|
||||
| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
|
||||
| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
|
||||
| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
|
||||
| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `externalDatabase.host` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database host will use it. | `localhost` |
|
||||
| `externalDatabase.port` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database port will use it. | `5432` |
|
||||
| `externalDatabase.username` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database username will use it. | `root` |
|
||||
| `externalDatabase.password` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database password will use it. | `root` |
|
||||
| `externalDatabase.database` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database database will use it. | `dolphinscheduler` |
|
||||
| | | |
|
||||
| `zookeeper.enabled` | If not exists external Zookeeper, by default, the Dolphin Scheduler will use an internal Zookeeper | `true` |
|
||||
| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
|
||||
| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
|
||||
| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `externalZookeeper.taskQueue` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify task queue for `master` and `worker` | `zookeeper` |
|
||||
| `externalZookeeper.zookeeperQuorum` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify Zookeeper quorum | `127.0.0.1:2181` |
|
||||
| | | |
|
||||
| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
|
||||
| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
|
||||
| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `master.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
|
||||
| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
|
||||
| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
|
||||
| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
|
||||
| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
|
||||
| `master.configmap.MASTER_MAX_CPULOAD_AVG` | Only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 | `100` |
|
||||
| `master.configmap.MASTER_RESERVED_MEMORY` | Only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G | `0.1` |
|
||||
| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
|
||||
| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
|
||||
| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
|
||||
| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
|
||||
| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
|
||||
| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
|
||||
| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | Only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 | `100` |
|
||||
| `worker.configmap.WORKER_RESERVED_MEMORY` | Only larger than reserved memory, worker server can work. default value : physical memory * 1/10, unit is G | `0.1` |
|
||||
| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration, please make sure the directory exists and have read write permissions | `/tmp/dolphinscheduler` |
|
||||
| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration, please read `values.yaml` | `[]` |
|
||||
| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `worker.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `worker.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `worker.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `worker.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
|
||||
| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
|
||||
| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
|
||||
| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
|
||||
| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
|
||||
| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
|
||||
| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
|
||||
| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
|
||||
| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST ` | `nil` |
|
||||
| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
|
||||
| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
|
||||
| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
|
||||
| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
|
||||
| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
|
||||
| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
|
||||
| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
|
||||
| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
|
||||
| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `alert.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `alert.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `alert.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `alert.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
|
||||
| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
|
||||
| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
|
||||
| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
|
||||
| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
|
||||
| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `api.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `api.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `api.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `api.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `api.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
|
||||
| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
|
||||
| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
|
||||
| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
|
||||
| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
|
||||
| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
|
||||
| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
|
||||
| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
|
||||
| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
|
||||
| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
|
||||
| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `frontend.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `frontend.livenessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
|
||||
| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
|
||||
| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
|
||||
| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
|
||||
| `frontend.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe | `3` |
|
||||
| `frontend.readinessProbe.successThreshold` | Minimum consecutive successes for the probe | `1` |
|
||||
| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
|
||||
| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
|
||||
| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
|
||||
| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
|
||||
| | | |
|
||||
| `ingress.enabled` | Enable ingress | `false` |
|
||||
| `ingress.host` | Ingress host | `dolphinscheduler.org` |
|
||||
| `ingress.path` | Ingress path | `/` |
|
||||
| `ingress.tls.enabled` | Enable ingress tls | `false` |
|
||||
| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
|
||||
| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
|
||||
|
||||
For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
** Please be patient while the chart is being deployed **
|
||||
|
||||
1. Get the Dolphinscheduler URL by running:
|
||||
|
||||
{{- if .Values.ingress.enabled }}
|
||||
|
||||
export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
|
||||
echo "Dolphinscheduler URL: http://$HOSTNAME/"
|
||||
|
||||
{{- else }}
|
||||
|
||||
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
|
||||
|
||||
{{- end }}
|
||||
|
||||
2. Get the Dolphinscheduler URL by running:
|
||||
|
||||
{{- if .Values.ingress.enabled }}
|
||||
|
||||
export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
|
||||
echo "Dolphinscheduler URL: http://$HOSTNAME/"
|
||||
|
||||
{{- else }}
|
||||
|
||||
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
|
||||
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,149 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.labels" -}}
|
||||
helm.sh/chart: {{ include "dolphinscheduler.chart" . }}
|
||||
{{ include "dolphinscheduler.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "dolphinscheduler.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default docker image registry.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.image.registry" -}}
|
||||
{{- $registry := default "docker.io" .Values.image.registry -}}
|
||||
{{- printf "%s" $registry | trunc 63 | trimSuffix "/" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default docker image repository.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.image.repository" -}}
|
||||
{{- printf "%s/%s:%s" (include "dolphinscheduler.image.registry" .) .Values.image.repository .Values.image.tag -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified postgresql name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.postgresql.fullname" -}}
|
||||
{{- $name := default "postgresql" .Values.postgresql.nameOverride -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified zookkeeper name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.zookeeper.fullname" -}}
|
||||
{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified zookkeeper quorum.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.zookeeper.quorum" -}}
|
||||
{{- $port := default "2181" (.Values.zookeeper.service.port | toString) -}}
|
||||
{{- printf "%s:%s" (include "dolphinscheduler.zookeeper.fullname" .) $port | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default dolphinscheduler worker base dir.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.worker.base.dir" -}}
|
||||
{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
|
||||
{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default dolphinscheduler worker data download dir.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.worker.data.download.dir" -}}
|
||||
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default dolphinscheduler worker process exec dir.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
*/}}
|
||||
{{- define "dolphinscheduler.worker.process.exec.dir" -}}
|
||||
{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}}
|
||||
{{- end -}}
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.alert.configmap }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
data:
|
||||
XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
|
||||
MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}
|
||||
MAIL_SERVER_PORT: {{ .Values.alert.configmap.MAIL_SERVER_PORT | quote }}
|
||||
MAIL_SENDER: {{ .Values.alert.configmap.MAIL_SENDER | quote }}
|
||||
MAIL_USER: {{ .Values.alert.configmap.MAIL_USER | quote }}
|
||||
MAIL_PASSWD: {{ .Values.alert.configmap.MAIL_PASSWD | quote }}
|
||||
MAIL_SMTP_STARTTLS_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_STARTTLS_ENABLE | quote }}
|
||||
MAIL_SMTP_SSL_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_SSL_ENABLE | quote }}
|
||||
MAIL_SMTP_SSL_TRUST: {{ .Values.alert.configmap.MAIL_SMTP_SSL_TRUST | quote }}
|
||||
ENTERPRISE_WECHAT_ENABLE: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_ENABLE | quote }}
|
||||
ENTERPRISE_WECHAT_CORP_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_CORP_ID | quote }}
|
||||
ENTERPRISE_WECHAT_SECRET: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_SECRET | quote }}
|
||||
ENTERPRISE_WECHAT_AGENT_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_AGENT_ID | quote }}
|
||||
ENTERPRISE_WECHAT_USERS: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_USERS | quote }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.master.configmap }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
data:
|
||||
MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }}
|
||||
MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }}
|
||||
MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }}
|
||||
MASTER_TASK_COMMIT_RETRYTIMES: {{ .Values.master.configmap.MASTER_TASK_COMMIT_RETRYTIMES | quote }}
|
||||
MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }}
|
||||
MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
|
||||
MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.worker.configmap }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
data:
|
||||
WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }}
|
||||
WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }}
|
||||
WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
|
||||
WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
|
||||
WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
|
||||
DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }}
|
||||
DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }}
|
||||
dolphinscheduler_env.sh: |-
|
||||
{{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
|
||||
{{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,228 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: alert
|
||||
spec:
|
||||
replicas: {{ .Values.alert.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: alert
|
||||
strategy:
|
||||
type: {{ .Values.alert.strategy.type | quote }}
|
||||
rollingUpdate:
|
||||
maxSurge: {{ .Values.alert.strategy.rollingUpdate.maxSurge | quote }}
|
||||
maxUnavailable: {{ .Values.alert.strategy.rollingUpdate.maxUnavailable | quote }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: alert
|
||||
spec:
|
||||
{{- if .Values.alert.affinity }}
|
||||
affinity: {{- toYaml .Values.alert.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alert.nodeSelector }}
|
||||
nodeSelector: {{- toYaml .Values.alert.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.alert.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: init-postgresql
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to postgresql."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
env:
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
args:
|
||||
- "alert-server"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: XLS_FILE_PATH
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: XLS_FILE_PATH
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SERVER_HOST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SERVER_HOST
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SERVER_PORT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SERVER_PORT
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SENDER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SENDER
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_USER
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_USER
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_PASSWD
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_PASSWD
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_STARTTLS_ENABLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_STARTTLS_ENABLE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_SSL_ENABLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_SSL_ENABLE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: MAIL_SMTP_SSL_TRUST
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: MAIL_SMTP_SSL_TRUST
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: ENTERPRISE_WECHAT_ENABLE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ENTERPRISE_WECHAT_ENABLE
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: ENTERPRISE_WECHAT_CORP_ID
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ENTERPRISE_WECHAT_CORP_ID
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: ENTERPRISE_WECHAT_SECRET
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ENTERPRISE_WECHAT_SECRET
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: ENTERPRISE_WECHAT_AGENT_ID
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ENTERPRISE_WECHAT_AGENT_ID
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: ENTERPRISE_WECHAT_USERS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: ENTERPRISE_WECHAT_USERS
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_USERNAME
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlUsername }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.username | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
key: postgresql-password
|
||||
{{- else }}
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
key: db-password
|
||||
{{- end }}
|
||||
{{- if .Values.alert.livenessProbe.enabled }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- worker-server
|
||||
initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.alert.livenessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.alert.livenessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
{{- if .Values.alert.readinessProbe.enabled }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- worker-server
|
||||
initialDelaySeconds: {{ .Values.alert.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.alert.readinessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.alert.readinessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: "/opt/dolphinscheduler/logs"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
volumes:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
{{- if .Values.alert.persistentVolumeClaim.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,161 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: api
|
||||
spec:
|
||||
replicas: {{ .Values.api.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: api
|
||||
strategy:
|
||||
type: {{ .Values.api.strategy.type | quote }}
|
||||
rollingUpdate:
|
||||
maxSurge: {{ .Values.api.strategy.rollingUpdate.maxSurge | quote }}
|
||||
maxUnavailable: {{ .Values.api.strategy.rollingUpdate.maxUnavailable | quote }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: api
|
||||
spec:
|
||||
{{- if .Values.api.affinity }}
|
||||
affinity: {{- toYaml .Values.api.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.api.nodeSelector }}
|
||||
nodeSelector: {{- toYaml .Values.api.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.api.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: init-postgresql
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to postgresql."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
env:
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
args:
|
||||
- "api-server"
|
||||
ports:
|
||||
- containerPort: 12345
|
||||
name: tcp-port
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_USERNAME
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlUsername }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.username | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
key: postgresql-password
|
||||
{{- else }}
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
key: db-password
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
|
||||
{{- end }}
|
||||
{{- if .Values.api.livenessProbe.enabled }}
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 12345
|
||||
initialDelaySeconds: {{ .Values.api.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.api.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.api.livenessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.api.livenessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.api.livenessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
{{- if .Values.api.readinessProbe.enabled }}
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 12345
|
||||
initialDelaySeconds: {{ .Values.api.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.api.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.api.readinessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.api.readinessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.api.readinessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: "/opt/dolphinscheduler/logs"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
volumes:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
{{- if .Values.api.persistentVolumeClaim.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: frontend
|
||||
spec:
|
||||
replicas: {{ .Values.frontend.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: frontend
|
||||
strategy:
|
||||
type: {{ .Values.frontend.strategy.type | quote }}
|
||||
rollingUpdate:
|
||||
maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }}
|
||||
maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: frontend
|
||||
spec:
|
||||
{{- if .Values.frontend.affinity }}
|
||||
affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.frontend.nodeSelector }}
|
||||
nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.frontend.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
args:
|
||||
- "frontend"
|
||||
ports:
|
||||
- containerPort: 8888
|
||||
name: tcp-port
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: FRONTEND_API_SERVER_HOST
|
||||
value: '{{ include "dolphinscheduler.fullname" . }}-api'
|
||||
- name: FRONTEND_API_SERVER_PORT
|
||||
value: "12345"
|
||||
{{- if .Values.frontend.livenessProbe.enabled }}
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 8888
|
||||
initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
{{- if .Values.frontend.readinessProbe.enabled }}
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 8888
|
||||
initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: "/var/log/nginx"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
volumes:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.ingress.enabled }}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
rules:
|
||||
- host: {{ .Values.ingress.host }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ .Values.ingress.path }}
|
||||
backend:
|
||||
serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
servicePort: tcp-port
|
||||
{{- if .Values.ingress.tls.enabled }}
|
||||
tls:
|
||||
hosts:
|
||||
{{- range .Values.ingress.tls.hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .Values.ingress.tls.secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.alert.persistentVolumeClaim.enabled }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.alert.persistentVolumeClaim.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.alert.persistentVolumeClaim.storage | quote }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.api.persistentVolumeClaim.enabled }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.api.persistentVolumeClaim.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.api.persistentVolumeClaim.storage | quote }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if .Values.frontend.persistentVolumeClaim.enabled }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.frontend.persistentVolumeClaim.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
{{- if not .Values.postgresql.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-postgresql
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
type: Opaque
|
||||
data:
|
||||
db-password: {{ .Values.externalDatabase.password | b64enc | quote }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,247 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: master
|
||||
spec:
|
||||
podManagementPolicy: {{ .Values.master.podManagementPolicy }}
|
||||
replicas: {{ .Values.master.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: master
|
||||
serviceName: {{ template "dolphinscheduler.fullname" . }}-master-headless
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: master
|
||||
spec:
|
||||
{{- if .Values.master.affinity }}
|
||||
affinity: {{- toYaml .Values.master.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.master.nodeSelector }}
|
||||
nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.master.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: init-zookeeper
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
|
||||
while ! nc -z ${line%:*} ${line#*:}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to zookeeper."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
done
|
||||
env:
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
|
||||
{{- end }}
|
||||
- name: init-postgresql
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to postgresql."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
env:
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
args:
|
||||
- "master-server"
|
||||
ports:
|
||||
- containerPort: 8888
|
||||
name: unused-tcp-port
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: MASTER_EXEC_THREADS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_EXEC_THREADS
|
||||
- name: MASTER_EXEC_TASK_NUM
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_EXEC_TASK_NUM
|
||||
- name: MASTER_HEARTBEAT_INTERVAL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_HEARTBEAT_INTERVAL
|
||||
- name: MASTER_TASK_COMMIT_RETRYTIMES
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_TASK_COMMIT_RETRYTIMES
|
||||
- name: MASTER_TASK_COMMIT_INTERVAL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_TASK_COMMIT_INTERVAL
|
||||
- name: MASTER_MAX_CPULOAD_AVG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_MAX_CPULOAD_AVG
|
||||
- name: MASTER_RESERVED_MEMORY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
key: MASTER_RESERVED_MEMORY
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_USERNAME
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlUsername }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.username | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
key: postgresql-password
|
||||
{{- else }}
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
key: db-password
|
||||
{{- end }}
|
||||
- name: TASK_QUEUE
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: {{ .Values.zookeeper.taskQueue }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.taskQueue }}
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: {{ template "dolphinscheduler.zookeeper.quorum" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
|
||||
{{- end }}
|
||||
{{- if .Values.master.livenessProbe.enabled }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- master-server
|
||||
initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
{{- if .Values.master.readinessProbe.enabled }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- master-server
|
||||
initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: "/opt/dolphinscheduler/logs"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
volumes:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
{{- if .Values.master.persistentVolumeClaim.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if .Values.master.persistentVolumeClaim.enabled }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.master.persistentVolumeClaim.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.master.persistentVolumeClaim.storageClassName | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.master.persistentVolumeClaim.storage | quote }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,275 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: worker
|
||||
spec:
|
||||
podManagementPolicy: {{ .Values.worker.podManagementPolicy }}
|
||||
replicas: {{ .Values.worker.replicas }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: worker
|
||||
serviceName: {{ template "dolphinscheduler.fullname" . }}-worker-headless
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: worker
|
||||
spec:
|
||||
{{- if .Values.worker.affinity }}
|
||||
affinity: {{- toYaml .Values.worker.affinity | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.worker.nodeSelector }}
|
||||
nodeSelector: {{- toYaml .Values.worker.nodeSelector | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.worker.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: init-zookeeper
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
|
||||
while ! nc -z ${line%:*} ${line#*:}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to zookeeper."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
done
|
||||
env:
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
|
||||
{{- end }}
|
||||
- name: init-postgresql
|
||||
image: busybox:1.31.0
|
||||
command:
|
||||
- /bin/sh
|
||||
- -ec
|
||||
- |
|
||||
while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
|
||||
counter=$((counter+1))
|
||||
if [ $counter == 5 ]; then
|
||||
echo "Error: Couldn't connect to postgresql."
|
||||
exit 1
|
||||
fi
|
||||
echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
|
||||
sleep 60
|
||||
done
|
||||
env:
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
image: {{ include "dolphinscheduler.image.repository" . | quote }}
|
||||
args:
|
||||
- "worker-server"
|
||||
ports:
|
||||
- containerPort: 50051
|
||||
name: "logs-port"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
- name: TZ
|
||||
value: {{ .Values.timezone }}
|
||||
- name: WORKER_EXEC_THREADS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_EXEC_THREADS
|
||||
- name: WORKER_FETCH_TASK_NUM
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_FETCH_TASK_NUM
|
||||
- name: WORKER_HEARTBEAT_INTERVAL
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_HEARTBEAT_INTERVAL
|
||||
- name: WORKER_MAX_CPULOAD_AVG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_MAX_CPULOAD_AVG
|
||||
- name: WORKER_RESERVED_MEMORY
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
key: WORKER_RESERVED_MEMORY
|
||||
- name: POSTGRESQL_HOST
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.host | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PORT
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: "5432"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.port }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_USERNAME
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
value: {{ .Values.postgresql.postgresqlUsername }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalDatabase.username | quote }}
|
||||
{{- end }}
|
||||
- name: POSTGRESQL_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
{{- if .Values.postgresql.enabled }}
|
||||
name: {{ template "dolphinscheduler.postgresql.fullname" . }}
|
||||
key: postgresql-password
|
||||
{{- else }}
|
||||
name: {{ printf "%s-%s" .Release.Name "externaldb" }}
|
||||
key: db-password
|
||||
{{- end }}
|
||||
- name: TASK_QUEUE
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: {{ .Values.zookeeper.taskQueue }}
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.taskQueue }}
|
||||
{{- end }}
|
||||
- name: ZOOKEEPER_QUORUM
|
||||
{{- if .Values.zookeeper.enabled }}
|
||||
value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
|
||||
{{- else }}
|
||||
value: {{ .Values.externalZookeeper.zookeeperQuorum }}
|
||||
{{- end }}
|
||||
{{- if .Values.worker.livenessProbe.enabled }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- worker-server
|
||||
initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.worker.livenessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.worker.livenessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
{{- if .Values.worker.readinessProbe.enabled }}
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /root/checkpoint.sh
|
||||
- worker-server
|
||||
initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.worker.readinessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
volumeMounts:
|
||||
- mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
|
||||
- mountPath: "/opt/dolphinscheduler/logs"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
|
||||
- mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"
|
||||
subPath: "dolphinscheduler_env.sh"
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
|
||||
volumes:
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-worker-data
|
||||
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "dolphinscheduler.fullname" . }}-worker-data
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
|
||||
{{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "dolphinscheduler.fullname" . }}-worker-logs
|
||||
{{- else }}
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
- name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
|
||||
configMap:
|
||||
defaultMode: 0777
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
items:
|
||||
- key: dolphinscheduler_env.sh
|
||||
path: dolphinscheduler_env.sh
|
||||
{{- if .Values.worker.persistentVolumeClaim.enabled }}
|
||||
volumeClaimTemplates:
|
||||
{{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
|
||||
- metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-data
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-data
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storage | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
|
||||
- metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
accessModes:
|
||||
{{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storage | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 12345
|
||||
targetPort: tcp-port
|
||||
protocol: TCP
|
||||
name: tcp-port
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: api
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 8888
|
||||
targetPort: tcp-port
|
||||
protocol: TCP
|
||||
name: tcp-port
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: frontend
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-master-headless
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master-headless
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}-master-headless
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
clusterIP: "None"
|
||||
ports:
|
||||
- port: 8888
|
||||
targetPort: tcp-port
|
||||
protocol: TCP
|
||||
name: unused-tcp-port
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: master
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}-worker-headless
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
clusterIP: "None"
|
||||
ports:
|
||||
- port: 50051
|
||||
targetPort: logs-port
|
||||
protocol: TCP
|
||||
name: logs-port
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/component: worker
|
||||
|
|
@ -0,0 +1,355 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# Default values for dolphinscheduler-chart.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
timezone: "Asia/Shanghai"
|
||||
|
||||
image:
|
||||
registry: "docker.io"
|
||||
repository: "dolphinscheduler"
|
||||
tag: "1.2.1"
|
||||
pullPolicy: "IfNotPresent"
|
||||
|
||||
imagePullSecrets: []
|
||||
|
||||
# If not exists external postgresql, by default, Dolphinscheduler's database will use it.
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlUsername: "root"
|
||||
postgresqlPassword: "root"
|
||||
postgresqlDatabase: "dolphinscheduler"
|
||||
persistence:
|
||||
enabled: false
|
||||
size: "20Gi"
|
||||
storageClass: "-"
|
||||
|
||||
# If exists external postgresql, and set postgresql.enable value to false.
|
||||
# If postgresql.enable is false, Dolphinscheduler's database will use it.
|
||||
externalDatabase:
|
||||
host: "localhost"
|
||||
port: "5432"
|
||||
username: "root"
|
||||
password: "root"
|
||||
database: "dolphinscheduler"
|
||||
|
||||
# If not exists external zookeeper, by default, Dolphinscheduler's zookeeper will use it.
|
||||
zookeeper:
|
||||
enabled: true
|
||||
taskQueue: "zookeeper"
|
||||
persistence:
|
||||
enabled: false
|
||||
size: "20Gi"
|
||||
storageClass: "-"
|
||||
|
||||
# If exists external zookeeper, and set zookeeper.enable value to false.
|
||||
# If zookeeper.enable is false, Dolphinscheduler's zookeeper will use it.
|
||||
externalZookeeper:
|
||||
taskQueue: "zookeeper"
|
||||
zookeeperQuorum: "127.0.0.1:2181"
|
||||
|
||||
master:
|
||||
podManagementPolicy: "Parallel"
|
||||
replicas: "3"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
configmap:
|
||||
MASTER_EXEC_THREADS: "100"
|
||||
MASTER_EXEC_TASK_NUM: "20"
|
||||
MASTER_HEARTBEAT_INTERVAL: "10"
|
||||
MASTER_TASK_COMMIT_RETRYTIMES: "5"
|
||||
MASTER_TASK_COMMIT_INTERVAL: "1000"
|
||||
MASTER_MAX_CPULOAD_AVG: "100"
|
||||
MASTER_RESERVED_MEMORY: "0.1"
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
persistentVolumeClaim:
|
||||
enabled: false
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "-"
|
||||
storage: "20Gi"
|
||||
|
||||
worker:
|
||||
podManagementPolicy: "Parallel"
|
||||
replicas: "3"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
configmap:
|
||||
WORKER_EXEC_THREADS: "100"
|
||||
WORKER_HEARTBEAT_INTERVAL: "10"
|
||||
WORKER_FETCH_TASK_NUM: "3"
|
||||
WORKER_MAX_CPULOAD_AVG: "100"
|
||||
WORKER_RESERVED_MEMORY: "0.1"
|
||||
DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
|
||||
DOLPHINSCHEDULER_ENV:
|
||||
- "export HADOOP_HOME=/opt/soft/hadoop"
|
||||
- "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
|
||||
- "export SPARK_HOME1=/opt/soft/spark1"
|
||||
- "export SPARK_HOME2=/opt/soft/spark2"
|
||||
- "export PYTHON_HOME=/opt/soft/python"
|
||||
- "export JAVA_HOME=/opt/soft/java"
|
||||
- "export HIVE_HOME=/opt/soft/hive"
|
||||
- "export FLINK_HOME=/opt/soft/flink"
|
||||
- "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
persistentVolumeClaim:
|
||||
enabled: false
|
||||
## dolphinscheduler data volume
|
||||
dataPersistentVolume:
|
||||
enabled: false
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "-"
|
||||
storage: "20Gi"
|
||||
## dolphinscheduler logs volume
|
||||
logsPersistentVolume:
|
||||
enabled: false
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "-"
|
||||
storage: "20Gi"
|
||||
|
||||
alert:
|
||||
strategy:
|
||||
type: "RollingUpdate"
|
||||
rollingUpdate:
|
||||
maxSurge: "25%"
|
||||
maxUnavailable: "25%"
|
||||
replicas: "1"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
configmap:
|
||||
XLS_FILE_PATH: "/tmp/xls"
|
||||
MAIL_SERVER_HOST: ""
|
||||
MAIL_SERVER_PORT: ""
|
||||
MAIL_SENDER: ""
|
||||
MAIL_USER: ""
|
||||
MAIL_PASSWD: ""
|
||||
MAIL_SMTP_STARTTLS_ENABLE: false
|
||||
MAIL_SMTP_SSL_ENABLE: false
|
||||
MAIL_SMTP_SSL_TRUST: ""
|
||||
ENTERPRISE_WECHAT_ENABLE: false
|
||||
ENTERPRISE_WECHAT_CORP_ID: ""
|
||||
ENTERPRISE_WECHAT_SECRET: ""
|
||||
ENTERPRISE_WECHAT_AGENT_ID: ""
|
||||
ENTERPRISE_WECHAT_USERS: ""
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
persistentVolumeClaim:
|
||||
enabled: false
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "-"
|
||||
storage: "20Gi"
|
||||
|
||||
api:
|
||||
strategy:
|
||||
type: "RollingUpdate"
|
||||
rollingUpdate:
|
||||
maxSurge: "25%"
|
||||
maxUnavailable: "25%"
|
||||
replicas: "1"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
persistentVolumeClaim:
|
||||
enabled: false
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "-"
|
||||
storage: "20Gi"
|
||||
|
||||
frontend:
|
||||
strategy:
|
||||
type: "RollingUpdate"
|
||||
rollingUpdate:
|
||||
maxSurge: "25%"
|
||||
maxUnavailable: "25%"
|
||||
replicas: "1"
|
||||
# NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
# Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
# More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
nodeSelector: {}
|
||||
# Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
|
||||
# effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
|
||||
tolerations: []
|
||||
# Affinity is a group of affinity scheduling rules.
|
||||
# If specified, the pod's scheduling constraints.
|
||||
# More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
|
||||
affinity: {}
|
||||
## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
livenessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
|
||||
## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
|
||||
readinessProbe:
|
||||
enabled: true
|
||||
initialDelaySeconds: "30"
|
||||
periodSeconds: "30"
|
||||
timeoutSeconds: "5"
|
||||
failureThreshold: "3"
|
||||
successThreshold: "1"
|
||||
## volumeClaimTemplates is a list of claims that pods are allowed to reference.
|
||||
## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
|
||||
## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
|
||||
## A claim in this list takes precedence over any volumes in the template, with the same name.
|
||||
persistentVolumeClaim:
|
||||
enabled: false
|
||||
accessModes:
|
||||
- "ReadWriteOnce"
|
||||
storageClassName: "-"
|
||||
storage: "20Gi"
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
host: "dolphinscheduler.org"
|
||||
path: "/"
|
||||
tls:
|
||||
enabled: false
|
||||
hosts:
|
||||
- "dolphinscheduler.org"
|
||||
secretName: "dolphinscheduler-tls"
|
||||
|
|
@ -1,3 +1,18 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
version: '2'
|
||||
services:
|
||||
zookeeper:
|
||||
|
|
|
|||
|
|
@ -15,122 +15,81 @@
|
|||
# limitations under the License.
|
||||
#
|
||||
|
||||
FROM ubuntu:18.04
|
||||
FROM nginx:alpine
|
||||
|
||||
ENV LANG=C.UTF-8
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ARG VERSION
|
||||
|
||||
ARG version
|
||||
ARG tar_version
|
||||
ENV TZ Asia/Shanghai
|
||||
ENV LANG C.UTF-8
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
|
||||
#1,install jdk
|
||||
#1. install dos2unix shadow bash openrc python sudo vim wget iputils net-tools ssh pip tini kazoo.
|
||||
#If install slowly, you can replcae alpine's mirror with aliyun's mirror, Example:
|
||||
#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories
|
||||
RUN apk update && \
|
||||
apk add dos2unix shadow bash openrc python sudo vim wget iputils net-tools openssh-server py2-pip tini && \
|
||||
apk add --update procps && \
|
||||
openrc boot && \
|
||||
pip install kazoo
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get -y install openjdk-8-jdk \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
|
||||
#2. install jdk
|
||||
RUN apk add openjdk8
|
||||
ENV JAVA_HOME /usr/lib/jvm/java-1.8-openjdk
|
||||
ENV PATH $JAVA_HOME/bin:$PATH
|
||||
|
||||
|
||||
#install wget
|
||||
RUN apt-get update && \
|
||||
apt-get -y install wget
|
||||
#2,install ZK
|
||||
|
||||
#3. install zk
|
||||
RUN cd /opt && \
|
||||
wget https://www-us.apache.org/dist/zookeeper/zookeeper-3.4.14/zookeeper-3.4.14.tar.gz && \
|
||||
tar -zxvf zookeeper-3.4.14.tar.gz && \
|
||||
mv zookeeper-3.4.14 zookeeper && \
|
||||
rm -rf ./zookeeper-*tar.gz && \
|
||||
wget https://downloads.apache.org/zookeeper/zookeeper-3.5.7/apache-zookeeper-3.5.7-bin.tar.gz && \
|
||||
tar -zxvf apache-zookeeper-3.5.7-bin.tar.gz && \
|
||||
mv apache-zookeeper-3.5.7-bin zookeeper && \
|
||||
mkdir -p /tmp/zookeeper && \
|
||||
rm -rf ./zookeeper-*tar.gz && \
|
||||
rm -rf /opt/zookeeper/conf/zoo_sample.cfg
|
||||
ADD ./conf/zookeeper/zoo.cfg /opt/zookeeper/conf
|
||||
ENV ZK_HOME /opt/zookeeper
|
||||
ENV PATH $ZK_HOME/bin:$PATH
|
||||
|
||||
ADD ./dockerfile/conf/zookeeper/zoo.cfg /opt/zookeeper/conf
|
||||
ENV ZK_HOME=/opt/zookeeper
|
||||
ENV PATH $PATH:$ZK_HOME/bin
|
||||
#4. install pg
|
||||
RUN apk add postgresql postgresql-contrib
|
||||
|
||||
#3,install maven
|
||||
RUN cd /opt && \
|
||||
wget http://apache-mirror.rbc.ru/pub/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz && \
|
||||
tar -zxvf apache-maven-3.3.9-bin.tar.gz && \
|
||||
mv apache-maven-3.3.9 maven && \
|
||||
rm -rf ./apache-maven-*tar.gz && \
|
||||
rm -rf /opt/maven/conf/settings.xml
|
||||
ADD ./dockerfile/conf/maven/settings.xml /opt/maven/conf
|
||||
ENV MAVEN_HOME=/opt/maven
|
||||
ENV PATH $PATH:$MAVEN_HOME/bin
|
||||
#5. add dolphinscheduler
|
||||
ADD ./apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin.tar.gz /opt/
|
||||
RUN mv /opt/apache-dolphinscheduler-incubating-${VERSION}-SNAPSHOT-dolphinscheduler-bin/ /opt/dolphinscheduler/
|
||||
ENV DOLPHINSCHEDULER_HOME /opt/dolphinscheduler
|
||||
|
||||
#4,install node
|
||||
RUN cd /opt && \
|
||||
wget https://nodejs.org/download/release/v8.9.4/node-v8.9.4-linux-x64.tar.gz && \
|
||||
tar -zxvf node-v8.9.4-linux-x64.tar.gz && \
|
||||
mv node-v8.9.4-linux-x64 node && \
|
||||
rm -rf ./node-v8.9.4-*tar.gz
|
||||
ENV NODE_HOME=/opt/node
|
||||
ENV PATH $PATH:$NODE_HOME/bin
|
||||
#6. modify nginx
|
||||
RUN echo "daemon off;" >> /etc/nginx/nginx.conf && \
|
||||
rm -rf /etc/nginx/conf.d/*
|
||||
ADD ./conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
|
||||
|
||||
#5,install postgresql
|
||||
RUN apt-get update && \
|
||||
apt-get install -y postgresql postgresql-contrib sudo && \
|
||||
sed -i 's/localhost/*/g' /etc/postgresql/10/main/postgresql.conf
|
||||
#7. add configuration and modify permissions and set soft links
|
||||
ADD ./checkpoint.sh /root/checkpoint.sh
|
||||
ADD ./startup-init-conf.sh /root/startup-init-conf.sh
|
||||
ADD ./startup.sh /root/startup.sh
|
||||
ADD ./conf/dolphinscheduler/*.tpl /opt/dolphinscheduler/conf/
|
||||
ADD conf/dolphinscheduler/env/dolphinscheduler_env.sh /opt/dolphinscheduler/conf/env/
|
||||
RUN chmod +x /root/checkpoint.sh && \
|
||||
chmod +x /root/startup-init-conf.sh && \
|
||||
chmod +x /root/startup.sh && \
|
||||
chmod +x /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
|
||||
chmod +x /opt/dolphinscheduler/script/*.sh && \
|
||||
chmod +x /opt/dolphinscheduler/bin/*.sh && \
|
||||
chmod +x /opt/zookeeper/bin/*.sh && \
|
||||
dos2unix /root/checkpoint.sh && \
|
||||
dos2unix /root/startup-init-conf.sh && \
|
||||
dos2unix /root/startup.sh && \
|
||||
dos2unix /opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh && \
|
||||
dos2unix /opt/dolphinscheduler/script/*.sh && \
|
||||
dos2unix /opt/dolphinscheduler/bin/*.sh && \
|
||||
dos2unix /opt/zookeeper/bin/*.sh && \
|
||||
rm -rf /bin/sh && \
|
||||
ln -s /bin/bash /bin/sh && \
|
||||
mkdir -p /tmp/xls
|
||||
|
||||
#6,install nginx
|
||||
RUN apt-get update && \
|
||||
apt-get install -y nginx && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
echo "\ndaemon off;" >> /etc/nginx/nginx.conf && \
|
||||
chown -R www-data:www-data /var/lib/nginx
|
||||
#8. remove apk index cache
|
||||
RUN rm -rf /var/cache/apk/*
|
||||
|
||||
#7,install sudo,python,vim,ping and ssh command
|
||||
RUN apt-get update && \
|
||||
apt-get -y install sudo && \
|
||||
apt-get -y install python && \
|
||||
apt-get -y install vim && \
|
||||
apt-get -y install iputils-ping && \
|
||||
apt-get -y install net-tools && \
|
||||
apt-get -y install openssh-server && \
|
||||
apt-get -y install python-pip && \
|
||||
pip install kazoo
|
||||
#9. expose port
|
||||
EXPOSE 2181 2888 3888 5432 12345 50051 8888
|
||||
|
||||
#8,add dolphinscheduler source code to /opt/dolphinscheduler_source
|
||||
ADD . /opt/dolphinscheduler_source
|
||||
|
||||
|
||||
#9,backend compilation
|
||||
RUN cd /opt/dolphinscheduler_source && \
|
||||
mvn clean package -Prelease -Dmaven.test.skip=true
|
||||
|
||||
#10,frontend compilation
|
||||
RUN chmod -R 777 /opt/dolphinscheduler_source/dolphinscheduler-ui && \
|
||||
cd /opt/dolphinscheduler_source/dolphinscheduler-ui && \
|
||||
rm -rf /opt/dolphinscheduler_source/dolphinscheduler-ui/node_modules && \
|
||||
npm install node-sass --unsafe-perm && \
|
||||
npm install && \
|
||||
npm run build
|
||||
|
||||
#11,modify dolphinscheduler configuration file
|
||||
#backend configuration
|
||||
RUN tar -zxvf /opt/dolphinscheduler_source/dolphinscheduler-dist/dolphinscheduler-backend/target/apache-dolphinscheduler-incubating-${tar_version}-dolphinscheduler-backend-bin.tar.gz -C /opt && \
|
||||
mv /opt/apache-dolphinscheduler-incubating-${tar_version}-dolphinscheduler-backend-bin /opt/dolphinscheduler && \
|
||||
rm -rf /opt/dolphinscheduler/conf
|
||||
|
||||
ADD ./dockerfile/conf/dolphinscheduler/conf /opt/dolphinscheduler/conf
|
||||
#frontend nginx configuration
|
||||
ADD ./dockerfile/conf/nginx/dolphinscheduler.conf /etc/nginx/conf.d
|
||||
|
||||
#12,open port
|
||||
EXPOSE 2181 2888 3888 3306 80 12345 8888
|
||||
|
||||
COPY ./dockerfile/startup.sh /root/startup.sh
|
||||
#13,modify permissions and set soft links
|
||||
RUN chmod +x /root/startup.sh && \
|
||||
chmod +x /opt/dolphinscheduler/script/create-dolphinscheduler.sh && \
|
||||
chmod +x /opt/zookeeper/bin/zkServer.sh && \
|
||||
chmod +x /opt/dolphinscheduler/bin/dolphinscheduler-daemon.sh && \
|
||||
rm -rf /bin/sh && \
|
||||
ln -s /bin/bash /bin/sh && \
|
||||
mkdir -p /tmp/xls
|
||||
|
||||
|
||||
ENTRYPOINT ["/root/startup.sh"]
|
||||
ENTRYPOINT ["/sbin/tini", "--", "/root/startup.sh"]
|
||||
|
|
@ -1,11 +1,328 @@
|
|||
## Build Image
|
||||
```
|
||||
cd ..
|
||||
docker build -t dolphinscheduler --build-arg version=1.1.0 --build-arg tar_version=1.1.0-SNAPSHOT -f dockerfile/Dockerfile .
|
||||
docker run -p 12345:12345 -p 8888:8888 --rm --name dolphinscheduler -d dolphinscheduler
|
||||
```
|
||||
* Visit the url: http://127.0.0.1:8888
|
||||
* UserName:admin Password:dolphinscheduler123
|
||||
## What is Dolphin Scheduler?
|
||||
|
||||
## Note
|
||||
* MacOS: The memory of docker needs to be set to 4G, default 2G. Steps: Preferences -> Advanced -> adjust resources -> Apply & Restart
|
||||
Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
|
||||
|
||||
Github URL: https://github.com/apache/incubator-dolphinscheduler
|
||||
|
||||
Official Website: https://dolphinscheduler.apache.org
|
||||
|
||||

|
||||
|
||||
[](README.md)
|
||||
[](README_zh_CN.md)
|
||||
|
||||
## How to use this docker image
|
||||
|
||||
#### You can start a dolphinscheduler instance
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -e POSTGRESQL_DATABASE=dolphinscheduler \
|
||||
-p 8888:8888 \
|
||||
dolphinscheduler all
|
||||
```
|
||||
|
||||
The default postgres user `root`, postgres password `root` and database `dolphinscheduler` are created in the `startup.sh`.
|
||||
|
||||
The default zookeeper is created in the `startup.sh`.
|
||||
|
||||
#### Or via Environment Variables **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`POSTGRESQL_DATABASE`** **`ZOOKEEPER_QUORUM`**
|
||||
|
||||
You can specify **existing postgres service**. Example:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
-p 8888:8888 \
|
||||
dolphinscheduler all
|
||||
```
|
||||
|
||||
You can specify **existing zookeeper service**. Example:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-p 8888:8888 \
|
||||
dolphinscheduler all
|
||||
```
|
||||
|
||||
#### Or start a standalone dolphinscheduler server
|
||||
|
||||
You can start a standalone dolphinscheduler server.
|
||||
|
||||
* Start a **master server**, For example:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
dolphinscheduler master-server
|
||||
```
|
||||
|
||||
* Start a **worker server**, For example:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
dolphinscheduler worker-server
|
||||
```
|
||||
|
||||
* Start a **api server**, For example:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
-p 12345:12345 \
|
||||
dolphinscheduler api-server
|
||||
```
|
||||
|
||||
* Start a **alert server**, For example:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
dolphinscheduler alert-server
|
||||
```
|
||||
|
||||
* Start a **frontend**, For example:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
|
||||
-p 8888:8888 \
|
||||
dolphinscheduler frontend
|
||||
```
|
||||
|
||||
**Note**: You must specify `POSTGRESQL_HOST` `POSTGRESQL_PORT` `POSTGRESQL_DATABASE` `POSTGRESQL_USERNAME` `POSTGRESQL_PASSWORD` `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server.
|
||||
|
||||
## How to build a docker image
|
||||
|
||||
You can build a docker image in a Unix-like operating system, and you can also build it in a Windows operating system.
|
||||
|
||||
In Unix-Like, Example:
|
||||
|
||||
```bash
|
||||
$ cd path/incubator-dolphinscheduler
|
||||
$ sh ./dockerfile/hooks/build
|
||||
```
|
||||
|
||||
In Windows, Example:
|
||||
|
||||
```bat
|
||||
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat
|
||||
```
|
||||
|
||||
Please read `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat` script files if you don't understand
|
||||
|
||||
## Environment Variables
|
||||
|
||||
The Dolphin Scheduler image uses several environment variables which are easy to miss. While none of the variables are required, they may significantly aid you in using the image.
|
||||
|
||||
**`POSTGRESQL_HOST`**
|
||||
|
||||
This environment variable sets the host for PostgreSQL. The default value is `127.0.0.1`.
|
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
|
||||
|
||||
**`POSTGRESQL_PORT`**
|
||||
|
||||
This environment variable sets the port for PostgreSQL. The default value is `5432`.
|
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
|
||||
|
||||
**`POSTGRESQL_USERNAME`**
|
||||
|
||||
This environment variable sets the username for PostgreSQL. The default value is `root`.
|
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
|
||||
|
||||
**`POSTGRESQL_PASSWORD`**
|
||||
|
||||
This environment variable sets the password for PostgreSQL. The default value is `root`.
|
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
|
||||
|
||||
**`POSTGRESQL_DATABASE`**
|
||||
|
||||
This environment variable sets the database for PostgreSQL. The default value is `dolphinscheduler`.
|
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server`, `alert-server`.
|
||||
|
||||
**`DOLPHINSCHEDULER_ENV_PATH`**
|
||||
|
||||
This environment variable sets the runtime environment for task. The default value is `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`.
|
||||
|
||||
**`DOLPHINSCHEDULER_DATA_BASEDIR_PATH`**
|
||||
|
||||
User data directory path, self configuration, please make sure the directory exists and have read write permissions. The default value is `/tmp/dolphinscheduler`
|
||||
|
||||
**`DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH`**
|
||||
|
||||
Directory path for user data download. self configuration, please make sure the directory exists and have read write permissions. The default value is `/tmp/dolphinscheduler/download`
|
||||
|
||||
**`DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH`**
|
||||
|
||||
Process execute directory. self configuration, please make sure the directory exists and have read write permissions. The default value is `/tmp/dolphinscheduler/exec`
|
||||
|
||||
**`TASK_QUEUE`**
|
||||
|
||||
This environment variable sets the task queue for `master-server` and `worker-server`. The default value is `zookeeper`.
|
||||
|
||||
**`ZOOKEEPER_QUORUM`**
|
||||
|
||||
This environment variable sets zookeeper quorum for `master-server` and `worker-server`. The default value is `127.0.0.1:2181`.
|
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`.
|
||||
|
||||
**`MASTER_EXEC_THREADS`**
|
||||
|
||||
This environment variable sets exec thread num for `master-server`. The default value is `100`.
|
||||
|
||||
**`MASTER_EXEC_TASK_NUM`**
|
||||
|
||||
This environment variable sets exec task num for `master-server`. The default value is `20`.
|
||||
|
||||
**`MASTER_HEARTBEAT_INTERVAL`**
|
||||
|
||||
This environment variable sets heartbeat interval for `master-server`. The default value is `10`.
|
||||
|
||||
**`MASTER_TASK_COMMIT_RETRYTIMES`**
|
||||
|
||||
This environment variable sets task commit retry times for `master-server`. The default value is `5`.
|
||||
|
||||
**`MASTER_TASK_COMMIT_INTERVAL`**
|
||||
|
||||
This environment variable sets task commit interval for `master-server`. The default value is `1000`.
|
||||
|
||||
**`MASTER_MAX_CPULOAD_AVG`**
|
||||
|
||||
This environment variable sets max cpu load avg for `master-server`. The default value is `100`.
|
||||
|
||||
**`MASTER_RESERVED_MEMORY`**
|
||||
|
||||
This environment variable sets reserved memory for `master-server`. The default value is `0.1`.
|
||||
|
||||
**`WORKER_EXEC_THREADS`**
|
||||
|
||||
This environment variable sets exec thread num for `worker-server`. The default value is `100`.
|
||||
|
||||
**`WORKER_HEARTBEAT_INTERVAL`**
|
||||
|
||||
This environment variable sets heartbeat interval for `worker-server`. The default value is `10`.
|
||||
|
||||
**`WORKER_FETCH_TASK_NUM`**
|
||||
|
||||
This environment variable sets fetch task num for `worker-server`. The default value is `3`.
|
||||
|
||||
**`WORKER_MAX_CPULOAD_AVG`**
|
||||
|
||||
This environment variable sets max cpu load avg for `worker-server`. The default value is `100`.
|
||||
|
||||
**`WORKER_RESERVED_MEMORY`**
|
||||
|
||||
This environment variable sets reserved memory for `worker-server`. The default value is `0.1`.
|
||||
|
||||
**`XLS_FILE_PATH`**
|
||||
|
||||
This environment variable sets xls file path for `alert-server`. The default value is `/tmp/xls`.
|
||||
|
||||
**`MAIL_SERVER_HOST`**
|
||||
|
||||
This environment variable sets mail server host for `alert-server`. The default value is empty.
|
||||
|
||||
**`MAIL_SERVER_PORT`**
|
||||
|
||||
This environment variable sets mail server port for `alert-server`. The default value is empty.
|
||||
|
||||
**`MAIL_SENDER`**
|
||||
|
||||
This environment variable sets mail sender for `alert-server`. The default value is empty.
|
||||
|
||||
**`MAIL_USER`**
|
||||
|
||||
This environment variable sets mail user for `alert-server`. The default value is empty.
|
||||
|
||||
**`MAIL_PASSWD`**
|
||||
|
||||
This environment variable sets mail password for `alert-server`. The default value is empty.
|
||||
|
||||
**`MAIL_SMTP_STARTTLS_ENABLE`**
|
||||
|
||||
This environment variable sets SMTP tls for `alert-server`. The default value is `true`.
|
||||
|
||||
**`MAIL_SMTP_SSL_ENABLE`**
|
||||
|
||||
This environment variable sets SMTP ssl for `alert-server`. The default value is `false`.
|
||||
|
||||
**`MAIL_SMTP_SSL_TRUST`**
|
||||
|
||||
This environment variable sets SMTP ssl trust for `alert-server`. The default value is empty.
|
||||
|
||||
**`ENTERPRISE_WECHAT_ENABLE`**
|
||||
|
||||
This environment variable sets enterprise wechat enable for `alert-server`. The default value is `false`.
|
||||
|
||||
**`ENTERPRISE_WECHAT_CORP_ID`**
|
||||
|
||||
This environment variable sets enterprise wechat corp id for `alert-server`. The default value is empty.
|
||||
|
||||
**`ENTERPRISE_WECHAT_SECRET`**
|
||||
|
||||
This environment variable sets enterprise wechat secret for `alert-server`. The default value is empty.
|
||||
|
||||
**`ENTERPRISE_WECHAT_AGENT_ID`**
|
||||
|
||||
This environment variable sets enterprise wechat agent id for `alert-server`. The default value is empty.
|
||||
|
||||
**`ENTERPRISE_WECHAT_USERS`**
|
||||
|
||||
This environment variable sets enterprise wechat users for `alert-server`. The default value is empty.
|
||||
|
||||
**`FRONTEND_API_SERVER_HOST`**
|
||||
|
||||
This environment variable sets api server host for `frontend`. The default value is `127.0.0.1`.
|
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `api-server`.
|
||||
|
||||
**`FRONTEND_API_SERVER_PORT`**
|
||||
|
||||
This environment variable sets api server port for `frontend`. The default value is `12345`.
|
||||
|
||||
**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `api-server`.
|
||||
|
||||
## Initialization scripts
|
||||
|
||||
If you would like to do additional initialization in an image derived from this one, add one or more environment variable under `/root/start-init-conf.sh`, and modify template files in `/opt/dolphinscheduler/conf/*.tpl`.
|
||||
|
||||
For example, to add an environment variable `API_SERVER_PORT` in `/root/start-init-conf.sh`:
|
||||
|
||||
```
|
||||
export API_SERVER_PORT=5555
|
||||
```
|
||||
|
||||
and to modify `/opt/dolphinscheduler/conf/application-api.properties.tpl` template file, add server port:
|
||||
```
|
||||
server.port=${API_SERVER_PORT}
|
||||
```
|
||||
|
||||
`/root/start-init-conf.sh` will dynamically generate config file:
|
||||
|
||||
```sh
|
||||
echo "generate app config"
|
||||
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
|
||||
eval "cat << EOF
|
||||
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
|
||||
EOF
|
||||
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
|
||||
done
|
||||
|
||||
echo "generate nginx config"
|
||||
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
|
||||
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
|
||||
```
|
||||
|
|
|
|||
|
|
@ -0,0 +1,328 @@
|
|||
## Dolphin Scheduler是什么?
|
||||
|
||||
一个分布式易扩展的可视化DAG工作流任务调度系统。致力于解决数据处理流程中错综复杂的依赖关系,使调度系统在数据处理流程中`开箱即用`。
|
||||
|
||||
Github URL: https://github.com/apache/incubator-dolphinscheduler
|
||||
|
||||
Official Website: https://dolphinscheduler.apache.org
|
||||
|
||||

|
||||
|
||||
[](README.md)
|
||||
[](README_zh_CN.md)
|
||||
|
||||
## 如何使用docker镜像
|
||||
|
||||
#### 你可以运行一个dolphinscheduler实例
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e POSTGRESQL_USERNAME=test -e POSTGRESQL_PASSWORD=test -e POSTGRESQL_DATABASE=dolphinscheduler \
|
||||
-p 8888:8888 \
|
||||
dolphinscheduler all
|
||||
```
|
||||
|
||||
在`startup.sh`脚本中,默认的创建`Postgres`的用户、密码和数据库,默认值分别为:`root`、`root`、`dolphinscheduler`。
|
||||
|
||||
同时,默认的`Zookeeper`也会在`startup.sh`脚本中被创建。
|
||||
|
||||
#### 或者通过环境变量 **`POSTGRESQL_HOST`** **`POSTGRESQL_PORT`** **`ZOOKEEPER_QUORUM`** 使用已存在的服务
|
||||
|
||||
你可以指定一个已经存在的 **`Postgres`** 服务. 如下:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
-p 8888:8888 \
|
||||
dolphinscheduler all
|
||||
```
|
||||
|
||||
你也可以指定一个已经存在的 **Zookeeper** 服务. 如下:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-p 8888:8888 \
|
||||
dolphinscheduler all
|
||||
```
|
||||
|
||||
#### 或者运行dolphinscheduler中的部分服务
|
||||
|
||||
你能够运行dolphinscheduler中的部分服务。
|
||||
|
||||
* 启动一个 **master server**, 如下:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
dolphinscheduler master-server
|
||||
```
|
||||
|
||||
* 启动一个 **worker server**, 如下:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
dolphinscheduler worker-server
|
||||
```
|
||||
|
||||
* 启动一个 **api server**, 如下:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
-p 12345:12345 \
|
||||
dolphinscheduler api-server
|
||||
```
|
||||
|
||||
* 启动一个 **alert server**, 如下:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e POSTGRESQL_HOST="192.168.x.x" -e POSTGRESQL_PORT="5432" -e POSTGRESQL_DATABASE="dolphinscheduler" \
|
||||
-e POSTGRESQL_USERNAME="test" -e POSTGRESQL_PASSWORD="test" \
|
||||
dolphinscheduler alert-server
|
||||
```
|
||||
|
||||
* 启动一个 **frontend**, 如下:
|
||||
|
||||
```
|
||||
$ docker run -dit --name dolphinscheduler \
|
||||
-e FRONTEND_API_SERVER_HOST="192.168.x.x" -e FRONTEND_API_SERVER_PORT="12345" \
|
||||
-p 8888:8888 \
|
||||
dolphinscheduler frontend
|
||||
```
|
||||
|
||||
**注意**: 当你运行dolphinscheduler中的部分服务时,你必须指定这些环境变量 `POSTGRESQL_HOST` `POSTGRESQL_PORT` `POSTGRESQL_DATABASE` `POSTGRESQL_USERNAME` `POSTGRESQL_PASSWORD` `ZOOKEEPER_QUORUM`。
|
||||
|
||||
## 如何构建一个docker镜像
|
||||
|
||||
你能够在类Unix系统和Windows系统中构建一个docker镜像。
|
||||
|
||||
类Unix系统, 如下:
|
||||
|
||||
```bash
|
||||
$ cd path/incubator-dolphinscheduler
|
||||
$ sh ./dockerfile/hooks/build
|
||||
```
|
||||
|
||||
Windows系统, 如下:
|
||||
|
||||
```bat
|
||||
c:\incubator-dolphinscheduler>.\dockerfile\hooks\build.bat
|
||||
```
|
||||
|
||||
如果你不理解这些脚本 `./dockerfile/hooks/build` `./dockerfile/hooks/build.bat`,请阅读里面的内容。
|
||||
|
||||
## 环境变量
|
||||
|
||||
Dolphin Scheduler映像使用了几个容易遗漏的环境变量。虽然这些变量不是必须的,但是可以帮助你更容易配置镜像并根据你的需求定义相应的服务配置。
|
||||
|
||||
**`POSTGRESQL_HOST`**
|
||||
|
||||
配置`PostgreSQL`的`HOST`, 默认值 `127.0.0.1`。
|
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。
|
||||
|
||||
**`POSTGRESQL_PORT`**
|
||||
|
||||
配置`PostgreSQL`的`PORT`, 默认值 `5432`。
|
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。
|
||||
|
||||
**`POSTGRESQL_USERNAME`**
|
||||
|
||||
配置`PostgreSQL`的`USERNAME`, 默认值 `root`。
|
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。
|
||||
|
||||
**`POSTGRESQL_PASSWORD`**
|
||||
|
||||
配置`PostgreSQL`的`PASSWORD`, 默认值 `root`。
|
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。
|
||||
|
||||
**`POSTGRESQL_DATABASE`**
|
||||
|
||||
配置`PostgreSQL`的`DATABASE`, 默认值 `dolphinscheduler`。
|
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`、`api-server`、`alert-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。
|
||||
|
||||
**`DOLPHINSCHEDULER_ENV_PATH`**
|
||||
|
||||
任务执行时的环境变量配置文件, 默认值 `/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh`。
|
||||
|
||||
**`DOLPHINSCHEDULER_DATA_BASEDIR_PATH`**
|
||||
|
||||
用户数据目录, 用户自己配置, 请确保这个目录存在并且用户读写权限, 默认值 `/tmp/dolphinscheduler`。
|
||||
|
||||
**`DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH`**
|
||||
|
||||
用户数据下载目录, 用户自己配置, 请确保这个目录存在并且用户读写权限, 默认值 `/tmp/dolphinscheduler/download`。
|
||||
|
||||
**`DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH`**
|
||||
|
||||
任务执行目录, 用户自己配置, 请确保这个目录存在并且用户读写权限, 默认值 `/tmp/dolphinscheduler/exec`。
|
||||
|
||||
**`TASK_QUEUE`**
|
||||
|
||||
配置`master-server`和`worker-server`的`Zookeeper`任务队列名, 默认值 `zookeeper`。
|
||||
|
||||
**`ZOOKEEPER_QUORUM`**
|
||||
|
||||
配置`master-server`和`worker-server`的`Zookeeper`地址, 默认值 `127.0.0.1:2181`。
|
||||
|
||||
**注意**: 当运行`dolphinscheduler`中`master-server`、`worker-server`这些服务时,必须指定这个环境变量,以便于你更好的搭建分布式服务。
|
||||
|
||||
**`MASTER_EXEC_THREADS`**
|
||||
|
||||
配置`master-server`中的执行线程数量,默认值 `100`。
|
||||
|
||||
**`MASTER_EXEC_TASK_NUM`**
|
||||
|
||||
配置`master-server`中的执行任务数量,默认值 `20`。
|
||||
|
||||
**`MASTER_HEARTBEAT_INTERVAL`**
|
||||
|
||||
配置`master-server`中的心跳交互时间,默认值 `10`。
|
||||
|
||||
**`MASTER_TASK_COMMIT_RETRYTIMES`**
|
||||
|
||||
配置`master-server`中的任务提交重试次数,默认值 `5`。
|
||||
|
||||
**`MASTER_TASK_COMMIT_INTERVAL`**
|
||||
|
||||
配置`master-server`中的任务提交交互时间,默认值 `1000`。
|
||||
|
||||
**`MASTER_MAX_CPULOAD_AVG`**
|
||||
|
||||
配置`master-server`中的CPU中的`load average`值,默认值 `100`。
|
||||
|
||||
**`MASTER_RESERVED_MEMORY`**
|
||||
|
||||
配置`master-server`的保留内存,默认值 `0.1`。
|
||||
|
||||
**`WORKER_EXEC_THREADS`**
|
||||
|
||||
配置`worker-server`中的执行线程数量,默认值 `100`。
|
||||
|
||||
**`WORKER_HEARTBEAT_INTERVAL`**
|
||||
|
||||
配置`worker-server`中的心跳交互时间,默认值 `10`。
|
||||
|
||||
**`WORKER_FETCH_TASK_NUM`**
|
||||
|
||||
配置`worker-server`中的获取任务的数量,默认值 `3`。
|
||||
|
||||
**`WORKER_MAX_CPULOAD_AVG`**
|
||||
|
||||
配置`worker-server`中的CPU中的最大`load average`值,默认值 `100`。
|
||||
|
||||
**`WORKER_RESERVED_MEMORY`**
|
||||
|
||||
配置`worker-server`的保留内存,默认值 `0.1`。
|
||||
|
||||
**`XLS_FILE_PATH`**
|
||||
|
||||
配置`alert-server`的`XLS`文件的存储路径,默认值 `/tmp/xls`。
|
||||
|
||||
**`MAIL_SERVER_HOST`**
|
||||
|
||||
配置`alert-server`的邮件服务地址,默认值 `空`。
|
||||
|
||||
**`MAIL_SERVER_PORT`**
|
||||
|
||||
配置`alert-server`的邮件服务端口,默认值 `空`。
|
||||
|
||||
**`MAIL_SENDER`**
|
||||
|
||||
配置`alert-server`的邮件发送人,默认值 `空`。
|
||||
|
||||
**`MAIL_USER`**
|
||||
|
||||
配置`alert-server`的邮件服务用户名,默认值 `空`。
|
||||
|
||||
**`MAIL_PASSWD`**
|
||||
|
||||
配置`alert-server`的邮件服务用户密码,默认值 `空`。
|
||||
|
||||
**`MAIL_SMTP_STARTTLS_ENABLE`**
|
||||
|
||||
配置`alert-server`的邮件服务是否启用TLS,默认值 `true`。
|
||||
|
||||
**`MAIL_SMTP_SSL_ENABLE`**
|
||||
|
||||
配置`alert-server`的邮件服务是否启用SSL,默认值 `false`。
|
||||
|
||||
**`MAIL_SMTP_SSL_TRUST`**
|
||||
|
||||
配置`alert-server`的邮件服务SSL的信任地址,默认值 `空`。
|
||||
|
||||
**`ENTERPRISE_WECHAT_ENABLE`**
|
||||
|
||||
配置`alert-server`的邮件服务是否启用企业微信,默认值 `false`。
|
||||
|
||||
**`ENTERPRISE_WECHAT_CORP_ID`**
|
||||
|
||||
配置`alert-server`的邮件服务企业微信`ID`,默认值 `空`。
|
||||
|
||||
**`ENTERPRISE_WECHAT_SECRET`**
|
||||
|
||||
配置`alert-server`的邮件服务企业微信`SECRET`,默认值 `空`。
|
||||
|
||||
**`ENTERPRISE_WECHAT_AGENT_ID`**
|
||||
|
||||
配置`alert-server`的邮件服务企业微信`AGENT_ID`,默认值 `空`。
|
||||
|
||||
**`ENTERPRISE_WECHAT_USERS`**
|
||||
|
||||
配置`alert-server`的邮件服务企业微信`USERS`,默认值 `空`。
|
||||
|
||||
**`FRONTEND_API_SERVER_HOST`**
|
||||
|
||||
配置`frontend`的连接`api-server`的地址,默认值 `127.0.0.1`。
|
||||
|
||||
**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。
|
||||
|
||||
**`FRONTEND_API_SERVER_PORT`**
|
||||
|
||||
配置`frontend`的连接`api-server`的端口,默认值 `12345`。
|
||||
|
||||
**Note**: 当单独运行`api-server`时,你应该指定`api-server`这个值。
|
||||
|
||||
## 初始化脚本
|
||||
|
||||
如果你想在编译的时候或者运行的时候附加一些其它的操作及新增一些环境变量,你可以在`/root/start-init-conf.sh`文件中进行修改,同时如果涉及到配置文件的修改,请在`/opt/dolphinscheduler/conf/*.tpl`中修改相应的配置文件
|
||||
|
||||
例如,在`/root/start-init-conf.sh`添加一个环境变量`API_SERVER_PORT`:
|
||||
|
||||
```
|
||||
export API_SERVER_PORT=5555
|
||||
```
|
||||
|
||||
当添加以上环境变量后,你应该在相应的模板文件`/opt/dolphinscheduler/conf/application-api.properties.tpl`中添加这个环境变量配置:
|
||||
```
|
||||
server.port=${API_SERVER_PORT}
|
||||
```
|
||||
|
||||
`/root/start-init-conf.sh`将根据模板文件动态的生成配置文件:
|
||||
|
||||
```sh
|
||||
echo "generate app config"
|
||||
ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
|
||||
eval "cat << EOF
|
||||
$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
|
||||
EOF
|
||||
" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
|
||||
done
|
||||
|
||||
echo "generate nginx config"
|
||||
sed -i "s/FRONTEND_API_SERVER_HOST/${FRONTEND_API_SERVER_HOST}/g" /etc/nginx/conf.d/dolphinscheduler.conf
|
||||
sed -i "s/FRONTEND_API_SERVER_PORT/${FRONTEND_API_SERVER_PORT}/g" /etc/nginx/conf.d/dolphinscheduler.conf
|
||||
```
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
set -e
|
||||
|
||||
if [ "$(ps -ef | grep java | grep -c $1)" -eq 0 ]; then
|
||||
echo "[ERROR] $1 process does not exist."
|
||||
exit 1
|
||||
else
|
||||
echo "[INFO] $1 process exists."
|
||||
exit 0
|
||||
fi
|
||||
|
|
@ -14,33 +14,33 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
#alert type is EMAIL/SMS
|
||||
alert.type=EMAIL
|
||||
|
||||
# alter msg template, default is html template
|
||||
#alert.template=html
|
||||
# mail server configuration
|
||||
mail.protocol=SMTP
|
||||
mail.server.host=smtp.126.com
|
||||
mail.server.port=
|
||||
mail.sender=dolphinscheduler@126.com
|
||||
mail.user=dolphinscheduler@126.com
|
||||
mail.passwd=escheduler123
|
||||
|
||||
mail.server.host=${MAIL_SERVER_HOST}
|
||||
mail.server.port=${MAIL_SERVER_PORT}
|
||||
mail.sender=${MAIL_SENDER}
|
||||
mail.user=${MAIL_USER}
|
||||
mail.passwd=${MAIL_PASSWD}
|
||||
# TLS
|
||||
mail.smtp.starttls.enable=false
|
||||
mail.smtp.starttls.enable=${MAIL_SMTP_STARTTLS_ENABLE}
|
||||
# SSL
|
||||
mail.smtp.ssl.enable=true
|
||||
mail.smtp.ssl.trust=smtp.126.com
|
||||
mail.smtp.ssl.enable=${MAIL_SMTP_SSL_ENABLE}
|
||||
mail.smtp.ssl.trust=${MAIL_SMTP_SSL_TRUST}
|
||||
|
||||
#xls file path,need create if not exist
|
||||
xls.file.path=/tmp/xls
|
||||
xls.file.path=${XLS_FILE_PATH}
|
||||
|
||||
# Enterprise WeChat configuration
|
||||
enterprise.wechat.enable=false
|
||||
enterprise.wechat.corp.id=xxxxxxx
|
||||
enterprise.wechat.secret=xxxxxxx
|
||||
enterprise.wechat.agent.id=xxxxxxx
|
||||
enterprise.wechat.users=xxxxxxx
|
||||
enterprise.wechat.enable=${ENTERPRISE_WECHAT_ENABLE}
|
||||
enterprise.wechat.corp.id=${ENTERPRISE_WECHAT_CORP_ID}
|
||||
enterprise.wechat.secret=${ENTERPRISE_WECHAT_SECRET}
|
||||
enterprise.wechat.agent.id=${ENTERPRISE_WECHAT_AGENT_ID}
|
||||
enterprise.wechat.users=${ENTERPRISE_WECHAT_USERS}
|
||||
enterprise.wechat.token.url=https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=$corpId&corpsecret=$secret
|
||||
enterprise.wechat.push.url=https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=$token
|
||||
enterprise.wechat.team.send.msg={\"toparty\":\"$toParty\",\"agentid\":\"$agentId\",\"msgtype\":\"text\",\"text\":{\"content\":\"$msg\"},\"safe\":\"0\"}
|
||||
|
|
@ -14,27 +14,24 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
logging.config=classpath:apiserver_logback.xml
|
||||
|
||||
# server port
|
||||
server.port=12345
|
||||
|
||||
# session config
|
||||
server.servlet.session.timeout=7200
|
||||
|
||||
# servlet config
|
||||
server.servlet.context-path=/dolphinscheduler/
|
||||
|
||||
# file size limit for upload
|
||||
spring.servlet.multipart.max-file-size=1024MB
|
||||
spring.servlet.multipart.max-request-size=1024MB
|
||||
|
||||
#post content
|
||||
# post content
|
||||
server.jetty.max-http-post-size=5000000
|
||||
|
||||
# i18n
|
||||
spring.messages.encoding=UTF-8
|
||||
|
||||
#i18n classpath folder , file prefix messages, if have many files, use "," seperator
|
||||
spring.messages.basename=i18n/messages
|
||||
# Authentication types (supported types: PASSWORD)
|
||||
security.authentication.type=PASSWORD
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -17,70 +17,57 @@
|
|||
|
||||
# base spring data source configuration
|
||||
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
|
||||
# postgresql
|
||||
# postgre
|
||||
spring.datasource.driver-class-name=org.postgresql.Driver
|
||||
spring.datasource.url=jdbc:postgresql://127.0.0.1:5432/dolphinscheduler
|
||||
spring.datasource.username=root
|
||||
spring.datasource.password=root@123
|
||||
|
||||
spring.datasource.url=jdbc:postgresql://${POSTGRESQL_HOST}:${POSTGRESQL_PORT}/${POSTGRESQL_DATABASE}?characterEncoding=utf8
|
||||
# mysql
|
||||
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
|
||||
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
|
||||
spring.datasource.username=${POSTGRESQL_USERNAME}
|
||||
spring.datasource.password=${POSTGRESQL_PASSWORD}
|
||||
# connection configuration
|
||||
spring.datasource.initialSize=5
|
||||
# min connection number
|
||||
spring.datasource.minIdle=5
|
||||
# max connection number
|
||||
spring.datasource.maxActive=50
|
||||
|
||||
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
|
||||
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
|
||||
spring.datasource.maxWait=60000
|
||||
|
||||
# milliseconds for check to close free connections
|
||||
spring.datasource.timeBetweenEvictionRunsMillis=60000
|
||||
|
||||
# the Destroy thread detects the connection interval and closes the physical connection in milliseconds if the connection idle time is greater than or equal to minEvictableIdleTimeMillis.
|
||||
spring.datasource.timeBetweenConnectErrorMillis=60000
|
||||
|
||||
# the longest time a connection remains idle without being evicted, in milliseconds
|
||||
spring.datasource.minEvictableIdleTimeMillis=300000
|
||||
|
||||
#the SQL used to check whether the connection is valid requires a query statement. If validation Query is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
|
||||
spring.datasource.validationQuery=SELECT 1
|
||||
|
||||
#check whether the connection is valid for timeout, in seconds
|
||||
spring.datasource.validationQueryTimeout=3
|
||||
|
||||
# when applying for a connection, if it is detected that the connection is idle longer than time Between Eviction Runs Millis,
|
||||
# validation Query is performed to check whether the connection is valid
|
||||
spring.datasource.testWhileIdle=true
|
||||
|
||||
#execute validation to check if the connection is valid when applying for a connection
|
||||
spring.datasource.testOnBorrow=true
|
||||
#execute validation to check if the connection is valid when the connection is returned
|
||||
spring.datasource.testOnReturn=false
|
||||
spring.datasource.defaultAutoCommit=true
|
||||
spring.datasource.keepAlive=true
|
||||
|
||||
# open PSCache, specify count PSCache for every connection
|
||||
spring.datasource.poolPreparedStatements=true
|
||||
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
|
||||
|
||||
spring.datasource.filters=stat,wall,log4j
|
||||
spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
|
||||
|
||||
#mybatis
|
||||
mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml
|
||||
|
||||
mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums
|
||||
|
||||
#Entity scan, where multiple packages are separated by a comma or semicolon
|
||||
mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity
|
||||
|
||||
#Primary key type AUTO:" database ID AUTO ", INPUT:" user INPUT ID", ID_WORKER:" global unique ID (numeric type unique ID)", UUID:" global unique ID UUID";
|
||||
mybatis-plus.global-config.db-config.id-type=AUTO
|
||||
|
||||
#Field policy IGNORED:" ignore judgment ",NOT_NULL:" not NULL judgment "),NOT_EMPTY:" not NULL judgment"
|
||||
mybatis-plus.global-config.db-config.field-strategy=NOT_NULL
|
||||
|
||||
#The hump underline is converted
|
||||
mybatis-plus.global-config.db-config.column-underline=true
|
||||
mybatis-plus.global-config.db-config.logic-delete-value=-1
|
||||
|
|
@ -92,12 +79,37 @@ mybatis-plus.configuration.cache-enabled=false
|
|||
mybatis-plus.configuration.call-setters-on-nulls=true
|
||||
mybatis-plus.configuration.jdbc-type-for-null=null
|
||||
|
||||
# master settings
|
||||
# master execute thread num
|
||||
master.exec.threads=${MASTER_EXEC_THREADS}
|
||||
# master execute task number in parallel
|
||||
master.exec.task.num=${MASTER_EXEC_TASK_NUM}
|
||||
# master heartbeat interval
|
||||
master.heartbeat.interval=${MASTER_HEARTBEAT_INTERVAL}
|
||||
# master commit task retry times
|
||||
master.task.commit.retryTimes=${MASTER_TASK_COMMIT_RETRYTIMES}
|
||||
# master commit task interval
|
||||
master.task.commit.interval=${MASTER_TASK_COMMIT_INTERVAL}
|
||||
# only less than cpu avg load, master server can work. default value : the number of cpu cores * 2
|
||||
master.max.cpuload.avg=${MASTER_MAX_CPULOAD_AVG}
|
||||
# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
|
||||
master.reserved.memory=${MASTER_RESERVED_MEMORY}
|
||||
|
||||
# worker settings
|
||||
# worker execute thread num
|
||||
worker.exec.threads=${WORKER_EXEC_THREADS}
|
||||
# worker heartbeat interval
|
||||
worker.heartbeat.interval=${WORKER_HEARTBEAT_INTERVAL}
|
||||
# submit the number of tasks at a time
|
||||
worker.fetch.task.num=${WORKER_FETCH_TASK_NUM}
|
||||
# only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2
|
||||
worker.max.cpuload.avg=${WORKER_MAX_CPULOAD_AVG}
|
||||
# only larger than reserved memory, worker server can work. default value : physical memory * 1/6, unit is G.
|
||||
worker.reserved.memory=${WORKER_RESERVED_MEMORY}
|
||||
|
||||
# data quality analysis is not currently in use. please ignore the following configuration
|
||||
# task record flag
|
||||
# task record
|
||||
task.record.flag=false
|
||||
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
|
||||
task.record.datasource.username=xx
|
||||
task.record.datasource.password=xx
|
||||
|
||||
# Logger Config
|
||||
#logging.level.org.apache.dolphinscheduler.dao=debug
|
||||
|
|
@ -16,44 +16,69 @@
|
|||
#
|
||||
|
||||
#task queue implementation, default "zookeeper"
|
||||
dolphinscheduler.queue.impl=zookeeper
|
||||
dolphinscheduler.queue.impl=${TASK_QUEUE}
|
||||
|
||||
#zookeeper cluster. multiple are separated by commas. eg. 192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181
|
||||
zookeeper.quorum=${ZOOKEEPER_QUORUM}
|
||||
#dolphinscheduler root directory
|
||||
zookeeper.dolphinscheduler.root=/dolphinscheduler
|
||||
#dolphinscheduler failover directory
|
||||
zookeeper.session.timeout=300
|
||||
zookeeper.connection.timeout=300
|
||||
zookeeper.retry.base.sleep=100
|
||||
zookeeper.retry.max.sleep=30000
|
||||
zookeeper.retry.maxtime=5
|
||||
|
||||
#============================================================================
|
||||
# System
|
||||
#============================================================================
|
||||
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
|
||||
dolphinscheduler.env.path=${DOLPHINSCHEDULER_ENV_PATH}
|
||||
#resource.view.suffixs
|
||||
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties
|
||||
# is development state? default "false"
|
||||
development.state=true
|
||||
# user data directory path, self configuration, please make sure the directory exists and have read write permissions
|
||||
data.basedir.path=/tmp/dolphinscheduler
|
||||
|
||||
data.basedir.path=${DOLPHINSCHEDULER_DATA_BASEDIR_PATH}
|
||||
# directory path for user data download. self configuration, please make sure the directory exists and have read write permissions
|
||||
data.download.basedir.path=/tmp/dolphinscheduler/download
|
||||
|
||||
data.download.basedir.path=${DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH}
|
||||
# process execute directory. self configuration, please make sure the directory exists and have read write permissions
|
||||
process.exec.basepath=/tmp/dolphinscheduler/exec
|
||||
|
||||
# Users who have permission to create directories under the HDFS root path
|
||||
hdfs.root.user=hdfs
|
||||
|
||||
# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
|
||||
data.store2hdfs.basepath=/dolphinscheduler
|
||||
process.exec.basepath=${DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH}
|
||||
|
||||
# resource upload startup type : HDFS,S3,NONE
|
||||
res.upload.startup.type=NONE
|
||||
|
||||
#============================================================================
|
||||
# HDFS
|
||||
#============================================================================
|
||||
# Users who have permission to create directories under the HDFS root path
|
||||
hdfs.root.user=hdfs
|
||||
# data base dir, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions。"/dolphinscheduler" is recommended
|
||||
data.store2hdfs.basepath=/dolphinscheduler
|
||||
# whether kerberos starts
|
||||
hadoop.security.authentication.startup.state=false
|
||||
|
||||
# java.security.krb5.conf path
|
||||
java.security.krb5.conf.path=/opt/krb5.conf
|
||||
|
||||
# loginUserFromKeytab user
|
||||
login.user.keytab.username=hdfs-mycluster@ESZ.COM
|
||||
|
||||
# loginUserFromKeytab path
|
||||
login.user.keytab.path=/opt/hdfs.headless.keytab
|
||||
|
||||
# system env path. self configuration, please make sure the directory and file exists and have read write execute permissions
|
||||
dolphinscheduler.env.path=/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh
|
||||
#============================================================================
|
||||
# S3
|
||||
#============================================================================
|
||||
# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
|
||||
# to the conf directory,support s3,for example : s3a://dolphinscheduler
|
||||
fs.defaultFS=hdfs://mycluster:8020
|
||||
# s3 need,s3 endpoint
|
||||
fs.s3a.endpoint=http://192.168.199.91:9010
|
||||
# s3 need,s3 access key
|
||||
fs.s3a.access.key=A3DXS30FO22544RE
|
||||
# s3 need,s3 secret key
|
||||
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
|
||||
#resourcemanager ha note this need ips , this empty if single
|
||||
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
|
||||
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
|
||||
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
|
||||
|
||||
#resource.view.suffixs
|
||||
resource.view.suffixs=txt,log,sh,conf,cfg,py,java,sql,hql,xml
|
||||
|
||||
# is development state? default "false"
|
||||
development.state=true
|
||||
|
||||
|
|
@ -1,49 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
<property name="log.base" value="logs" />
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<appender name="ALERTLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-alert.log</file>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-alert.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>20</maxHistory>
|
||||
<maxFileSize>64MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="ALERTLOGFILE"/>
|
||||
</root>
|
||||
</configuration>
|
||||
|
|
@ -1,60 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
|
||||
<configuration scan="true" scanPeriod="120 seconds">
|
||||
<logger name="org.apache.zookeeper" level="WARN"/>
|
||||
<logger name="org.apache.hbase" level="WARN"/>
|
||||
<logger name="org.apache.hadoop" level="WARN"/>
|
||||
|
||||
<property name="log.base" value="logs" />
|
||||
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<appender name="APISERVERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<!-- Log level filter -->
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<file>${log.base}/dolphinscheduler-api-server.log</file>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-api-server.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>168</maxHistory>
|
||||
<maxFileSize>64MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
|
||||
</appender>
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="APISERVERLOGFILE" />
|
||||
</root>
|
||||
</configuration>
|
||||
|
|
@ -1,80 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
|
||||
<configuration scan="true" scanPeriod="120 seconds">
|
||||
<property name="log.base" value="logs"/>
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
%highlight([%level]) %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{10}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
<appender name="TASKLOGFILE" class="ch.qos.logback.classic.sift.SiftingAppender">
|
||||
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<filter class="org.apache.dolphinscheduler.common.log.TaskLogFilter"></filter>
|
||||
<Discriminator class="org.apache.dolphinscheduler.common.log.TaskLogDiscriminator">
|
||||
<key>taskAppId</key>
|
||||
<logBase>${log.base}</logBase>
|
||||
</Discriminator>
|
||||
<sift>
|
||||
<appender name="FILE-${taskAppId}" class="ch.qos.logback.core.FileAppender">
|
||||
<file>${log.base}/${taskAppId}.log</file>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
<append>true</append>
|
||||
</appender>
|
||||
</sift>
|
||||
</appender>
|
||||
|
||||
<appender name="COMBINEDLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-combined.log</file>
|
||||
<filter class="org.apache.dolphinscheduler.common.log.WorkerLogFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-combined.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>168</maxHistory>
|
||||
<maxFileSize>200MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
|
||||
</appender>
|
||||
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="STDOUT"/>
|
||||
<appender-ref ref="TASKLOGFILE"/>
|
||||
<appender-ref ref="COMBINEDLOGFILE"/>
|
||||
</root>
|
||||
</configuration>
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
|
||||
# to the conf directory,support s3,for example : s3a://dolphinscheduler
|
||||
fs.defaultFS=hdfs://mycluster:8020
|
||||
|
||||
# s3 need,s3 endpoint
|
||||
fs.s3a.endpoint=http://192.168.199.91:9010
|
||||
|
||||
# s3 need,s3 access key
|
||||
fs.s3a.access.key=A3DXS30FO22544RE
|
||||
|
||||
# s3 need,s3 secret key
|
||||
fs.s3a.secret.key=OloCLq3n+8+sdPHUhJ21XrSxTC+JK
|
||||
|
||||
#resourcemanager ha note this need ips , this empty if single
|
||||
yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
|
||||
|
||||
# If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
|
||||
yarn.application.status.address=http://ark1:8088/ws/v1/cluster/apps/%s
|
||||
|
|
@ -1,252 +0,0 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
QUERY_SCHEDULE_LIST_NOTES=query schedule list
|
||||
EXECUTE_PROCESS_TAG=execute process related operation
|
||||
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
|
||||
RUN_PROCESS_INSTANCE_NOTES=run process instance
|
||||
START_NODE_LIST=start node list(node name)
|
||||
TASK_DEPEND_TYPE=task depend type
|
||||
COMMAND_TYPE=command type
|
||||
RUN_MODE=run mode
|
||||
TIMEOUT=timeout
|
||||
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
|
||||
EXECUTE_TYPE=execute type
|
||||
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
|
||||
GET_RECEIVER_CC_NOTES=query receiver cc
|
||||
DESC=description
|
||||
GROUP_NAME=group name
|
||||
GROUP_TYPE=group type
|
||||
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
|
||||
UPDATE_ALERT_GROUP_NOTES=update alert group
|
||||
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
|
||||
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
|
||||
GRANT_ALERT_GROUP_NOTES=grant alert group
|
||||
USER_IDS=user id list
|
||||
ALERT_GROUP_TAG=alert group related operation
|
||||
CREATE_ALERT_GROUP_NOTES=create alert group
|
||||
WORKER_GROUP_TAG=worker group related operation
|
||||
SAVE_WORKER_GROUP_NOTES=create worker group
|
||||
WORKER_GROUP_NAME=worker group name
|
||||
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
|
||||
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
|
||||
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
|
||||
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
|
||||
DATA_ANALYSIS_TAG=analysis related operation of task state
|
||||
COUNT_TASK_STATE_NOTES=count task state
|
||||
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
|
||||
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
|
||||
COUNT_COMMAND_STATE_NOTES=count command state
|
||||
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\
|
||||
|
||||
ACCESS_TOKEN_TAG=access token related operation
|
||||
MONITOR_TAG=monitor related operation
|
||||
MASTER_LIST_NOTES=master server list
|
||||
WORKER_LIST_NOTES=worker server list
|
||||
QUERY_DATABASE_STATE_NOTES=query database state
|
||||
QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE
|
||||
TASK_STATE=task instance state
|
||||
SOURCE_TABLE=SOURCE TABLE
|
||||
DEST_TABLE=dest table
|
||||
TASK_DATE=task date
|
||||
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
|
||||
DATA_SOURCE_TAG=data source related operation
|
||||
CREATE_DATA_SOURCE_NOTES=create data source
|
||||
DATA_SOURCE_NAME=data source name
|
||||
DATA_SOURCE_NOTE=data source desc
|
||||
DB_TYPE=database type
|
||||
DATA_SOURCE_HOST=DATA SOURCE HOST
|
||||
DATA_SOURCE_PORT=data source port
|
||||
DATABASE_NAME=database name
|
||||
QUEUE_TAG=queue related operation
|
||||
QUERY_QUEUE_LIST_NOTES=query queue list
|
||||
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
|
||||
CREATE_QUEUE_NOTES=create queue
|
||||
YARN_QUEUE_NAME=yarn(hadoop) queue name
|
||||
QUEUE_ID=queue id
|
||||
TENANT_DESC=tenant desc
|
||||
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
|
||||
QUERY_TENANT_LIST_NOTES=query tenant list
|
||||
UPDATE_TENANT_NOTES=update tenant
|
||||
DELETE_TENANT_NOTES=delete tenant
|
||||
RESOURCES_TAG=resource center related operation
|
||||
CREATE_RESOURCE_NOTES=create resource
|
||||
RESOURCE_TYPE=resource file type
|
||||
RESOURCE_NAME=resource name
|
||||
RESOURCE_DESC=resource file desc
|
||||
RESOURCE_FILE=resource file
|
||||
RESOURCE_ID=resource id
|
||||
QUERY_RESOURCE_LIST_NOTES=query resource list
|
||||
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
|
||||
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
|
||||
ONLINE_CREATE_RESOURCE_NOTES=online create resource
|
||||
SUFFIX=resource file suffix
|
||||
CONTENT=resource file content
|
||||
UPDATE_RESOURCE_NOTES=edit resource file online
|
||||
DOWNLOAD_RESOURCE_NOTES=download resource file
|
||||
CREATE_UDF_FUNCTION_NOTES=create udf function
|
||||
UDF_TYPE=UDF type
|
||||
FUNC_NAME=function name
|
||||
CLASS_NAME=package and class name
|
||||
ARG_TYPES=arguments
|
||||
UDF_DESC=udf desc
|
||||
VIEW_UDF_FUNCTION_NOTES=view udf function
|
||||
UPDATE_UDF_FUNCTION_NOTES=update udf function
|
||||
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
|
||||
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
|
||||
DELETE_UDF_FUNCTION_NOTES=delete udf function
|
||||
AUTHORIZED_FILE_NOTES=authorized file
|
||||
UNAUTHORIZED_FILE_NOTES=unauthorized file
|
||||
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
|
||||
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
|
||||
VERIFY_QUEUE_NOTES=verify queue
|
||||
TENANT_TAG=tenant related operation
|
||||
CREATE_TENANT_NOTES=create tenant
|
||||
TENANT_CODE=tenant code
|
||||
TENANT_NAME=tenant name
|
||||
QUEUE_NAME=queue name
|
||||
PASSWORD=password
|
||||
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
|
||||
PROJECT_TAG=project related operation
|
||||
CREATE_PROJECT_NOTES=create project
|
||||
PROJECT_DESC=project description
|
||||
UPDATE_PROJECT_NOTES=update project
|
||||
PROJECT_ID=project id
|
||||
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
|
||||
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
|
||||
DELETE_PROJECT_BY_ID_NOTES=delete project by id
|
||||
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
|
||||
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
|
||||
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
|
||||
TASK_RECORD_TAG=task record related operation
|
||||
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
|
||||
CREATE_TOKEN_NOTES=create token ,note: please login first
|
||||
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
|
||||
SCHEDULE=schedule
|
||||
WARNING_TYPE=warning type(sending strategy)
|
||||
WARNING_GROUP_ID=warning group id
|
||||
FAILURE_STRATEGY=failure strategy
|
||||
RECEIVERS=receivers
|
||||
RECEIVERS_CC=receivers cc
|
||||
WORKER_GROUP_ID=worker server group id
|
||||
PROCESS_INSTANCE_PRIORITY=process instance priority
|
||||
UPDATE_SCHEDULE_NOTES=update schedule
|
||||
SCHEDULE_ID=schedule id
|
||||
ONLINE_SCHEDULE_NOTES=online schedule
|
||||
OFFLINE_SCHEDULE_NOTES=offline schedule
|
||||
QUERY_SCHEDULE_NOTES=query schedule
|
||||
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
|
||||
LOGIN_TAG=User login related operations
|
||||
USER_NAME=user name
|
||||
PROJECT_NAME=project name
|
||||
CREATE_PROCESS_DEFINITION_NOTES=create process definition
|
||||
PROCESS_DEFINITION_NAME=process definition name
|
||||
PROCESS_DEFINITION_JSON=process definition detail info (json format)
|
||||
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
|
||||
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
|
||||
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
|
||||
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
|
||||
PROCESS_DEFINITION_DESC=process definition desc
|
||||
PROCESS_DEFINITION_TAG=process definition related opertation
|
||||
SIGNOUT_NOTES=logout
|
||||
USER_PASSWORD=user password
|
||||
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
|
||||
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
|
||||
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
|
||||
LOGIN_NOTES=user login
|
||||
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
|
||||
PROCESS_DEFINITION_ID=process definition id
|
||||
PROCESS_DEFINITION_IDS=process definition ids
|
||||
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
|
||||
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
|
||||
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
|
||||
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
|
||||
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
|
||||
PAGE_NO=page no
|
||||
PROCESS_INSTANCE_ID=process instance id
|
||||
PROCESS_INSTANCE_JSON=process instance info(json format)
|
||||
SCHEDULE_TIME=schedule time
|
||||
SYNC_DEFINE=update the information of the process instance to the process definition\
|
||||
|
||||
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance
|
||||
SEARCH_VAL=search val
|
||||
USER_ID=user id
|
||||
PAGE_SIZE=page size
|
||||
LIMIT=limit
|
||||
VIEW_TREE_NOTES=view tree
|
||||
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
|
||||
PROCESS_DEFINITION_ID_LIST=process definition id list
|
||||
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
|
||||
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
|
||||
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
|
||||
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
|
||||
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
|
||||
TASK_ID=task instance id
|
||||
SKIP_LINE_NUM=skip line num
|
||||
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
|
||||
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
|
||||
USERS_TAG=users related operation
|
||||
SCHEDULER_TAG=scheduler related operation
|
||||
CREATE_SCHEDULE_NOTES=create schedule
|
||||
CREATE_USER_NOTES=create user
|
||||
TENANT_ID=tenant id
|
||||
QUEUE=queue
|
||||
EMAIL=email
|
||||
PHONE=phone
|
||||
QUERY_USER_LIST_NOTES=query user list
|
||||
UPDATE_USER_NOTES=update user
|
||||
DELETE_USER_BY_ID_NOTES=delete user by id
|
||||
GRANT_PROJECT_NOTES=GRANT PROJECT
|
||||
PROJECT_IDS=project ids(string format, multiple projects separated by ",")
|
||||
GRANT_RESOURCE_NOTES=grant resource file
|
||||
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
|
||||
GET_USER_INFO_NOTES=get user info
|
||||
LIST_USER_NOTES=list user
|
||||
VERIFY_USER_NAME_NOTES=verify user name
|
||||
UNAUTHORIZED_USER_NOTES=cancel authorization
|
||||
ALERT_GROUP_ID=alert group id
|
||||
AUTHORIZED_USER_NOTES=authorized user
|
||||
GRANT_UDF_FUNC_NOTES=grant udf function
|
||||
UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
|
||||
GRANT_DATASOURCE_NOTES=grant datasource
|
||||
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
|
||||
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
|
||||
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
|
||||
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
|
||||
VIEW_GANTT_NOTES=view gantt
|
||||
SUB_PROCESS_INSTANCE_ID=sub process instance id
|
||||
TASK_NAME=task instance name
|
||||
TASK_INSTANCE_TAG=task instance related operation
|
||||
LOGGER_TAG=log related operation
|
||||
PROCESS_INSTANCE_TAG=process instance related operation
|
||||
EXECUTION_STATUS=runing status for workflow and task nodes
|
||||
HOST=ip address of running task
|
||||
START_DATE=start date
|
||||
END_DATE=end date
|
||||
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
|
||||
UPDATE_DATA_SOURCE_NOTES=update data source
|
||||
DATA_SOURCE_ID=DATA SOURCE ID
|
||||
QUERY_DATA_SOURCE_NOTES=query data source by id
|
||||
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
|
||||
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
|
||||
CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE
|
||||
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
|
||||
DELETE_DATA_SOURCE_NOTES=delete data source
|
||||
VERIFY_DATA_SOURCE_NOTES=verify data source
|
||||
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
|
||||
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
|
||||
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id
|
||||
|
|
@ -1,252 +0,0 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
QUERY_SCHEDULE_LIST_NOTES=query schedule list
|
||||
EXECUTE_PROCESS_TAG=execute process related operation
|
||||
PROCESS_INSTANCE_EXECUTOR_TAG=process instance executor related operation
|
||||
RUN_PROCESS_INSTANCE_NOTES=run process instance
|
||||
START_NODE_LIST=start node list(node name)
|
||||
TASK_DEPEND_TYPE=task depend type
|
||||
COMMAND_TYPE=command type
|
||||
RUN_MODE=run mode
|
||||
TIMEOUT=timeout
|
||||
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=execute action to process instance
|
||||
EXECUTE_TYPE=execute type
|
||||
START_CHECK_PROCESS_DEFINITION_NOTES=start check process definition
|
||||
GET_RECEIVER_CC_NOTES=query receiver cc
|
||||
DESC=description
|
||||
GROUP_NAME=group name
|
||||
GROUP_TYPE=group type
|
||||
QUERY_ALERT_GROUP_LIST_NOTES=query alert group list
|
||||
UPDATE_ALERT_GROUP_NOTES=update alert group
|
||||
DELETE_ALERT_GROUP_BY_ID_NOTES=delete alert group by id
|
||||
VERIFY_ALERT_GROUP_NAME_NOTES=verify alert group name, check alert group exist or not
|
||||
GRANT_ALERT_GROUP_NOTES=grant alert group
|
||||
USER_IDS=user id list
|
||||
ALERT_GROUP_TAG=alert group related operation
|
||||
CREATE_ALERT_GROUP_NOTES=create alert group
|
||||
WORKER_GROUP_TAG=worker group related operation
|
||||
SAVE_WORKER_GROUP_NOTES=create worker group
|
||||
WORKER_GROUP_NAME=worker group name
|
||||
WORKER_IP_LIST=worker ip list, eg. 192.168.1.1,192.168.1.2
|
||||
QUERY_WORKER_GROUP_PAGING_NOTES=query worker group paging
|
||||
QUERY_WORKER_GROUP_LIST_NOTES=query worker group list
|
||||
DELETE_WORKER_GROUP_BY_ID_NOTES=delete worker group by id
|
||||
DATA_ANALYSIS_TAG=analysis related operation of task state
|
||||
COUNT_TASK_STATE_NOTES=count task state
|
||||
COUNT_PROCESS_INSTANCE_NOTES=count process instance state
|
||||
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=count process definition by user
|
||||
COUNT_COMMAND_STATE_NOTES=count command state
|
||||
COUNT_QUEUE_STATE_NOTES=count the running status of the task in the queue\
|
||||
|
||||
ACCESS_TOKEN_TAG=access token related operation
|
||||
MONITOR_TAG=monitor related operation
|
||||
MASTER_LIST_NOTES=master server list
|
||||
WORKER_LIST_NOTES=worker server list
|
||||
QUERY_DATABASE_STATE_NOTES=query database state
|
||||
QUERY_ZOOKEEPER_STATE_NOTES=QUERY ZOOKEEPER STATE
|
||||
TASK_STATE=task instance state
|
||||
SOURCE_TABLE=SOURCE TABLE
|
||||
DEST_TABLE=dest table
|
||||
TASK_DATE=task date
|
||||
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=query history task record list paging
|
||||
DATA_SOURCE_TAG=data source related operation
|
||||
CREATE_DATA_SOURCE_NOTES=create data source
|
||||
DATA_SOURCE_NAME=data source name
|
||||
DATA_SOURCE_NOTE=data source desc
|
||||
DB_TYPE=database type
|
||||
DATA_SOURCE_HOST=DATA SOURCE HOST
|
||||
DATA_SOURCE_PORT=data source port
|
||||
DATABASE_NAME=database name
|
||||
QUEUE_TAG=queue related operation
|
||||
QUERY_QUEUE_LIST_NOTES=query queue list
|
||||
QUERY_QUEUE_LIST_PAGING_NOTES=query queue list paging
|
||||
CREATE_QUEUE_NOTES=create queue
|
||||
YARN_QUEUE_NAME=yarn(hadoop) queue name
|
||||
QUEUE_ID=queue id
|
||||
TENANT_DESC=tenant desc
|
||||
QUERY_TENANT_LIST_PAGING_NOTES=query tenant list paging
|
||||
QUERY_TENANT_LIST_NOTES=query tenant list
|
||||
UPDATE_TENANT_NOTES=update tenant
|
||||
DELETE_TENANT_NOTES=delete tenant
|
||||
RESOURCES_TAG=resource center related operation
|
||||
CREATE_RESOURCE_NOTES=create resource
|
||||
RESOURCE_TYPE=resource file type
|
||||
RESOURCE_NAME=resource name
|
||||
RESOURCE_DESC=resource file desc
|
||||
RESOURCE_FILE=resource file
|
||||
RESOURCE_ID=resource id
|
||||
QUERY_RESOURCE_LIST_NOTES=query resource list
|
||||
DELETE_RESOURCE_BY_ID_NOTES=delete resource by id
|
||||
VIEW_RESOURCE_BY_ID_NOTES=view resource by id
|
||||
ONLINE_CREATE_RESOURCE_NOTES=online create resource
|
||||
SUFFIX=resource file suffix
|
||||
CONTENT=resource file content
|
||||
UPDATE_RESOURCE_NOTES=edit resource file online
|
||||
DOWNLOAD_RESOURCE_NOTES=download resource file
|
||||
CREATE_UDF_FUNCTION_NOTES=create udf function
|
||||
UDF_TYPE=UDF type
|
||||
FUNC_NAME=function name
|
||||
CLASS_NAME=package and class name
|
||||
ARG_TYPES=arguments
|
||||
UDF_DESC=udf desc
|
||||
VIEW_UDF_FUNCTION_NOTES=view udf function
|
||||
UPDATE_UDF_FUNCTION_NOTES=update udf function
|
||||
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=query udf function list paging
|
||||
VERIFY_UDF_FUNCTION_NAME_NOTES=verify udf function name
|
||||
DELETE_UDF_FUNCTION_NOTES=delete udf function
|
||||
AUTHORIZED_FILE_NOTES=authorized file
|
||||
UNAUTHORIZED_FILE_NOTES=unauthorized file
|
||||
AUTHORIZED_UDF_FUNC_NOTES=authorized udf func
|
||||
UNAUTHORIZED_UDF_FUNC_NOTES=unauthorized udf func
|
||||
VERIFY_QUEUE_NOTES=verify queue
|
||||
TENANT_TAG=tenant related operation
|
||||
CREATE_TENANT_NOTES=create tenant
|
||||
TENANT_CODE=tenant code
|
||||
TENANT_NAME=tenant name
|
||||
QUEUE_NAME=queue name
|
||||
PASSWORD=password
|
||||
DATA_SOURCE_OTHER=jdbc connection params, format:{"key1":"value1",...}
|
||||
PROJECT_TAG=project related operation
|
||||
CREATE_PROJECT_NOTES=create project
|
||||
PROJECT_DESC=project description
|
||||
UPDATE_PROJECT_NOTES=update project
|
||||
PROJECT_ID=project id
|
||||
QUERY_PROJECT_BY_ID_NOTES=query project info by project id
|
||||
QUERY_PROJECT_LIST_PAGING_NOTES=QUERY PROJECT LIST PAGING
|
||||
QUERY_ALL_PROJECT_LIST_NOTES=query all project list
|
||||
DELETE_PROJECT_BY_ID_NOTES=delete project by id
|
||||
QUERY_UNAUTHORIZED_PROJECT_NOTES=query unauthorized project
|
||||
QUERY_AUTHORIZED_PROJECT_NOTES=query authorized project
|
||||
TASK_RECORD_TAG=task record related operation
|
||||
QUERY_TASK_RECORD_LIST_PAGING_NOTES=query task record list paging
|
||||
CREATE_TOKEN_NOTES=create token ,note: please login first
|
||||
QUERY_ACCESS_TOKEN_LIST_NOTES=query access token list paging
|
||||
SCHEDULE=schedule
|
||||
WARNING_TYPE=warning type(sending strategy)
|
||||
WARNING_GROUP_ID=warning group id
|
||||
FAILURE_STRATEGY=failure strategy
|
||||
RECEIVERS=receivers
|
||||
RECEIVERS_CC=receivers cc
|
||||
WORKER_GROUP_ID=worker server group id
|
||||
PROCESS_INSTANCE_PRIORITY=process instance priority
|
||||
UPDATE_SCHEDULE_NOTES=update schedule
|
||||
SCHEDULE_ID=schedule id
|
||||
ONLINE_SCHEDULE_NOTES=online schedule
|
||||
OFFLINE_SCHEDULE_NOTES=offline schedule
|
||||
QUERY_SCHEDULE_NOTES=query schedule
|
||||
QUERY_SCHEDULE_LIST_PAGING_NOTES=query schedule list paging
|
||||
LOGIN_TAG=User login related operations
|
||||
USER_NAME=user name
|
||||
PROJECT_NAME=project name
|
||||
CREATE_PROCESS_DEFINITION_NOTES=create process definition
|
||||
PROCESS_DEFINITION_NAME=process definition name
|
||||
PROCESS_DEFINITION_JSON=process definition detail info (json format)
|
||||
PROCESS_DEFINITION_LOCATIONS=process definition node locations info (json format)
|
||||
PROCESS_INSTANCE_LOCATIONS=process instance node locations info (json format)
|
||||
PROCESS_DEFINITION_CONNECTS=process definition node connects info (json format)
|
||||
PROCESS_INSTANCE_CONNECTS=process instance node connects info (json format)
|
||||
PROCESS_DEFINITION_DESC=process definition desc
|
||||
PROCESS_DEFINITION_TAG=process definition related opertation
|
||||
SIGNOUT_NOTES=logout
|
||||
USER_PASSWORD=user password
|
||||
UPDATE_PROCESS_INSTANCE_NOTES=update process instance
|
||||
QUERY_PROCESS_INSTANCE_LIST_NOTES=query process instance list
|
||||
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=verify proccess definition name
|
||||
LOGIN_NOTES=user login
|
||||
UPDATE_PROCCESS_DEFINITION_NOTES=update proccess definition
|
||||
PROCESS_DEFINITION_ID=process definition id
|
||||
PROCESS_DEFINITION_IDS=process definition ids
|
||||
RELEASE_PROCCESS_DEFINITION_NOTES=release proccess definition
|
||||
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=query proccess definition by id
|
||||
QUERY_PROCCESS_DEFINITION_LIST_NOTES=query proccess definition list
|
||||
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=query proccess definition list paging
|
||||
QUERY_ALL_DEFINITION_LIST_NOTES=query all definition list
|
||||
PAGE_NO=page no
|
||||
PROCESS_INSTANCE_ID=process instance id
|
||||
PROCESS_INSTANCE_JSON=process instance info(json format)
|
||||
SCHEDULE_TIME=schedule time
|
||||
SYNC_DEFINE=update the information of the process instance to the process definition\
|
||||
|
||||
RECOVERY_PROCESS_INSTANCE_FLAG=whether to recovery process instance
|
||||
SEARCH_VAL=search val
|
||||
USER_ID=user id
|
||||
PAGE_SIZE=page size
|
||||
LIMIT=limit
|
||||
VIEW_TREE_NOTES=view tree
|
||||
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=get task node list by process definition id
|
||||
PROCESS_DEFINITION_ID_LIST=process definition id list
|
||||
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=query proccess definition all by project id
|
||||
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=delete process definition by process definition id
|
||||
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=batch delete process definition by process definition ids
|
||||
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=query process instance by process instance id
|
||||
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=delete process instance by process instance id
|
||||
TASK_ID=task instance id
|
||||
SKIP_LINE_NUM=skip line num
|
||||
QUERY_TASK_INSTANCE_LOG_NOTES=query task instance log
|
||||
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=download task instance log
|
||||
USERS_TAG=users related operation
|
||||
SCHEDULER_TAG=scheduler related operation
|
||||
CREATE_SCHEDULE_NOTES=create schedule
|
||||
CREATE_USER_NOTES=create user
|
||||
TENANT_ID=tenant id
|
||||
QUEUE=queue
|
||||
EMAIL=email
|
||||
PHONE=phone
|
||||
QUERY_USER_LIST_NOTES=query user list
|
||||
UPDATE_USER_NOTES=update user
|
||||
DELETE_USER_BY_ID_NOTES=delete user by id
|
||||
GRANT_PROJECT_NOTES=GRANT PROJECT
|
||||
PROJECT_IDS=project ids(string format, multiple projects separated by ",")
|
||||
GRANT_RESOURCE_NOTES=grant resource file
|
||||
RESOURCE_IDS=resource ids(string format, multiple resources separated by ",")
|
||||
GET_USER_INFO_NOTES=get user info
|
||||
LIST_USER_NOTES=list user
|
||||
VERIFY_USER_NAME_NOTES=verify user name
|
||||
UNAUTHORIZED_USER_NOTES=cancel authorization
|
||||
ALERT_GROUP_ID=alert group id
|
||||
AUTHORIZED_USER_NOTES=authorized user
|
||||
GRANT_UDF_FUNC_NOTES=grant udf function
|
||||
UDF_IDS=udf ids(string format, multiple udf functions separated by ",")
|
||||
GRANT_DATASOURCE_NOTES=grant datasource
|
||||
DATASOURCE_IDS=datasource ids(string format, multiple datasources separated by ",")
|
||||
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=query subprocess instance by task instance id
|
||||
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=query parent process instance info by sub process instance id
|
||||
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=query process instance global variables and local variables
|
||||
VIEW_GANTT_NOTES=view gantt
|
||||
SUB_PROCESS_INSTANCE_ID=sub process instance id
|
||||
TASK_NAME=task instance name
|
||||
TASK_INSTANCE_TAG=task instance related operation
|
||||
LOGGER_TAG=log related operation
|
||||
PROCESS_INSTANCE_TAG=process instance related operation
|
||||
EXECUTION_STATUS=runing status for workflow and task nodes
|
||||
HOST=ip address of running task
|
||||
START_DATE=start date
|
||||
END_DATE=end date
|
||||
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=query task list by process instance id
|
||||
UPDATE_DATA_SOURCE_NOTES=update data source
|
||||
DATA_SOURCE_ID=DATA SOURCE ID
|
||||
QUERY_DATA_SOURCE_NOTES=query data source by id
|
||||
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=query data source list by database type
|
||||
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=query data source list paging
|
||||
CONNECT_DATA_SOURCE_NOTES=CONNECT DATA SOURCE
|
||||
CONNECT_DATA_SOURCE_TEST_NOTES=connect data source test
|
||||
DELETE_DATA_SOURCE_NOTES=delete data source
|
||||
VERIFY_DATA_SOURCE_NOTES=verify data source
|
||||
UNAUTHORIZED_DATA_SOURCE_NOTES=unauthorized data source
|
||||
AUTHORIZED_DATA_SOURCE_NOTES=authorized data source
|
||||
DELETE_SCHEDULER_BY_ID_NOTES=delete scheduler by id
|
||||
|
|
@ -1,250 +0,0 @@
|
|||
#
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
QUERY_SCHEDULE_LIST_NOTES=查询定时列表
|
||||
PROCESS_INSTANCE_EXECUTOR_TAG=流程实例执行相关操作
|
||||
RUN_PROCESS_INSTANCE_NOTES=运行流程实例
|
||||
START_NODE_LIST=开始节点列表(节点name)
|
||||
TASK_DEPEND_TYPE=任务依赖类型
|
||||
COMMAND_TYPE=指令类型
|
||||
RUN_MODE=运行模式
|
||||
TIMEOUT=超时时间
|
||||
EXECUTE_ACTION_TO_PROCESS_INSTANCE_NOTES=执行流程实例的各种操作(暂停、停止、重跑、恢复等)
|
||||
EXECUTE_TYPE=执行类型
|
||||
START_CHECK_PROCESS_DEFINITION_NOTES=检查流程定义
|
||||
DESC=备注(描述)
|
||||
GROUP_NAME=组名称
|
||||
GROUP_TYPE=组类型
|
||||
QUERY_ALERT_GROUP_LIST_NOTES=告警组列表\
|
||||
|
||||
UPDATE_ALERT_GROUP_NOTES=编辑(更新)告警组
|
||||
DELETE_ALERT_GROUP_BY_ID_NOTES=删除告警组通过ID
|
||||
VERIFY_ALERT_GROUP_NAME_NOTES=检查告警组是否存在
|
||||
GRANT_ALERT_GROUP_NOTES=授权告警组
|
||||
USER_IDS=用户ID列表
|
||||
ALERT_GROUP_TAG=告警组相关操作
|
||||
WORKER_GROUP_TAG=Worker分组管理
|
||||
SAVE_WORKER_GROUP_NOTES=创建Worker分组\
|
||||
|
||||
WORKER_GROUP_NAME=Worker分组名称
|
||||
WORKER_IP_LIST=Worker ip列表,注意:多个IP地址以逗号分割\
|
||||
|
||||
QUERY_WORKER_GROUP_PAGING_NOTES=Worker分组管理
|
||||
QUERY_WORKER_GROUP_LIST_NOTES=查询worker group分组
|
||||
DELETE_WORKER_GROUP_BY_ID_NOTES=删除worker group通过ID
|
||||
DATA_ANALYSIS_TAG=任务状态分析相关操作
|
||||
COUNT_TASK_STATE_NOTES=任务状态统计
|
||||
COUNT_PROCESS_INSTANCE_NOTES=统计流程实例状态
|
||||
COUNT_PROCESS_DEFINITION_BY_USER_NOTES=统计用户创建的流程定义
|
||||
COUNT_COMMAND_STATE_NOTES=统计命令状态
|
||||
COUNT_QUEUE_STATE_NOTES=统计队列里任务状态
|
||||
ACCESS_TOKEN_TAG=access token相关操作,需要先登录
|
||||
MONITOR_TAG=监控相关操作
|
||||
MASTER_LIST_NOTES=master服务列表
|
||||
WORKER_LIST_NOTES=worker服务列表
|
||||
QUERY_DATABASE_STATE_NOTES=查询数据库状态
|
||||
QUERY_ZOOKEEPER_STATE_NOTES=查询Zookeeper状态
|
||||
TASK_STATE=任务实例状态
|
||||
SOURCE_TABLE=源表
|
||||
DEST_TABLE=目标表
|
||||
TASK_DATE=任务时间
|
||||
QUERY_HISTORY_TASK_RECORD_LIST_PAGING_NOTES=分页查询历史任务记录列表
|
||||
DATA_SOURCE_TAG=数据源相关操作
|
||||
CREATE_DATA_SOURCE_NOTES=创建数据源
|
||||
DATA_SOURCE_NAME=数据源名称
|
||||
DATA_SOURCE_NOTE=数据源描述
|
||||
DB_TYPE=数据源类型
|
||||
DATA_SOURCE_HOST=IP主机名
|
||||
DATA_SOURCE_PORT=数据源端口
|
||||
DATABASE_NAME=数据库名
|
||||
QUEUE_TAG=队列相关操作
|
||||
QUERY_QUEUE_LIST_NOTES=查询队列列表
|
||||
QUERY_QUEUE_LIST_PAGING_NOTES=分页查询队列列表
|
||||
CREATE_QUEUE_NOTES=创建队列
|
||||
YARN_QUEUE_NAME=hadoop yarn队列名
|
||||
QUEUE_ID=队列ID
|
||||
TENANT_DESC=租户描述
|
||||
QUERY_TENANT_LIST_PAGING_NOTES=分页查询租户列表
|
||||
QUERY_TENANT_LIST_NOTES=查询租户列表
|
||||
UPDATE_TENANT_NOTES=更新租户
|
||||
DELETE_TENANT_NOTES=删除租户
|
||||
RESOURCES_TAG=资源中心相关操作
|
||||
CREATE_RESOURCE_NOTES=创建资源
|
||||
RESOURCE_TYPE=资源文件类型
|
||||
RESOURCE_NAME=资源文件名称
|
||||
RESOURCE_DESC=资源文件描述
|
||||
RESOURCE_FILE=资源文件
|
||||
RESOURCE_ID=资源ID
|
||||
QUERY_RESOURCE_LIST_NOTES=查询资源列表
|
||||
DELETE_RESOURCE_BY_ID_NOTES=删除资源通过ID
|
||||
VIEW_RESOURCE_BY_ID_NOTES=浏览资源通通过ID
|
||||
ONLINE_CREATE_RESOURCE_NOTES=在线创建资源
|
||||
SUFFIX=资源文件后缀
|
||||
CONTENT=资源文件内容
|
||||
UPDATE_RESOURCE_NOTES=在线更新资源文件
|
||||
DOWNLOAD_RESOURCE_NOTES=下载资源文件
|
||||
CREATE_UDF_FUNCTION_NOTES=创建UDF函数
|
||||
UDF_TYPE=UDF类型
|
||||
FUNC_NAME=函数名称
|
||||
CLASS_NAME=包名类名
|
||||
ARG_TYPES=参数
|
||||
UDF_DESC=udf描述,使用说明
|
||||
VIEW_UDF_FUNCTION_NOTES=查看udf函数
|
||||
UPDATE_UDF_FUNCTION_NOTES=更新udf函数
|
||||
QUERY_UDF_FUNCTION_LIST_PAGING_NOTES=分页查询udf函数列表
|
||||
VERIFY_UDF_FUNCTION_NAME_NOTES=验证udf函数名
|
||||
DELETE_UDF_FUNCTION_NOTES=删除UDF函数
|
||||
AUTHORIZED_FILE_NOTES=授权文件
|
||||
UNAUTHORIZED_FILE_NOTES=取消授权文件
|
||||
AUTHORIZED_UDF_FUNC_NOTES=授权udf函数
|
||||
UNAUTHORIZED_UDF_FUNC_NOTES=取消udf函数授权
|
||||
VERIFY_QUEUE_NOTES=验证队列
|
||||
TENANT_TAG=租户相关操作
|
||||
CREATE_TENANT_NOTES=创建租户
|
||||
TENANT_CODE=租户编码
|
||||
TENANT_NAME=租户名称
|
||||
QUEUE_NAME=队列名
|
||||
PASSWORD=密码
|
||||
DATA_SOURCE_OTHER=jdbc连接参数,格式为:{"key1":"value1",...}
|
||||
PROJECT_TAG=项目相关操作
|
||||
CREATE_PROJECT_NOTES=创建项目
|
||||
PROJECT_DESC=项目描述
|
||||
UPDATE_PROJECT_NOTES=更新项目
|
||||
PROJECT_ID=项目ID
|
||||
QUERY_PROJECT_BY_ID_NOTES=通过项目ID查询项目信息
|
||||
QUERY_PROJECT_LIST_PAGING_NOTES=分页查询项目列表
|
||||
QUERY_ALL_PROJECT_LIST_NOTES=查询所有项目
|
||||
DELETE_PROJECT_BY_ID_NOTES=删除项目通过ID
|
||||
QUERY_UNAUTHORIZED_PROJECT_NOTES=查询未授权的项目
|
||||
QUERY_AUTHORIZED_PROJECT_NOTES=查询授权项目
|
||||
TASK_RECORD_TAG=任务记录相关操作
|
||||
QUERY_TASK_RECORD_LIST_PAGING_NOTES=分页查询任务记录列表
|
||||
CREATE_TOKEN_NOTES=创建token,注意需要先登录
|
||||
QUERY_ACCESS_TOKEN_LIST_NOTES=分页查询access token列表
|
||||
SCHEDULE=定时
|
||||
WARNING_TYPE=发送策略
|
||||
WARNING_GROUP_ID=发送组ID
|
||||
FAILURE_STRATEGY=失败策略
|
||||
RECEIVERS=收件人
|
||||
RECEIVERS_CC=收件人(抄送)
|
||||
WORKER_GROUP_ID=Worker Server分组ID
|
||||
PROCESS_INSTANCE_PRIORITY=流程实例优先级
|
||||
UPDATE_SCHEDULE_NOTES=更新定时
|
||||
SCHEDULE_ID=定时ID
|
||||
ONLINE_SCHEDULE_NOTES=定时上线
|
||||
OFFLINE_SCHEDULE_NOTES=定时下线
|
||||
QUERY_SCHEDULE_NOTES=查询定时
|
||||
QUERY_SCHEDULE_LIST_PAGING_NOTES=分页查询定时
|
||||
LOGIN_TAG=用户登录相关操作
|
||||
USER_NAME=用户名
|
||||
PROJECT_NAME=项目名称
|
||||
CREATE_PROCESS_DEFINITION_NOTES=创建流程定义
|
||||
PROCESS_DEFINITION_NAME=流程定义名称
|
||||
PROCESS_DEFINITION_JSON=流程定义详细信息(json格式)
|
||||
PROCESS_DEFINITION_LOCATIONS=流程定义节点坐标位置信息(json格式)
|
||||
PROCESS_INSTANCE_LOCATIONS=流程实例节点坐标位置信息(json格式)
|
||||
PROCESS_DEFINITION_CONNECTS=流程定义节点图标连接信息(json格式)
|
||||
PROCESS_INSTANCE_CONNECTS=流程实例节点图标连接信息(json格式)
|
||||
PROCESS_DEFINITION_DESC=流程定义描述信息
|
||||
PROCESS_DEFINITION_TAG=流程定义相关操作
|
||||
SIGNOUT_NOTES=退出登录
|
||||
USER_PASSWORD=用户密码
|
||||
UPDATE_PROCESS_INSTANCE_NOTES=更新流程实例
|
||||
QUERY_PROCESS_INSTANCE_LIST_NOTES=查询流程实例列表
|
||||
VERIFY_PROCCESS_DEFINITION_NAME_NOTES=验证流程定义名字
|
||||
LOGIN_NOTES=用户登录
|
||||
UPDATE_PROCCESS_DEFINITION_NOTES=更新流程定义
|
||||
PROCESS_DEFINITION_ID=流程定义ID
|
||||
RELEASE_PROCCESS_DEFINITION_NOTES=发布流程定义
|
||||
QUERY_PROCCESS_DEFINITION_BY_ID_NOTES=查询流程定义通过流程定义ID
|
||||
QUERY_PROCCESS_DEFINITION_LIST_NOTES=查询流程定义列表
|
||||
QUERY_PROCCESS_DEFINITION_LIST_PAGING_NOTES=分页查询流程定义列表
|
||||
QUERY_ALL_DEFINITION_LIST_NOTES=查询所有流程定义
|
||||
PAGE_NO=页码号
|
||||
PROCESS_INSTANCE_ID=流程实例ID
|
||||
PROCESS_INSTANCE_IDS=流程实例ID集合
|
||||
PROCESS_INSTANCE_JSON=流程实例信息(json格式)
|
||||
SCHEDULE_TIME=定时时间
|
||||
SYNC_DEFINE=更新流程实例的信息是否同步到流程定义
|
||||
RECOVERY_PROCESS_INSTANCE_FLAG=是否恢复流程实例
|
||||
SEARCH_VAL=搜索值
|
||||
USER_ID=用户ID
|
||||
PAGE_SIZE=页大小
|
||||
LIMIT=显示多少条
|
||||
VIEW_TREE_NOTES=树状图
|
||||
GET_NODE_LIST_BY_DEFINITION_ID_NOTES=获得任务节点列表通过流程定义ID
|
||||
PROCESS_DEFINITION_ID_LIST=流程定义id列表
|
||||
QUERY_PROCCESS_DEFINITION_All_BY_PROJECT_ID_NOTES=查询流程定义通过项目ID
|
||||
BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES=批量删除流程定义通过流程定义ID集合
|
||||
DELETE_PROCESS_DEFINITION_BY_ID_NOTES=删除流程定义通过流程定义ID
|
||||
QUERY_PROCESS_INSTANCE_BY_ID_NOTES=查询流程实例通过流程实例ID
|
||||
DELETE_PROCESS_INSTANCE_BY_ID_NOTES=删除流程实例通过流程实例ID
|
||||
TASK_ID=任务实例ID
|
||||
SKIP_LINE_NUM=忽略行数
|
||||
QUERY_TASK_INSTANCE_LOG_NOTES=查询任务实例日志
|
||||
DOWNLOAD_TASK_INSTANCE_LOG_NOTES=下载任务实例日志
|
||||
USERS_TAG=用户相关操作
|
||||
SCHEDULER_TAG=定时相关操作
|
||||
CREATE_SCHEDULE_NOTES=创建定时
|
||||
CREATE_USER_NOTES=创建用户
|
||||
TENANT_ID=租户ID
|
||||
QUEUE=使用的队列
|
||||
EMAIL=邮箱
|
||||
PHONE=手机号
|
||||
QUERY_USER_LIST_NOTES=查询用户列表
|
||||
UPDATE_USER_NOTES=更新用户
|
||||
DELETE_USER_BY_ID_NOTES=删除用户通过ID
|
||||
GRANT_PROJECT_NOTES=授权项目
|
||||
PROJECT_IDS=项目IDS(字符串格式,多个项目以","分割)
|
||||
GRANT_RESOURCE_NOTES=授权资源文件
|
||||
RESOURCE_IDS=资源ID列表(字符串格式,多个资源ID以","分割)
|
||||
GET_USER_INFO_NOTES=获取用户信息
|
||||
LIST_USER_NOTES=用户列表
|
||||
VERIFY_USER_NAME_NOTES=验证用户名
|
||||
UNAUTHORIZED_USER_NOTES=取消授权
|
||||
ALERT_GROUP_ID=报警组ID
|
||||
AUTHORIZED_USER_NOTES=授权用户
|
||||
GRANT_UDF_FUNC_NOTES=授权udf函数
|
||||
UDF_IDS=udf函数id列表(字符串格式,多个udf函数ID以","分割)
|
||||
GRANT_DATASOURCE_NOTES=授权数据源
|
||||
DATASOURCE_IDS=数据源ID列表(字符串格式,多个数据源ID以","分割)
|
||||
QUERY_SUBPROCESS_INSTANCE_BY_TASK_ID_NOTES=查询子流程实例通过任务实例ID
|
||||
QUERY_PARENT_PROCESS_INSTANCE_BY_SUB_PROCESS_INSTANCE_ID_NOTES=查询父流程实例信息通过子流程实例ID
|
||||
QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES=查询流程实例全局变量和局部变量
|
||||
VIEW_GANTT_NOTES=浏览Gantt图
|
||||
SUB_PROCESS_INSTANCE_ID=子流程是咧ID
|
||||
TASK_NAME=任务实例名
|
||||
TASK_INSTANCE_TAG=任务实例相关操作
|
||||
LOGGER_TAG=日志相关操作
|
||||
PROCESS_INSTANCE_TAG=流程实例相关操作
|
||||
EXECUTION_STATUS=工作流和任务节点的运行状态
|
||||
HOST=运行任务的主机IP地址
|
||||
START_DATE=开始时间
|
||||
END_DATE=结束时间
|
||||
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_NOTES=通过流程实例ID查询任务列表
|
||||
UPDATE_DATA_SOURCE_NOTES=更新数据源
|
||||
DATA_SOURCE_ID=数据源ID
|
||||
QUERY_DATA_SOURCE_NOTES=查询数据源通过ID
|
||||
QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES=查询数据源列表通过数据源类型
|
||||
QUERY_DATA_SOURCE_LIST_PAGING_NOTES=分页查询数据源列表
|
||||
CONNECT_DATA_SOURCE_NOTES=连接数据源
|
||||
CONNECT_DATA_SOURCE_TEST_NOTES=连接数据源测试
|
||||
DELETE_DATA_SOURCE_NOTES=删除数据源
|
||||
VERIFY_DATA_SOURCE_NOTES=验证数据源
|
||||
UNAUTHORIZED_DATA_SOURCE_NOTES=未授权的数据源
|
||||
AUTHORIZED_DATA_SOURCE_NOTES=授权的数据源
|
||||
DELETE_SCHEDULER_BY_ID_NOTES=根据定时id删除定时数据
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
<#--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN' 'http://www.w3.org/TR/html4/loose.dtd'><html><head><title> dolphinscheduler</title><meta name='Keywords' content=''><meta name='Description' content=''><style type="text/css">table { margin-top:0px; padding-top:0px; border:1px solid; font-size: 14px; color: #333333; border-width: 1px; border-color: #666666; border-collapse: collapse; } table th { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #dedede; } table td { border-width: 1px; padding: 8px; border-style: solid; border-color: #666666; background-color: #ffffff; }</style></head><body style="margin:0;padding:0"><table border="1px" cellpadding="5px" cellspacing="-10px"><thead><#if title??> ${title}</#if></thead><#if content??> ${content}</#if></table></body></html>
|
||||
|
|
@ -1,52 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!-- Logback configuration. See http://logback.qos.ch/manual/index.html -->
|
||||
<configuration scan="true" scanPeriod="120 seconds"> <!--debug="true" -->
|
||||
<property name="log.base" value="logs" />
|
||||
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<appender name="MASTERLOGFILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
|
||||
<file>${log.base}/dolphinscheduler-master.log</file>
|
||||
<filter class="org.apache.dolphinscheduler.server.master.log.MasterLogFilter">
|
||||
<level>INFO</level>
|
||||
</filter>
|
||||
<rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
|
||||
<fileNamePattern>${log.base}/dolphinscheduler-master.%d{yyyy-MM-dd_HH}.%i.log</fileNamePattern>
|
||||
<maxHistory>168</maxHistory>
|
||||
<maxFileSize>200MB</maxFileSize>
|
||||
</rollingPolicy>
|
||||
<encoder>
|
||||
<pattern>
|
||||
[%level] %date{yyyy-MM-dd HH:mm:ss.SSS} %logger{96}:[%line] - %msg%n
|
||||
</pattern>
|
||||
<charset>UTF-8</charset>
|
||||
</encoder>
|
||||
</appender>
|
||||
|
||||
<root level="INFO">
|
||||
<appender-ref ref="MASTERLOGFILE"/>
|
||||
</root>
|
||||
</configuration>
|
||||
|
|
@ -1,33 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AccessTokenMapper">
|
||||
<select id="selectAccessTokenPage" resultType="org.apache.dolphinscheduler.dao.entity.AccessToken">
|
||||
select * from t_ds_access_token t
|
||||
left join t_ds_user u on t.user_id = u.id
|
||||
where 1 = 1
|
||||
<if test="userName != null and userName != ''">
|
||||
and u.user_name like concat ('%', #{userName}, '%')
|
||||
</if>
|
||||
<if test="userId != 0">
|
||||
and t.user_id = #{userId}
|
||||
</if>
|
||||
order by t.update_time desc
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,47 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertGroupMapper">
|
||||
<select id="queryAlertGroupPage" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
|
||||
select * from t_ds_alertgroup
|
||||
where 1 = 1
|
||||
<if test="groupName != null and groupName != ''">
|
||||
and group_name like concat('%', #{groupName}, '%')
|
||||
</if>
|
||||
order by update_time desc
|
||||
</select>
|
||||
<select id="queryByGroupName" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
|
||||
select * from t_ds_alertgroup
|
||||
where group_name=#{groupName}
|
||||
</select>
|
||||
<select id="queryByUserId" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
|
||||
select * from t_ds_alertgroup t
|
||||
left join t_ds_relation_user_alertgroup r on t.id=r.alertgroup_id
|
||||
where r.user_id=#{userId}
|
||||
</select>
|
||||
<select id="queryByAlertType" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
|
||||
select * from t_ds_alertgroup
|
||||
where group_type=#{alertType}
|
||||
</select>
|
||||
<select id="queryAllGroupList" resultType="org.apache.dolphinscheduler.dao.entity.AlertGroup">
|
||||
select *
|
||||
from t_ds_alertgroup
|
||||
order by update_time desc
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.AlertMapper">
|
||||
<select id="listAlertByStatus" resultType="org.apache.dolphinscheduler.dao.entity.Alert">
|
||||
select *
|
||||
from t_ds_alert
|
||||
where alert_status = #{alertStatus}
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,43 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.CommandMapper">
|
||||
<select id="getOneToRun" resultType="org.apache.dolphinscheduler.dao.entity.Command">
|
||||
select command.* from t_ds_command command
|
||||
join t_ds_process_definition definition on command.process_definition_id = definition.id
|
||||
where definition.release_state = 1 AND definition.flag = 1
|
||||
order by command.update_time asc
|
||||
limit 1
|
||||
</select>
|
||||
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount">
|
||||
select cmd.command_type as command_type, count(1) as count
|
||||
from t_ds_command cmd, t_ds_process_definition process
|
||||
where cmd.process_definition_id = process.id
|
||||
<if test="projectIdArray != null and projectIdArray.length != 0">
|
||||
and process.project_id in
|
||||
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=",">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
<if test="startTime != null and endTime != null">
|
||||
and cmd.start_time <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime}
|
||||
</if>
|
||||
group by cmd.command_type
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.DataSourceMapper">
|
||||
<select id="queryDataSourceByType" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
|
||||
select *
|
||||
from t_ds_datasource
|
||||
where type=#{type}
|
||||
<if test="userId != 0">
|
||||
and id in
|
||||
(select datasource_id
|
||||
from t_ds_relation_datasource_user
|
||||
where user_id=#{userId}
|
||||
union select id as datasource_id
|
||||
from t_ds_datasource
|
||||
where user_id=#{userId}
|
||||
)
|
||||
</if>
|
||||
|
||||
</select>
|
||||
|
||||
<select id="selectPaging" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
|
||||
select *
|
||||
from t_ds_datasource
|
||||
where 1 =1
|
||||
<if test="userId != 0">
|
||||
and id in
|
||||
(select datasource_id
|
||||
from t_ds_relation_datasource_user
|
||||
where user_id=#{userId}
|
||||
union select id as datasource_id
|
||||
from t_ds_datasource
|
||||
where user_id=#{userId}
|
||||
)
|
||||
</if>
|
||||
<if test="name != null and name != ''">
|
||||
and name like concat ('%', #{name}, '%')
|
||||
</if>
|
||||
order by update_time desc
|
||||
</select>
|
||||
<select id="queryDataSourceByName" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
|
||||
select *
|
||||
from t_ds_datasource
|
||||
where name=#{name}
|
||||
</select>
|
||||
<select id="queryAuthedDatasource" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
|
||||
select datasource.*
|
||||
from t_ds_datasource datasource, t_ds_relation_datasource_user rel
|
||||
where datasource.id = rel.datasource_id AND rel.user_id = #{userId}
|
||||
</select>
|
||||
<select id="queryDatasourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
|
||||
select *
|
||||
from t_ds_datasource
|
||||
where user_id <![CDATA[ <> ]]> #{userId}
|
||||
</select>
|
||||
<select id="listAllDataSourceByType" resultType="org.apache.dolphinscheduler.dao.entity.DataSource">
|
||||
select *
|
||||
from t_ds_datasource
|
||||
where type = #{type}
|
||||
</select>
|
||||
|
||||
|
||||
</mapper>
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper">
|
||||
<delete id="deleteByUserId">
|
||||
delete from t_ds_relation_datasource_user
|
||||
where user_id = #{userId}
|
||||
|
||||
</delete>
|
||||
<delete id="deleteByDatasourceId">
|
||||
delete from t_ds_relation_datasource_user
|
||||
where datasource_id = #{datasourceId}
|
||||
</delete>
|
||||
</mapper>
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper">
|
||||
<select id="countCommandState" resultType="org.apache.dolphinscheduler.dao.entity.CommandCount">
|
||||
select cmd.command_type as command_type, count(1) as count
|
||||
from t_ds_error_command cmd, t_ds_process_definition process
|
||||
where cmd.process_definition_id = process.id
|
||||
<if test="projectIdArray != null and projectIdArray.length != 0">
|
||||
and process.project_id in
|
||||
<foreach collection="projectIdArray" index="index" item="i" open="(" close=")" separator=",">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
<if test="startTime != null and endTime != null">
|
||||
and cmd.startTime <![CDATA[ >= ]]> #{startTime} and cmd.update_time <![CDATA[ <= ]]> #{endTime}
|
||||
</if>
|
||||
group by cmd.command_type
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,96 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper">
|
||||
<select id="queryByDefineName" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
|
||||
select pd.*,u.user_name,p.name as project_name,t.tenant_code,t.tenant_name,q.queue,q.queue_name
|
||||
from t_ds_process_definition pd
|
||||
JOIN t_ds_user u ON pd.user_id = u.id
|
||||
JOIN t_ds_project p ON pd.project_id = p.id
|
||||
JOIN t_ds_tenant t ON t.id = u.tenant_id
|
||||
JOIN t_ds_queue q ON t.queue_id = q.id
|
||||
WHERE p.id = #{projectId}
|
||||
and pd.name = #{processDefinitionName}
|
||||
</select>
|
||||
<select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
|
||||
SELECT td.*,sc.schedule_release_state,tu.user_name
|
||||
FROM t_ds_process_definition td
|
||||
left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by process_definition_id,release_state) sc on sc.process_definition_id = td.id
|
||||
left join t_ds_user tu on td.user_id = tu.id
|
||||
where td.project_id = #{projectId}
|
||||
<if test=" isAdmin == false ">
|
||||
and tu.user_type=1
|
||||
</if>
|
||||
<if test=" searchVal != null and searchVal != ''">
|
||||
and td.name like concat('%', #{searchVal}, '%')
|
||||
</if>
|
||||
<if test=" userId != 0">
|
||||
and td.user_id = #{userId}
|
||||
</if>
|
||||
order by sc.schedule_release_state desc,td.update_time desc
|
||||
</select>
|
||||
|
||||
<select id="queryAllDefinitionList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
|
||||
select *
|
||||
from t_ds_process_definition
|
||||
where project_id = #{projectId}
|
||||
order by create_time desc
|
||||
</select>
|
||||
<select id="queryDefinitionListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
|
||||
select *
|
||||
from t_ds_process_definition
|
||||
where tenant_id = #{tenantId}
|
||||
</select>
|
||||
<select id="queryDefinitionListByIdList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
|
||||
select *
|
||||
from t_ds_process_definition
|
||||
where id in
|
||||
<foreach collection="ids" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</select>
|
||||
<select id="countDefinitionGroupByUser" resultType="org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser">
|
||||
SELECT td.user_id as user_id, tu.user_name as user_name, count(0) as count
|
||||
FROM t_ds_process_definition td
|
||||
JOIN t_ds_user tu on tu.id=td.user_id
|
||||
where 1 = 1
|
||||
<if test=" isAdmin == false ">
|
||||
and tu.user_type=1
|
||||
</if>
|
||||
<if test="projectIds != null and projectIds.length != 0">
|
||||
and td.project_id in
|
||||
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
group by td.user_id,tu.user_name
|
||||
</select>
|
||||
<select id="queryByDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition">
|
||||
SELECT
|
||||
pd.*, u.user_name,
|
||||
p.name AS project_name
|
||||
FROM
|
||||
t_ds_process_definition pd,
|
||||
t_ds_user u,
|
||||
t_ds_project p
|
||||
WHERE
|
||||
pd.user_id = u.id AND pd.project_id = p.id
|
||||
AND pd.id = #{processDefineId}
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,43 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper">
|
||||
<delete id="deleteByParentProcessId">
|
||||
delete
|
||||
from t_ds_relation_process_instance
|
||||
where parent_process_instance_id=#{parentProcessId}
|
||||
|
||||
</delete>
|
||||
<select id="queryByParentId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap">
|
||||
select *
|
||||
from t_ds_relation_process_instance
|
||||
where parent_process_instance_id = #{parentProcessId}
|
||||
and parent_task_instance_id = #{parentTaskId}
|
||||
</select>
|
||||
<select id="queryBySubProcessId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap">
|
||||
select *
|
||||
from t_ds_relation_process_instance
|
||||
where process_instance_id = #{subProcessId}
|
||||
</select>
|
||||
<select id="querySubIdListByParentId" resultType="java.lang.Integer">
|
||||
select process_instance_id
|
||||
from t_ds_relation_process_instance
|
||||
where parent_process_instance_id = #{parentInstanceId}
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,182 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper">
|
||||
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select inst.*
|
||||
from t_ds_process_instance inst
|
||||
where inst.id = #{processId}
|
||||
</select>
|
||||
<select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select *
|
||||
from t_ds_process_instance
|
||||
where 1=1
|
||||
<if test="host != null and host != ''">
|
||||
and host=#{host}
|
||||
</if>
|
||||
and state in
|
||||
<foreach collection="states" item="i" open="(" close=")" separator=",">
|
||||
#{i}
|
||||
</foreach>
|
||||
order by id asc
|
||||
</select>
|
||||
|
||||
<select id="queryByTenantIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select *
|
||||
from t_ds_process_instance
|
||||
where 1=1
|
||||
<if test="tenantId != -1">
|
||||
and tenant_id =#{tenantId}
|
||||
</if>
|
||||
and state in
|
||||
<foreach collection="states" item="i" open="(" close=")" separator=",">
|
||||
#{i}
|
||||
</foreach>
|
||||
order by id asc
|
||||
</select>
|
||||
|
||||
<select id="queryByWorkerGroupIdAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select *
|
||||
from t_ds_process_instance
|
||||
where 1=1
|
||||
<if test="workerGroupId != -1">
|
||||
and worker_group_id =#{workerGroupId}
|
||||
</if>
|
||||
and state in
|
||||
<foreach collection="states" item="i" open="(" close=")" separator=",">
|
||||
#{i}
|
||||
</foreach>
|
||||
order by id asc
|
||||
</select>
|
||||
|
||||
<select id="queryProcessInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select instance.*
|
||||
from t_ds_process_instance instance
|
||||
join t_ds_process_definition define ON instance.process_definition_id = define.id
|
||||
where 1=1
|
||||
and instance.is_sub_process=0
|
||||
and define.project_id = #{projectId}
|
||||
<if test="processDefinitionId != 0">
|
||||
and instance.process_definition_id = #{processDefinitionId}
|
||||
</if>
|
||||
<if test="searchVal != null and searchVal != ''">
|
||||
and instance.name like concat('%', #{searchVal}, '%')
|
||||
</if>
|
||||
<if test="startTime != null ">
|
||||
and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <=]]> #{endTime}
|
||||
</if>
|
||||
<if test="states != null and states != ''">
|
||||
and instance.state in
|
||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
<if test="host != null and host != ''">
|
||||
and instance.host like concat('%', #{host}, '%')
|
||||
</if>
|
||||
order by instance.start_time desc
|
||||
</select>
|
||||
<update id="setFailoverByHostAndStateArray">
|
||||
update t_ds_process_instance
|
||||
set host=null
|
||||
where host =#{host} and state in
|
||||
<foreach collection="states" index="index" item="i" open="(" close=")" separator=",">
|
||||
#{i}
|
||||
</foreach>
|
||||
</update>
|
||||
<update id="updateProcessInstanceByState">
|
||||
update t_ds_process_instance
|
||||
set state = #{destState}
|
||||
where state = #{originState}
|
||||
</update>
|
||||
|
||||
<update id="updateProcessInstanceByTenantId">
|
||||
update t_ds_process_instance
|
||||
set tenant_id = #{destTenantId}
|
||||
where tenant_id = #{originTenantId}
|
||||
</update>
|
||||
|
||||
<update id="updateProcessInstanceByWorkerGroupId">
|
||||
update t_ds_process_instance
|
||||
set worker_group_id = #{destWorkerGroupId}
|
||||
where worker_group_id = #{originWorkerGroupId}
|
||||
</update>
|
||||
|
||||
<select id="countInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount">
|
||||
select t.state, count(0) as count
|
||||
from t_ds_process_instance t
|
||||
join t_ds_process_definition d on d.id=t.process_definition_id
|
||||
join t_ds_project p on p.id=d.project_id
|
||||
where 1 = 1
|
||||
and t.is_sub_process = 0
|
||||
<if test="startTime != null and endTime != null">
|
||||
and t.start_time >= #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime}
|
||||
</if>
|
||||
<if test="projectIds != null and projectIds.length != 0">
|
||||
and p.id in
|
||||
<foreach collection="projectIds" index="index" item="i" open="(" close=")" separator=",">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
group by t.state
|
||||
</select>
|
||||
<select id="queryByProcessDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select *
|
||||
from t_ds_process_instance
|
||||
where process_definition_id=#{processDefinitionId}
|
||||
order by start_time desc limit #{size}
|
||||
</select>
|
||||
<select id="queryLastSchedulerProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select *
|
||||
from t_ds_process_instance
|
||||
where process_definition_id=#{processDefinitionId}
|
||||
<if test="startTime!=null and endTime != null ">
|
||||
and schedule_time between #{startTime} and #{endTime}
|
||||
</if>
|
||||
order by end_time desc limit 1
|
||||
</select>
|
||||
<select id="queryLastRunningProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select *
|
||||
from t_ds_process_instance
|
||||
where 1=1
|
||||
<if test="states !=null and states.length != 0">
|
||||
and state in
|
||||
<foreach collection="states" item="i" index="index" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
<if test="startTime!=null and endTime != null ">
|
||||
and process_definition_id=#{processDefinitionId}
|
||||
and (schedule_time between #{startTime} and #{endTime} or start_time between #{startTime} and #{endTime})
|
||||
</if>
|
||||
order by start_time desc limit 1
|
||||
</select>
|
||||
<select id="queryLastManualProcess" resultType="org.apache.dolphinscheduler.dao.entity.ProcessInstance">
|
||||
select *
|
||||
from t_ds_process_instance
|
||||
where process_definition_id=#{processDefinitionId}
|
||||
and schedule_time is null
|
||||
<if test="startTime!=null and endTime != null ">
|
||||
and start_time between #{startTime} and #{endTime}
|
||||
</if>
|
||||
order by end_time desc limit 1
|
||||
</select>
|
||||
|
||||
|
||||
</mapper>
|
||||
|
|
@ -1,68 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectMapper">
|
||||
<select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.Project">
|
||||
select p.*,u.user_name as user_name
|
||||
from t_ds_project p
|
||||
join t_ds_user u on p.user_id = u.id
|
||||
where p.id = #{projectId}
|
||||
</select>
|
||||
<select id="queryByName" resultType="org.apache.dolphinscheduler.dao.entity.Project">
|
||||
select p.*,u.user_name as user_name
|
||||
from t_ds_project p
|
||||
join t_ds_user u on p.user_id = u.id
|
||||
where p.name = #{projectName}
|
||||
limit 1
|
||||
</select>
|
||||
<select id="queryProjectListPaging" resultType="org.apache.dolphinscheduler.dao.entity.Project">
|
||||
select p.*,u.user_name as user_name,
|
||||
(SELECT COUNT(*) FROM t_ds_process_definition AS def WHERE def.project_id = p.id) AS def_count,
|
||||
(SELECT COUNT(*) FROM t_ds_process_definition def, t_ds_process_instance inst WHERE def.id = inst.process_definition_id AND def.project_id = p.id AND inst.state=1 ) as inst_running_count
|
||||
from t_ds_project p
|
||||
join t_ds_user u on u.id=p.user_id
|
||||
where 1=1
|
||||
<if test="userId != 0">
|
||||
and p.id in
|
||||
(select project_id from t_ds_relation_project_user where user_id=#{userId}
|
||||
union select id as project_id from t_ds_project where user_id=#{userId}
|
||||
)
|
||||
</if>
|
||||
<if test="searchName!=null and searchName != ''">
|
||||
and p.name like concat('%', #{searchName}, '%')
|
||||
</if>
|
||||
order by p.create_time desc
|
||||
</select>
|
||||
<select id="queryAuthedProjectListByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project">
|
||||
select p.*
|
||||
from t_ds_project p,t_ds_relation_project_user rel
|
||||
where p.id = rel.project_id and rel.user_id= #{userId}
|
||||
</select>
|
||||
<select id="queryProjectExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project">
|
||||
select *
|
||||
from t_ds_project
|
||||
where user_id <![CDATA[ <> ]]> #{userId}
|
||||
</select>
|
||||
<select id="queryProjectCreatedByUser" resultType="org.apache.dolphinscheduler.dao.entity.Project">
|
||||
select *
|
||||
from t_ds_project
|
||||
where user_id = #{userId}
|
||||
</select>
|
||||
|
||||
</mapper>
|
||||
|
|
@ -1,36 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectUserMapper">
|
||||
<delete id="deleteProjectRelation">
|
||||
delete from t_ds_relation_project_user
|
||||
where 1=1
|
||||
and user_id = #{userId}
|
||||
<if test="projectId != 0 ">
|
||||
and project_id = #{projectId}
|
||||
</if>
|
||||
</delete>
|
||||
<select id="queryProjectRelation" resultType="org.apache.dolphinscheduler.dao.entity.ProjectUser">
|
||||
select *
|
||||
from t_ds_relation_project_user
|
||||
where project_id = #{projectId}
|
||||
and user_id = #{userId}
|
||||
limit 1
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,42 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.QueueMapper">
|
||||
<select id="queryQueuePaging" resultType="org.apache.dolphinscheduler.dao.entity.Queue">
|
||||
select *
|
||||
from t_ds_queue
|
||||
where 1= 1
|
||||
<if test="searchVal != null and searchVal != ''">
|
||||
and queue_name like concat('%', #{searchVal}, '%')
|
||||
</if>
|
||||
order by update_time desc
|
||||
</select>
|
||||
<select id="queryAllQueueList" resultType="org.apache.dolphinscheduler.dao.entity.Queue">
|
||||
select *
|
||||
from t_ds_queue
|
||||
where 1=1
|
||||
<if test="queue != null and queue != ''">
|
||||
and queue = #{queue}
|
||||
</if>
|
||||
<if test="queueName != null and queueName != ''">
|
||||
and queue_name =#{queueName}
|
||||
</if>
|
||||
</select>
|
||||
|
||||
</mapper>
|
||||
|
|
@ -1,74 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceMapper">
|
||||
<select id="queryResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
|
||||
select *
|
||||
from t_ds_resources
|
||||
where 1= 1
|
||||
<if test="alias != null and alias != ''">
|
||||
and alias = #{alias}
|
||||
</if>
|
||||
<if test="type != -1">
|
||||
and type = #{type}
|
||||
</if>
|
||||
<if test="userId != 0">
|
||||
and user_id = #{userId}
|
||||
</if>
|
||||
</select>
|
||||
<select id="queryResourceListAuthored" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
|
||||
select *
|
||||
from t_ds_resources
|
||||
where 1 = 1
|
||||
<if test="type != -1">
|
||||
and type=#{type}
|
||||
</if>
|
||||
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
|
||||
union select id as resources_id from t_ds_resources where user_id=#{userId})
|
||||
</select>
|
||||
<select id="queryResourcePaging" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
|
||||
select *
|
||||
from t_ds_resources
|
||||
where type=#{type}
|
||||
<if test="userId != 0">
|
||||
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
|
||||
union select id as resources_id from t_ds_resources where user_id=#{userId})
|
||||
</if>
|
||||
<if test="searchVal != null and searchVal != ''">
|
||||
and alias like concat('%', #{searchVal}, '%')
|
||||
</if>
|
||||
order by update_time desc
|
||||
</select>
|
||||
<select id="queryAuthorizedResourceList" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
|
||||
select r.*
|
||||
from t_ds_resources r,t_ds_relation_resources_user rel
|
||||
where r.id = rel.resources_id AND rel.user_id = #{userId}
|
||||
</select>
|
||||
<select id="queryResourceExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Resource">
|
||||
select *
|
||||
from t_ds_resources
|
||||
where user_id <![CDATA[ <> ]]> #{userId}
|
||||
</select>
|
||||
<select id="queryTenantCodeByResourceName" resultType="java.lang.String">
|
||||
select tenant_code
|
||||
from t_ds_tenant t, t_ds_user u, t_ds_resources res
|
||||
where t.id = u.tenant_id and u.id = res.user_id and res.type=0
|
||||
and res.alias= #{resName}
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper">
|
||||
<delete id="deleteResourceUser">
|
||||
delete
|
||||
from t_ds_relation_resources_user
|
||||
where 1 = 1
|
||||
<if test="userId != 0">
|
||||
and user_id = #{userId}
|
||||
</if>
|
||||
<if test="resourceId != 0">
|
||||
and resources_id = #{resourceId}
|
||||
</if>
|
||||
</delete>
|
||||
</mapper>
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.ScheduleMapper">
|
||||
<select id="queryByProcessDefineIdPaging" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
|
||||
select p_f.name as process_definition_name, p.name as project_name,u.user_name,s.*
|
||||
from t_ds_schedules s
|
||||
join t_ds_process_definition p_f on s.process_definition_id = p_f.id
|
||||
join t_ds_project as p on p_f.project_id = p.id
|
||||
join t_ds_user as u on s.user_id = u.id
|
||||
where 1=1
|
||||
<if test="processDefinitionId!= 0">
|
||||
and s.process_definition_id = #{processDefinitionId}
|
||||
</if>
|
||||
order by s.update_time desc
|
||||
</select>
|
||||
<select id="querySchedulerListByProjectName" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
|
||||
select p_f.name as process_definition_name, p_f.description as definition_description, p.name as project_name,u.user_name,s.*
|
||||
from t_ds_schedules s
|
||||
join t_ds_process_definition p_f on s.process_definition_id = p_f.id
|
||||
join t_ds_project as p on p_f.project_id = p.id
|
||||
join t_ds_user as u on s.user_id = u.id
|
||||
where p.name = #{projectName}
|
||||
</select>
|
||||
<select id="selectAllByProcessDefineArray" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
|
||||
select *
|
||||
from t_ds_schedules
|
||||
where 1= 1
|
||||
<if test="processDefineIds != null and processDefineIds.length != 0 ">
|
||||
and process_definition_id in
|
||||
<foreach collection="processDefineIds" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
and release_state = 1
|
||||
</select>
|
||||
<select id="queryByProcessDefinitionId" resultType="org.apache.dolphinscheduler.dao.entity.Schedule">
|
||||
select *
|
||||
from t_ds_schedules
|
||||
where process_definition_id =#{processDefinitionId}
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.SessionMapper">
|
||||
<select id="queryByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Session">
|
||||
select *
|
||||
from t_ds_session
|
||||
where user_id = #{userId}
|
||||
</select>
|
||||
|
||||
<select id="queryByUserIdAndIp" resultType="org.apache.dolphinscheduler.dao.entity.Session">
|
||||
select *
|
||||
from t_ds_session
|
||||
where user_id = #{userId} AND ip = #{ip}
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,129 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper">
|
||||
<update id="setFailoverByHostAndStateArray">
|
||||
update t_ds_task_instance
|
||||
set state = #{destStatus}
|
||||
where host = #{host}
|
||||
and state in
|
||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</update>
|
||||
<select id="queryTaskByProcessIdAndState" resultType="java.lang.Integer">
|
||||
select id
|
||||
from t_ds_task_instance
|
||||
WHERE process_instance_id = #{processInstanceId}
|
||||
and state = #{state}
|
||||
and flag = 1
|
||||
</select>
|
||||
<select id="findValidTaskListByProcessId" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
|
||||
select *
|
||||
from t_ds_task_instance
|
||||
WHERE process_instance_id = #{processInstanceId}
|
||||
and flag = #{flag}
|
||||
order by start_time desc
|
||||
</select>
|
||||
<select id="queryByHostAndStatus" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
|
||||
select *
|
||||
from t_ds_task_instance
|
||||
where 1 = 1
|
||||
<if test="host != null and host != ''">
|
||||
and host = #{host}
|
||||
</if>
|
||||
<if test="states != null and states.length != 0">
|
||||
and state in
|
||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
</select>
|
||||
<select id="countTaskInstanceStateByUser" resultType="org.apache.dolphinscheduler.dao.entity.ExecuteStatusCount">
|
||||
select state, count(0) as count
|
||||
from t_ds_task_instance t
|
||||
left join t_ds_process_definition d on d.id=t.process_definition_id
|
||||
left join t_ds_project p on p.id=d.project_id
|
||||
where 1=1
|
||||
<if test="projectIds != null and projectIds.length != 0">
|
||||
and d.project_id in
|
||||
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
<if test="startTime != null and endTime != null">
|
||||
and t.start_time > #{startTime} and t.start_time <![CDATA[ <= ]]> #{endTime}
|
||||
</if>
|
||||
group by t.state
|
||||
</select>
|
||||
<select id="queryByInstanceIdAndName" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
|
||||
select *
|
||||
from t_ds_task_instance
|
||||
where process_instance_id = #{processInstanceId}
|
||||
and name = #{name}
|
||||
and flag = 1
|
||||
limit 1
|
||||
</select>
|
||||
<select id="countTask" resultType="java.lang.Integer">
|
||||
select count(1) as count
|
||||
from t_ds_task_instance task,t_ds_process_definition process
|
||||
where task.process_definition_id=process.id
|
||||
<if test="projectIds != null and projectIds.length != 0">
|
||||
and process.project_id in
|
||||
<foreach collection="projectIds" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
<if test="taskIds != null and taskIds.length != 0">
|
||||
and task.id in
|
||||
<foreach collection="taskIds" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
</select>
|
||||
<select id="queryTaskInstanceListPaging" resultType="org.apache.dolphinscheduler.dao.entity.TaskInstance">
|
||||
select instance.*,process.name as process_instance_name
|
||||
from t_ds_task_instance instance
|
||||
join t_ds_process_definition define ON instance.process_definition_id = define.id
|
||||
join t_ds_process_instance process on process.id=instance.process_instance_id
|
||||
where define.project_id = #{projectId}
|
||||
<if test="startTime != null">
|
||||
and instance.start_time > #{startTime} and instance.start_time <![CDATA[ <=]]> #{endTime}
|
||||
</if>
|
||||
<if test="processInstanceId != 0">
|
||||
and instance.process_instance_id = #{processInstanceId}
|
||||
</if>
|
||||
<if test="searchVal != null and searchVal != ''">
|
||||
and instance.name like concat('%', #{searchVal}, '%')
|
||||
</if>
|
||||
<if test="taskName != null and taskName != ''">
|
||||
and instance.name=#{taskName}
|
||||
</if>
|
||||
<if test="states != null and states.length != 0">
|
||||
and instance.state in
|
||||
<foreach collection="states" index="index" item="i" open="(" separator="," close=")">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
<if test="host != null and host != ''">
|
||||
and instance.host like concat('%', #{host}, '%')
|
||||
</if>
|
||||
order by instance.start_time desc
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,41 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.TenantMapper">
|
||||
<select id="queryById" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
|
||||
SELECT t.*,q.queue_name,q.queue
|
||||
FROM t_ds_tenant t,t_ds_queue q
|
||||
WHERE t.queue_id = q.id
|
||||
and t.id = #{tenantId}
|
||||
</select>
|
||||
<select id="queryByTenantCode" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
|
||||
select *
|
||||
from t_ds_tenant
|
||||
where tenant_code = #{tenantCode}
|
||||
</select>
|
||||
<select id="queryTenantPaging" resultType="org.apache.dolphinscheduler.dao.entity.Tenant">
|
||||
SELECT t.*,q.queue_name
|
||||
FROM t_ds_tenant t,t_ds_queue q
|
||||
WHERE t.queue_id = q.id
|
||||
<if test="searchVal != null and searchVal != ''">
|
||||
and t.tenant_name like concat('%', #{searchVal}, '%')
|
||||
</if>
|
||||
order by t.update_time desc
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UDFUserMapper">
|
||||
<delete id="deleteByUserId">
|
||||
delete from t_ds_relation_udfs_user
|
||||
where user_id = #{userId}
|
||||
</delete>
|
||||
<delete id="deleteByUdfFuncId">
|
||||
delete from t_ds_relation_udfs_user
|
||||
where udf_id = #{udfFuncId}
|
||||
</delete>
|
||||
</mapper>
|
||||
|
|
@ -1,71 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper">
|
||||
<select id="queryUdfByIdStr" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
|
||||
select *
|
||||
from t_ds_udfs
|
||||
where 1 = 1
|
||||
<if test="ids != null and ids != ''">
|
||||
and id in
|
||||
<foreach collection="ids" item="i" open="(" close=")" separator=",">
|
||||
#{i}
|
||||
</foreach>
|
||||
</if>
|
||||
<if test="funcNames != null and funcNames != ''">
|
||||
and func_name = #{funcNames}
|
||||
</if>
|
||||
order by id asc
|
||||
</select>
|
||||
<select id="queryUdfFuncPaging" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
|
||||
select *
|
||||
from t_ds_udfs
|
||||
where 1=1
|
||||
<if test="searchVal!= null and searchVal != ''">
|
||||
and func_name like concat('%', #{searchVal}, '%')
|
||||
</if>
|
||||
<if test="userId != 0">
|
||||
and id in (
|
||||
select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
|
||||
union select id as udf_id from t_ds_udfs where user_id=#{userId})
|
||||
</if>
|
||||
order by create_time desc
|
||||
</select>
|
||||
<select id="getUdfFuncByType" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
|
||||
select *
|
||||
from t_ds_udfs
|
||||
where type=#{type}
|
||||
<if test="userId != 0">
|
||||
and id in (
|
||||
select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
|
||||
union select id as udf_id from t_ds_udfs where user_id=#{userId})
|
||||
</if>
|
||||
</select>
|
||||
<select id="queryUdfFuncExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
|
||||
select *
|
||||
from t_ds_udfs
|
||||
where user_id <![CDATA[ <> ]]> #{userId}
|
||||
</select>
|
||||
<select id="queryAuthedUdfFunc" resultType="org.apache.dolphinscheduler.dao.entity.UdfFunc">
|
||||
SELECT u.*
|
||||
from t_ds_udfs u,t_ds_relation_udfs_user rel
|
||||
WHERE u.id = rel.udf_id
|
||||
AND rel.user_id = #{userId}
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserAlertGroupMapper">
|
||||
<delete id="deleteByAlertgroupId">
|
||||
delete from t_ds_relation_user_alertgroup
|
||||
where alertgroup_id = #{alertgroupId}
|
||||
</delete>
|
||||
<select id="listUserByAlertgroupId" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
SELECT u.*
|
||||
FROM t_ds_relation_user_alertgroup g_u
|
||||
JOIN t_ds_user u on g_u.user_id = u.id
|
||||
WHERE g_u.alertgroup_id = #{alertgroupId}
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.UserMapper">
|
||||
<select id="queryAllGeneralUser" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
select * from t_ds_user
|
||||
where user_type=1;
|
||||
</select>
|
||||
<select id="queryByUserNameAccurately" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
select * from t_ds_user
|
||||
where user_name=#{userName}
|
||||
</select>
|
||||
<select id="queryUserByNamePassword" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
select * from t_ds_user
|
||||
where user_name=#{userName} and user_password = #{password}
|
||||
</select>
|
||||
<select id="queryUserPaging" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
select u.id,u.user_name,u.user_password,u.user_type,u.email,u.phone,u.tenant_id,u.create_time,
|
||||
u.update_time,t.tenant_name,
|
||||
case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue, q.queue_name
|
||||
from t_ds_user u
|
||||
left join t_ds_tenant t on u.tenant_id=t.id
|
||||
left join t_ds_queue q on t.queue_id = q.id
|
||||
where 1=1
|
||||
<if test="userName!=null and userName != ''" >
|
||||
and u.user_name like concat ('%', #{userName}, '%')
|
||||
</if>
|
||||
order by u.update_time desc
|
||||
</select>
|
||||
<select id="queryDetailsById" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
select u.*, t.tenant_name,
|
||||
case when u.queue <![CDATA[ <> ]]> '' then u.queue else q.queue_name end as queue_name
|
||||
from t_ds_user u,t_ds_tenant t,t_ds_queue q
|
||||
WHERE u.tenant_id = t.id and t.queue_id = q.id and u.id = #{userId}
|
||||
</select>
|
||||
<select id="queryUserListByAlertGroupId" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
select u.*
|
||||
from t_ds_user u, t_ds_relation_user_alertgroup rel
|
||||
where u.id = rel.user_id AND rel.alertgroup_id = #{alertgroupId}
|
||||
</select>
|
||||
<select id="queryUserListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
select *
|
||||
from t_ds_user
|
||||
where tenant_id = #{tenantId}
|
||||
</select>
|
||||
<select id="queryTenantCodeByUserId" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
SELECT u.*,t.tenant_code
|
||||
FROM t_ds_user u, t_ds_tenant t
|
||||
WHERE u.tenant_id = t.id AND u.id = #{userId}
|
||||
</select>
|
||||
<select id="queryUserByToken" resultType="org.apache.dolphinscheduler.dao.entity.User">
|
||||
select u.*
|
||||
from t_ds_user u ,t_ds_access_token t
|
||||
where u.id = t.user_id and token=#{token} and t.expire_time > NOW()
|
||||
</select>
|
||||
</mapper>
|
||||
|
|
@ -1,40 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8" ?>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
~ contributor license agreements. See the NOTICE file distributed with
|
||||
~ this work for additional information regarding copyright ownership.
|
||||
~ The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
~ (the "License"); you may not use this file except in compliance with
|
||||
~ the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" >
|
||||
<mapper namespace="org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper">
|
||||
<select id="queryAllWorkerGroup" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
|
||||
select *
|
||||
from t_ds_worker_group
|
||||
order by update_time desc
|
||||
</select>
|
||||
<select id="queryWorkerGroupByName" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
|
||||
select *
|
||||
from t_ds_worker_group
|
||||
where name = #{name}
|
||||
</select>
|
||||
<select id="queryListPaging" resultType="org.apache.dolphinscheduler.dao.entity.WorkerGroup">
|
||||
select *
|
||||
from t_ds_worker_group
|
||||
where 1 = 1
|
||||
<if test="searchVal != null and searchVal != ''">
|
||||
and name like concat('%', #{searchVal}, '%')
|
||||
</if>
|
||||
order by update_time desc
|
||||
</select>
|
||||
</mapper>
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue