[E2E] Add hive datasource e2e test and fix s3 docker image latest tag (#8276)
parent
9fff2afe59
commit
43bd640aad
|
|
@ -99,6 +99,8 @@ jobs:
|
|||
class: org.apache.dolphinscheduler.e2e.cases.PostgresDataSourceE2ETest
|
||||
- name: SqlServerDataSource
|
||||
class: org.apache.dolphinscheduler.e2e.cases.SqlServerDataSourceE2ETest
|
||||
- name: HiveDataSource
|
||||
class: org.apache.dolphinscheduler.e2e.cases.HiveDataSourceE2ETest
|
||||
env:
|
||||
RECORDING_PATH: /tmp/recording-${{ matrix.case.name }}
|
||||
steps:
|
||||
|
|
|
|||
|
|
@ -142,7 +142,7 @@ public class FunctionManageE2ETest {
|
|||
FunctionManagePage page = new FunctionManagePage(browser);
|
||||
|
||||
new WebDriverWait(page.driver(), 10)
|
||||
.until(ExpectedConditions.urlContains("/#/resource/func"));
|
||||
.until(ExpectedConditions.elementToBeClickable(By.id("btnCreateUdfFunction")));
|
||||
|
||||
page.createUdfFunction(testUdfFunctionName, testClassName, testUploadUdfFileName, testDescription);
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package org.apache.dolphinscheduler.e2e.cases;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.awaitility.Awaitility.await;
|
||||
|
||||
import org.apache.dolphinscheduler.e2e.core.DolphinScheduler;
|
||||
import org.apache.dolphinscheduler.e2e.pages.LoginPage;
|
||||
import org.apache.dolphinscheduler.e2e.pages.datasource.DataSourcePage;
|
||||
|
||||
import org.junit.jupiter.api.BeforeAll;
|
||||
import org.junit.jupiter.api.Order;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.openqa.selenium.By;
|
||||
import org.openqa.selenium.WebElement;
|
||||
import org.openqa.selenium.remote.RemoteWebDriver;
|
||||
import org.openqa.selenium.support.ui.ExpectedConditions;
|
||||
import org.openqa.selenium.support.ui.WebDriverWait;
|
||||
|
||||
@DolphinScheduler(composeFiles = "docker/datasource-hive/docker-compose.yaml")
|
||||
public class HiveDataSourceE2ETest {
|
||||
private static RemoteWebDriver browser;
|
||||
|
||||
private static final String tenant = System.getProperty("user.name");
|
||||
|
||||
private static final String user = "admin";
|
||||
|
||||
private static final String password = "dolphinscheduler123";
|
||||
|
||||
private static final String dataSourceType = "HIVE";
|
||||
|
||||
private static final String dataSourceName = "hive_test";
|
||||
|
||||
private static final String dataSourceDescription = "hive_test";
|
||||
|
||||
private static final String ip = "hive-server";
|
||||
|
||||
private static final String port = "10000";
|
||||
|
||||
private static final String userName = "hadoop";
|
||||
|
||||
private static final String hivePassword = "";
|
||||
|
||||
private static final String database = "default";
|
||||
|
||||
private static final String jdbcParams = "";
|
||||
|
||||
|
||||
@BeforeAll
|
||||
public static void setup() {
|
||||
new LoginPage(browser)
|
||||
.login(user, password)
|
||||
.goToNav(DataSourcePage.class);
|
||||
}
|
||||
|
||||
@Test
|
||||
@Order(10)
|
||||
void testCreateHiveDataSource() {
|
||||
final DataSourcePage page = new DataSourcePage(browser);
|
||||
|
||||
page.createDataSource(dataSourceType, dataSourceName, dataSourceDescription, ip, port, userName, hivePassword, database, jdbcParams);
|
||||
|
||||
new WebDriverWait(page.driver(), 10).until(ExpectedConditions.invisibilityOfElementLocated(new By.ById("dialogCreateDataSource")));
|
||||
|
||||
await().untilAsserted(() -> assertThat(page.dataSourceItemsList())
|
||||
.as("DataSource list should contain newly-created database")
|
||||
.extracting(WebElement::getText)
|
||||
.anyMatch(it -> it.contains(dataSourceName)));
|
||||
}
|
||||
|
||||
@Test
|
||||
@Order(20)
|
||||
void testDeleteHiveDataSource() {
|
||||
final DataSourcePage page = new DataSourcePage(browser);
|
||||
|
||||
page.delete(dataSourceName);
|
||||
|
||||
await().untilAsserted(() -> {
|
||||
browser.navigate().refresh();
|
||||
|
||||
assertThat(
|
||||
page.dataSourceItemsList()
|
||||
).noneMatch(
|
||||
it -> it.getText().contains(dataSourceName)
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,117 @@
|
|||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

version: "3.8"

# Compose stack for the Hive datasource E2E test: DolphinScheduler standalone
# server plus a minimal Hadoop/Hive cluster (namenode, datanode, HiveServer2,
# metastore, metastore database), all on a private "e2e" network.
services:
  # System under test; only starts once HiveServer2 reports healthy.
  dolphinscheduler:
    image: apache/dolphinscheduler-standalone-server:ci
    environment:
      MASTER_MAX_CPU_LOAD_AVG: 100
      WORKER_TENANT_AUTO_CREATE: 'true'
    expose:
      - 12345
    networks:
      - e2e
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:12345/actuator/health" ]
      interval: 5s
      timeout: 60s
      retries: 120
    depends_on:
      hive-server:
        condition: service_healthy
  # HDFS namenode; port 50070 is its web UI, used as the health probe.
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop2.7.4-java8
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop-hive.env
    networks:
      - e2e
    expose:
      - "50070"
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:50070/" ]
      interval: 5s
      timeout: 60s
      retries: 120
  # HDFS datanode; waits for the namenode web UI via SERVICE_PRECONDITION.
  datanode:
    image: bde2020/hadoop-datanode:2.0.0-hadoop2.7.4-java8
    env_file:
      - ./hadoop-hive.env
    environment:
      SERVICE_PRECONDITION: "namenode:50070"
    networks:
      - e2e
    expose:
      - "50075"
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:50075" ]
      interval: 5s
      timeout: 60s
      retries: 120
  # HiveServer2 on the default thrift port 10000; health check runs a real
  # "show databases" through beeline so the test only starts when queries work.
  hive-server:
    image: bde2020/hive:2.3.2-postgresql-metastore
    env_file:
      - ./hadoop-hive.env
    networks:
      - e2e
    environment:
      HIVE_CORE_CONF_javax_jdo_option_ConnectionURL: "jdbc:postgresql://hive-metastore/metastore"
      SERVICE_PRECONDITION: "hive-metastore:9083"
    expose:
      - "10000"
    depends_on:
      datanode:
        condition: service_healthy
      namenode:
        condition: service_healthy
    healthcheck:
      test: beeline -u "jdbc:hive2://127.0.0.1:10000/default" -n health_check -e "show databases;"
      interval: 5s
      timeout: 60s
      retries: 120
  # Hive metastore (thrift on 9083), backed by the postgres container below.
  hive-metastore:
    image: bde2020/hive:2.3.2-postgresql-metastore
    env_file:
      - ./hadoop-hive.env
    command: /opt/hive/bin/hive --service metastore
    networks:
      - e2e
    environment:
      SERVICE_PRECONDITION: "namenode:50070 datanode:50075 hive-metastore-postgresql:5432"
    expose:
      - "9083"
    depends_on:
      hive-metastore-postgresql:
        condition: service_healthy
  # PostgreSQL backing store for the Hive metastore schema.
  hive-metastore-postgresql:
    image: bde2020/hive-metastore-postgresql:2.3.0
    networks:
      - e2e
    expose:
      - "5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 60s
      retries: 120


networks:
  e2e:
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Shared environment for the bde2020 Hadoop/Hive images used by
# docker-compose.yaml. The images translate each PREFIX_key variable into an
# entry of the matching config file (hive-site.xml, core-site.xml, ...).
# NOTE(review): in the bde2020 entrypoints the key part is rewritten as
# "___" -> "-", "__" -> "_", "_" -> "." — confirm against the image docs.

# hive-site.xml: point the metastore at the postgres container and expose
# HiveServer2 thrift on 0.0.0.0:10000 (matches the compose healthcheck).
HIVE_SITE_CONF_javax_jdo_option_ConnectionURL=jdbc:postgresql://hive-metastore-postgresql/metastore
HIVE_SITE_CONF_javax_jdo_option_ConnectionDriverName=org.postgresql.Driver
HIVE_SITE_CONF_javax_jdo_option_ConnectionUserName=hive
HIVE_SITE_CONF_javax_jdo_option_ConnectionPassword=hive
HIVE_SITE_CONF_datanucleus_autoCreateSchema=false
HIVE_SITE_CONF_hive_metastore_uris=thrift://hive-metastore:9083
HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
HIVE_SITE_CONF_hive_server2_thrift_bind_host=0.0.0.0
HIVE_SITE_CONF_hive_server2_thrift_port=10000

# core-site.xml: default filesystem is the compose namenode service.
CORE_CONF_fs_defaultFS=hdfs://namenode:8020
CORE_CONF_hadoop_http_staticuser_user=root
CORE_CONF_hadoop_proxyuser_hue_hosts=*
CORE_CONF_hadoop_proxyuser_hue_groups=*
CORE_CONF_hadoop_proxyuser_hive_hosts=*

# hdfs-site.xml: relax permissions for the test cluster.
HDFS_CONF_dfs_webhdfs_enabled=true
HDFS_CONF_dfs_permissions_enabled=false

# yarn-site.xml: kept from the upstream bde2020 sample; no YARN services run
# in this compose stack, so these are effectively inert here.
YARN_CONF_yarn_log___aggregation___enable=true
YARN_CONF_yarn_resourcemanager_recovery_enabled=true
YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
YARN_CONF_yarn_timeline___service_enabled=true
YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
YARN_CONF_yarn_timeline___service_hostname=historyserver
YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
# NOTE(review): "resource__tracker" maps to an underscore, not the dash in
# yarn.resourcemanager.resource-tracker.address; this quirk is inherited from
# the upstream sample and is unused here — left unchanged deliberately.
YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031
|
||||
|
|
@ -40,7 +40,7 @@ services:
|
|||
mc:
|
||||
condition: service_completed_successfully
|
||||
s3:
|
||||
image: minio/minio:latest
|
||||
image: minio/minio:RELEASE.2022-01-25T19-56-04Z
|
||||
hostname: s3
|
||||
tty: true
|
||||
stdin_open: true
|
||||
|
|
@ -58,7 +58,7 @@ services:
|
|||
timeout: 120s
|
||||
retries: 120
|
||||
mc:
|
||||
image: minio/mc:latest
|
||||
image: minio/mc:RELEASE.2022-01-07T06-01-38Z
|
||||
entrypoint: bash
|
||||
networks:
|
||||
- e2e
|
||||
|
|
|
|||
Loading…
Reference in New Issue