chore(core): Add unit and bench tests for concurrent tasks #5945

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

name: Service Test HDFS

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
    paths:
      - "core/src/**"
      - "core/tests/**"
      - "!core/src/docs/**"
      - "!core/src/services/**"
      - "core/src/services/hdfs/**"
      - ".github/workflows/service_test_hdfs.yml"
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}
  cancel-in-progress: true

jobs:
  hdfs-cluster:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Configure Hdfs
        # namenode will use ports: 9870, 9000, 8020
        # datanode will use ports: 9864
        run: |
          docker run -d \
            --name namenode \
            --network host \
            -e CLUSTER_NAME=test \
            -e WEBHDFS_CONF_dfs_webhdfs_enabled=true \
            -e CORE_CONF_hadoop_http_staticuser_user=root \
            -e HDFS_CONF_dfs_permissions_enabled=false \
            -e HDFS_CONF_dfs_support_append=true \
            -e HDFS_CONF_dfs_replication=1 \
            bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
          docker run -d \
            --name datanode \
            --network host \
            -e CLUSTER_NAME=test \
            -e WEBHDFS_CONF_dfs_webhdfs_enabled=true \
            -e CORE_CONF_hadoop_http_staticuser_user=root \
            -e HDFS_CONF_dfs_permissions_enabled=false \
            -e HDFS_CONF_dfs_support_append=true \
            -e HDFS_CONF_dfs_replication=1 \
            bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
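          # Wait until the namenode web UI responds, so the cluster is up before tests run.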
          curl --retry 30 --retry-delay 1 --retry-connrefused http://localhost:9870
      - name: Setup Rust toolchain
        uses: ./.github/actions/setup
        with:
          need-nextest: true
      - name: Setup java env
        uses: actions/setup-java@v4
        with:
          distribution: temurin
          java-version: "11"
      - name: Setup hadoop env
        shell: bash
        run: |
          curl -LsSf https://dlcdn.apache.org/hadoop/common/hadoop-3.2.4/hadoop-3.2.4.tar.gz | tar zxf - -C /home/runner
      - name: Test
        shell: bash
        working-directory: core
        run: |
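          # The HDFS binding loads the JVM and Hadoop native libraries at
          # runtime, so libjvm and libhdfs must be on LD_LIBRARY_PATH and
          # the Hadoop jars on CLASSPATH.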
          export CLASSPATH=$(${HADOOP_HOME}/bin/hadoop classpath --glob)
          export LD_LIBRARY_PATH=${{ env.JAVA_HOME }}/lib/server:${{ env.HADOOP_HOME }}/lib/native
          cp ${{ github.workspace }}/fixtures/hdfs/hdfs-site.xml ${{ env.HADOOP_HOME }}/etc/hadoop/hdfs-site.xml
          cargo test behavior --features tests,services-hdfs
        env:
          HADOOP_HOME: "/home/runner/hadoop-3.2.4"
          OPENDAL_TEST: hdfs
          OPENDAL_HDFS_ROOT: /tmp/opendal/
          OPENDAL_HDFS_NAME_NODE: hdfs://localhost:8020
          OPENDAL_HDFS_ENABLE_APPEND: true
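
  # Same behavior tests against the "default" name node, with an atomic
  # write dir configured and append disabled.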
  hdfs-default-with-atomic-write-dir:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Rust toolchain
        uses: ./.github/actions/setup
        with:
          need-nextest: true
      - name: Setup java env
        uses: actions/setup-java@v4
        with:
          distribution: temurin
          java-version: "11"
      - name: Setup hadoop env
        shell: bash
        run: |
          curl -LsSf https://dlcdn.apache.org/hadoop/common/hadoop-3.3.5/hadoop-3.3.5.tar.gz | tar zxf - -C /home/runner
      - name: Test
        shell: bash
        working-directory: core
        run: |
          export CLASSPATH=$(${HADOOP_HOME}/bin/hadoop classpath --glob)
          export LD_LIBRARY_PATH=${{ env.JAVA_HOME }}/lib/server:${{ env.HADOOP_HOME }}/lib/native
          cp ${{ github.workspace }}/fixtures/hdfs/hdfs-site.xml ${{ env.HADOOP_HOME }}/etc/hadoop/hdfs-site.xml
          cargo test behavior --features tests,services-hdfs
        env:
          HADOOP_HOME: "/home/runner/hadoop-3.3.5"
          OPENDAL_TEST: hdfs
          OPENDAL_HDFS_ROOT: /tmp/opendal/
          OPENDAL_HDFS_ATOMIC_WRITE_DIR: /tmp/atomic_write_dir/opendal/
          OPENDAL_HDFS_NAME_NODE: default
          OPENDAL_HDFS_ENABLE_APPEND: false
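
To reproduce the second job locally, a minimal sketch of the same invocation, run from the repository root; it assumes a Hadoop 3.3.5 tarball unpacked under $HOME and JAVA_HOME pointing at a JDK 11 (both paths are illustrative, not part of the workflow):

    export HADOOP_HOME=$HOME/hadoop-3.3.5   # illustrative install location
    export CLASSPATH=$(${HADOOP_HOME}/bin/hadoop classpath --glob)
    export LD_LIBRARY_PATH=${JAVA_HOME}/lib/server:${HADOOP_HOME}/lib/native
    # Same fixture config the workflow copies into the Hadoop install.
    cp fixtures/hdfs/hdfs-site.xml ${HADOOP_HOME}/etc/hadoop/hdfs-site.xml
    export OPENDAL_TEST=hdfs
    export OPENDAL_HDFS_ROOT=/tmp/opendal/
    export OPENDAL_HDFS_ATOMIC_WRITE_DIR=/tmp/atomic_write_dir/opendal/
    export OPENDAL_HDFS_NAME_NODE=default
    export OPENDAL_HDFS_ENABLE_APPEND=false
    cd core && cargo test behavior --features tests,services-hdfs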