# Copyright 2022-2023 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: GraphAr Spark CI
on:
  # Trigger the workflow on push or pull request,
  # but only for the main branch
  push:
    branches:
      - main
    paths:
      - 'spark/**'
      - '.github/workflows/spark.yaml'
  pull_request:
    branches:
      - main
    paths:
      - 'spark/**'
      - '.github/workflows/spark.yaml'
concurrency:
  group: ${{ github.repository }}-${{ github.event.number || github.head_ref || github.sha }}-${{ github.workflow }}
  cancel-in-progress: true
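# Note on the concurrency group: github.event.number and github.head_ref are
# only set for pull_request events, so PR pushes cancel the previous run of
# the same PR, while push events fall back to the commit SHA and every push
# to main keeps its own run.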
jobs:
  GraphAr-spark:
    runs-on: ubuntu-20.04
    strategy:
      fail-fast: false
      matrix:
        include:
          - mvn-profile: "datasources-32"
            spark: "spark-3.2.2"
            spark-hadoop: "spark-3.2.2-bin-hadoop3.2"
          - mvn-profile: "datasources-33"
            spark: "spark-3.3.4"
            spark-hadoop: "spark-3.3.4-bin-hadoop3"
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: true
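          # submodules: true is assumed to pull in the test-data submodule(s)
          # that the tests and examples below read.
      # Spotless enforces the project code style as a fast fail before any
      # build; if this gate trips, running `mvn spotless:apply` locally
      # rewrites the sources in place.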
      - name: Code Format Check
        run: |
          export JAVA_HOME=${JAVA_HOME_11_X64}
          pushd spark
          mvn --no-transfer-progress spotless:check
          popd
      - name: Build GraphAr Spark
        run: |
          export JAVA_HOME=${JAVA_HOME_11_X64}
          pushd spark
          echo "Build ${{ matrix.mvn-profile }}"
          mvn --no-transfer-progress clean package -DskipTests -Dspotless.check.skip=true -P ${{ matrix.mvn-profile }}
          popd
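      # The same artifact can be reproduced locally by picking one matrix
      # profile, e.g.:
      #   cd spark && mvn clean package -DskipTests -P datasources-32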
      - name: Run test
        run: |
          export JAVA_HOME=${JAVA_HOME_11_X64}
          pushd spark
          echo "Test ${{ matrix.mvn-profile }}"
          mvn --no-transfer-progress test -Dspotless.check.skip=true -P ${{ matrix.mvn-profile }}
          popd
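      # The next two steps are end-to-end round-trips: export a live database
      # to GraphAr files, wipe the database, then re-import from the GraphAr
      # files to exercise both directions.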
      - name: Run Neo4j2GraphAr example
        run: |
          export JAVA_HOME=${JAVA_HOME_11_X64}
          pushd spark
          scripts/get-spark-to-home.sh ${{ matrix.spark }} ${{ matrix.spark-hadoop }}
          export SPARK_HOME="${HOME}/${{ matrix.spark-hadoop }}"
          export PATH="${SPARK_HOME}/bin":"${PATH}"
          scripts/get-neo4j-to-home.sh
          export NEO4J_HOME="${HOME}/neo4j-community-4.4.23"
          export PATH="${NEO4J_HOME}/bin":"${PATH}"
          neo4j-admin set-initial-password neo4j
          scripts/deploy-neo4j-movie-data.sh
          scripts/build.sh ${{ matrix.mvn-profile }}
          export NEO4J_USR="neo4j"
          export NEO4J_PWD="neo4j"
          scripts/run-neo4j2graphar.sh
          # clean the movie data and import from GraphAr
          echo "match (a) -[r] -> () delete a, r;match (a) delete a;" | cypher-shell -u ${NEO4J_USR} -p ${NEO4J_PWD} -d neo4j --format plain
          scripts/run-graphar2neo4j.sh
          # stop and clean
          popd
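      # deploy-nebula-default-data.sh is assumed to start a dockerized Nebula
      # cluster (hence the nebula-docker-env_nebula-net network used below)
      # and to load the basketballplayer demo space that the cleanup query
      # later clears.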
      - name: Run Nebula2GraphAr example
        run: |
          export JAVA_HOME=${JAVA_HOME_11_X64}
          pushd spark
          scripts/get-spark-to-home.sh ${{ matrix.spark }} ${{ matrix.spark-hadoop }}
          export SPARK_HOME="${HOME}/${{ matrix.spark-hadoop }}"
          export PATH="${SPARK_HOME}/bin":"${PATH}"
          scripts/get-nebula-to-home.sh
          scripts/deploy-nebula-default-data.sh
          scripts/build.sh ${{ matrix.mvn-profile }}
          scripts/run-nebula2graphar.sh
          # clean the data
          docker run \
            --rm \
            --name nebula-console-loader \
            --network nebula-docker-env_nebula-net \
            vesoft/nebula-console:nightly -addr 172.28.3.1 -port 9669 -u root -p nebula -e "use basketballplayer; clear space basketballplayer;"
          # import from GraphAr
          scripts/run-graphar2nebula.sh
          # stop and clean
          popd
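      # Finally, drive the standalone importer. This step reuses the Spark
      # distribution already unpacked into $HOME by the earlier steps, and
      # neo4j.json is assumed to describe the source data and its mapping
      # into Neo4j.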
      - name: Run Neo4j importer
        run: |
          export JAVA_HOME=${JAVA_HOME_11_X64}
          pushd spark
          export SPARK_HOME="${HOME}/${{ matrix.spark-hadoop }}"
          export PATH="${SPARK_HOME}/bin":"${PATH}"
          scripts/build.sh ${{ matrix.mvn-profile }}
          # run the importer
          cd import
          ./neo4j.sh neo4j.json
          # stop and clean
          popd