From b58175b6f45b0fcf7c283e56c54cf5bcface630d Mon Sep 17 00:00:00 2001
From: daragu
Date: Mon, 13 May 2024 16:05:20 +0800
Subject: [PATCH] [Demo] Make start_demo script easier to call

---
 demo/start_demo.sh     | 7 ++++++-
 website/docs/how-to.md | 6 +++---
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/demo/start_demo.sh b/demo/start_demo.sh
index a22398ea..2232ae85 100755
--- a/demo/start_demo.sh
+++ b/demo/start_demo.sh
@@ -16,7 +16,12 @@
 # limitations under the License.
 #
 ## Create the required jars for the demo and copy them into a directory we'll mount in our notebook container
-cd .. && mvn install -am -pl core -DskipTests -T 2
+
+CURRENT_DIR="$( cd "$(dirname "$0")" ; pwd -P )"
+XTABLE_HOME="$( cd "$(dirname "$CURRENT_DIR")" ; pwd -P )"
+cd "$XTABLE_HOME"
+
+mvn install -am -pl xtable-core -DskipTests -T 2
 mkdir -p demo/jars
 cp xtable-hudi-support/xtable-hudi-support-utils/target/xtable-hudi-support-utils-0.1.0-SNAPSHOT.jar demo/jars
 cp xtable-api/target/xtable-api-0.1.0-SNAPSHOT.jar demo/jars
diff --git a/website/docs/how-to.md b/website/docs/how-to.md
index 6e57fcb6..5e457c0f 100644
--- a/website/docs/how-to.md
+++ b/website/docs/how-to.md
@@ -105,7 +105,7 @@ from pyspark.sql.types import *
 
 # initialize the bucket
 table_name = "people"
-local_base_path = "/tmp/hudi-dataset"
+local_base_path = "file:/tmp/hudi-dataset"
 
 records = [
     (1, 'John', 25, 'NYC', '2023-09-28 00:00:00'),
@@ -148,7 +148,7 @@ from pyspark.sql.types import *
 
 # initialize the bucket
 table_name = "people"
-local_base_path = "/tmp/delta-dataset"
+local_base_path = "file:/tmp/delta-dataset"
 
 records = [
     (1, 'John', 25, 'NYC', '2023-09-28 00:00:00'),
@@ -185,7 +185,7 @@ from pyspark.sql.types import *
 
 # initialize the bucket
 table_name = "people"
-local_base_path = "/tmp/iceberg-dataset"
+local_base_path = "file:/tmp/iceberg-dataset"
 
 records = [
     (1, 'John', 25, 'NYC', '2023-09-28 00:00:00'),
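
With this change the script resolves XTABLE_HOME from its own location instead
of assuming it is run from inside demo/, so it can be invoked from any working
directory. A quick way to verify, assuming a checkout at ~/incubator-xtable
(the checkout path is illustrative):

    # from the repository root
    ./demo/start_demo.sh

    # from anywhere else; the script cds to the repo root on its own
    cd /tmp && ~/incubator-xtable/demo/start_demo.sh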