diff --git a/all-spark-notebook/test/data/issue_1168.ipynb b/all-spark-notebook/test/data/issue_1168.ipynb
new file mode 100644
index 0000000000..52b56f5930
--- /dev/null
+++ b/all-spark-notebook/test/data/issue_1168.ipynb
@@ -0,0 +1,72 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This is a test for the issue [#1168](https://github.com/jupyter/docker-stacks/issues/1168)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from pyspark.sql import SparkSession\n",
+    "from pyspark.sql.functions import pandas_udf\n",
+    "\n",
+    "# Spark session & context\n",
+    "spark = SparkSession.builder.master('local').getOrCreate()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "+---+---+\n",
+      "| id|age|\n",
+      "+---+---+\n",
+      "|  1| 21|\n",
+      "+---+---+\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "df = spark.createDataFrame([(1, 21), (2, 30)], (\"id\", \"age\"))\n",
+    "def filter_func(iterator):\n",
+    "    for pdf in iterator:\n",
+    "        yield pdf[pdf.id == 1]\n",
+    "\n",
+    "df.mapInPandas(filter_func, df.schema).show()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.6"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/all-spark-notebook/test/test_spark_notebooks.py b/all-spark-notebook/test/test_spark_notebooks.py
index 75536ef05e..2e232daf49 100644
--- a/all-spark-notebook/test/test_spark_notebooks.py
+++ b/all-spark-notebook/test/test_spark_notebooks.py
@@ -12,7 +12,7 @@
 @pytest.mark.parametrize(
     "test_file",
     # TODO: add local_sparklyr
-    ["local_pyspark", "local_spylon", "local_sparkR"],
+    ["local_pyspark", "local_spylon", "local_sparkR", "issue_1168"],
 )
 def test_nbconvert(container, test_file):
     """Check if Spark notebooks can be executed"""
diff --git a/pyspark-notebook/Dockerfile b/pyspark-notebook/Dockerfile
index b34f133cf9..c1acbce3a8 100644
--- a/pyspark-notebook/Dockerfile
+++ b/pyspark-notebook/Dockerfile
@@ -49,6 +49,12 @@ RUN ln -s "spark-${APACHE_SPARK_VERSION}-bin-hadoop${HADOOP_VERSION}" spark && \
     mkdir -p /usr/local/bin/before-notebook.d && \
     ln -s "${SPARK_HOME}/sbin/spark-config.sh" /usr/local/bin/before-notebook.d/spark-config.sh
 
+# Fix Spark installation for Java 11 and Apache Arrow library
+# see: https://github.com/apache/spark/pull/27356, https://spark.apache.org/docs/latest/#downloading
+RUN cp -p "$SPARK_HOME/conf/spark-defaults.conf.template" "$SPARK_HOME/conf/spark-defaults.conf" && \
+    echo 'spark.driver.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true"' >> $SPARK_HOME/conf/spark-defaults.conf && \
+    echo 'spark.executor.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true"' >> $SPARK_HOME/conf/spark-defaults.conf
+
 USER $NB_UID
 
 # Install pyarrow
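
Reviewer note on the Dockerfile change: on Java 11, Arrow's Netty-backed memory allocator needs reflective access to java.nio.DirectByteBuffer, which newer JDKs restrict by default; without -Dio.netty.tryReflectionSetAccessible=true, Arrow-backed operations such as the mapInPandas call in the test notebook fail at runtime (the subject of issue #1168). The snippet below is a minimal sketch, not part of the diff, for verifying inside a container built from this Dockerfile that both options from spark-defaults.conf are picked up by a fresh session; the assertions describe how the loaded configuration is expected to look, not output produced by this PR.

    from pyspark.sql import SparkSession

    # Sketch only (not part of the diff): confirm the options written to
    # $SPARK_HOME/conf/spark-defaults.conf are visible to a fresh session.
    spark = SparkSession.builder.master("local").getOrCreate()
    conf = spark.sparkContext.getConf()

    for key in ("spark.driver.extraJavaOptions", "spark.executor.extraJavaOptions"):
        value = conf.get(key, "")
        # Both entries should carry the Netty reflection flag set in the Dockerfile.
        assert "-Dio.netty.tryReflectionSetAccessible=true" in value, key

    spark.stop()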
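A quicker container-side check, without starting a JVM, is to grep for tryReflectionSetAccessible in $SPARK_HOME/conf/spark-defaults.conf. Note that the echo lines write the surrounding double quotes into the conf file as part of the value, which is why the sketch above uses a tolerant substring check rather than an exact comparison.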