Skip to content

Commit

Permalink
Merge pull request #1198 from romainx/fix_spark_java11
Browse files Browse the repository at this point in the history
Fix spark installation for Java 11 and Arrow
  • Loading branch information
romainx authored Dec 26, 2020
2 parents a0a544e + 1dd95ba commit d113a60
Show file tree
Hide file tree
Showing 3 changed files with 79 additions and 1 deletion.
72 changes: 72 additions & 0 deletions all-spark-notebook/test/data/issue_1168.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This is a test for the issue [#1168](https://github.com/jupyter/docker-stacks/issues/1168): `mapInPandas` exercises the Apache Arrow code path, which fails on Java 11 unless `io.netty.tryReflectionSetAccessible=true` is set."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pyspark.sql import SparkSession\n",
    "\n",
    "# Spark session & context\n",
    "spark = SparkSession.builder.master('local').getOrCreate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "+---+---+\n",
      "| id|age|\n",
      "+---+---+\n",
      "|  1| 21|\n",
      "+---+---+\n",
      "\n"
     ]
    }
   ],
   "source": [
    "df = spark.createDataFrame([(1, 21), (2, 30)], (\"id\", \"age\"))\n",
    "def filter_func(iterator):\n",
    "    for pdf in iterator:\n",
    "        yield pdf[pdf.id == 1]\n",
    "\n",
    "df.mapInPandas(filter_func, df.schema).show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
2 changes: 1 addition & 1 deletion all-spark-notebook/test/test_spark_notebooks.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
@pytest.mark.parametrize(
"test_file",
# TODO: add local_sparklyr
["local_pyspark", "local_spylon", "local_sparkR"],
["local_pyspark", "local_spylon", "local_sparkR", "issue_1168"],
)
def test_nbconvert(container, test_file):
"""Check if Spark notebooks can be executed"""
Expand Down
6 changes: 6 additions & 0 deletions pyspark-notebook/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,12 @@ RUN ln -s "spark-${APACHE_SPARK_VERSION}-bin-hadoop${HADOOP_VERSION}" spark && \
mkdir -p /usr/local/bin/before-notebook.d && \
ln -s "${SPARK_HOME}/sbin/spark-config.sh" /usr/local/bin/before-notebook.d/spark-config.sh

# Fix Spark installation for Java 11 and Apache Arrow library.
# Arrow's netty-based allocator needs io.netty.tryReflectionSetAccessible=true
# on Java 11+, so seed spark-defaults.conf from the shipped template and add
# the option for both driver and executor JVMs.
# see: https://github.com/apache/spark/pull/27356, https://spark.apache.org/docs/latest/#downloading
# NOTE: spark-defaults.conf entries are "key value" separated by whitespace;
# do not quote the value or the quotes become part of the JVM option string.
RUN cp -p "${SPARK_HOME}/conf/spark-defaults.conf.template" "${SPARK_HOME}/conf/spark-defaults.conf" && \
    echo 'spark.driver.extraJavaOptions -Dio.netty.tryReflectionSetAccessible=true' >> "${SPARK_HOME}/conf/spark-defaults.conf" && \
    echo 'spark.executor.extraJavaOptions -Dio.netty.tryReflectionSetAccessible=true' >> "${SPARK_HOME}/conf/spark-defaults.conf"

USER $NB_UID

# Install pyarrow
Expand Down

0 comments on commit d113a60

Please sign in to comment.