@@ -26,7 +26,6 @@ import org.apache.spark.{SparkEnv, TaskContext}
 import org.apache.spark.api.python.{BasePythonRunner, PythonRDD}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.execution.arrow.ArrowWriter
-import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.StructType
 import org.apache.spark.sql.util.ArrowUtils
 import org.apache.spark.util.Utils
@@ -36,8 +35,6 @@ import org.apache.spark.util.Utils
  * JVM (an iterator of internal rows + additional data if required) to Python (Arrow).
  */
 private[python] trait PythonArrowInput[IN] { self: BasePythonRunner[IN, _] =>
-  protected val sqlConf = SQLConf.get
-
   protected val workerConf: Map[String, String]
 
   protected val schema: StructType
@@ -112,10 +109,10 @@ private[python] trait BasicPythonArrowInput extends PythonArrowInput[Iterator[In
   self: BasePythonRunner[Iterator[InternalRow], _] =>
 
   protected def writeIteratorToArrowStream(
-    root: VectorSchemaRoot,
-    writer: ArrowStreamWriter,
-    dataOut: DataOutputStream,
-    inputIterator: Iterator[Iterator[InternalRow]]): Unit = {
+      root: VectorSchemaRoot,
+      writer: ArrowStreamWriter,
+      dataOut: DataOutputStream,
+      inputIterator: Iterator[Iterator[InternalRow]]): Unit = {
     val arrowWriter = ArrowWriter.create(root)
 
     while (inputIterator.hasNext) {
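For context on dropping the `SQLConf.get` field from the trait: `PythonArrowInput` still declares `workerConf: Map[String, String]`, so any SQL settings the Python worker needs can be resolved once on the driver (where `SQLConf.get` reflects the active session) and shipped in that map, rather than read inside executor-side code. A minimal sketch of that pattern; the specific conf entries below are illustrative and not necessarily the ones this change forwards:

```scala
import org.apache.spark.sql.internal.SQLConf

// Driver side: resolve SQL configs from the active session's conf and
// hand them to the runner as a plain Map[String, String].
val conf = SQLConf.get
val workerConf: Map[String, String] = Map(
  // Illustrative keys only; the keys actually forwarded by the runner may differ.
  SQLConf.SESSION_LOCAL_TIMEZONE.key -> conf.sessionLocalTimeZone,
  SQLConf.ARROW_EXECUTION_MAX_RECORDS_PER_BATCH.key ->
    conf.arrowMaxRecordsPerBatch.toString
)
```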