diff --git a/akka/src/main/scala/akka.stream.io.compression.brotli/BrotliCompressor.scala b/akka/src/main/scala/akka.stream.io.compression.brotli/BrotliCompressor.scala
index b8d10cc..1c4ee23 100644
--- a/akka/src/main/scala/akka.stream.io.compression.brotli/BrotliCompressor.scala
+++ b/akka/src/main/scala/akka.stream.io.compression.brotli/BrotliCompressor.scala
@@ -61,52 +61,4 @@ class BrotliCompressor(level: Int = BrotliCompressor.DefaultQuality) extends Com
   /** Make sure any resources have been released */
   override final def close(): Unit = {}
-}
-
-
-/*
-
-
-
- public BrotliOutputStream(OutputStream destination, Encoder.Parameters params)
-
-
-
-
-
- */
-
-
-
-/*
-public BrotliEncoderChannel(WritableByteChannel destination, Encoder.Parameters params)
-    throws IOException {
-
-*/
-
-
-/*
-
-protected def flushWithBuffer(buffer: Array[Byte]): ByteString = {
-    val written = deflater.deflate(buffer, 0, buffer.length, Deflater.SYNC_FLUSH)
-    ByteString.fromArray(buffer, 0, written)
-  }
-
-  private def newTempBuffer(size: Int = 65536): Array[Byte] = {
-    // The default size is somewhat arbitrary, we'd like to guess a better value but Deflater/zlib
-    // is buffering in an unpredictable manner.
-    // `compress` will only return any data if the buffered compressed data has some size in
-    // the region of 10000-50000 bytes.
-    // `flush` and `finish` will return any size depending on the previous input.
-    // This value will hopefully provide a good compromise between memory churn and
-    // excessive fragmentation of ByteStrings.
-    // We also make sure that buffer size stays within a reasonable range, to avoid
-    // draining deflator with too small buffer.
-    new Array[Byte](math.max(size, MinBufferSize))
-  }
-}
-
-*/
-
-
-
+}
\ No newline at end of file
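
For context on the comment block deleted above: it quoted the Brotli Java wrapper entry points (BrotliOutputStream, BrotliEncoderChannel) and a Deflater-style flushWithBuffer helper copied from the zlib compressor. The following is only a rough, hedged sketch of the direction those notes pointed in, not code this PR removes or adds. The org.brotli.wrapper.enc package and the Encoder.Parameters.setQuality setter are assumed from Google's Brotli Java wrapper; the BrotliByteStringBuffer helper itself is hypothetical.

import java.io.ByteArrayOutputStream

import akka.util.ByteString
import org.brotli.wrapper.enc.{ BrotliOutputStream, Encoder } // assumed dependency: Google's Brotli Java wrapper

/** Hypothetical helper (not part of this PR): collects Brotli output in memory
 *  and hands it back as ByteStrings, mirroring the Deflater-style
 *  flushWithBuffer idea from the removed comment block.
 */
final class BrotliByteStringBuffer(quality: Int) {
  private val sink   = new ByteArrayOutputStream()
  private val params = new Encoder.Parameters().setQuality(quality) // assumed builder-style setter
  // Constructor signature as quoted in the removed comments:
  // BrotliOutputStream(OutputStream destination, Encoder.Parameters params)
  private val brotli = new BrotliOutputStream(sink, params)

  /** Compress `input` and return whatever the encoder has emitted so far. */
  def compressAndFlush(input: Array[Byte]): ByteString = {
    brotli.write(input)
    brotli.flush() // ask the encoder to push buffered output into `sink`
    drain()
  }

  /** Finish the stream and return the trailing bytes of the Brotli frame. */
  def finish(): ByteString = {
    brotli.close()
    drain()
  }

  private def drain(): ByteString = {
    val out = ByteString.fromArray(sink.toByteArray)
    sink.reset() // drop bytes already handed out
    out
  }
}

Whether a heap-buffered adapter like this is appropriate inside an Akka Streams stage is exactly the trade-off the removed newTempBuffer comment discussed for Deflater; the sketch only shows the wiring, not a tuned buffering strategy.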