diff --git a/.scala-steward.conf b/.scala-steward.conf index 9269601a91..71751bb199 100644 --- a/.scala-steward.conf +++ b/.scala-steward.conf @@ -7,8 +7,8 @@ updates.ignore = [ ] updates.pin = [ - { groupId = "com.fasterxml.jackson.core", version = "2.15." } - { groupId = "com.fasterxml.jackson.datatype", version = "2.15." } + { groupId = "com.fasterxml.jackson.core", version = "2.17." } + { groupId = "com.fasterxml.jackson.datatype", version = "2.17." } // v10 switches to Play 3 { groupId = "com.github.jwt-scala", version = "9.4." } ] diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectionProvider.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectionProvider.scala index a0d8de1a67..2bd209942d 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectionProvider.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectionProvider.scala @@ -13,7 +13,7 @@ import javax.net.ssl.{SSLContext, TrustManager} import scala.annotation.tailrec import scala.collection.immutable -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Only for internal implementations @@ -121,7 +121,7 @@ final class AmqpDetailsConnectionProvider private ( copy(connectionName = Option(name)) override def get: Connection = { - import scala.collection.JavaConverters._ + import scala.jdk.CollectionConverters._ val factory = new ConnectionFactory credentials.foreach { credentials => factory.setUsername(credentials.username) @@ -331,7 +331,6 @@ final class AmqpConnectionFactoryConnectionProvider private (val factory: Connec copy(hostAndPorts = hostAndPorts.asScala.map(_.toScala).toIndexedSeq) override def get: Connection = { - import scala.collection.JavaConverters._ factory.newConnection(hostAndPortList.map(hp => new Address(hp._1, hp._2)).asJava) } diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectorSettings.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectorSettings.scala index 5f03dee3d3..a261611cd6 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectorSettings.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/AmqpConnectorSettings.scala @@ -5,11 +5,11 @@ package akka.stream.alpakka.amqp import akka.annotation.InternalApi -import akka.util.JavaDurationConverters._ -import scala.collection.JavaConverters._ import scala.collection.immutable import scala.concurrent.duration._ +import scala.jdk.CollectionConverters._ +import scala.jdk.DurationConverters._ /** * Internal API @@ -221,8 +221,9 @@ final class AmqpWriteSettings private ( /** * Java API */ - def withConfirmationTimeout(confirmationTimeout: java.time.Duration): AmqpWriteSettings = - copy(confirmationTimeout = confirmationTimeout.asScala) + def withConfirmationTimeout(confirmationTimeout: java.time.Duration): AmqpWriteSettings = { + copy(confirmationTimeout = confirmationTimeout.toScala) + } private def copy(connectionProvider: AmqpConnectionProvider = connectionProvider, exchange: Option[String] = exchange, diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpConnectorLogic.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpConnectorLogic.scala index c22539e23f..bf93f56a2a 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpConnectorLogic.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpConnectorLogic.scala @@ -32,7 +32,7 @@ private trait AmqpConnectorLogic { this: GraphStageLogic => connection.addShutdownListener(shutdownListener) 
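The bulk of this change set is mechanical: the deprecated `scala.collection.JavaConverters` becomes the stdlib `scala.jdk.CollectionConverters`, and the `asScala`/`asJava` extension methods keep working unchanged. A minimal standalone sketch of that conversion, not taken from this diff (the object name is illustrative):

```scala
import scala.jdk.CollectionConverters._

object CollectionConvertersSketch extends App {
  val javaList = new java.util.ArrayList[String]()
  javaList.add("amqp")

  // Java -> Scala: asScala yields a mutable Buffer view over the Java list
  val scalaSeq: Seq[String] = javaList.asScala.toSeq

  // Scala -> Java: asJava wraps the Scala collection for Java callers
  val backToJava: java.util.List[String] = scalaSeq.asJava

  println(scalaSeq)   // List(amqp)
  println(backToJava) // [amqp]
}
```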
channel.addShutdownListener(shutdownListener) - import scala.collection.JavaConverters._ + import scala.jdk.CollectionConverters._ settings.declarations.foreach { case d: QueueDeclaration => diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSourceStage.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSourceStage.scala index 7a3690aa70..bb01248d90 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSourceStage.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/impl/AmqpSourceStage.scala @@ -50,7 +50,7 @@ private[amqp] final class AmqpSourceStage(settings: AmqpSourceSettings, bufferSi private var unackedMessages = 0 override def whenConnected(): Unit = { - import scala.collection.JavaConverters._ + import scala.jdk.CollectionConverters._ channel.basicQos(bufferSize) val consumerCallback = getAsyncCallback(handleDelivery) diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlow.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlow.scala index 07e7d07342..bb931eb02c 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlow.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlow.scala @@ -11,7 +11,7 @@ import akka.japi.Pair import akka.stream.alpakka.amqp._ import akka.stream.scaladsl.Keep -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ object AmqpFlow { @@ -29,7 +29,7 @@ object AmqpFlow { def create( settings: AmqpWriteSettings ): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] = - akka.stream.alpakka.amqp.scaladsl.AmqpFlow(settings).mapMaterializedValue(f => f.toJava).asJava + akka.stream.alpakka.amqp.scaladsl.AmqpFlow(settings).mapMaterializedValue(f => f.asJava).asJava /** * Creates an `AmqpFlow` that accepts `WriteMessage` elements and emits `WriteResult`. 
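Every `mapMaterializedValue(_.toJava)` → `mapMaterializedValue(_.asJava)` hunk in these javadsl facades is the same rename: `scala.compat.java8.FutureConverters` called the conversions `toJava`/`toScala`, while the standard library's `scala.jdk.FutureConverters` calls them `asJava`/`asScala`. A round-trip sketch with illustrative names, not code from this PR:

```scala
import java.util.concurrent.CompletionStage
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.FutureConverters._

object FutureConvertersSketch extends App {
  implicit val ec: ExecutionContext = ExecutionContext.global

  val scalaFuture: Future[Int] = Future.successful(42)

  // Future -> CompletionStage, the shape behind mapMaterializedValue(_.asJava)
  val stage: CompletionStage[Int] = scalaFuture.asJava

  // CompletionStage -> Future for the opposite direction
  val roundTrip: Future[Int] = stage.asScala
  roundTrip.foreach(println) // 42
}
```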
@@ -54,7 +54,7 @@ object AmqpFlow { ): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] = akka.stream.alpakka.amqp.scaladsl.AmqpFlow .withConfirm(settings = settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -80,7 +80,7 @@ object AmqpFlow { ): akka.stream.javadsl.Flow[WriteMessage, WriteResult, CompletionStage[Done]] = akka.stream.alpakka.amqp.scaladsl.AmqpFlow .withConfirmUnordered(settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -103,6 +103,6 @@ object AmqpFlow { .withConfirmAndPassThroughUnordered[T](settings = settings) )(Keep.right) .map { case (writeResult, passThrough) => Pair(writeResult, passThrough) } - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlowWithContext.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlowWithContext.scala index 943e5f9975..121157c2d6 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlowWithContext.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpFlowWithContext.scala @@ -9,7 +9,7 @@ import java.util.concurrent.CompletionStage import akka.Done import akka.stream.alpakka.amqp._ -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ object AmqpFlowWithContext { @@ -23,7 +23,7 @@ object AmqpFlowWithContext { ): akka.stream.javadsl.FlowWithContext[WriteMessage, T, WriteResult, T, CompletionStage[Done]] = akka.stream.alpakka.amqp.scaladsl.AmqpFlowWithContext .apply(settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -40,6 +40,6 @@ object AmqpFlowWithContext { ): akka.stream.javadsl.FlowWithContext[WriteMessage, T, WriteResult, T, CompletionStage[Done]] = akka.stream.alpakka.amqp.scaladsl.AmqpFlowWithContext .withConfirm(settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala index 91f2527167..12016cf3dd 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpRpcFlow.scala @@ -10,7 +10,7 @@ import akka.stream.alpakka.amqp._ import akka.stream.javadsl.Flow import akka.util.ByteString -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ object AmqpRpcFlow { @@ -27,7 +27,7 @@ object AmqpRpcFlow { repliesPerMessage: Int): Flow[ByteString, ByteString, CompletionStage[String]] = akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow .simple(settings, repliesPerMessage) - .mapMaterializedValue(f => f.toJava) + .mapMaterializedValue(f => f.asJava) .asJava /** @@ -39,7 +39,7 @@ object AmqpRpcFlow { bufferSize: Int): Flow[WriteMessage, ReadResult, CompletionStage[String]] = akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow .atMostOnceFlow(settings, bufferSize) - .mapMaterializedValue(f => f.toJava) + .mapMaterializedValue(f => f.asJava) .asJava /** @@ -52,7 +52,7 @@ object AmqpRpcFlow { repliesPerMessage: Int): Flow[WriteMessage, ReadResult, CompletionStage[String]] = akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow .atMostOnceFlow(settings, bufferSize, repliesPerMessage) - .mapMaterializedValue(f => f.toJava) + .mapMaterializedValue(f => f.asJava) .asJava /** @@ -73,7 +73,7 @@ object AmqpRpcFlow { ): Flow[WriteMessage, CommittableReadResult, 
CompletionStage[String]] = akka.stream.alpakka.amqp.scaladsl.AmqpRpcFlow .committableFlow(settings, bufferSize, repliesPerMessage) - .mapMaterializedValue(f => f.toJava) + .mapMaterializedValue(f => f.asJava) .map(cm => new CommittableReadResult(cm)) .asJava diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSink.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSink.scala index 0e75fefe90..0067d14f7a 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSink.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/AmqpSink.scala @@ -10,7 +10,7 @@ import akka.Done import akka.stream.alpakka.amqp._ import akka.util.ByteString -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ object AmqpSink { @@ -21,7 +21,7 @@ object AmqpSink { * either normally or because of an amqp failure. */ def create(settings: AmqpWriteSettings): akka.stream.javadsl.Sink[WriteMessage, CompletionStage[Done]] = - akka.stream.alpakka.amqp.scaladsl.AmqpSink(settings).mapMaterializedValue(f => f.toJava).asJava + akka.stream.alpakka.amqp.scaladsl.AmqpSink(settings).mapMaterializedValue(f => f.asJava).asJava /** * Creates an `AmqpSink` that accepts `ByteString` elements. @@ -30,7 +30,7 @@ object AmqpSink { * either normally or because of an amqp failure. */ def createSimple(settings: AmqpWriteSettings): akka.stream.javadsl.Sink[ByteString, CompletionStage[Done]] = - akka.stream.alpakka.amqp.scaladsl.AmqpSink.simple(settings).mapMaterializedValue(f => f.toJava).asJava + akka.stream.alpakka.amqp.scaladsl.AmqpSink.simple(settings).mapMaterializedValue(f => f.asJava).asJava /** * Connects to an AMQP server upon materialization and sends incoming messages to the server. @@ -43,6 +43,6 @@ object AmqpSink { def createReplyTo( settings: AmqpReplyToSinkSettings ): akka.stream.javadsl.Sink[WriteMessage, CompletionStage[Done]] = - akka.stream.alpakka.amqp.scaladsl.AmqpSink.replyTo(settings).mapMaterializedValue(f => f.toJava).asJava + akka.stream.alpakka.amqp.scaladsl.AmqpSink.replyTo(settings).mapMaterializedValue(f => f.asJava).asJava } diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/CommittableReadResult.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/CommittableReadResult.scala index eab0170326..a41fda9131 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/CommittableReadResult.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/javadsl/CommittableReadResult.scala @@ -10,15 +10,15 @@ import akka.Done import akka.stream.alpakka.amqp.ReadResult import akka.stream.alpakka.amqp.scaladsl -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ final class CommittableReadResult(cm: scaladsl.CommittableReadResult) { val message: ReadResult = cm.message def ack(): CompletionStage[Done] = ack(false) - def ack(multiple: Boolean): CompletionStage[Done] = cm.ack(multiple).toJava + def ack(multiple: Boolean): CompletionStage[Done] = cm.ack(multiple).asJava def nack(): CompletionStage[Done] = nack(false, true) def nack(multiple: Boolean, requeue: Boolean): CompletionStage[Done] = - cm.nack(multiple, requeue).toJava + cm.nack(multiple, requeue).asJava } diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala index f66360379c..2cb05c8d72 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala +++ 
b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpRpcFlow.scala @@ -4,11 +4,11 @@ package akka.stream.alpakka.amqp.scaladsl -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.amqp._ import akka.stream.scaladsl.{Flow, Keep} import akka.util.ByteString +import scala.concurrent.ExecutionContext import scala.concurrent.Future object AmqpRpcFlow { @@ -39,7 +39,7 @@ object AmqpRpcFlow { repliesPerMessage: Int = 1): Flow[WriteMessage, ReadResult, Future[String]] = committableFlow(settings, bufferSize, repliesPerMessage) .mapAsync(1) { cm => - cm.ack().map(_ => cm.message)(ExecutionContexts.parasitic) + cm.ack().map(_ => cm.message)(ExecutionContext.parasitic) } /** diff --git a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSource.scala b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSource.scala index 45472dd0ad..f7a8071e6f 100644 --- a/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSource.scala +++ b/amqp/src/main/scala/akka/stream/alpakka/amqp/scaladsl/AmqpSource.scala @@ -5,7 +5,6 @@ package akka.stream.alpakka.amqp.scaladsl import akka.NotUsed -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.amqp.impl import akka.stream.alpakka.amqp.{AmqpSourceSettings, ReadResult} import akka.stream.scaladsl.Source @@ -13,7 +12,7 @@ import akka.stream.scaladsl.Source import scala.concurrent.ExecutionContext object AmqpSource { - private implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic + private implicit val executionContext: ExecutionContext = ExecutionContext.parasitic /** * Scala API: Convenience for "at-most once delivery" semantics. Each message is acked to RabbitMQ diff --git a/amqp/src/test/java/akka/stream/alpakka/amqp/javadsl/AmqpConnectorsTest.java b/amqp/src/test/java/akka/stream/alpakka/amqp/javadsl/AmqpConnectorsTest.java index 11d6972f43..3944be8612 100644 --- a/amqp/src/test/java/akka/stream/alpakka/amqp/javadsl/AmqpConnectorsTest.java +++ b/amqp/src/test/java/akka/stream/alpakka/amqp/javadsl/AmqpConnectorsTest.java @@ -24,8 +24,8 @@ import akka.util.ByteString; import com.rabbitmq.client.AuthenticationFailureException; import org.junit.*; -import scala.collection.JavaConverters; import scala.concurrent.duration.Duration; +import scala.jdk.javaapi.CollectionConverters; import java.net.ConnectException; import java.util.Arrays; @@ -161,10 +161,7 @@ public void publishAndConsumeRpcWithoutAutoAck() throws Exception { .to(amqpSink) .run(system); - List probeResult = - JavaConverters.seqAsJavaListConverter( - result.second().toStrict(Duration.create(3, TimeUnit.SECONDS))) - .asJava(); + List probeResult = CollectionConverters.asJavaCollection(result.second().toStrict(Duration.create(3, TimeUnit.SECONDS))).stream().toList(); assertEquals( probeResult.stream().map(s -> s.bytes().utf8String()).collect(Collectors.toList()), input); sourceToSink.shutdown(); diff --git a/amqp/src/test/java/akka/stream/alpakka/amqp/javadsl/AmqpFlowTest.java b/amqp/src/test/java/akka/stream/alpakka/amqp/javadsl/AmqpFlowTest.java index 13ade9e64a..ff94ae8783 100644 --- a/amqp/src/test/java/akka/stream/alpakka/amqp/javadsl/AmqpFlowTest.java +++ b/amqp/src/test/java/akka/stream/alpakka/amqp/javadsl/AmqpFlowTest.java @@ -30,7 +30,8 @@ import akka.stream.testkit.TestSubscriber; import akka.stream.testkit.javadsl.TestSink; import akka.util.ByteString; -import scala.collection.JavaConverters; + +import scala.jdk.javaapi.CollectionConverters; /** Needs a local running AMQP server on the default port with no password. 
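`akka.dispatch.ExecutionContexts.parasitic` was Akka's copy of what Scala 2.13 ships as `scala.concurrent.ExecutionContext.parasitic`: an executor that runs callbacks synchronously on the thread completing the future, taking no thread of its own. That makes it appropriate only for cheap, non-blocking continuations like the `map(_ => cm.message)` hunks above. A tiny standalone sketch (assuming any trivial mapping, not this PR's code):

```scala
import scala.concurrent.{ExecutionContext, Future}

object ParasiticSketch extends App {
  // Cheap transformation: parasitic runs it inline on the completing thread,
  // so no dispatcher round-trip is paid for a trivial map.
  val mapped: Future[String] =
    Future.unit.map(_ => "acked")(ExecutionContext.parasitic)

  println(mapped.value) // Some(Success(acked)) - already completed
}
```

Blocking or CPU-heavy work should still go to a real dispatcher; parasitic would run it on whatever thread happened to complete the upstream future.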
*/ public class AmqpFlowTest { @@ -86,7 +87,7 @@ private void shouldEmitConfirmationForPublishedMessages( result .request(input.size()) - .expectNextN(JavaConverters.asScalaBufferConverter(expectedOutput).asScala().toList()); + .expectNextN(CollectionConverters.asScala(expectedOutput).toList()); } @Test @@ -120,7 +121,7 @@ private void shouldPropagateContext( result .request(input.size()) - .expectNextN(JavaConverters.asScalaBufferConverter(expectedOutput).asScala().toList()); + .expectNextN(CollectionConverters.asScala(expectedOutput).toList()); } @Test @@ -143,6 +144,6 @@ public void shouldPropagatePassThrough() { result .request(input.size()) - .expectNextN(JavaConverters.asScalaBufferConverter(expectedOutput).asScala().toList()); + .expectNextN(CollectionConverters.asScala(expectedOutput).toList()); } } diff --git a/amqp/src/test/scala/akka/stream/alpakka/amqp/AmqpSpec.scala b/amqp/src/test/scala/akka/stream/alpakka/amqp/AmqpSpec.scala index c200c1b5fd..9464ff26f1 100644 --- a/amqp/src/test/scala/akka/stream/alpakka/amqp/AmqpSpec.scala +++ b/amqp/src/test/scala/akka/stream/alpakka/amqp/AmqpSpec.scala @@ -5,7 +5,6 @@ package akka.stream.alpakka.amqp import akka.actor.ActorSystem -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.testkit.scaladsl.LogCapturing import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.ScalaFutures @@ -17,7 +16,7 @@ import scala.concurrent.ExecutionContext abstract class AmqpSpec extends AnyWordSpec with Matchers with BeforeAndAfterAll with ScalaFutures with LogCapturing { implicit val system: ActorSystem = ActorSystem(this.getClass.getSimpleName) - implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic + implicit val executionContext: ExecutionContext = ExecutionContext.parasitic override protected def afterAll(): Unit = system.terminate() diff --git a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala index 6422419240..c7a0e35c73 100644 --- a/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala +++ b/amqp/src/test/scala/akka/stream/alpakka/amqp/scaladsl/AmqpGraphStageLogicConnectionShutdownSpec.scala @@ -7,7 +7,6 @@ package akka.stream.alpakka.amqp.scaladsl import java.util.concurrent.ExecutorService import java.util.concurrent.atomic.AtomicInteger import akka.actor.ActorSystem -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.amqp.{ AmqpCachedConnectionProvider, AmqpConnectionFactoryConnectionProvider, @@ -40,7 +39,7 @@ class AmqpGraphStageLogicConnectionShutdownSpec with LogCapturing { override implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds) - private implicit val executionContext: ExecutionContext = ExecutionContexts.parasitic + private implicit val executionContext: ExecutionContext = ExecutionContext.parasitic val shutdownsAdded = new AtomicInteger() val shutdownsRemoved = new AtomicInteger() diff --git a/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/scaladsl/EventBridgePublisher.scala b/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/scaladsl/EventBridgePublisher.scala index c619171af5..48a5b97490 100644 --- a/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/scaladsl/EventBridgePublisher.scala +++ 
b/aws-event-bridge/src/main/scala/akka/stream/alpakka/aws/eventbridge/scaladsl/EventBridgePublisher.scala @@ -11,7 +11,7 @@ import software.amazon.awssdk.services.eventbridge.EventBridgeAsyncClient import software.amazon.awssdk.services.eventbridge.model._ import scala.concurrent.Future -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ /** * Scala API @@ -55,7 +55,7 @@ object EventBridgePublisher { settings: EventBridgePublishSettings )(implicit eventBridgeClient: EventBridgeAsyncClient): Flow[PutEventsRequest, PutEventsResponse, NotUsed] = Flow[PutEventsRequest] - .mapAsync(settings.concurrency)(eventBridgeClient.putEvents(_).toScala) + .mapAsync(settings.concurrency)(eventBridgeClient.putEvents(_).asScala) /** * Creates a [[akka.stream.scaladsl.Flow Flow]] to publish messages to an EventBridge. diff --git a/awslambda/src/main/scala/akka/stream/alpakka/awslambda/scaladsl/AwsLambdaFlow.scala b/awslambda/src/main/scala/akka/stream/alpakka/awslambda/scaladsl/AwsLambdaFlow.scala index 19a509f547..2fcb8b36de 100644 --- a/awslambda/src/main/scala/akka/stream/alpakka/awslambda/scaladsl/AwsLambdaFlow.scala +++ b/awslambda/src/main/scala/akka/stream/alpakka/awslambda/scaladsl/AwsLambdaFlow.scala @@ -8,7 +8,7 @@ import akka.NotUsed import akka.stream.scaladsl.Flow import software.amazon.awssdk.services.lambda.model.{InvokeRequest, InvokeResponse} import software.amazon.awssdk.services.lambda.LambdaAsyncClient -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ object AwsLambdaFlow { @@ -18,6 +18,6 @@ object AwsLambdaFlow { def apply( parallelism: Int )(implicit awsLambdaClient: LambdaAsyncClient): Flow[InvokeRequest, InvokeResponse, NotUsed] = - Flow[InvokeRequest].mapAsyncUnordered(parallelism)(awsLambdaClient.invoke(_).toScala) + Flow[InvokeRequest].mapAsyncUnordered(parallelism)(awsLambdaClient.invoke(_).asScala) } diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSourceStage.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSourceStage.scala index b50810a3b3..a941345cc3 100644 --- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSourceStage.scala +++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/impl/AzureQueueSourceStage.scala @@ -35,7 +35,7 @@ import scala.collection.mutable.Queue retrieveMessages() def retrieveMessages(): Unit = { - import scala.collection.JavaConverters._ + import scala.jdk.CollectionConverters._ val res = cloudQueueBuilt .retrieveMessages(settings.batchSize, settings.initialVisibilityTimeout, null, null) .asScala diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSink.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSink.scala index d54a2dafc1..fdcfe83038 100644 --- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSink.scala +++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/javadsl/AzureQueueSink.scala @@ -26,8 +26,8 @@ object AzureQueueSink { */ private[javadsl] def fromFunction[T](f: T => Unit): Sink[T, CompletionStage[Done]] = { import akka.stream.alpakka.azure.storagequeue.scaladsl.{AzureQueueSink => AzureQueueSinkScalaDSL} - import scala.compat.java8.FutureConverters._ - AzureQueueSinkScalaDSL.fromFunction(f).mapMaterializedValue(_.toJava).asJava + import 
scala.jdk.FutureConverters._ + AzureQueueSinkScalaDSL.fromFunction(f).mapMaterializedValue(_.asJava).asJava } } diff --git a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/settings.scala b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/settings.scala index de65edbcd4..b736fe6f7e 100644 --- a/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/settings.scala +++ b/azure-storage-queue/src/main/scala/akka/stream/alpakka/azure/storagequeue/settings.scala @@ -7,7 +7,7 @@ package akka.stream.alpakka.azure.storagequeue import java.time.{Duration => JavaDuration} import java.util.Optional -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.duration.{Duration, FiniteDuration} /** Settings for AzureQueueSource @@ -43,7 +43,7 @@ final class AzureQueueSourceSettings private ( * Java API */ def getRetrieveRetryTimeout(): Optional[JavaDuration] = - retrieveRetryTimeout.map(d => JavaDuration.ofNanos(d.toNanos)).asJava + retrieveRetryTimeout.map(d => JavaDuration.ofNanos(d.toNanos)).toJava private def copy(batchSize: Int = batchSize, retrieveRetryTimeout: Option[FiniteDuration] = retrieveRetryTimeout) = new AzureQueueSourceSettings(initialVisibilityTimeout, batchSize, retrieveRetryTimeout) diff --git a/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala b/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala index 879b72a782..df2b8d940f 100644 --- a/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala +++ b/azure-storage-queue/src/test/scala/docs/scaladsl/AzureQueueSpec.scala @@ -15,7 +15,7 @@ import com.microsoft.azure.storage._ import com.microsoft.azure.storage.queue._ import org.scalatest._ -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent._ import scala.concurrent.duration._ import scala.util.Properties diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/AkkaDiscoverySessionProvider.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/AkkaDiscoverySessionProvider.scala index 727be3a5aa..d37d85a22c 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/AkkaDiscoverySessionProvider.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/AkkaDiscoverySessionProvider.scala @@ -7,14 +7,14 @@ package akka.stream.alpakka.cassandra import akka.ConfigurationException import akka.actor.{ActorSystem, ClassicActorSystemProvider} import akka.discovery.Discovery -import akka.util.JavaDurationConverters._ import com.datastax.oss.driver.api.core.CqlSession import com.typesafe.config.{Config, ConfigFactory} import scala.collection.immutable -import scala.compat.java8.FutureConverters._ import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ExecutionContext, Future} +import scala.jdk.DurationConverters._ +import scala.jdk.FutureConverters._ /** * [[https://doc.akka.io/docs/akka/current/discovery/index.html Akka Discovery]] @@ -62,7 +62,7 @@ private[cassandra] object AkkaDiscoverySessionProvider { basic.contact-points = [${contactPoints.mkString("\"", "\", \"", "\"")}] """).withFallback(CqlSessionProvider.driverConfig(system, config)) val driverConfigLoader = DriverConfigLoaderFromConfig.fromConfig(driverConfigWithContactPoints) - CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().toScala + CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().asScala } } @@ -76,7 +76,7 @@ 
private[cassandra] object AkkaDiscoverySessionProvider { ec: ExecutionContext): Future[immutable.Seq[String]] = { val serviceConfig = config.getConfig("service-discovery") val serviceName = serviceConfig.getString("name") - val lookupTimeout = serviceConfig.getDuration("lookup-timeout").asScala + val lookupTimeout = serviceConfig.getDuration("lookup-timeout").toScala readNodes(serviceName, lookupTimeout) } diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraMetricsRegistry.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraMetricsRegistry.scala index 7cfca3e5dc..2b32180d3f 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraMetricsRegistry.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraMetricsRegistry.scala @@ -8,7 +8,7 @@ import akka.actor.{ClassicActorSystemProvider, ExtendedActorSystem, Extension, E import akka.annotation.InternalApi import com.codahale.metrics.MetricRegistry -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Retrieves Cassandra metrics registry for an actor system diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraSessionSettings.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraSessionSettings.scala index 85bfb957d6..30d07e1905 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraSessionSettings.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraSessionSettings.scala @@ -8,8 +8,8 @@ import java.util.concurrent.CompletionStage import akka.Done import com.datastax.oss.driver.api.core.CqlSession -import scala.compat.java8.FunctionConverters._ -import scala.compat.java8.FutureConverters._ +import scala.jdk.FunctionConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.Future @@ -30,7 +30,7 @@ class CassandraSessionSettings private (val configPath: String, * only execute the first. */ def withInit(value: java.util.function.Function[CqlSession, CompletionStage[Done]]): CassandraSessionSettings = - copy(init = Some(value.asScala.andThen(_.toScala))) + copy(init = Some(value.asScala.andThen(_.asScala))) /** * The `init` function will be performed once when the session is created, i.e. diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraWriteSettings.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraWriteSettings.scala index 8b5fbf3aba..901e04dc2e 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraWriteSettings.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CassandraWriteSettings.scala @@ -4,10 +4,10 @@ package akka.stream.alpakka.cassandra -import akka.util.JavaDurationConverters._ import com.datastax.oss.driver.api.core.cql.BatchType import scala.concurrent.duration.{FiniteDuration, _} +import scala.jdk.DurationConverters._ class CassandraWriteSettings private (val parallelism: Int, val maxBatchSize: Int, @@ -37,7 +37,7 @@ class CassandraWriteSettings private (val parallelism: Int, * Java API: Batch grouping time for `CassandraFlow.createUnloggedBatch`. 
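The `withInit` hunk in `CassandraSessionSettings` above chains two of the new converter families: `scala.jdk.FunctionConverters` turns the `java.util.function.Function` into a Scala function (`asScala`), and `scala.jdk.FutureConverters` then converts its `CompletionStage` result (`andThen(_.asScala)`). A sketch of the same shape with placeholder types (names are illustrative):

```scala
import java.util.concurrent.{CompletableFuture, CompletionStage}
import java.util.function.{Function => JFunction}
import scala.concurrent.Future
import scala.jdk.FunctionConverters._
import scala.jdk.FutureConverters._

object FunctionConvertersSketch extends App {
  // A Java-style callback returning a CompletionStage, as a javadsl API would accept
  val javaInit: JFunction[String, CompletionStage[Int]] =
    (s: String) => CompletableFuture.completedFuture(s.length)

  // asScala on the function, then asScala on its result: String => Future[Int]
  val scalaInit: String => Future[Int] = javaInit.asScala.andThen(_.asScala)

  println(scalaInit("session").value) // Some(Success(7))
}
```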
*/ def withMaxBatchWait(maxBatchWait: java.time.Duration): CassandraWriteSettings = - copy(maxBatchWait = maxBatchWait.asScala) + copy(maxBatchWait = maxBatchWait.toScala) def withBatchType(value: BatchType): CassandraWriteSettings = copy(batchType = value) diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CqlSessionProvider.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CqlSessionProvider.scala index 1bdaced404..e1a66836d4 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CqlSessionProvider.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/CqlSessionProvider.scala @@ -9,7 +9,7 @@ import com.datastax.oss.driver.api.core.CqlSession import com.typesafe.config.{Config, ConfigFactory} import scala.collection.immutable -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.{ExecutionContext, Future} import scala.util.Failure @@ -50,7 +50,7 @@ class DefaultSessionProvider(system: ActorSystem, config: Config) extends CqlSes } else { val driverConfig = CqlSessionProvider.driverConfig(system, config) val driverConfigLoader = DriverConfigLoaderFromConfig.fromConfig(driverConfig) - CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().toScala + CqlSession.builder().withConfigLoader(driverConfigLoader).buildAsync().asScala } } } diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSession.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSession.scala index e385bd41dc..6dc361bb68 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSession.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSession.scala @@ -10,9 +10,9 @@ import java.util.concurrent.{CompletionStage, Executor} import java.util.function.{Function => JFunction} import scala.annotation.varargs -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.ExecutionContext import akka.Done import akka.NotUsed @@ -58,7 +58,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C executionContext, log, metricsCategory, - session => init.apply(session).toScala, + session => init.apply(session).asScala, () => onClose.run()) ) @@ -80,13 +80,13 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C * Closes the underlying Cassandra session. * @param executor as this might be used after actor system termination, the actor systems dispatcher can't be used */ - def close(executor: Executor): CompletionStage[Done] = delegate.close(ExecutionContext.fromExecutor(executor)).toJava + def close(executor: Executor): CompletionStage[Done] = delegate.close(ExecutionContext.fromExecutor(executor)).asJava /** * Meta data about the Cassandra server, such as its version. */ def serverMetaData: CompletionStage[CassandraServerMetaData] = - delegate.serverMetaData.toJava + delegate.serverMetaData.asJava /** * The `Session` of the underlying @@ -95,7 +95,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C * API exposed by this class. Be careful to not use blocking calls. 
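`akka.util.JavaDurationConverters` exposed the Java-to-Scala direction as `asScala`; its stdlib replacement `scala.jdk.DurationConverters` names it `toScala` (with `toJava` for the reverse), which is why hunks such as `withConfirmationTimeout` and `withMaxBatchWait` change the call site along with the import. A round-trip sketch, standalone and not from this diff:

```scala
import scala.concurrent.duration._
import scala.jdk.DurationConverters._

object DurationConvertersSketch extends App {
  val javaDuration: java.time.Duration = java.time.Duration.ofSeconds(3)

  // java.time.Duration -> FiniteDuration, as in withMaxBatchWait above
  val scalaDuration: FiniteDuration = javaDuration.toScala
  println(scalaDuration) // 3 seconds

  // and back
  println(scalaDuration.toJava) // PT3S
}
```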
*/ def underlying(): CompletionStage[CqlSession] = - delegate.underlying().toJava + delegate.underlying().asJava /** * Execute CQL commands @@ -104,14 +104,14 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C * The returned `CompletionStage` is completed when the command is done, or if the statement fails. */ def executeDDL(stmt: String): CompletionStage[Done] = - delegate.executeDDL(stmt).toJava + delegate.executeDDL(stmt).asJava /** * Create a `PreparedStatement` that can be bound and used in * `executeWrite` or `select` multiple times. */ def prepare(stmt: String): CompletionStage[PreparedStatement] = - delegate.prepare(stmt).toJava + delegate.prepare(stmt).asJava /** * Execute several statements in a batch. First you must `prepare` the @@ -126,7 +126,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C * successfully executed, or if it fails. */ def executeWriteBatch(batch: BatchStatement): CompletionStage[Done] = - delegate.executeWriteBatch(batch).toJava + delegate.executeWriteBatch(batch).asJava /** * Execute one statement. First you must `prepare` the @@ -141,7 +141,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C * successfully executed, or if it fails. */ def executeWrite(stmt: Statement[_]): CompletionStage[Done] = - delegate.executeWrite(stmt).toJava + delegate.executeWrite(stmt).asJava /** * Prepare, bind and execute one statement in one go. @@ -155,7 +155,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C */ @varargs def executeWrite(stmt: String, bindValues: AnyRef*): CompletionStage[Done] = - delegate.executeWrite(stmt, bindValues: _*).toJava + delegate.executeWrite(stmt, bindValues: _*).asJava /** * Execute a select statement. First you must `prepare` the @@ -185,7 +185,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C * this `Source` and then `run` the stream. */ def select(stmt: CompletionStage[Statement[_]]): Source[Row, NotUsed] = - delegate.select(stmt.toScala).asJava + delegate.select(stmt.asScala).asJava /** * Prepare, bind and execute a select statement in one go. @@ -213,7 +213,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C * The returned `CompletionStage` is completed with the found rows. */ def selectAll(stmt: Statement[_]): CompletionStage[JList[Row]] = - delegate.selectAll(stmt).map(_.asJava).toJava + delegate.selectAll(stmt).map(_.asJava).asJava /** * Prepare, bind and execute a select statement in one go. Only use this method @@ -226,7 +226,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C */ @varargs def selectAll(stmt: String, bindValues: AnyRef*): CompletionStage[JList[Row]] = - delegate.selectAll(stmt, bindValues: _*).map(_.asJava).toJava + delegate.selectAll(stmt, bindValues: _*).map(_.asJava).asJava /** * Execute a select statement that returns one row. First you must `prepare` the @@ -239,7 +239,7 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C * if any. */ def selectOne(stmt: Statement[_]): CompletionStage[Optional[Row]] = - delegate.selectOne(stmt).map(_.asJava).toJava + delegate.selectOne(stmt).map(_.toJava).asJava /** * Prepare, bind and execute a select statement that returns one row. 
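Likewise for options: `scala.compat.java8.OptionConverters` used `asJava`/`asScala`, while `scala.jdk.OptionConverters` uses `toJava`/`toScala`, so the `selectOne` results here switch from `map(_.asJava)` to `map(_.toJava)`. A minimal sketch (object name illustrative):

```scala
import java.util.Optional
import scala.jdk.OptionConverters._

object OptionConvertersSketch extends App {
  val some: Option[String] = Some("row")

  // Option -> Optional, as in selectOne(...).map(_.toJava)
  val optional: Optional[String] = some.toJava
  println(optional) // Optional[row]

  // Optional -> Option
  println(optional.toScala)                 // Some(row)
  println(Optional.empty[String]().toScala) // None
}
```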
@@ -251,6 +251,6 @@ final class CassandraSession(@InternalApi private[akka] val delegate: scaladsl.C */ @varargs def selectOne(stmt: String, bindValues: AnyRef*): CompletionStage[Optional[Row]] = - delegate.selectOne(stmt, bindValues: _*).map(_.asJava).toJava + delegate.selectOne(stmt, bindValues: _*).map(_.toJava).asJava } diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSessionRegistry.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSessionRegistry.scala index fa0ab63a8a..eba6b736c0 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSessionRegistry.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/javadsl/CassandraSessionRegistry.scala @@ -11,7 +11,7 @@ import akka.actor.ClassicActorSystemProvider import akka.stream.alpakka.cassandra.{scaladsl, CassandraSessionSettings} import com.datastax.oss.driver.api.core.CqlSession -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ /** * This Cassandra session registry makes it possible to share Cassandra sessions between multiple use sites @@ -57,7 +57,7 @@ final class CassandraSessionRegistry private (delegate: scaladsl.CassandraSessio */ def sessionFor(configPath: String, init: java.util.function.Function[CqlSession, CompletionStage[Done]]): CassandraSession = - new CassandraSession(delegate.sessionFor(configPath, ses => init(ses).toScala)) + new CassandraSession(delegate.sessionFor(configPath, ses => init(ses).asScala)) /** * Get an existing session or start a new one with the given settings, diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraFlow.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraFlow.scala index 50e617c7f6..b3770b0f17 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraFlow.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraFlow.scala @@ -5,12 +5,11 @@ package akka.stream.alpakka.cassandra.scaladsl import akka.NotUsed -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.cassandra.CassandraWriteSettings import akka.stream.scaladsl.{Flow, FlowWithContext} import com.datastax.oss.driver.api.core.cql.{BatchStatement, BoundStatement, PreparedStatement} - -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ +import scala.concurrent.ExecutionContext import scala.concurrent.Future /** @@ -40,7 +39,7 @@ object CassandraFlow { Flow[T].mapAsync(writeSettings.parallelism) { element => session .executeWrite(statementBinder(element, preparedStatement)) - .map(_ => element)(ExecutionContexts.parasitic) + .map(_ => element)(ExecutionContext.parasitic) } }(session.ec) } @@ -72,7 +71,7 @@ object CassandraFlow { case tuple @ (element, _) => session .executeWrite(statementBinder(element, preparedStatement)) - .map(_ => tuple)(ExecutionContexts.parasitic) + .map(_ => tuple)(ExecutionContext.parasitic) } }(session.ec) } @@ -116,7 +115,7 @@ object CassandraFlow { .mapAsyncUnordered(writeSettings.parallelism) { list => val boundStatements = list.map(t => statementBinder(t, preparedStatement)) val batchStatement = BatchStatement.newInstance(writeSettings.batchType).addAll(boundStatements.asJava) - session.executeWriteBatch(batchStatement).map(_ => list)(ExecutionContexts.parasitic) + session.executeWriteBatch(batchStatement).map(_ => list)(ExecutionContext.parasitic) } .mapConcat(_.toList) }(session.ec) diff --git 
a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSession.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSession.scala index dc424155f7..17a6c9e0f5 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSession.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSession.scala @@ -17,7 +17,7 @@ import com.datastax.oss.driver.api.core.cql._ import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException import scala.collection.immutable -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal @@ -78,7 +78,7 @@ final class CassandraSession(system: akka.actor.ActorSystem, def close(executionContext: ExecutionContext): Future[Done] = { implicit val ec: ExecutionContext = executionContext onClose() - _underlyingSession.map(_.closeAsync().toScala).map(_ => Done) + _underlyingSession.map(_.closeAsync().asScala).map(_ => Done) } /** @@ -122,7 +122,7 @@ final class CassandraSession(system: akka.actor.ActorSystem, */ def executeDDL(stmt: String): Future[Done] = underlying().flatMap { cqlSession => - cqlSession.executeAsync(stmt).toScala.map(_ => Done) + cqlSession.executeAsync(stmt).asScala.map(_ => Done) } /** @@ -131,7 +131,7 @@ final class CassandraSession(system: akka.actor.ActorSystem, */ def prepare(stmt: String): Future[PreparedStatement] = underlying().flatMap { cqlSession => - cqlSession.prepareAsync(stmt).toScala + cqlSession.prepareAsync(stmt).asScala } /** @@ -163,7 +163,7 @@ final class CassandraSession(system: akka.actor.ActorSystem, */ def executeWrite(stmt: Statement[_]): Future[Done] = { underlying().flatMap { cqlSession => - cqlSession.executeAsync(stmt).toScala.map(_ => Done) + cqlSession.executeAsync(stmt).asScala.map(_ => Done) } } @@ -186,7 +186,7 @@ final class CassandraSession(system: akka.actor.ActorSystem, */ @InternalApi private[akka] def selectResultSet(stmt: Statement[_]): Future[AsyncResultSet] = { underlying().flatMap { s => - s.executeAsync(stmt).toScala + s.executeAsync(stmt).asScala } } diff --git a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionRegistry.scala b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionRegistry.scala index 86ca7a4d24..3c5c71a586 100644 --- a/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionRegistry.scala +++ b/cassandra/src/main/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraSessionRegistry.scala @@ -6,7 +6,7 @@ package akka.stream.alpakka.cassandra.scaladsl import java.util.concurrent.ConcurrentHashMap -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.ExecutionContext import scala.concurrent.Future import akka.Done diff --git a/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraLifecycle.scala b/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraLifecycle.scala index 938c717e71..92c9f55537 100644 --- a/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraLifecycle.scala +++ b/cassandra/src/test/scala/akka/stream/alpakka/cassandra/scaladsl/CassandraLifecycle.scala @@ -12,12 +12,12 @@ import com.datastax.oss.driver.api.core.cql._ import org.scalatest._ import org.scalatest.concurrent.{PatienceConfiguration, ScalaFutures} -import scala.collection.JavaConverters._ +import 
scala.jdk.CollectionConverters._ import scala.collection.immutable import scala.concurrent.duration._ import scala.concurrent.{Await, ExecutionContext, Future} import scala.util.control.NonFatal -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ trait CassandraLifecycleBase { def lifecycleSession: CassandraSession @@ -59,7 +59,7 @@ trait CassandraLifecycleBase { def executeCql(statements: immutable.Seq[String]): Future[Done] = executeCql(lifecycleSession, statements) def executeCqlList(statements: java.util.List[String]): CompletionStage[Done] = - executeCql(lifecycleSession, statements.asScala.toList).toJava + executeCql(lifecycleSession, statements.asScala.toList).asJava def withSchemaMetadataDisabled(block: => Future[Done]): Future[Done] = { implicit val ec: ExecutionContext = lifecycleSession.ec diff --git a/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala b/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala index e8ad1a3864..7b4c7454fb 100644 --- a/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala +++ b/cassandra/src/test/scala/docs/javadsl/CassandraSessionSpec.scala @@ -19,10 +19,10 @@ import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped import akka.stream.testkit.scaladsl.TestSink import com.datastax.oss.driver.api.core.cql.Row -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.FutureConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.Future import scala.concurrent.duration._ @@ -72,7 +72,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr // testing javadsl to prove delegation works lazy val session: javadsl.CassandraSession = javadslSessionRegistry.sessionFor(sessionSettings) - def await[T](cs: CompletionStage[T]): T = cs.toScala.futureValue + def await[T](cs: CompletionStage[T]): T = cs.asScala.futureValue "session" must { @@ -91,7 +91,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr } yield Done }.futureValue mustBe Done val sink: Sink[Row, CompletionStage[util.List[Row]]] = Sink.seq - val rows = session.select(s"SELECT * FROM $table").runWith(sink, materializer).toScala.futureValue + val rows = session.select(s"SELECT * FROM $table").runWith(sink, materializer).asScala.futureValue rows.asScala.map(_.getInt("id")) must contain theSameElementsAs data } @@ -133,7 +133,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr "selectOne empty" in { val row = await(session.selectOne(s"SELECT count FROM $dataTable WHERE partition = ? and key = ?", "A", "x")) - row.asScala mustBe empty + row.toScala mustBe empty } "create indexes" in { @@ -144,7 +144,7 @@ final class CassandraSessionSpec extends CassandraSpecBase(ActorSystem("Cassandr await( session.selectOne("SELECT * FROM system_schema.indexes WHERE table_name = ? 
ALLOW FILTERING", dataTableName) ) - row.asScala.map(index => index.getString("table_name") -> index.getString("index_name")) mustBe Some( + row.toScala.map(index => index.getString("table_name") -> index.getString("index_name")) mustBe Some( dataTableName -> "count_idx" ) } diff --git a/contributor-advice.md b/contributor-advice.md index 66bd422da6..f0d6b96584 100644 --- a/contributor-advice.md +++ b/contributor-advice.md @@ -45,7 +45,7 @@ Alpakka, same as Akka, aims to keep 100% feature parity between the various lang 1. If the underlying Scala code requires an `ExecutionContext`, make the Java API take an `Executor` and use `ExecutionContext.fromExecutor(executor)` for conversion. -1. Make use of `scala-java8-compat` conversions, see [GitHub](https://github.com/scala/scala-java8-compat) (eg. `scala.compat.java8.FutureConverters` to translate Futures to `CompletionStage`s). +1. Make use of `scala-java8-compat` conversions, see [GitHub](https://github.com/scala/scala-java8-compat) (eg. `scala.jdk.FutureConverters` to translate Futures to `CompletionStage`s). ### Overview of Scala types and their Java counterparts diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseSessionRegistry.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseSessionRegistry.scala index 342d025f91..d81c471010 100644 --- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseSessionRegistry.scala +++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/CouchbaseSessionRegistry.scala @@ -8,14 +8,14 @@ import java.util.concurrent.CompletionStage import java.util.concurrent.atomic.AtomicReference import akka.actor.{ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider} -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.couchbase.impl.CouchbaseClusterRegistry import akka.stream.alpakka.couchbase.javadsl.{CouchbaseSession => JCouchbaseSession} import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession import scala.annotation.tailrec -import scala.compat.java8.FutureConverters._ +import scala.concurrent.ExecutionContext import scala.concurrent.{Future, Promise} +import scala.jdk.FutureConverters._ /** * This Couchbase session registry makes it possible to share Couchbase sessions between multiple use sites @@ -78,8 +78,8 @@ final class CouchbaseSessionRegistry(system: ExtendedActorSystem) extends Extens */ def getSessionFor(settings: CouchbaseSessionSettings, bucketName: String): CompletionStage[JCouchbaseSession] = sessionFor(settings, bucketName) - .map(_.asJava)(ExecutionContexts.parasitic) - .toJava + .map(_.asJava)(ExecutionContext.parasitic) + .asJava @tailrec private def startSession(key: SessionKey): Future[CouchbaseSession] = { @@ -91,7 +91,7 @@ final class CouchbaseSessionRegistry(system: ExtendedActorSystem) extends Extens val session = clusterRegistry .clusterFor(key.settings) .flatMap(cluster => CouchbaseSession(cluster, key.bucketName)(blockingDispatcher))( - ExecutionContexts.parasitic + ExecutionContext.parasitic ) promise.completeWith(session) promise.future diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionImpl.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionImpl.scala index 88bef87933..87256a93d3 100644 --- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionImpl.scala +++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionImpl.scala @@ -7,7 +7,6 @@ package 
akka.stream.alpakka.couchbase.impl import java.util.concurrent.TimeUnit import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession import akka.stream.alpakka.couchbase.{javadsl, CouchbaseWriteSettings} import akka.stream.scaladsl.Source @@ -20,8 +19,9 @@ import com.couchbase.client.java.query.{N1qlQuery, Statement} import com.couchbase.client.java.{AsyncBucket, AsyncCluster} import rx.RxReactiveStreams -import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration +import scala.concurrent.ExecutionContext +import scala.concurrent.Future /** * INTERNAL API @@ -101,7 +101,7 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl def remove(id: String): Future[Done] = singleObservableToFuture(asyncBucket.remove(id), id) - .map(_ => Done)(ExecutionContexts.parasitic) + .map(_ => Done)(ExecutionContext.parasitic) def remove(id: String, writeSettings: CouchbaseWriteSettings): Future[Done] = singleObservableToFuture(asyncBucket.remove(id, @@ -110,7 +110,7 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl writeSettings.timeout.toMillis, TimeUnit.MILLISECONDS), id) - .map(_ => Done)(ExecutionContexts.parasitic) + .map(_ => Done)(ExecutionContext.parasitic) def streamedQuery(query: N1qlQuery): Source[JsonObject, NotUsed] = // FIXME verify cancellation works @@ -126,7 +126,7 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl def counter(id: String, delta: Long, initial: Long): Future[Long] = singleObservableToFuture(asyncBucket.counter(id, delta, initial), id) - .map(_.content(): Long)(ExecutionContexts.parasitic) + .map(_.content(): Long)(ExecutionContext.parasitic) def counter(id: String, delta: Long, initial: Long, writeSettings: CouchbaseWriteSettings): Future[Long] = singleObservableToFuture(asyncBucket.counter(id, @@ -137,7 +137,7 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl writeSettings.timeout.toMillis, TimeUnit.MILLISECONDS), id) - .map(_.content(): Long)(ExecutionContexts.parasitic) + .map(_.content(): Long)(ExecutionContext.parasitic) def close(): Future[Done] = if (!asyncBucket.isClosed) { @@ -145,10 +145,10 @@ final private[couchbase] class CouchbaseSessionImpl(asyncBucket: AsyncBucket, cl .flatMap { _ => cluster match { case Some(cluster) => - singleObservableToFuture(cluster.disconnect(), "close").map(_ => Done)(ExecutionContexts.global()) + singleObservableToFuture(cluster.disconnect(), "close").map(_ => Done)(ExecutionContext.global) case None => Future.successful(Done) } - }(ExecutionContexts.global()) + }(ExecutionContext.global) } else { Future.successful(Done) } diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionJavaAdapter.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionJavaAdapter.scala index bf87ed3521..93db54837e 100644 --- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionJavaAdapter.scala +++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/impl/CouchbaseSessionJavaAdapter.scala @@ -9,21 +9,22 @@ import java.util.Optional import java.util.concurrent.CompletionStage import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.couchbase.CouchbaseWriteSettings import akka.stream.alpakka.couchbase.javadsl import akka.stream.alpakka.couchbase.scaladsl import akka.stream.javadsl.Source import akka.{Done, 
NotUsed} + import com.couchbase.client.java.AsyncBucket import com.couchbase.client.java.document.json.JsonObject import com.couchbase.client.java.document.{Document, JsonDocument} import com.couchbase.client.java.query.util.IndexInfo import com.couchbase.client.java.query.{N1qlQuery, Statement} -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.FutureConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.duration.FiniteDuration +import scala.concurrent.ExecutionContext import scala.concurrent.{duration, Future} /** @@ -37,19 +38,19 @@ private[couchbase] final class CouchbaseSessionJavaAdapter(delegate: scaladsl.Co override def underlying: AsyncBucket = delegate.underlying - override def insert(document: JsonDocument): CompletionStage[JsonDocument] = delegate.insertDoc(document).toJava + override def insert(document: JsonDocument): CompletionStage[JsonDocument] = delegate.insertDoc(document).asJava - override def insertDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.insertDoc(document).toJava + override def insertDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.insertDoc(document).asJava override def insert( document: JsonDocument, writeSettings: CouchbaseWriteSettings - ): CompletionStage[JsonDocument] = delegate.insert(document, writeSettings).toJava + ): CompletionStage[JsonDocument] = delegate.insert(document, writeSettings).asJava override def insertDoc[T <: Document[_]]( document: T, writeSettings: CouchbaseWriteSettings - ): CompletionStage[T] = delegate.insertDoc(document, writeSettings).toJava + ): CompletionStage[T] = delegate.insertDoc(document, writeSettings).asJava override def get(id: String): CompletionStage[Optional[JsonDocument]] = futureOptToJava(delegate.get(id)) @@ -63,30 +64,30 @@ private[couchbase] final class CouchbaseSessionJavaAdapter(delegate: scaladsl.Co def get[T <: Document[_]](id: String, timeout: Duration, documentClass: Class[T]): CompletionStage[Optional[T]] = futureOptToJava(delegate.get(id, FiniteDuration.apply(timeout.toNanos, duration.NANOSECONDS), documentClass)) - override def upsert(document: JsonDocument): CompletionStage[JsonDocument] = delegate.upsert(document).toJava + override def upsert(document: JsonDocument): CompletionStage[JsonDocument] = delegate.upsert(document).asJava - override def upsertDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.upsertDoc(document).toJava + override def upsertDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.upsertDoc(document).asJava override def upsert(document: JsonDocument, writeSettings: CouchbaseWriteSettings): CompletionStage[JsonDocument] = - delegate.upsert(document, writeSettings).toJava + delegate.upsert(document, writeSettings).asJava override def upsertDoc[T <: Document[_]](document: T, writeSettings: CouchbaseWriteSettings): CompletionStage[T] = - delegate.upsertDoc(document, writeSettings).toJava + delegate.upsertDoc(document, writeSettings).asJava - override def replace(document: JsonDocument): CompletionStage[JsonDocument] = delegate.replace(document).toJava + override def replace(document: JsonDocument): CompletionStage[JsonDocument] = delegate.replace(document).asJava - override def replaceDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.replaceDoc(document).toJava + override def replaceDoc[T <: Document[_]](document: T): CompletionStage[T] = delegate.replaceDoc(document).asJava override def replace(document: JsonDocument, 
writeSettings: CouchbaseWriteSettings): CompletionStage[JsonDocument] = - delegate.replace(document, writeSettings).toJava + delegate.replace(document, writeSettings).asJava override def replaceDoc[T <: Document[_]](document: T, writeSettings: CouchbaseWriteSettings): CompletionStage[T] = - delegate.replaceDoc(document, writeSettings).toJava + delegate.replaceDoc(document, writeSettings).asJava - override def remove(id: String): CompletionStage[Done] = delegate.remove(id).toJava + override def remove(id: String): CompletionStage[Done] = delegate.remove(id).asJava override def remove(id: String, writeSettings: CouchbaseWriteSettings): CompletionStage[Done] = - delegate.remove(id, writeSettings).toJava + delegate.remove(id, writeSettings).asJava override def streamedQuery(query: N1qlQuery): Source[JsonObject, _root_.akka.NotUsed] = delegate.streamedQuery(query).asJava @@ -101,22 +102,22 @@ private[couchbase] final class CouchbaseSessionJavaAdapter(delegate: scaladsl.Co futureOptToJava(delegate.singleResponseQuery(query)) override def counter(id: String, delta: Long, initial: Long): CompletionStage[Long] = - delegate.counter(id, delta, initial).toJava + delegate.counter(id, delta, initial).asJava override def counter( id: String, delta: Long, initial: Long, writeSettings: CouchbaseWriteSettings - ): CompletionStage[Long] = delegate.counter(id, delta, initial, writeSettings).toJava + ): CompletionStage[Long] = delegate.counter(id, delta, initial, writeSettings).asJava - override def close(): CompletionStage[Done] = delegate.close().toJava + override def close(): CompletionStage[Done] = delegate.close().asJava override def createIndex(indexName: String, ignoreIfExist: Boolean, fields: AnyRef*): CompletionStage[Boolean] = - delegate.createIndex(indexName, ignoreIfExist, fields).toJava + delegate.createIndex(indexName, ignoreIfExist, fields).asJava private def futureOptToJava[T](future: Future[Option[T]]): CompletionStage[Optional[T]] = - future.map(_.asJava)(ExecutionContexts.parasitic).toJava + future.map(_.toJava)(ExecutionContext.parasitic).asJava def listIndexes(): Source[IndexInfo, NotUsed] = delegate.listIndexes().asJava diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSession.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSession.scala index 560e8c0f5e..5f1e659452 100644 --- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSession.scala +++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/javadsl/CouchbaseSession.scala @@ -9,7 +9,6 @@ import java.util.Optional import java.util.concurrent.{CompletionStage, Executor} import akka.annotation.DoNotInherit -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.couchbase.impl.CouchbaseSessionJavaAdapter import akka.stream.alpakka.couchbase.scaladsl.{CouchbaseSession => ScalaDslCouchbaseSession} import akka.stream.alpakka.couchbase.{CouchbaseSessionSettings, CouchbaseWriteSettings} @@ -21,7 +20,7 @@ import com.couchbase.client.java.query.util.IndexInfo import com.couchbase.client.java.query.{N1qlQuery, Statement} import com.couchbase.client.java.{AsyncBucket, AsyncCluster, Bucket} -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.ExecutionContext /** @@ -41,9 +40,9 @@ object CouchbaseSession { ScalaDslCouchbaseSession .apply(settings, bucketName)(executionContext(executor)) .map(new CouchbaseSessionJavaAdapter(_): CouchbaseSession)( - ExecutionContexts.parasitic + 
ExecutionContext.parasitic ) - .toJava + .asJava /** * Create a given bucket using a pre-existing cluster client, allowing for it to be shared among @@ -52,9 +51,9 @@ object CouchbaseSession { def create(client: AsyncCluster, bucketName: String, executor: Executor): CompletionStage[CouchbaseSession] = ScalaDslCouchbaseSession(client, bucketName)(executionContext(executor)) .map(new CouchbaseSessionJavaAdapter(_): CouchbaseSession)( - ExecutionContexts.parasitic + ExecutionContext.parasitic ) - .toJava + .asJava /** * Connects to a Couchbase cluster by creating an `AsyncCluster`. @@ -63,7 +62,7 @@ object CouchbaseSession { def createClient(settings: CouchbaseSessionSettings, executor: Executor): CompletionStage[AsyncCluster] = ScalaDslCouchbaseSession .createClusterClient(settings)(executionContext(executor)) - .toJava + .asJava private def executionContext(executor: Executor): ExecutionContext = executor match { diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/model.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/model.scala index 80e4563a80..64bce9c258 100644 --- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/model.scala +++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/model.scala @@ -13,10 +13,10 @@ import com.couchbase.client.java.env.CouchbaseEnvironment import com.couchbase.client.java.{PersistTo, ReplicateTo} import com.typesafe.config.Config -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable import scala.concurrent.Future -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.duration._ /** @@ -205,7 +205,7 @@ final class CouchbaseSessionSettings private ( def withEnrichAsyncCs( value: java.util.function.Function[CouchbaseSessionSettings, CompletionStage[CouchbaseSessionSettings]] ): CouchbaseSessionSettings = - copy(enrichAsync = (s: CouchbaseSessionSettings) => value.apply(s).toScala) + copy(enrichAsync = (s: CouchbaseSessionSettings) => value.apply(s).asScala) def withEnvironment(environment: CouchbaseEnvironment): CouchbaseSessionSettings = copy(environment = Some(environment)) diff --git a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/DiscoverySupport.scala b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/DiscoverySupport.scala index a77a003010..2f51f656ac 100644 --- a/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/DiscoverySupport.scala +++ b/couchbase/src/main/scala/akka/stream/alpakka/couchbase/scaladsl/DiscoverySupport.scala @@ -10,12 +10,12 @@ import akka.actor.{ActorSystem, ClassicActorSystemProvider} import akka.annotation.InternalApi import akka.discovery.Discovery import akka.stream.alpakka.couchbase.CouchbaseSessionSettings -import akka.util.JavaDurationConverters._ import com.typesafe.config.Config import scala.collection.immutable -import scala.compat.java8.FunctionConverters._ -import scala.compat.java8.FutureConverters._ +import scala.jdk.DurationConverters._ +import scala.jdk.FunctionConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration @@ -44,7 +44,7 @@ sealed class DiscoverySupport private { private def readNodes(config: Config)(implicit system: ClassicActorSystemProvider): Future[immutable.Seq[String]] = if (config.hasPath("service")) { val serviceName = config.getString("service.name") - val lookupTimeout = config.getDuration("service.lookup-timeout").asScala + 
val lookupTimeout = config.getDuration("service.lookup-timeout").toScala readNodes(serviceName, lookupTimeout) } else throw new IllegalArgumentException(s"config $config does not contain `service` section") @@ -75,7 +75,7 @@ sealed class DiscoverySupport private { config: Config, system: ClassicActorSystemProvider ): java.util.function.Function[CouchbaseSessionSettings, CompletionStage[CouchbaseSessionSettings]] = - nodes(config)(system).andThen(_.toJava).asJava + nodes(config)(system).andThen(_.asJava).asJava /** * Expects a `service` section in `alpakka.couchbase.session` and reads the given service name's address diff --git a/couchbase/src/test/scala/akka/stream/alpakka/couchbase/testing/CouchbaseSupport.scala b/couchbase/src/test/scala/akka/stream/alpakka/couchbase/testing/CouchbaseSupport.scala index c8c3d821c4..587e348541 100644 --- a/couchbase/src/test/scala/akka/stream/alpakka/couchbase/testing/CouchbaseSupport.scala +++ b/couchbase/src/test/scala/akka/stream/alpakka/couchbase/testing/CouchbaseSupport.scala @@ -17,7 +17,7 @@ import com.couchbase.client.java.document.{BinaryDocument, JsonDocument, RawJson import org.slf4j.LoggerFactory import play.api.libs.json.Json -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable.Seq import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration._ diff --git a/csv/src/main/java/akka/stream/alpakka/csv/javadsl/CsvFormatting.java b/csv/src/main/java/akka/stream/alpakka/csv/javadsl/CsvFormatting.java index fe8d99aa17..0af3c202bd 100644 --- a/csv/src/main/java/akka/stream/alpakka/csv/javadsl/CsvFormatting.java +++ b/csv/src/main/java/akka/stream/alpakka/csv/javadsl/CsvFormatting.java @@ -10,7 +10,7 @@ import akka.util.ByteString; import scala.Option; import scala.Some; -import scala.collection.JavaConverters; +import scala.jdk.javaapi.CollectionConverters; import scala.collection.immutable.List; import java.nio.charset.Charset; @@ -77,7 +77,7 @@ public static <T extends Collection<String>> Flow<T, ByteString, NotUsed> format akka.stream.alpakka.csv.scaladsl.CsvFormatting.format( delimiter, quoteChar, escapeChar, endOfLine, qs, charset, byteOrderMarkScala); return Flow.<T>create() - .map(c -> JavaConverters.collectionAsScalaIterableConverter(c).asScala().toList()) + .map(c -> CollectionConverters.asScala(c).toList()) .via(formattingFlow); } } diff --git a/csv/src/main/java/akka/stream/alpakka/csv/javadsl/CsvParsing.java b/csv/src/main/java/akka/stream/alpakka/csv/javadsl/CsvParsing.java index 7c9089ade5..b6d6d4c7f1 100644 --- a/csv/src/main/java/akka/stream/alpakka/csv/javadsl/CsvParsing.java +++ b/csv/src/main/java/akka/stream/alpakka/csv/javadsl/CsvParsing.java @@ -7,7 +7,7 @@ import akka.NotUsed; import akka.stream.javadsl.Flow; import akka.util.ByteString; -import scala.collection.JavaConverters; +import scala.jdk.javaapi.CollectionConverters; import java.util.Collection; @@ -35,7 +35,7 @@ public static Flow<ByteString, Collection<ByteString>, NotUsed> lineScanner( return akka.stream.alpakka.csv.scaladsl.CsvParsing.lineScanner( delimiter, quoteChar, escapeChar, maximumLineLength) .asJava() - .map(c -> JavaConverters.asJavaCollectionConverter(c).asJavaCollection()) + .map(CollectionConverters::asJavaCollection) .mapMaterializedValue(m -> NotUsed.getInstance()); } } diff --git a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/DynamoDbOp.scala b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/DynamoDbOp.scala index c086065685..15093eb685 100644 --- a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/DynamoDbOp.scala +++ 
b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/DynamoDbOp.scala @@ -17,7 +17,7 @@ import software.amazon.awssdk.services.dynamodb.paginators.{ ScanPublisher } -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.Future /** @@ -31,7 +31,7 @@ import scala.concurrent.Future sealed class DynamoDbOp[In <: DynamoDbRequest, Out <: DynamoDbResponse]( sdkExecute: DynamoDbAsyncClient => In => CompletableFuture[Out] ) { - def execute(request: In)(implicit client: DynamoDbAsyncClient): Future[Out] = sdkExecute(client)(request).toScala + def execute(request: In)(implicit client: DynamoDbAsyncClient): Future[Out] = sdkExecute(client)(request).asScala } /** diff --git a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/scaladsl/DynamoDb.scala b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/scaladsl/DynamoDb.scala index 366d0e0c19..667ea5d94e 100644 --- a/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/scaladsl/DynamoDb.scala +++ b/dynamodb/src/main/scala/akka/stream/alpakka/dynamodb/scaladsl/DynamoDb.scala @@ -6,14 +6,14 @@ package akka.stream.alpakka.dynamodb.scaladsl import akka.NotUsed import akka.actor.ClassicActorSystemProvider -import akka.dispatch.ExecutionContexts -import scala.annotation.implicitNotFound import akka.stream.alpakka.dynamodb.{DynamoDbOp, DynamoDbPaginatedOp} import akka.stream.scaladsl.{Flow, FlowWithContext, Sink, Source} import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient import software.amazon.awssdk.services.dynamodb.model._ +import scala.annotation.implicitNotFound +import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.util.{Failure, Success, Try} @@ -52,8 +52,8 @@ object DynamoDb { case (in, ctx) => operation .execute(in) - .map[(Try[Out], Ctx)](res => (Success(res), ctx))(ExecutionContexts.parasitic) - .recover { case t => (Failure(t), ctx) }(ExecutionContexts.parasitic) + .map[(Try[Out], Ctx)](res => (Success(res), ctx))(ExecutionContext.parasitic) + .recover { case t => (Failure(t), ctx) }(ExecutionContext.parasitic) } ) diff --git a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/ItemSpec.scala b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/ItemSpec.scala index 9f88ab3522..86e57e1a9e 100644 --- a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/ItemSpec.scala +++ b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/ItemSpec.scala @@ -19,7 +19,7 @@ import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient import software.amazon.awssdk.services.dynamodb.model.TableStatus import scala.annotation.nowarn -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.ExecutionContext class ItemSpec extends TestKit(ActorSystem("ItemSpec")) with AsyncWordSpecLike with Matchers with BeforeAndAfterAll { diff --git a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TableSpec.scala b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TableSpec.scala index 0c7498044e..fafc2c2d7e 100644 --- a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TableSpec.scala +++ b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TableSpec.scala @@ -15,7 +15,7 @@ import software.amazon.awssdk.auth.credentials.{AwsBasicCredentials, StaticCrede import software.amazon.awssdk.regions.Region import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import org.scalatest.matchers.should.Matchers import 
org.scalatest.wordspec.AsyncWordSpecLike diff --git a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TestOps.scala b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TestOps.scala index 9c83752edd..91093b3a68 100644 --- a/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TestOps.scala +++ b/dynamodb/src/test/scala/akka/stream/alpakka/dynamodb/TestOps.scala @@ -6,7 +6,7 @@ package akka.stream.alpakka.dynamodb import software.amazon.awssdk.services.dynamodb.model._ -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ trait TestOps { diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchConnectionSettings.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchConnectionSettings.scala index d45de15fb5..275fd3e105 100644 --- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchConnectionSettings.scala +++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchConnectionSettings.scala @@ -8,7 +8,7 @@ import akka.http.scaladsl.{ConnectionContext, HttpsConnectionContext} import akka.http.scaladsl.model.HttpHeader import akka.http.scaladsl.model.HttpHeader.ParsingResult -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import javax.net.ssl.SSLContext final class ElasticsearchConnectionSettings private ( diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchWriteSettings.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchWriteSettings.scala index 07979653bf..cf93290611 100644 --- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchWriteSettings.scala +++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ElasticsearchWriteSettings.scala @@ -4,9 +4,8 @@ package akka.stream.alpakka.elasticsearch -import akka.util.JavaDurationConverters._ - import scala.concurrent.duration._ +import scala.jdk.DurationConverters._ trait RetryLogic { def maxRetries: Int @@ -33,7 +32,7 @@ object RetryAtFixedRate { new RetryAtFixedRate(maxRetries, retryInterval) def create(maxRetries: Int, retryInterval: java.time.Duration): RetryAtFixedRate = - new RetryAtFixedRate(maxRetries, retryInterval.asScala) + new RetryAtFixedRate(maxRetries, retryInterval.toScala) } final class RetryWithBackoff(_maxRetries: Int, @@ -53,7 +52,7 @@ object RetryWithBackoff { new RetryWithBackoff(maxRetries, minBackoff, maxBackoff) def create(maxRetries: Int, minBackoff: java.time.Duration, maxBackoff: java.time.Duration): RetryWithBackoff = - new RetryWithBackoff(maxRetries, minBackoff.asScala, maxBackoff.asScala) + new RetryWithBackoff(maxRetries, minBackoff.toScala, maxBackoff.toScala) } /** diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ReadResult.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ReadResult.scala index 452046301c..ad9d41fc41 100644 --- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ReadResult.scala +++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/ReadResult.scala @@ -6,7 +6,7 @@ package akka.stream.alpakka.elasticsearch import akka.annotation.InternalApi -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ /** * Stream element type emitted by Elasticsearch sources. 
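For reference, the DynamoDbOp and DynamoDb hunks above capture the two replacements that recur throughout this patch: scala.jdk.FutureConverters names the CompletionStage-to-Future extension method .asScala (scala-java8-compat used .toScala), and akka.dispatch.ExecutionContexts.parasitic becomes the standard library's scala.concurrent.ExecutionContext.parasitic. A minimal standalone sketch of the new idiom, with illustrative names, not part of the patch:

import java.util.concurrent.CompletableFuture
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.FutureConverters._

// Convert an SDK-style CompletableFuture into a Scala Future, then run a
// cheap continuation on the calling thread via the parasitic executor.
def firstChar(response: CompletableFuture[String]): Future[Option[Char]] =
  response.asScala.map(_.headOption)(ExecutionContext.parasitic)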
@@ -19,7 +19,7 @@ final class ReadResult[T] @InternalApi private[elasticsearch] (val id: String, val version: Option[Long]) { /** Java API */ - def getVersion: java.util.Optional[Long] = version.asJava + def getVersion: java.util.Optional[Long] = version.toJava override def toString = s"""ReadResult(id=$id,source=$source,version=${version.getOrElse("")})""" diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/SourceSettingsBase.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/SourceSettingsBase.scala index 123eafdf2e..c84bab68f4 100644 --- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/SourceSettingsBase.scala +++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/SourceSettingsBase.scala @@ -4,12 +4,12 @@ package akka.stream.alpakka.elasticsearch -import akka.util.JavaDurationConverters._ import java.util.concurrent.TimeUnit import akka.stream.alpakka.elasticsearch.ElasticsearchConnectionSettings import scala.concurrent.duration.FiniteDuration +import scala.jdk.DurationConverters._ /** * Configure Elastiscsearch/OpenSearch sources. @@ -28,7 +28,7 @@ abstract class SourceSettingsBase[Version <: ApiVersionBase, S <: SourceSettings def withScrollDuration(value: FiniteDuration): S = copy(scrollDuration = value) - def withScrollDuration(value: java.time.Duration): S = copy(scrollDuration = value.asScala) + def withScrollDuration(value: java.time.Duration): S = copy(scrollDuration = value.toScala) /** * If includeDocumentVersion is true, '_version' is returned with the search-results diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteMessage.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteMessage.scala index 8a81dc78e3..85822ff4ae 100644 --- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteMessage.scala +++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/WriteMessage.scala @@ -7,8 +7,8 @@ package akka.stream.alpakka.elasticsearch import akka.NotUsed import akka.annotation.InternalApi -import scala.collection.JavaConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ /** * INTERNAL API @@ -143,7 +143,7 @@ final class WriteResult[T2, C2] @InternalApi private[elasticsearch] (val message val success: Boolean = error.isEmpty /** Java API: JSON structure of the Elasticsearch error. */ - def getError: java.util.Optional[String] = error.asJava + def getError: java.util.Optional[String] = error.toJava /** `reason` field value of the Elasticsearch error. 
*/ def errorReason: Option[String] = { @@ -152,7 +152,7 @@ final class WriteResult[T2, C2] @InternalApi private[elasticsearch] (val message } /** Java API: `reason` field value from the Elasticsearch error */ - def getErrorReason: java.util.Optional[String] = errorReason.asJava + def getErrorReason: java.util.Optional[String] = errorReason.toJava override def toString = s"""WriteResult(message=$message,error=$error)""" diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchFlow.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchFlow.scala index da847224ae..8f19c4a968 100644 --- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchFlow.scala +++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchFlow.scala @@ -9,7 +9,7 @@ import akka.annotation.ApiMayChange import akka.stream.alpakka.elasticsearch.{scaladsl, _} import com.fasterxml.jackson.databind.ObjectMapper -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Java API to create Elasticsearch flows. diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSource.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSource.scala index 3b14f8c249..dd1c48427c 100644 --- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSource.scala +++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/javadsl/ElasticsearchSource.scala @@ -13,7 +13,7 @@ import akka.stream.{Attributes, Materializer} import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.databind.node.{ArrayNode, NumericNode} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.ExecutionContext /** diff --git a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/testkit/MessageFactory.scala b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/testkit/MessageFactory.scala index 34a6ca7205..6aff6323b5 100644 --- a/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/testkit/MessageFactory.scala +++ b/elasticsearch/src/main/scala/akka/stream/alpakka/elasticsearch/testkit/MessageFactory.scala @@ -6,7 +6,7 @@ package akka.stream.alpakka.elasticsearch.testkit import akka.annotation.ApiMayChange import akka.stream.alpakka.elasticsearch.{ReadResult, WriteMessage, WriteResult} -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ object MessageFactory { @@ -37,7 +37,7 @@ object MessageFactory { ): ReadResult[T] = new ReadResult( id, source, - version.asScala + version.toScala ) @ApiMayChange def createWriteResult[T, PT]( @@ -57,7 +57,7 @@ object MessageFactory { error: java.util.Optional[String] ): WriteResult[T, PT] = new WriteResult( message, - error.asScala + error.toScala ) } diff --git a/file/src/main/java/akka/stream/alpakka/file/javadsl/DirectoryChangesSource.java b/file/src/main/java/akka/stream/alpakka/file/javadsl/DirectoryChangesSource.java index dc0c091409..bdbf15207c 100644 --- a/file/src/main/java/akka/stream/alpakka/file/javadsl/DirectoryChangesSource.java +++ b/file/src/main/java/akka/stream/alpakka/file/javadsl/DirectoryChangesSource.java @@ -8,10 +8,10 @@ import akka.japi.Pair; import akka.stream.alpakka.file.DirectoryChange; import akka.stream.javadsl.Source; -import akka.util.JavaDurationConverters; import java.nio.file.Path; +import 
scala.jdk.javaapi.DurationConverters; /** * Watches a file system directory and streams change events from it. * @@ -31,7 +31,7 @@ public static Source<Pair<Path, DirectoryChange>, NotUsed> create( return Source.fromGraph( new akka.stream.alpakka.file.impl.DirectoryChangesSource<>( directoryPath, - JavaDurationConverters.asFiniteDuration(pollInterval), + DurationConverters.toScala(pollInterval), maxBufferSize, Pair::apply)); } diff --git a/file/src/main/java/akka/stream/alpakka/file/javadsl/FileTailSource.java b/file/src/main/java/akka/stream/alpakka/file/javadsl/FileTailSource.java index 0162730cbf..7906672ac7 100644 --- a/file/src/main/java/akka/stream/alpakka/file/javadsl/FileTailSource.java +++ b/file/src/main/java/akka/stream/alpakka/file/javadsl/FileTailSource.java @@ -10,8 +10,8 @@ import akka.stream.javadsl.Framing; import akka.stream.javadsl.Source; import akka.util.ByteString; -import akka.util.JavaDurationConverters; -import scala.concurrent.duration.FiniteDuration; + +import scala.jdk.javaapi.DurationConverters; import java.io.FileNotFoundException; import java.nio.charset.Charset; @@ -55,7 +55,8 @@ public static Source<ByteString, NotUsed> create( path, maxChunkSize, startingPosition, - JavaDurationConverters.asFiniteDuration(pollingInterval))); + DurationConverters.toScala(pollingInterval)) + ); } /** diff --git a/file/src/main/scala/akka/stream/alpakka/file/javadsl/LogRotatorSink.scala b/file/src/main/scala/akka/stream/alpakka/file/javadsl/LogRotatorSink.scala index 0680f737d2..72d9cce2bd 100644 --- a/file/src/main/scala/akka/stream/alpakka/file/javadsl/LogRotatorSink.scala +++ b/file/src/main/scala/akka/stream/alpakka/file/javadsl/LogRotatorSink.scala @@ -16,11 +16,11 @@ import akka.stream.javadsl.Sink import akka.util.ByteString import akka.japi.function -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.Future -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.FutureConverters._ +import scala.jdk.OptionConverters._ /** * Java API. 
@@ -70,7 +70,7 @@ object LogRotatorSink { sinkFactory: function.Function[C, Sink[ByteString, CompletionStage[R]]] ): javadsl.Sink[ByteString, CompletionStage[Done]] = { val t: C => scaladsl.Sink[ByteString, Future[R]] = path => - sinkFactory.apply(path).asScala.mapMaterializedValue(_.toScala) + sinkFactory.apply(path).asScala.mapMaterializedValue(_.asScala) new Sink( akka.stream.alpakka.file.scaladsl.LogRotatorSink .withSinkFactory(asScala[C](triggerGeneratorCreator), t) @@ -82,7 +82,7 @@ object LogRotatorSink { f: function.Creator[function.Function[ByteString, Optional[C]]] ): () => ByteString => Option[C] = () => { val fun = f.create() - elem => fun(elem).asScala + elem => fun(elem).toScala } } diff --git a/file/src/main/scala/akka/stream/alpakka/file/scaladsl/LogRotatorSink.scala b/file/src/main/scala/akka/stream/alpakka/file/scaladsl/LogRotatorSink.scala index 969892cdb0..99aa97f30c 100644 --- a/file/src/main/scala/akka/stream/alpakka/file/scaladsl/LogRotatorSink.scala +++ b/file/src/main/scala/akka/stream/alpakka/file/scaladsl/LogRotatorSink.scala @@ -108,7 +108,7 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => sourceOut.complete() } implicit val executionContext: ExecutionContext = - akka.dispatch.ExecutionContexts.parasitic + ExecutionContext.parasitic promise.completeWith(Future.sequence(sinkCompletions).map(_ => Done)) } @@ -165,7 +165,7 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => implicit val ec = materializer.executionContext Future .sequence(sinkCompletions) - .map(_ => Done)(akka.dispatch.ExecutionContexts.parasitic) + .map(_ => Done)(ExecutionContext.parasitic) } def futureCB(newFuture: Future[R]) = @@ -192,7 +192,7 @@ final private class LogRotatorSink[T, C, R](triggerGeneratorCreator: () => T => val holder = new Holder[R](NotYetThere, futureCB(newFuture)) newFuture.onComplete(holder)( - akka.dispatch.ExecutionContexts.parasitic + ExecutionContext.parasitic ) prevOut.foreach(_.complete()) diff --git a/file/src/test/scala/docs/scaladsl/ArchiveSpec.scala b/file/src/test/scala/docs/scaladsl/ArchiveSpec.scala index 0bc3a11cc3..a45b164b0f 100644 --- a/file/src/test/scala/docs/scaladsl/ArchiveSpec.scala +++ b/file/src/test/scala/docs/scaladsl/ArchiveSpec.scala @@ -21,7 +21,7 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import java.util.zip.Deflater -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.{ExecutionContext, Future} class ArchiveSpec diff --git a/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala b/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala index bc72fe30c5..eedc566eef 100644 --- a/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala +++ b/file/src/test/scala/docs/scaladsl/TarArchiveSpec.scala @@ -22,7 +22,7 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import scala.annotation.nowarn -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable import scala.concurrent.{ExecutionContext, Future} diff --git a/ftp/src/main/scala/akka/stream/alpakka/ftp/impl/SftpOperations.scala b/ftp/src/main/scala/akka/stream/alpakka/ftp/impl/SftpOperations.scala index 97dd71ceef..7f5eacbd06 100644 --- a/ftp/src/main/scala/akka/stream/alpakka/ftp/impl/SftpOperations.scala +++ b/ftp/src/main/scala/akka/stream/alpakka/ftp/impl/SftpOperations.scala @@ -19,7 +19,7 @@ import 
net.schmizz.sshj.userauth.password.{PasswordFinder, PasswordUtils, Resour import net.schmizz.sshj.xfer.FilePermission import org.apache.commons.net.DefaultSocketFactory -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable import scala.util.{Failure, Try} diff --git a/ftp/src/main/scala/akka/stream/alpakka/ftp/javadsl/FtpApi.scala b/ftp/src/main/scala/akka/stream/alpakka/ftp/javadsl/FtpApi.scala index 14d8dfabf2..b16074b3d9 100644 --- a/ftp/src/main/scala/akka/stream/alpakka/ftp/javadsl/FtpApi.scala +++ b/ftp/src/main/scala/akka/stream/alpakka/ftp/javadsl/FtpApi.scala @@ -18,7 +18,7 @@ import akka.{Done, NotUsed} import net.schmizz.sshj.SSHClient import org.apache.commons.net.ftp.{FTPClient, FTPSClient} -import scala.compat.java8.FunctionConverters._ +import scala.jdk.FunctionConverters._ @DoNotInherit sealed trait FtpApi[FtpClient, S <: RemoteFileSettings] { factory: FtpSourceFactory[FtpClient, S] => @@ -301,7 +301,7 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { createBrowserGraph( basePath, connectionSettings, - asScalaFromPredicate(branchSelector), + branchSelector.asScala, _emitTraversedDirectories = false ) ) @@ -311,7 +311,7 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { branchSelector: Predicate[FtpFile], emitTraversedDirectories: Boolean): Source[FtpFile, NotUsed] = Source.fromGraph( - createBrowserGraph(basePath, connectionSettings, asScalaFromPredicate(branchSelector), emitTraversedDirectories) + createBrowserGraph(basePath, connectionSettings, branchSelector.asScala, emitTraversedDirectories) ) def fromPath(host: String, path: String): Source[ByteString, CompletionStage[IOResult]] = @@ -335,10 +335,10 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { connectionSettings: S, chunkSize: Int, offset: Long): Source[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ + import scala.jdk.FutureConverters._ Source .fromGraph(createIOSource(path, connectionSettings, chunkSize, offset)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def mkdir(basePath: String, name: String, connectionSettings: S): Source[Done, NotUsed] = @@ -357,8 +357,8 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { } def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.toJava)) + import scala.jdk.FutureConverters._ + Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.asJava)) } def toPath(path: String, connectionSettings: S): Sink[ByteString, CompletionStage[IOResult]] = @@ -366,16 +366,16 @@ object Ftp extends FtpApi[FTPClient, FtpSettings] with FtpSourceParams { def move(destinationPath: Function[FtpFile, String], connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FunctionConverters._ - import scala.compat.java8.FutureConverters._ + import scala.jdk.FunctionConverters._ + import scala.jdk.FutureConverters._ Sink .fromGraph(createMoveSink(destinationPath.asScala, connectionSettings)) - .mapMaterializedValue[CompletionStage[IOResult]](func(_.toJava)) + .mapMaterializedValue[CompletionStage[IOResult]](func(_.asJava)) } def remove(connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = 
{ - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.toJava)) + import scala.jdk.FutureConverters._ + Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.asJava)) } } @@ -398,7 +398,7 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { createBrowserGraph( basePath, connectionSettings, - asScalaFromPredicate(branchSelector), + branchSelector.asScala, _emitTraversedDirectories = false ) ) @@ -408,7 +408,7 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { branchSelector: Predicate[FtpFile], emitTraversedDirectories: Boolean): Source[FtpFile, NotUsed] = Source.fromGraph( - createBrowserGraph(basePath, connectionSettings, asScalaFromPredicate(branchSelector), emitTraversedDirectories) + createBrowserGraph(basePath, connectionSettings, branchSelector.asScala, emitTraversedDirectories) ) def fromPath(host: String, path: String): Source[ByteString, CompletionStage[IOResult]] = @@ -432,10 +432,10 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { connectionSettings: S, chunkSize: Int, offset: Long): Source[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ + import scala.jdk.FutureConverters._ Source .fromGraph(createIOSource(path, connectionSettings, chunkSize, offset)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def mkdir(basePath: String, name: String, connectionSettings: S): Source[Done, NotUsed] = @@ -454,8 +454,8 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { } def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.toJava)) + import scala.jdk.FutureConverters._ + Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.asJava)) } def toPath(path: String, connectionSettings: S): Sink[ByteString, CompletionStage[IOResult]] = @@ -463,16 +463,16 @@ object Ftps extends FtpApi[FTPSClient, FtpsSettings] with FtpsSourceParams { def move(destinationPath: Function[FtpFile, String], connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FunctionConverters._ - import scala.compat.java8.FutureConverters._ + import scala.jdk.FunctionConverters._ + import scala.jdk.FutureConverters._ Sink .fromGraph(createMoveSink(destinationPath.asScala, connectionSettings)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def remove(connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.toJava)) + import scala.jdk.FutureConverters._ + Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.asJava)) } } @@ -496,7 +496,7 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { createBrowserGraph( basePath, connectionSettings, - asScalaFromPredicate(branchSelector), + branchSelector.asScala, _emitTraversedDirectories = false ) ) @@ -506,7 +506,7 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { branchSelector: Predicate[FtpFile], emitTraversedDirectories: Boolean): Source[FtpFile, NotUsed] = 
Source.fromGraph( - createBrowserGraph(basePath, connectionSettings, asScalaFromPredicate(branchSelector), emitTraversedDirectories) + createBrowserGraph(basePath, connectionSettings, branchSelector.asScala, emitTraversedDirectories) ) def fromPath(host: String, path: String): Source[ByteString, CompletionStage[IOResult]] = @@ -530,10 +530,10 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { connectionSettings: S, chunkSize: Int, offset: Long): Source[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ + import scala.jdk.FutureConverters._ Source .fromGraph(createIOSource(path, connectionSettings, chunkSize, offset)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def mkdir(basePath: String, name: String, connectionSettings: S): Source[Done, NotUsed] = @@ -552,8 +552,8 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { def toPath(path: String, connectionSettings: S, append: Boolean): Sink[ByteString, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.toJava)) + import scala.jdk.FutureConverters._ + Sink.fromGraph(createIOSink(path, connectionSettings, append)).mapMaterializedValue(func(_.asJava)) } def toPath(path: String, connectionSettings: S): Sink[ByteString, CompletionStage[IOResult]] = @@ -561,16 +561,16 @@ class SftpApi extends FtpApi[SSHClient, SftpSettings] with SftpSourceParams { def move(destinationPath: Function[FtpFile, String], connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FunctionConverters._ - import scala.compat.java8.FutureConverters._ + import scala.jdk.FunctionConverters._ + import scala.jdk.FutureConverters._ Sink .fromGraph(createMoveSink(destinationPath.asScala, connectionSettings)) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } def remove(connectionSettings: S): Sink[FtpFile, CompletionStage[IOResult]] = { - import scala.compat.java8.FutureConverters._ - Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.toJava)) + import scala.jdk.FutureConverters._ + Sink.fromGraph(createRemoveSink(connectionSettings)).mapMaterializedValue(func(_.asJava)) } } diff --git a/geode/src/main/java/akka/stream/alpakka/geode/javadsl/Geode.java b/geode/src/main/java/akka/stream/alpakka/geode/javadsl/Geode.java index fdb055b2e0..f31be5d4e0 100644 --- a/geode/src/main/java/akka/stream/alpakka/geode/javadsl/Geode.java +++ b/geode/src/main/java/akka/stream/alpakka/geode/javadsl/Geode.java @@ -19,7 +19,7 @@ import akka.stream.javadsl.Source; import org.apache.geode.cache.client.ClientCacheFactory; -import scala.compat.java8.FutureConverters; +import scala.jdk.javaapi.FutureConverters; import java.util.concurrent.CompletionStage; @@ -42,7 +42,7 @@ public <V> Source<V, CompletionStage<Done>> query(String query, AkkaPdxSerialize registerPDXSerializer(serializer, serializer.clazz()); return Source.fromGraph(new GeodeFiniteSourceStage<V>(cache(), query)) - .mapMaterializedValue(FutureConverters::toJava); + .mapMaterializedValue(FutureConverters::asJava); } public Flow flow( diff --git a/geode/src/main/java/akka/stream/alpakka/geode/javadsl/GeodeWithPoolSubscription.java b/geode/src/main/java/akka/stream/alpakka/geode/javadsl/GeodeWithPoolSubscription.java index b056488285..123bf654b1 100--- 
a/geode/src/main/java/akka/stream/alpakka/geode/javadsl/GeodeWithPoolSubscription.java +++ b/geode/src/main/java/akka/stream/alpakka/geode/javadsl/GeodeWithPoolSubscription.java @@ -14,7 +14,7 @@ import org.apache.geode.cache.query.CqQuery; import org.apache.geode.cache.query.QueryService; -import scala.compat.java8.FutureConverters; +import scala.jdk.javaapi.FutureConverters; import java.util.concurrent.CompletionStage; @@ -38,7 +38,7 @@ public <V> Source<V, CompletionStage<Done>> continuousQuery( String queryName, String query, AkkaPdxSerializer<V> serializer) { registerPDXSerializer(serializer, serializer.clazz()); return Source.fromGraph(new GeodeContinuousSourceStage<V>(cache(), queryName, query)) - .mapMaterializedValue(FutureConverters::toJava); + .mapMaterializedValue(FutureConverters::asJava); } public boolean closeContinuousQuery(String name) throws CqException { diff --git a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/ProtobufConverters.scala b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/ProtobufConverters.scala index b66171e889..a2b8b897c7 100644 --- a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/ProtobufConverters.scala +++ b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/ProtobufConverters.scala @@ -9,7 +9,7 @@ import com.google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions import com.google.cloud.bigquery.storage.v1.stream.ReadSession import scalapb.UnknownFieldSet -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Internal API diff --git a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/impl/ArrowSource.scala b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/impl/ArrowSource.scala index 66e0cdd768..df5b5f7d52 100644 --- a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/impl/ArrowSource.scala +++ b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/impl/ArrowSource.scala @@ -18,7 +18,7 @@ import org.apache.arrow.vector.util.ByteArrayReadableSeekableByteChannel import scala.collection.mutable import scala.collection.mutable.ListBuffer -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ object ArrowSource { diff --git a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryArrowStorage.scala b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryArrowStorage.scala index 64b74e4a32..f48529d7a5 100644 --- a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryArrowStorage.scala +++ b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryArrowStorage.scala @@ -12,8 +12,8 @@ import akka.stream.alpakka.googlecloud.bigquery.storage.{scaladsl => scstorage} import com.google.cloud.bigquery.storage.v1.arrow.{ArrowRecordBatch, ArrowSchema} import java.util.concurrent.CompletionStage -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters.FutureOps +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters.FutureOps /** * Google BigQuery Storage Api Akka Stream operator factory using Arrow Format. 
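For reference, Java sources such as the Geode javadsl files above cannot use Scala extension methods, so they switch to scala.jdk.javaapi.FutureConverters, which exposes plain static-style asJava/asScala methods (hence the FutureConverters::asJava method reference). A minimal illustrative sketch, not part of the patch:

import scala.concurrent.Future
import scala.jdk.javaapi.FutureConverters

// The javaapi converters are ordinary methods on an object, no implicits
// involved, so they are callable from Java as well as Scala.
val stage: java.util.concurrent.CompletionStage[Int] =
  FutureConverters.asJava(Future.successful(1))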
@@ -59,7 +59,7 @@ object BigQueryArrowStorage { stream.asJava }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def readRecords(projectId: String, datasetId: String, @@ -104,7 +104,7 @@ object BigQueryArrowStorage { stream.map(_.asJava).asJava }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def readMerged(projectId: String, datasetId: String, @@ -149,7 +149,7 @@ object BigQueryArrowStorage { (stream._1, stream._2.asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def read( projectId: String, @@ -196,6 +196,6 @@ object BigQueryArrowStorage { (stream._1, stream._2.map(_.asJava).asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) } diff --git a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryAvroStorage.scala b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryAvroStorage.scala index 681f7c716c..fa5510fd79 100644 --- a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryAvroStorage.scala +++ b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryAvroStorage.scala @@ -11,8 +11,8 @@ import com.google.cloud.bigquery.storage.v1.avro.{AvroRows, AvroSchema} import com.google.cloud.bigquery.storage.v1.stream.ReadSession.TableReadOptions import java.util.concurrent.CompletionStage -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters.FutureOps +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters.FutureOps /** * Google BigQuery Storage Api Akka Stream operator factory using Avro Format. 
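For reference, the repeated .asJava chains in the BigQueryArrowStorage hunks above all follow one wrapper pattern: convert the stream with .asJava and its materialized Future with _.asJava. A minimal sketch of that pattern under illustrative names:

import java.util.concurrent.CompletionStage
import akka.NotUsed
import scala.concurrent.Future
import scala.jdk.FutureConverters._

// Expose a scaladsl Source (materializing a Future) to Java callers.
def forJavaCallers[T](src: akka.stream.scaladsl.Source[T, Future[NotUsed]])
    : akka.stream.javadsl.Source[T, CompletionStage[NotUsed]] =
  src.mapMaterializedValue(_.asJava).asJava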
@@ -58,7 +58,7 @@ object BigQueryAvroStorage { stream.asJava }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def readRecords(projectId: String, datasetId: String, @@ -103,7 +103,7 @@ object BigQueryAvroStorage { stream.map(_.asJava).asJava }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def readMerged(projectId: String, datasetId: String, @@ -144,7 +144,7 @@ object BigQueryAvroStorage { (stream._1, stream._2.asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) def read(projectId: String, datasetId: String, @@ -189,6 +189,6 @@ object BigQueryAvroStorage { (stream._1, stream._2.map(_.asJava).asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) } diff --git a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryStorage.scala b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryStorage.scala index 2d7ab9397e..cbb3448ffc 100644 --- a/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryStorage.scala +++ b/google-cloud-bigquery-storage/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/BigQueryStorage.scala @@ -17,8 +17,8 @@ import com.google.cloud.bigquery.storage.v1.storage.ReadRowsResponse import com.google.cloud.bigquery.storage.v1.stream.ReadSession import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters.FutureOps -import scala.collection.JavaConverters._ +import scala.jdk.FutureConverters.FutureOps +import scala.jdk.CollectionConverters._ /** * Google BigQuery Storage Api Akka Stream operator factory. @@ -116,7 +116,7 @@ object BigQueryStorage { (stream._1, stream._2.map(_.asJava).asJava) }) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) /** * Create a source that contains a number of sources, one for each stream, or section of the table data. 
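For reference, BigQueryStorage above imports scala.jdk.FutureConverters.FutureOps rather than the wildcard; importing just that implicit class enables .asJava on Future without also bringing the CompletionStage-to-Future direction into scope. A small illustrative sketch:

import scala.concurrent.Future
import scala.jdk.FutureConverters.FutureOps

// Only the Future -> CompletionStage enrichment is in scope here.
val stage: java.util.concurrent.CompletionStage[String] =
  Future.successful("ok").asJava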
@@ -207,6 +207,6 @@ object BigQueryStorage { scstorage.BigQueryStorage .createMergedStreams(projectId, datasetId, tableId, dataFormat, readOptions.map(_.asScala()), maxNumStreams)(um) .asJava - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) } diff --git a/google-cloud-bigquery-storage/src/test/java/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/AvroByteStringDecoder.scala b/google-cloud-bigquery-storage/src/test/java/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/AvroByteStringDecoder.scala index 0b0dc10585..6b8f653f3e 100644 --- a/google-cloud-bigquery-storage/src/test/java/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/AvroByteStringDecoder.scala +++ b/google-cloud-bigquery-storage/src/test/java/akka/stream/alpakka/googlecloud/bigquery/storage/javadsl/AvroByteStringDecoder.scala @@ -16,7 +16,7 @@ import org.apache.avro.io.DecoderFactory import java.util import scala.collection.mutable.ListBuffer import scala.concurrent.{ExecutionContext, Future} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class AvroByteStringDecoder(schema: Schema) extends FromByteStringUnmarshaller[java.util.List[BigQueryRecord]] { diff --git a/google-cloud-bigquery-storage/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/storage/scaladsl/ArrowByteStringDecoder.scala b/google-cloud-bigquery-storage/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/storage/scaladsl/ArrowByteStringDecoder.scala index 16e7dcbbda..ac17d940b6 100644 --- a/google-cloud-bigquery-storage/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/storage/scaladsl/ArrowByteStringDecoder.scala +++ b/google-cloud-bigquery-storage/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/storage/scaladsl/ArrowByteStringDecoder.scala @@ -18,7 +18,7 @@ import org.apache.arrow.vector.util.ByteArrayReadableSeekableByteChannel import scala.collection.mutable import scala.collection.mutable.ListBuffer import scala.concurrent.{ExecutionContext, Future} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class ArrowByteStringDecoder(val schema: ArrowSchema) extends FromByteStringUnmarshaller[List[BigQueryRecord]] { diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/BigQuerySettings.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/BigQuerySettings.scala index e356c1f137..70309ac813 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/BigQuerySettings.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/BigQuerySettings.scala @@ -6,11 +6,12 @@ package akka.stream.alpakka.googlecloud.bigquery import akka.actor.ClassicActorSystemProvider import akka.annotation.InternalApi -import akka.util.JavaDurationConverters._ import com.typesafe.config.Config import java.time + import scala.concurrent.duration._ +import scala.jdk.DurationConverters._ object BigQuerySettings { val ConfigPath = "alpakka.google.bigquery" @@ -19,7 +20,7 @@ object BigQuerySettings { * Reads from the given config. */ def apply(c: Config): BigQuerySettings = - BigQuerySettings(c.getDuration("load-job-per-table-quota").asScala) + BigQuerySettings(c.getDuration("load-job-per-table-quota").toScala) /** * Java API: Reads from the given config. 
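For reference, the BigQuerySettings hunks here show the duration migration: scala.jdk.DurationConverters replaces akka.util.JavaDurationConverters, and the extension methods are renamed to .toScala/.toJava. A minimal sketch with an illustrative config key:

import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._
import scala.jdk.DurationConverters._

// Typesafe Config hands back java.time.Duration; .toScala turns it into a
// FiniteDuration, and .toJava goes the other way.
val quota: FiniteDuration = ConfigFactory.parseString("quota = 10s").getDuration("quota").toScala
val poll: java.time.Duration = 5.seconds.toJava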
@@ -46,14 +47,14 @@ object BigQuerySettings { /** * Java API */ - def create(loadJobPerTableQuota: time.Duration) = BigQuerySettings(loadJobPerTableQuota.asScala) + def create(loadJobPerTableQuota: time.Duration) = BigQuerySettings(loadJobPerTableQuota.toScala) } final case class BigQuerySettings @InternalApi private (loadJobPerTableQuota: FiniteDuration) { - def getLoadJobPerTableQuota = loadJobPerTableQuota.asJava + def getLoadJobPerTableQuota = loadJobPerTableQuota.toJava def withLoadJobPerTableQuota(loadJobPerTableQuota: FiniteDuration) = copy(loadJobPerTableQuota = loadJobPerTableQuota) def withLoadJobPerTableQuota(loadJobPerTableQuota: time.Duration) = - copy(loadJobPerTableQuota = loadJobPerTableQuota.asScala) + copy(loadJobPerTableQuota = loadJobPerTableQuota.toScala) } diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/javadsl/BigQuery.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/javadsl/BigQuery.scala index 8f2a3b8ea4..9d1d2c9d14 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/javadsl/BigQuery.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/javadsl/BigQuery.scala @@ -34,9 +34,9 @@ import java.util.concurrent.CompletionStage import java.{lang, util} import scala.annotation.nowarn -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.duration.{FiniteDuration, MILLISECONDS} /** @@ -57,7 +57,7 @@ object BigQuery extends Google { def listDatasets(maxResults: util.OptionalInt, all: util.Optional[lang.Boolean], filter: util.Map[String, String]): Source[Dataset, NotUsed] = - ScalaBigQuery.datasets(maxResults.asScala, all.asScala.map(_.booleanValue), filter.asScala.toMap).asJava + ScalaBigQuery.datasets(maxResults.toScala, all.toScala.map(_.booleanValue), filter.asScala.toMap).asJava /** * Returns the specified dataset. @@ -71,7 +71,7 @@ object BigQuery extends Google { def getDataset(datasetId: String, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Dataset] = - ScalaBigQuery.dataset(datasetId)(system, settings).toJava + ScalaBigQuery.dataset(datasetId)(system, settings).asJava /** * Creates a new empty dataset. @@ -85,7 +85,7 @@ object BigQuery extends Google { def createDataset(datasetId: String, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Dataset] = - ScalaBigQuery.createDataset(datasetId)(system, settings).toJava + ScalaBigQuery.createDataset(datasetId)(system, settings).asJava /** * Creates a new empty dataset. @@ -99,7 +99,7 @@ object BigQuery extends Google { def createDataset(dataset: Dataset, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Dataset] = - ScalaBigQuery.createDataset(dataset)(system, settings).toJava + ScalaBigQuery.createDataset(dataset)(system, settings).asJava /** * Deletes the dataset specified by the datasetId value. @@ -114,7 +114,7 @@ object BigQuery extends Google { deleteContents: Boolean, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Done] = - ScalaBigQuery.deleteDataset(datasetId, deleteContents)(system, settings).toJava + ScalaBigQuery.deleteDataset(datasetId, deleteContents)(system, settings).asJava /** * Lists all tables in the specified dataset. 
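For reference, the listDatasets hunk above relies on scala.jdk.OptionConverters, whose extension methods are named .toScala/.toJava (scala-java8-compat used .asScala/.asJava); the primitive wrappers such as java.util.OptionalInt are covered as well. A minimal illustrative sketch:

import java.util.{Optional, OptionalInt}
import scala.jdk.OptionConverters._

// Optional <-> Option, including the unboxed Optional variants.
val max: Option[Int] = OptionalInt.of(100).toScala
val name: Optional[String] = Some("dataset").toJava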
@@ -125,7 +125,7 @@ object BigQuery extends Google { * @return a [[akka.stream.javadsl.Source]] that emits each [[akka.stream.alpakka.googlecloud.bigquery.model.Table]] in the dataset and materializes a [[java.util.concurrent.CompletionStage]] containing the [[akka.stream.alpakka.googlecloud.bigquery.model.TableListResponse]] */ def listTables(datasetId: String, maxResults: util.OptionalInt): Source[Table, CompletionStage[TableListResponse]] = - ScalaBigQuery.tables(datasetId, maxResults.asScala).mapMaterializedValue(_.toJava).asJava + ScalaBigQuery.tables(datasetId, maxResults.toScala).mapMaterializedValue(_.asJava).asJava /** * Gets the specified table resource. This method does not return the data in the table, it only returns the table resource, which describes the structure of this table. @@ -141,7 +141,7 @@ object BigQuery extends Google { tableId: String, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Table] = - ScalaBigQuery.table(datasetId, tableId)(system, settings).toJava + ScalaBigQuery.table(datasetId, tableId)(system, settings).asJava /** * Creates a new, empty table in the dataset. @@ -171,7 +171,7 @@ object BigQuery extends Google { * @return a [[java.util.concurrent.CompletionStage]] containing the [[akka.stream.alpakka.googlecloud.bigquery.model.Table]] */ def createTable(table: Table, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Table] = - ScalaBigQuery.createTable(table)(system, settings).toJava + ScalaBigQuery.createTable(table)(system, settings).asJava /** * Deletes the specified table from the dataset. If the table contains data, all the data will be deleted. @@ -187,7 +187,7 @@ object BigQuery extends Google { tableId: String, settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Done] = - ScalaBigQuery.deleteTable(datasetId, tableId)(system, settings).toJava + ScalaBigQuery.deleteTable(datasetId, tableId)(system, settings).asJava /** * Lists the content of a table in rows. 
@@ -212,8 +212,8 @@ object BigQuery extends Google { ): Source[Out, CompletionStage[TableDataListResponse[Out]]] = { implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery - .tableData(datasetId, tableId, startIndex.asScala, maxResults.asScala, selectedFields.asScala.toList) - .mapMaterializedValue(_.toJava) + .tableData(datasetId, tableId, startIndex.toScala, maxResults.toScala, selectedFields.asScala.toList) + .mapMaterializedValue(_.asJava) .asJava } @@ -239,7 +239,7 @@ object BigQuery extends Google { implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] ss.Flow[util.List[In]] .map(_.asScala.toList) - .to(ScalaBigQuery.insertAll[In](datasetId, tableId, retryPolicy, templateSuffix.asScala)) + .to(ScalaBigQuery.insertAll[In](datasetId, tableId, retryPolicy, templateSuffix.toScala)) .asJava[util.List[In]] } @@ -283,7 +283,7 @@ object BigQuery extends Google { unmarshaller: Unmarshaller[HttpEntity, QueryResponse[Out]] ): Source[Out, CompletionStage[QueryResponse[Out]]] = { implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] - ScalaBigQuery.query(query, dryRun, useLegacySql).mapMaterializedValue(_.toJava).asJava + ScalaBigQuery.query(query, dryRun, useLegacySql).mapMaterializedValue(_.asJava).asJava } /** @@ -306,7 +306,7 @@ object BigQuery extends Google { .query(query) .mapMaterializedValue { case (jobReference, queryResponse) => - Pair(jobReference.toJava, queryResponse.toJava) + Pair(jobReference.asJava, queryResponse.asJava) } .asJava } @@ -335,11 +335,11 @@ object BigQuery extends Google { implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] ScalaBigQuery .queryResults(jobId, - startIndex.asScala, - maxResults.asScala, - timeout.asScala.map(d => FiniteDuration(d.toMillis, MILLISECONDS)), - location.asScala) - .mapMaterializedValue(_.toJava) + startIndex.toScala, + maxResults.toScala, + timeout.toScala.map(d => FiniteDuration(d.toMillis, MILLISECONDS)), + location.toScala) + .mapMaterializedValue(_.asJava) .asJava } @@ -357,7 +357,7 @@ object BigQuery extends Google { location: util.Optional[String], settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[Job] = - ScalaBigQuery.job(jobId, location.asScala)(system, settings).toJava + ScalaBigQuery.job(jobId, location.toScala)(system, settings).asJava /** * Requests that a job be cancelled. @@ -373,7 +373,7 @@ object BigQuery extends Google { location: util.Optional[String], settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[JobCancelResponse] = - ScalaBigQuery.cancelJob(jobId, location.asScala)(system, settings).toJava + ScalaBigQuery.cancelJob(jobId, location.toScala)(system, settings).asJava /** * Loads data into BigQuery via a series of asynchronous load jobs created at the rate [[akka.stream.alpakka.googlecloud.bigquery.BigQuerySettings.loadJobPerTableQuota]]. 
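For reference, several hunks in this file, including the insertAllAsync hunk just below, convert a java.util.Optional that wraps a Java collection, which takes both converter imports. A minimal sketch with illustrative names:

import java.util.{Map => JMap, Optional}
import scala.jdk.CollectionConverters._
import scala.jdk.OptionConverters._

// OptionConverters unwraps the Optional; CollectionConverters converts the
// Java map inside it into an immutable Scala Map.
def asScalaLabels(labels: Optional[JMap[String, String]]): Option[Map[String, String]] =
  labels.toScala.map(_.asScala.toMap)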
@@ -409,7 +409,7 @@ object BigQuery extends Google { labels: util.Optional[util.Map[String, String]], marshaller: Marshaller[In, RequestEntity]): Flow[In, Job, NotUsed] = { implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] - ScalaBigQuery.insertAllAsync[In](datasetId, tableId, labels.asScala.map(_.asScala.toMap)).asJava[In] + ScalaBigQuery.insertAllAsync[In](datasetId, tableId, labels.toScala.map(_.asScala.toMap)).asJava[In] } /** @@ -432,7 +432,7 @@ object BigQuery extends Google { ): Sink[ByteString, CompletionStage[Job]] = { implicit val m = marshaller.asScalaCastOutput[sm.RequestEntity] implicit val um = unmarshaller.asScalaCastInput[sm.HttpEntity] - ScalaBigQuery.createLoadJob(job).mapMaterializedValue(_.toJava).asJava[ByteString] + ScalaBigQuery.createLoadJob(job).mapMaterializedValue(_.asJava).asJava[ByteString] } } diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/DatasetJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/DatasetJsonProtocol.scala index 331735281a..41e7ea9713 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/DatasetJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/DatasetJsonProtocol.scala @@ -9,9 +9,9 @@ import akka.stream.alpakka.googlecloud.bigquery.scaladsl.spray.BigQueryRestJsonP import spray.json.{JsonFormat, RootJsonFormat} import java.util -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ /** * Dataset resource model @@ -28,9 +28,9 @@ final case class Dataset private (datasetReference: DatasetReference, location: Option[String]) { def getDatasetReference = datasetReference - def getFriendlyName = friendlyName.asJava - def getLabels = labels.map(_.asJava).asJava - def getLocation = location.asJava + def getFriendlyName = friendlyName.toJava + def getLabels = labels.map(_.asJava).toJava + def getLocation = location.toJava def withDatasetReference(datasetReference: DatasetReference) = copy(datasetReference = datasetReference) @@ -38,15 +38,15 @@ final case class Dataset private (datasetReference: DatasetReference, def withFriendlyName(friendlyName: Option[String]) = copy(friendlyName = friendlyName) def withFriendlyName(friendlyName: util.Optional[String]) = - copy(friendlyName = friendlyName.asScala) + copy(friendlyName = friendlyName.toScala) def withLabels(labels: Option[Map[String, String]]) = copy(labels = labels) def withLabels(labels: util.Optional[util.Map[String, String]]) = - copy(labels = labels.asScala.map(_.asScala.toMap)) + copy(labels = labels.toScala.map(_.asScala.toMap)) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) } object Dataset { @@ -65,7 +65,7 @@ object Dataset { friendlyName: util.Optional[String], labels: util.Optional[util.Map[String, String]], location: util.Optional[String]) = - Dataset(datasetReference, friendlyName.asScala, labels.asScala.map(_.asScala.toMap), location.asScala) + Dataset(datasetReference, friendlyName.toScala, labels.toScala.map(_.asScala.toMap), location.toScala) implicit val format: RootJsonFormat[Dataset] = jsonFormat4(apply) } @@ -79,18 +79,18 @@ object Dataset { */ final case class DatasetReference private (datasetId: Option[String], projectId: Option[String]) { - 
def getDatasetId = datasetId.asJava - def getProjectId = projectId.asJava + def getDatasetId = datasetId.toJava + def getProjectId = projectId.toJava def withDatasetId(datasetId: Option[String]) = copy(datasetId = datasetId) def withDatasetId(datasetId: util.Optional[String]) = - copy(datasetId = datasetId.asScala) + copy(datasetId = datasetId.toScala) def withProjectId(projectId: Option[String]) = copy(projectId = projectId) def withProjectId(projectId: util.Optional[String]) = - copy(projectId = projectId.asScala) + copy(projectId = projectId.toScala) } object DatasetReference { @@ -104,7 +104,7 @@ object DatasetReference { * @return a [[DatasetReference]] */ def create(datasetId: util.Optional[String], projectId: util.Optional[String]) = - DatasetReference(datasetId.asScala, projectId.asScala) + DatasetReference(datasetId.toScala, projectId.toScala) implicit val format: JsonFormat[DatasetReference] = jsonFormat2(apply) } @@ -118,18 +118,18 @@ object DatasetReference { */ final case class DatasetListResponse private (nextPageToken: Option[String], datasets: Option[Seq[Dataset]]) { - def getNextPageToken = nextPageToken.asJava - def getDatasets = datasets.map(_.asJava).asJava + def getNextPageToken = nextPageToken.toJava + def getDatasets = datasets.map(_.asJava).toJava def withNextPageToken(nextPageToken: Option[String]) = copy(nextPageToken = nextPageToken) def withNextPageToken(nextPageToken: util.Optional[String]) = - copy(nextPageToken = nextPageToken.asScala) + copy(nextPageToken = nextPageToken.toScala) def withDatasets(datasets: Option[Seq[Dataset]]) = copy(datasets = datasets) def withDatasets(datasets: util.Optional[util.List[Dataset]]) = - copy(datasets = datasets.asScala.map(_.asScala.toList)) + copy(datasets = datasets.toScala.map(_.asScala.toList)) } object DatasetListResponse { @@ -143,7 +143,7 @@ object DatasetListResponse { * @return a [[DatasetListResponse]] */ def create(nextPageToken: util.Optional[String], datasets: util.Optional[util.List[Dataset]]) = - DatasetListResponse(nextPageToken.asScala, datasets.asScala.map(_.asScala.toList)) + DatasetListResponse(nextPageToken.toScala, datasets.toScala.map(_.asScala.toList)) implicit val format: RootJsonFormat[DatasetListResponse] = jsonFormat2(apply) implicit val paginated: Paginated[DatasetListResponse] = _.nextPageToken diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala index 7596c29d5e..d5f3a9e542 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/ErrorProtoJsonProtocol.scala @@ -11,7 +11,7 @@ import spray.json.JsonFormat import java.util import scala.annotation.nowarn -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ /** * ErrorProto model @@ -30,24 +30,24 @@ final case class ErrorProto private (reason: Option[String], location: Option[St @JsonProperty(value = "message") message: String) = this(Option(reason), Option(location), Option(message)) - def getReason = reason.asJava - def getLocation = location.asJava - def getMessage = message.asJava + def getReason = reason.toJava + def getLocation = location.toJava + def getMessage = message.toJava def withReason(reason: Option[String]) = copy(reason = reason) def withReason(reason: 
util.Optional[String]) = - copy(reason = reason.asScala) + copy(reason = reason.toScala) def withLocation(location: Option[String]) = copy(location = location) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) def withMessage(message: Option[String]) = copy(message = message) def withMessage(message: util.Optional[String]) = - copy(message = message.asScala) + copy(message = message.toScala) } object ErrorProto { @@ -62,7 +62,7 @@ object ErrorProto { * @return an [[ErrorProto]] */ def create(reason: util.Optional[String], location: util.Optional[String], message: util.Optional[String]) = - ErrorProto(reason.asScala, location.asScala, message.asScala) + ErrorProto(reason.toScala, location.toScala, message.toScala) implicit val format: JsonFormat[ErrorProto] = jsonFormat3(apply) } diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/JobJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/JobJsonProtocol.scala index 366d8872d1..f6a1272820 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/JobJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/JobJsonProtocol.scala @@ -11,9 +11,9 @@ import spray.json.{JsonFormat, RootJsonFormat} import java.util import scala.annotation.nowarn -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ /** * Job model @@ -27,24 +27,24 @@ final case class Job private (configuration: Option[JobConfiguration], jobReference: Option[JobReference], status: Option[JobStatus]) { - def getConfiguration = configuration.asJava - def getJobReference = jobReference.asJava - def getStatus = status.asJava + def getConfiguration = configuration.toJava + def getJobReference = jobReference.toJava + def getStatus = status.toJava def withConfiguration(configuration: Option[JobConfiguration]) = copy(configuration = configuration) def withConfiguration(configuration: util.Optional[JobConfiguration]) = - copy(configuration = configuration.asScala) + copy(configuration = configuration.toScala) def withJobReference(jobReference: Option[JobReference]) = copy(jobReference = jobReference) def withJobReference(jobReference: util.Optional[JobReference]) = - copy(jobReference = jobReference.asScala) + copy(jobReference = jobReference.toScala) def withStatus(status: Option[JobStatus]) = copy(status = status) def withStatus(status: util.Optional[JobStatus]) = - copy(status = status.asScala) + copy(status = status.toScala) } object Job { @@ -61,7 +61,7 @@ object Job { def create(configuration: util.Optional[JobConfiguration], jobReference: util.Optional[JobReference], status: util.Optional[JobStatus]) = - Job(configuration.asScala, jobReference.asScala, status.asScala) + Job(configuration.toScala, jobReference.toScala, status.toScala) implicit val format: RootJsonFormat[Job] = jsonFormat3(apply) } @@ -74,18 +74,18 @@ object Job { * @param labels the labels associated with this job */ final case class JobConfiguration private (load: Option[JobConfigurationLoad], labels: Option[Map[String, String]]) { - def getLoad = load.asJava - def getLabels = labels.asJava + def getLoad = load.toJava + def getLabels = labels.toJava def withLoad(load: Option[JobConfigurationLoad]) = copy(load = load) 
def withLoad(load: util.Optional[JobConfigurationLoad]) = - copy(load = load.asScala) + copy(load = load.toScala) def withLabels(labels: Option[Map[String, String]]) = copy(labels = labels) def withLabels(labels: util.Optional[util.Map[String, String]]) = - copy(labels = labels.asScala.map(_.asScala.toMap)) + copy(labels = labels.toScala.map(_.asScala.toMap)) } object JobConfiguration { @@ -108,7 +108,7 @@ object JobConfiguration { * @return a [[JobConfiguration]] */ def create(load: util.Optional[JobConfigurationLoad]) = - JobConfiguration(load.asScala) + JobConfiguration(load.toScala) /** * Java API: JobConfiguration model @@ -119,7 +119,7 @@ object JobConfiguration { * @return a [[JobConfiguration]] */ def create(load: util.Optional[JobConfigurationLoad], labels: util.Optional[util.Map[String, String]]) = - JobConfiguration(load.asScala, labels.asScala.map(_.asScala.toMap)) + JobConfiguration(load.toScala, labels.toScala.map(_.asScala.toMap)) implicit val format: JsonFormat[JobConfiguration] = jsonFormat2(apply) } @@ -140,36 +140,36 @@ final case class JobConfigurationLoad private (schema: Option[TableSchema], writeDisposition: Option[WriteDisposition], sourceFormat: Option[SourceFormat]) { - def getSchema = schema.asJava - def getDestinationTable = destinationTable.asJava - def getCreateDisposition = createDisposition.asJava - def getWriteDisposition = writeDisposition.asJava - def getSourceFormat = sourceFormat.asJava + def getSchema = schema.toJava + def getDestinationTable = destinationTable.toJava + def getCreateDisposition = createDisposition.toJava + def getWriteDisposition = writeDisposition.toJava + def getSourceFormat = sourceFormat.toJava def withSchema(schema: Option[TableSchema]) = copy(schema = schema) def withSchema(schema: util.Optional[TableSchema]) = - copy(schema = schema.asScala) + copy(schema = schema.toScala) def withDestinationTable(destinationTable: Option[TableReference]) = copy(destinationTable = destinationTable) def withDestinationTable(destinationTable: util.Optional[TableReference]) = - copy(destinationTable = destinationTable.asScala) + copy(destinationTable = destinationTable.toScala) def withCreateDisposition(createDisposition: Option[CreateDisposition]) = copy(createDisposition = createDisposition) def withCreateDisposition(createDisposition: util.Optional[CreateDisposition]) = - copy(createDisposition = createDisposition.asScala) + copy(createDisposition = createDisposition.toScala) def withWriteDisposition(writeDisposition: Option[WriteDisposition]) = copy(writeDisposition = writeDisposition) def withWriteDisposition(writeDisposition: util.Optional[WriteDisposition]) = - copy(writeDisposition = writeDisposition.asScala) + copy(writeDisposition = writeDisposition.toScala) def withSourceFormat(sourceFormat: Option[SourceFormat]) = copy(sourceFormat = sourceFormat) def withSourceFormat(sourceFormat: util.Optional[SourceFormat]) = - copy(sourceFormat = sourceFormat.asScala) + copy(sourceFormat = sourceFormat.toScala) } object JobConfigurationLoad { @@ -191,11 +191,11 @@ object JobConfigurationLoad { writeDisposition: util.Optional[WriteDisposition], sourceFormat: util.Optional[SourceFormat]) = JobConfigurationLoad( - schema.asScala, - destinationTable.asScala, - createDisposition.asScala, - writeDisposition.asScala, - sourceFormat.asScala + schema.toScala, + destinationTable.toScala, + createDisposition.toScala, + writeDisposition.toScala, + sourceFormat.toScala ) implicit val configurationLoadFormat: JsonFormat[JobConfigurationLoad] = jsonFormat5(apply) 
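The model-class hunks in the rest of this file, and in the protocols that follow, are all mechanical applications of the same renames to the paired Scala/Java setter overloads. A condensed sketch of that recurring shape, with an invented `ModelSketch` class:

```scala
import java.util

import scala.jdk.CollectionConverters._
import scala.jdk.OptionConverters._

final case class ModelSketch(labels: Option[Map[String, String]]) {
  // Java API getter: inner Map converted with asJava, outer Option with toJava
  def getLabels: util.Optional[util.Map[String, String]] = labels.map(_.asJava).toJava

  // Scala API setter
  def withLabels(labels: Option[Map[String, String]]): ModelSketch = copy(labels = labels)

  // Java API setter: Optional#asScala is now Optional#toScala
  def withLabels(labels: util.Optional[util.Map[String, String]]): ModelSketch =
    copy(labels = labels.toScala.map(_.asScala.toMap))
}
```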
@@ -269,24 +269,24 @@ final case class JobReference private (projectId: Option[String], jobId: Option[ @JsonProperty("location") location: String) = this(Option(projectId), Option(jobId), Option(location)) - def getProjectId = projectId.asJava - def getJobId = jobId.asJava - def getLocation = location.asJava + def getProjectId = projectId.toJava + def getJobId = jobId.toJava + def getLocation = location.toJava def withProjectId(projectId: Option[String]) = copy(projectId = projectId) def withProjectId(projectId: util.Optional[String]) = - copy(projectId = projectId.asScala) + copy(projectId = projectId.toScala) def withJobId(jobId: Option[String]) = copy(jobId = jobId) def withJobId(jobId: util.Optional[String]) = - copy(jobId = jobId.asScala) + copy(jobId = jobId.toScala) def withLocation(location: Option[String]) = copy(location = location) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) } object JobReference { @@ -301,7 +301,7 @@ object JobReference { * @return a [[JobReference]] */ def create(projectId: util.Optional[String], jobId: util.Optional[String], location: util.Optional[String]) = - JobReference(projectId.asScala, jobId.asScala, location.asScala) + JobReference(projectId.toScala, jobId.toScala, location.toScala) implicit val format: JsonFormat[JobReference] = jsonFormat3(apply) } @@ -316,19 +316,19 @@ object JobReference { */ final case class JobStatus private (errorResult: Option[ErrorProto], errors: Option[Seq[ErrorProto]], state: JobState) { - def getErrorResult = errorResult.asJava - def getErrors = errors.map(_.asJava).asJava + def getErrorResult = errorResult.toJava + def getErrors = errors.map(_.asJava).toJava def getState = state def withErrorResult(errorResult: Option[ErrorProto]) = copy(errorResult = errorResult) def withErrorResult(errorResult: util.Optional[ErrorProto]) = - copy(errorResult = errorResult.asScala) + copy(errorResult = errorResult.toScala) def withErrors(errors: Option[Seq[ErrorProto]]) = copy(errors = errors) def withErrors(errors: util.Optional[util.List[ErrorProto]]) = - copy(errors = errors.asScala.map(_.asScala.toList)) + copy(errors = errors.toScala.map(_.asScala.toList)) def withState(state: JobState) = copy(state = state) @@ -346,7 +346,7 @@ object JobStatus { * @return a [[JobStatus]] */ def create(errorResult: util.Optional[ErrorProto], errors: util.Optional[util.List[ErrorProto]], state: JobState) = - JobStatus(errorResult.asScala, errors.asScala.map(_.asScala.toList), state) + JobStatus(errorResult.toScala, errors.toScala.map(_.asScala.toList), state) implicit val format: JsonFormat[JobStatus] = jsonFormat3(apply) } diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/QueryJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/QueryJsonProtocol.scala index 737edf32fb..c3c415c11d 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/QueryJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/QueryJsonProtocol.scala @@ -7,18 +7,16 @@ package akka.stream.alpakka.googlecloud.bigquery.model import akka.stream.alpakka.google.scaladsl.Paginated import akka.stream.alpakka.googlecloud.bigquery.scaladsl.spray.BigQueryRestJsonProtocol._ import akka.stream.alpakka.googlecloud.bigquery.scaladsl.spray.BigQueryRootJsonReader -import akka.util.JavaDurationConverters._ import 
com.fasterxml.jackson.annotation.{JsonCreator, JsonIgnoreProperties, JsonProperty} import spray.json.{RootJsonFormat, RootJsonReader} - import java.time.Duration import java.{lang, util} import scala.annotation.nowarn import scala.annotation.unchecked.uncheckedVariance -import scala.collection.JavaConverters._ -import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.DurationConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.duration.FiniteDuration /** @@ -48,15 +46,15 @@ final case class QueryRequest private (query: String, requestId: Option[String]) { def getQuery = query - def getMaxResults = maxResults.asPrimitive - def getDefaultDataset = defaultDataset.asJava - def getTimeout = timeout.map(_.asJava).asJava - def getDryRun = dryRun.map(lang.Boolean.valueOf).asJava - def getUseLegacySql = useLegacySql.map(lang.Boolean.valueOf).asJava - def getRequestId = requestId.asJava - def getLocation = location.asJava - def getMaximumBytesBilled = maximumBytesBilled.asJava - def getLabels = labels.asJava + def getMaxResults = maxResults.toJavaPrimitive + def getDefaultDataset = defaultDataset.toJava + def getTimeout = timeout.map(_.toJava).toJava + def getDryRun = dryRun.map(lang.Boolean.valueOf).toJava + def getUseLegacySql = useLegacySql.map(lang.Boolean.valueOf).toJava + def getRequestId = requestId.toJava + def getLocation = location.toJava + def getMaximumBytesBilled = maximumBytesBilled.toJava + def getLabels = labels.toJava def withQuery(query: String) = copy(query = query) @@ -64,47 +62,47 @@ def withMaxResults(maxResults: Option[Int]) = copy(maxResults = maxResults) def withMaxResults(maxResults: util.OptionalInt) = - copy(maxResults = maxResults.asScala) + copy(maxResults = maxResults.toScala) def withDefaultDataset(defaultDataset: Option[DatasetReference]) = copy(defaultDataset = defaultDataset) def withDefaultDataset(defaultDataset: util.Optional[DatasetReference]) = - copy(defaultDataset = defaultDataset.asScala) + copy(defaultDataset = defaultDataset.toScala) def withTimeout(timeout: Option[FiniteDuration]) = copy(timeout = timeout) def withTimeout(timeout: util.Optional[Duration]) = - copy(timeout = timeout.asScala.map(_.asScala)) + copy(timeout = timeout.toScala.map(_.toScala)) def withDryRun(dryRun: Option[Boolean]) = copy(dryRun = dryRun) def withDryRun(dryRun: util.Optional[lang.Boolean]) = - copy(dryRun = dryRun.asScala.map(_.booleanValue)) + copy(dryRun = dryRun.toScala.map(_.booleanValue)) def withUseLegacySql(useLegacySql: Option[Boolean]) = copy(useLegacySql = useLegacySql) def withUseLegacySql(useLegacySql: util.Optional[lang.Boolean]) = - copy(useLegacySql = useLegacySql.asScala.map(_.booleanValue)) + copy(useLegacySql = useLegacySql.toScala.map(_.booleanValue)) def withRequestId(requestId: Option[String]) = copy(requestId = requestId) def withRequestId(requestId: util.Optional[String]) = - copy(requestId = requestId.asScala) + copy(requestId = requestId.toScala) def withLocation(location: Option[String]) = copy(location = location) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) def withMaximumBytesBilled(maximumBytesBilled: Option[Long]) = copy(maximumBytesBilled = maximumBytesBilled) def withMaximumBytesBilled(maximumBytesBilled: util.OptionalLong) = - copy(maximumBytesBilled = maximumBytesBilled.asScala) + copy(maximumBytesBilled = 
maximumBytesBilled.toScala) def withLabels(labels: Option[Map[String, String]]) = copy(labels = labels) def withLabels(labels: util.Optional[util.Map[String, String]]) = - copy(labels = labels.asScala.map(_.asScala.toMap)) + copy(labels = labels.toScala.map(_.asScala.toMap)) } object QueryRequest { @@ -140,15 +138,15 @@ object QueryRequest { requestId: util.Optional[String]) = QueryRequest( query, - maxResults.asScala, - defaultDataset.asScala, - timeout.asScala.map(_.asScala), - dryRun.asScala.map(_.booleanValue), - useLegacySql.asScala.map(_.booleanValue), + maxResults.toScala, + defaultDataset.toScala, + timeout.toScala.map(_.toScala), + dryRun.toScala.map(_.booleanValue), + useLegacySql.toScala.map(_.booleanValue), None, None, None, - requestId.asScala + requestId.toScala ) implicit val format: RootJsonFormat[QueryRequest] = jsonFormat( @@ -220,21 +218,21 @@ final case class QueryResponse[+T] private (schema: Option[TableSchema], Option(numDmlAffectedRows).map(_.toLong) ) - def getSchema = schema.asJava + def getSchema = schema.toJava def getJobReference = jobReference - def getTotalRows = totalRows.asPrimitive - def getPageToken = pageToken.asJava - def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).asJava - def getTotalBytesProcessed = totalBytesProcessed.asPrimitive + def getTotalRows = totalRows.toJavaPrimitive + def getPageToken = pageToken.toJava + def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).toJava + def getTotalBytesProcessed = totalBytesProcessed.toJavaPrimitive def getJobComplete = jobComplete - def getErrors = errors.map(_.asJava).asJava - def getCacheHit = cacheHit.map(lang.Boolean.valueOf).asJava - def getNumDmlAffectedRows = numDmlAffectedRows.asPrimitive + def getErrors = errors.map(_.asJava).toJava + def getCacheHit = cacheHit.map(lang.Boolean.valueOf).toJava + def getNumDmlAffectedRows = numDmlAffectedRows.toJavaPrimitive def withSchema(schema: Option[TableSchema]) = copy(schema = schema) def withSchema(schema: util.Optional[TableSchema]) = - copy(schema = schema.asScala) + copy(schema = schema.toScala) def withJobReference(jobReference: JobReference) = copy(jobReference = jobReference) @@ -242,22 +240,22 @@ final case class QueryResponse[+T] private (schema: Option[TableSchema], def withTotalRows(totalRows: Option[Long]) = copy(totalRows = totalRows) def withTotalRows(totalRows: util.OptionalLong) = - copy(totalRows = totalRows.asScala) + copy(totalRows = totalRows.toScala) def withPageToken(pageToken: Option[String]) = copy(pageToken = pageToken) def withPageToken(pageToken: util.Optional[String]) = - copy(pageToken = pageToken.asScala) + copy(pageToken = pageToken.toScala) def withRows[S >: T](rows: Option[Seq[S]]) = copy(rows = rows) def withRows(rows: util.Optional[util.List[T] @uncheckedVariance]) = - copy(rows = rows.asScala.map(_.asScala.toList)) + copy(rows = rows.toScala.map(_.asScala.toList)) def withTotalBytesProcessed(totalBytesProcessed: Option[Long]) = copy(totalBytesProcessed = totalBytesProcessed) def withTotalBytesProcessed(totalBytesProcessed: util.OptionalLong) = - copy(totalBytesProcessed = totalBytesProcessed.asScala) + copy(totalBytesProcessed = totalBytesProcessed.toScala) def withJobComplete(jobComplete: Boolean) = copy(jobComplete = jobComplete) @@ -265,17 +263,17 @@ final case class QueryResponse[+T] private (schema: Option[TableSchema], def withErrors(errors: Option[Seq[ErrorProto]]) = copy(errors = errors) def withErrors(errors: util.Optional[util.List[ErrorProto]]) = - 
copy(errors = errors.asScala.map(_.asScala.toList)) + copy(errors = errors.toScala.map(_.asScala.toList)) def withCacheHit(cacheHit: Option[Boolean]) = copy(cacheHit = cacheHit) def withCacheHit(cacheHit: util.Optional[lang.Boolean]) = - copy(cacheHit = cacheHit.asScala.map(_.booleanValue)) + copy(cacheHit = cacheHit.toScala.map(_.booleanValue)) def withNumDmlAffectedRows(numDmlAffectedRows: Option[Long]) = copy(numDmlAffectedRows = numDmlAffectedRows) def withNumDmlAffectedRows(numDmlAffectedRows: util.OptionalLong) = - copy(numDmlAffectedRows = numDmlAffectedRows.asScala) + copy(numDmlAffectedRows = numDmlAffectedRows.toScala) } object QueryResponse { @@ -309,16 +307,16 @@ object QueryResponse { cacheHit: util.Optional[lang.Boolean], numDmlAffectedRows: util.OptionalLong) = QueryResponse[T]( - schema.asScala, + schema.toScala, jobReference, - totalRows.asScala, - pageToken.asScala, - rows.asScala.map(_.asScala.toList), - totalBytesProcessed.asScala, + totalRows.toScala, + pageToken.toScala, + rows.toScala.map(_.asScala.toList), + totalBytesProcessed.toScala, jobComplete, - errors.asScala.map(_.asScala.toList), - cacheHit.asScala.map(_.booleanValue), - numDmlAffectedRows.asScala + errors.toScala.map(_.asScala.toList), + cacheHit.toScala.map(_.booleanValue), + numDmlAffectedRows.toScala ) implicit def reader[T <: AnyRef]( diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/TableDataJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/TableDataJsonProtocol.scala index f88359ed2e..ea92685ba0 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/TableDataJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/TableDataJsonProtocol.scala @@ -15,9 +15,8 @@ import java.{lang, util} import scala.annotation.nowarn import scala.annotation.unchecked.uncheckedVariance -import scala.collection.JavaConverters._ -import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ /** * TableDataListResponse model @@ -39,8 +38,8 @@ final case class TableDataListResponse[+T] private (totalRows: Long, pageToken: this(totalRows.toLong, Option(pageToken), Option(rows).map(_.asScala.toList)) def getTotalRows = totalRows - def getPageToken = pageToken.asJava - def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).asJava + def getPageToken = pageToken.toJava + def getRows: util.Optional[util.List[T] @uncheckedVariance] = rows.map(_.asJava).toJava def withTotalRows(totalRows: Long) = copy(totalRows = totalRows) @@ -48,12 +47,12 @@ final case class TableDataListResponse[+T] private (totalRows: Long, pageToken: def withPageToken(pageToken: Option[String]) = copy(pageToken = pageToken) def withPageToken(pageToken: util.Optional[String]) = - copy(pageToken = pageToken.asScala) + copy(pageToken = pageToken.toScala) def withRows[S >: T](rows: Option[Seq[S]]) = copy(rows = rows) def withRows(rows: util.Optional[util.List[T] @uncheckedVariance]) = - copy(rows = rows.asScala.map(_.asScala.toList)) + copy(rows = rows.toScala.map(_.asScala.toList)) } object TableDataListResponse { @@ -69,7 +68,7 @@ object TableDataListResponse { * @return a [[TableDataListResponse]] */ def create[T](totalRows: Long, pageToken: util.Optional[String], rows: util.Optional[util.List[T]]) = - TableDataListResponse(totalRows, 
pageToken.asScala, rows.asScala.map(_.asScala.toList)) + TableDataListResponse(totalRows, pageToken.toScala, rows.toScala.map(_.asScala.toList)) implicit def reader[T <: AnyRef]( implicit reader: BigQueryRootJsonReader[T] @@ -96,9 +95,9 @@ final case class TableDataInsertAllRequest[+T] private (skipInvalidRows: Option[ templateSuffix: Option[String], rows: Seq[Row[T]]) { - @JsonIgnore def getSkipInvalidRows = skipInvalidRows.map(lang.Boolean.valueOf).asJava - @JsonIgnore def getIgnoreUnknownValues = ignoreUnknownValues.map(lang.Boolean.valueOf).asJava - @JsonIgnore def getTemplateSuffix = templateSuffix.asJava + @JsonIgnore def getSkipInvalidRows = skipInvalidRows.map(lang.Boolean.valueOf).toJava + @JsonIgnore def getIgnoreUnknownValues = ignoreUnknownValues.map(lang.Boolean.valueOf).toJava + @JsonIgnore def getTemplateSuffix = templateSuffix.toJava def getRows: util.List[Row[T] @uncheckedVariance] = rows.asJava @nowarn("msg=never used") @@ -114,17 +113,17 @@ final case class TableDataInsertAllRequest[+T] private (skipInvalidRows: Option[ def withSkipInvalidRows(skipInvalidRows: Option[Boolean]) = copy(skipInvalidRows = skipInvalidRows) def withSkipInvalidRows(skipInvalidRows: util.Optional[lang.Boolean]) = - copy(skipInvalidRows = skipInvalidRows.asScala.map(_.booleanValue)) + copy(skipInvalidRows = skipInvalidRows.toScala.map(_.booleanValue)) def withIgnoreUnknownValues(ignoreUnknownValues: Option[Boolean]) = copy(ignoreUnknownValues = ignoreUnknownValues) def withIgnoreUnknownValues(ignoreUnknownValues: util.Optional[lang.Boolean]) = - copy(ignoreUnknownValues = ignoreUnknownValues.asScala.map(_.booleanValue)) + copy(ignoreUnknownValues = ignoreUnknownValues.toScala.map(_.booleanValue)) def withTemplateSuffix(templateSuffix: Option[String]) = copy(templateSuffix = templateSuffix) def withTemplateSuffix(templateSuffix: util.Optional[String]) = - copy(templateSuffix = templateSuffix.asScala) + copy(templateSuffix = templateSuffix.toScala) def withRows[S >: T](rows: Seq[Row[S]]) = copy(rows = rows) @@ -150,9 +149,9 @@ object TableDataInsertAllRequest { templateSuffix: util.Optional[String], rows: util.List[Row[T]]) = TableDataInsertAllRequest( - skipInvalidRows.asScala.map(_.booleanValue), - ignoreUnknownValues.asScala.map(_.booleanValue), - templateSuffix.asScala, + skipInvalidRows.toScala.map(_.booleanValue), + ignoreUnknownValues.toScala.map(_.booleanValue), + templateSuffix.toScala, rows.asScala.toList ) @@ -175,13 +174,13 @@ object TableDataInsertAllRequest { */ final case class Row[+T] private (insertId: Option[String], json: T) { - def getInsertId = insertId.asJava + def getInsertId = insertId.toJava def getJson = json def withInsertId(insertId: Option[String]) = copy(insertId = insertId) def withInsertId(insertId: util.Optional[String]) = - copy(insertId = insertId.asScala) + copy(insertId = insertId.toScala) def withJson[U >: T](json: U): Row[U] = copy(json = json) @@ -199,7 +198,7 @@ object Row { * @return a [[Row]] */ def create[T](insertId: util.Optional[String], json: T) = - Row(insertId.asScala, json) + Row(insertId.toScala, json) } /** @@ -207,13 +206,13 @@ object Row { * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ final case class TableDataInsertAllResponse private (insertErrors: Option[Seq[InsertError]]) { - def getInsertErrors = insertErrors.map(_.asJava).asJava + def getInsertErrors = insertErrors.map(_.asJava).toJava def withInsertErrors(insertErrors: Option[Seq[InsertError]]) = 
copy(insertErrors = insertErrors) def withInsertErrors(insertErrors: util.Optional[util.List[InsertError]]) = - copy(insertErrors = insertErrors.asScala.map(_.asScala.toList)) + copy(insertErrors = insertErrors.toScala.map(_.asScala.toList)) } object TableDataInsertAllResponse { @@ -223,7 +222,7 @@ object TableDataInsertAllResponse { * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ def create(insertErrors: util.Optional[util.List[InsertError]]) = - TableDataInsertAllResponse(insertErrors.asScala.map(_.asScala.toList)) + TableDataInsertAllResponse(insertErrors.toScala.map(_.asScala.toList)) implicit val format: RootJsonFormat[TableDataInsertAllResponse] = jsonFormat1(apply) @@ -235,7 +234,7 @@ object TableDataInsertAllResponse { */ final case class InsertError private (index: Int, errors: Option[Seq[ErrorProto]]) { def getIndex = index - def getErrors = errors.map(_.asJava).asJava + def getErrors = errors.map(_.asJava).toJava def withIndex(index: Int) = copy(index = index) @@ -243,7 +242,7 @@ final case class InsertError private (index: Int, errors: Option[Seq[ErrorProto] def withErrors(errors: Option[Seq[ErrorProto]]) = copy(errors = errors) def withErrors(errors: util.Optional[util.List[ErrorProto]]) = - copy(errors = errors.asScala.map(_.asScala.toList)) + copy(errors = errors.toScala.map(_.asScala.toList)) } object InsertError { @@ -253,7 +252,7 @@ object InsertError { * @see [[https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll#response-body BigQuery reference]] */ def create(index: Int, errors: util.Optional[util.List[ErrorProto]]) = - InsertError(index, errors.asScala.map(_.asScala.toList)) + InsertError(index, errors.toScala.map(_.asScala.toList)) implicit val format: JsonFormat[InsertError] = jsonFormat2(apply) } diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/TableJsonProtocol.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/TableJsonProtocol.scala index db18312635..f599e0d2d3 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/TableJsonProtocol.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/model/TableJsonProtocol.scala @@ -13,9 +13,8 @@ import java.util import scala.annotation.nowarn import scala.annotation.varargs -import scala.collection.immutable.Seq -import scala.compat.java8.OptionConverters._ -import scala.collection.JavaConverters._ +import scala.jdk.OptionConverters._ +import scala.jdk.CollectionConverters._ /** * Table resource model @@ -34,10 +33,10 @@ final case class Table private (tableReference: TableReference, location: Option[String]) { def getTableReference = tableReference - def getLabels = labels.map(_.asJava).asJava - def getSchema = schema.asJava - def getNumRows = numRows.asPrimitive - def getLocation = location.asJava + def getLabels = labels.map(_.asJava).toJava + def getSchema = schema.toJava + def getNumRows = numRows.toJavaPrimitive + def getLocation = location.toJava def withTableReference(tableReference: TableReference) = copy(tableReference = tableReference) @@ -45,22 +44,22 @@ final case class Table private (tableReference: TableReference, def withLabels(labels: Option[Map[String, String]]) = copy(labels = labels) def withLabels(labels: util.Optional[util.Map[String, String]]) = - copy(labels = labels.asScala.map(_.asScala.toMap)) + copy(labels = 
labels.toScala.map(_.asScala.toMap)) def withSchema(schema: Option[TableSchema]) = copy(schema = schema) def withSchema(schema: util.Optional[TableSchema]) = - copy(schema = schema.asScala) + copy(schema = schema.toScala) def withNumRows(numRows: Option[Long]) = copy(numRows = numRows) def withNumRows(numRows: util.OptionalLong) = - copy(numRows = numRows.asScala) + copy(numRows = numRows.toScala) def withLocation(location: Option[String]) = copy(location = location) def withLocation(location: util.Optional[String]) = - copy(location = location.asScala) + copy(location = location.toScala) } object Table { @@ -83,10 +82,10 @@ object Table { location: util.Optional[String]) = Table( tableReference, - labels.asScala.map(_.asScala.toMap), - schema.asScala, - numRows.asScala, - location.asScala + labels.toScala.map(_.asScala.toMap), + schema.toScala, + numRows.toScala, + location.toScala ) implicit val format: RootJsonFormat[Table] = jsonFormat5(apply) @@ -102,14 +101,14 @@ object Table { */ final case class TableReference private (projectId: Option[String], datasetId: String, tableId: Option[String]) { - def getProjectId = projectId.asJava + def getProjectId = projectId.toJava def getDatasetId = datasetId def getTableId = tableId def withProjectId(projectId: Option[String]) = copy(projectId = projectId) def withProjectId(projectId: util.Optional[String]) = - copy(projectId = projectId.asScala) + copy(projectId = projectId.toScala) def withDatasetId(datasetId: String) = copy(datasetId = datasetId) @@ -117,7 +116,7 @@ final case class TableReference private (projectId: Option[String], datasetId: S def withTableId(tableId: Option[String]) = copy(tableId = tableId) def withTableId(tableId: util.Optional[String]) = - copy(tableId = tableId.asScala) + copy(tableId = tableId.toScala) } object TableReference { @@ -132,7 +131,7 @@ object TableReference { * @return a [[TableReference]] */ def create(projectId: util.Optional[String], datasetId: String, tableId: util.Optional[String]) = - TableReference(projectId.asScala, datasetId, tableId.asScala) + TableReference(projectId.toScala, datasetId, tableId.toScala) implicit val referenceFormat: JsonFormat[TableReference] = jsonFormat3(apply) } @@ -211,8 +210,8 @@ final case class TableFieldSchema private (name: String, def getName = name def getType = `type` - def getMode = mode.asJava - def getFields = fields.map(_.asJava).asJava + def getMode = mode.toJava + def getFields = fields.map(_.asJava).toJava def withName(name: String) = copy(name = name) @@ -223,12 +222,12 @@ final case class TableFieldSchema private (name: String, def withMode(mode: Option[TableFieldSchemaMode]) = copy(mode = mode) def withMode(mode: util.Optional[TableFieldSchemaMode]) = - copy(mode = mode.asScala) + copy(mode = mode.toScala) def withFields(fields: Option[Seq[TableFieldSchema]]) = copy(fields = fields) def withFields(fields: util.Optional[util.List[TableFieldSchema]]) = - copy(fields = fields.asScala.map(_.asScala.toList)) + copy(fields = fields.toScala.map(_.asScala.toList)) } object TableFieldSchema { @@ -247,7 +246,7 @@ object TableFieldSchema { `type`: TableFieldSchemaType, mode: util.Optional[TableFieldSchemaMode], fields: util.Optional[util.List[TableFieldSchema]]) = - TableFieldSchema(name, `type`, mode.asScala, fields.asScala.map(_.asScala.toList)) + TableFieldSchema(name, `type`, mode.toScala, fields.toScala.map(_.asScala.toList)) /** * A field in TableSchema @@ -264,7 +263,7 @@ object TableFieldSchema { `type`: TableFieldSchemaType, mode: 
util.Optional[TableFieldSchemaMode], fields: TableFieldSchema*) = - TableFieldSchema(name, `type`, mode.asScala, if (fields.nonEmpty) Some(fields.toList) else None) + TableFieldSchema(name, `type`, mode.toScala, if (fields.nonEmpty) Some(fields.toList) else None) implicit val format: JsonFormat[TableFieldSchema] = lazyFormat( jsonFormat(apply, "name", "type", "mode", "fields") @@ -353,16 +352,16 @@ final case class TableListResponse private (nextPageToken: Option[String], tables: Option[Seq[Table]], totalItems: Option[Int]) { - def getNextPageToken = nextPageToken.asJava - def getTables = tables.map(_.asJava).asJava - def getTotalItems = totalItems.asPrimitive + def getNextPageToken = nextPageToken.toJava + def getTables = tables.map(_.asJava).toJava + def getTotalItems = totalItems.toJavaPrimitive def withNextPageToken(nextPageToken: util.Optional[String]) = - copy(nextPageToken = nextPageToken.asScala) + copy(nextPageToken = nextPageToken.toScala) def withTables(tables: util.Optional[util.List[Table]]) = - copy(tables = tables.asScala.map(_.asScala.toList)) + copy(tables = tables.toScala.map(_.asScala.toList)) def withTotalItems(totalItems: util.OptionalInt) = - copy(totalItems = totalItems.asScala) + copy(totalItems = totalItems.toScala) } object TableListResponse { @@ -379,7 +378,7 @@ object TableListResponse { def createTableListResponse(nextPageToken: util.Optional[String], tables: util.Optional[util.List[Table]], totalItems: util.OptionalInt) = - TableListResponse(nextPageToken.asScala, tables.asScala.map(_.asScala.toList), totalItems.asScala) + TableListResponse(nextPageToken.toScala, tables.toScala.map(_.asScala.toList), totalItems.toScala) implicit val format: RootJsonFormat[TableListResponse] = jsonFormat3(apply) implicit val paginated: Paginated[TableListResponse] = _.nextPageToken diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala index f9229a7505..f386da2a98 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryDatasets.scala @@ -5,7 +5,6 @@ package akka.stream.alpakka.googlecloud.bigquery.scaladsl import akka.actor.ClassicActorSystemProvider -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport import akka.http.scaladsl.marshalling.Marshal import akka.http.scaladsl.model.HttpMethods.{DELETE, POST} @@ -18,6 +17,7 @@ import akka.stream.alpakka.googlecloud.bigquery.{BigQueryEndpoints, BigQueryExce import akka.stream.scaladsl.Source import akka.{Done, NotUsed} +import scala.concurrent.ExecutionContext import scala.concurrent.Future private[scaladsl] trait BigQueryDatasets { this: BigQueryRest => @@ -92,7 +92,7 @@ private[scaladsl] trait BigQueryDatasets { this: BigQueryRest => settings: GoogleSettings): Future[Dataset] = { import BigQueryException._ import SprayJsonSupport._ - implicit val ec = ExecutionContexts.parasitic + implicit val ec = ExecutionContext.parasitic val uri = BigQueryEndpoints.datasets(settings.projectId) Marshal(dataset).to[RequestEntity].flatMap { entity => val request = HttpRequest(POST, uri, entity = entity) diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryJobs.scala 
b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryJobs.scala index 97fae9bf55..516c2580a8 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryJobs.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryJobs.scala @@ -6,7 +6,6 @@ package akka.stream.alpakka.googlecloud.bigquery.scaladsl import akka.NotUsed import akka.actor.ClassicActorSystemProvider -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport import akka.http.scaladsl.marshalling.{Marshal, ToEntityMarshaller} import akka.http.scaladsl.model.ContentTypes.`application/octet-stream` @@ -28,6 +27,7 @@ import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink} import akka.util.ByteString import scala.annotation.nowarn +import scala.concurrent.ExecutionContext import scala.concurrent.Future private[scaladsl] trait BigQueryJobs { this: BigQueryRest => @@ -160,7 +160,7 @@ private[scaladsl] trait BigQueryJobs { this: BigQueryRest => .fromMaterializer { (mat, attr) => import BigQueryException._ implicit val settings = GoogleAttributes.resolveSettings(mat, attr) - implicit val ec = ExecutionContexts.parasitic + implicit val ec = ExecutionContext.parasitic val uri = BigQueryMediaEndpoints.jobs(settings.projectId).withQuery(Query("uploadType" -> "resumable")) Sink .lazyFutureSink { () => @@ -169,7 +169,7 @@ private[scaladsl] trait BigQueryJobs { this: BigQueryRest => .map { entity => val request = HttpRequest(POST, uri, List(`X-Upload-Content-Type`(`application/octet-stream`)), entity) resumableUpload[Job](request) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } .mapMaterializedValue(_.flatten) } diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryQueries.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryQueries.scala index f0251a9aa9..c598516aed 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryQueries.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryQueries.scala @@ -5,7 +5,6 @@ package akka.stream.alpakka.googlecloud.bigquery.scaladsl import akka.NotUsed -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport import akka.http.scaladsl.marshalling.Marshal import akka.http.scaladsl.model.HttpMethods.POST @@ -20,8 +19,9 @@ import akka.stream.alpakka.googlecloud.bigquery.model.{QueryRequest, QueryRespon import akka.stream.alpakka.googlecloud.bigquery.{BigQueryEndpoints, BigQueryException} import akka.stream.scaladsl.{Keep, RestartSource, Sink, Source} -import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration +import scala.concurrent.ExecutionContext +import scala.concurrent.Future import scala.util.{Failure, Success} private[scaladsl] trait BigQueryQueries { this: BigQueryRest => @@ -62,7 +62,7 @@ private[scaladsl] trait BigQueryQueries { this: BigQueryRest => import BigQueryException._ import SprayJsonSupport._ implicit val system = mat.system - implicit val ec = ExecutionContexts.parasitic + implicit val ec = ExecutionContext.parasitic implicit val settings = GoogleAttributes.resolveSettings(mat, attr) Source.lazyFutureSource { () => diff --git 
a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryTableData.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryTableData.scala index e6e628b833..d78764b312 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryTableData.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryTableData.scala @@ -5,7 +5,6 @@ package akka.stream.alpakka.googlecloud.bigquery.scaladsl import akka.NotUsed -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport import akka.http.scaladsl.marshalling.{Marshal, ToEntityMarshaller} import akka.http.scaladsl.model.HttpMethods.POST @@ -23,9 +22,9 @@ import akka.stream.alpakka.googlecloud.bigquery.model.{ } import akka.stream.alpakka.googlecloud.bigquery.{BigQueryEndpoints, BigQueryException, InsertAllRetryPolicy} import akka.stream.scaladsl.{Flow, Keep, Sink, Source} - import java.util.{SplittableRandom, UUID} -import scala.collection.immutable.Seq + +import scala.concurrent.ExecutionContext import scala.concurrent.Future private[scaladsl] trait BigQueryTableData { this: BigQueryRest => @@ -122,7 +121,7 @@ private[scaladsl] trait BigQueryTableData { this: BigQueryRest => import BigQueryException._ import SprayJsonSupport._ implicit val system = mat.system - implicit val ec = ExecutionContexts.parasitic + implicit val ec = ExecutionContext.parasitic implicit val settings = GoogleAttributes.resolveSettings(mat, attr) val uri = BigQueryEndpoints.tableDataInsertAll(settings.projectId, datasetId, tableId) diff --git a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryTables.scala b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryTables.scala index b200180158..16e4ea4505 100644 --- a/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryTables.scala +++ b/google-cloud-bigquery/src/main/scala/akka/stream/alpakka/googlecloud/bigquery/scaladsl/BigQueryTables.scala @@ -6,7 +6,6 @@ package akka.stream.alpakka.googlecloud.bigquery.scaladsl import akka.Done import akka.actor.ClassicActorSystemProvider -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport import akka.http.scaladsl.marshalling.Marshal import akka.http.scaladsl.model.HttpMethods.{DELETE, POST} @@ -19,6 +18,7 @@ import akka.stream.alpakka.googlecloud.bigquery.scaladsl.schema.TableSchemaWrite import akka.stream.alpakka.googlecloud.bigquery.{BigQueryEndpoints, BigQueryException} import akka.stream.scaladsl.{Keep, Sink, Source} +import scala.concurrent.ExecutionContext import scala.concurrent.Future private[scaladsl] trait BigQueryTables { this: BigQueryRest => @@ -85,7 +85,7 @@ private[scaladsl] trait BigQueryTables { this: BigQueryRest => settings: GoogleSettings): Future[Table] = { import BigQueryException._ import SprayJsonSupport._ - implicit val ec = ExecutionContexts.parasitic + implicit val ec = ExecutionContext.parasitic val projectId = table.tableReference.projectId.getOrElse(settings.projectId) val datasetId = table.tableReference.datasetId val uri = BigQueryEndpoints.tables(projectId, datasetId) diff --git a/google-cloud-bigquery/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/e2e/A.scala 
b/google-cloud-bigquery/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/e2e/A.scala index 616f1b1ba7..56510d79ba 100644 --- a/google-cloud-bigquery/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/e2e/A.scala +++ b/google-cloud-bigquery/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/e2e/A.scala @@ -12,7 +12,7 @@ import com.fasterxml.jackson.databind.annotation.JsonSerialize import com.fasterxml.jackson.databind.ser.std.ToStringSerializer import java.time.{Instant, LocalDate, LocalDateTime, LocalTime} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ @JsonPropertyOrder(alphabetic = true) case class A(integer: Int, long: Long, float: Float, double: Double, string: String, boolean: Boolean, record: B) { diff --git a/google-cloud-bigquery/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/e2e/javadsl/EndToEndHelper.scala b/google-cloud-bigquery/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/e2e/javadsl/EndToEndHelper.scala index bdf6ca0915..39661d2413 100644 --- a/google-cloud-bigquery/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/e2e/javadsl/EndToEndHelper.scala +++ b/google-cloud-bigquery/src/test/scala/akka/stream/alpakka/googlecloud/bigquery/e2e/javadsl/EndToEndHelper.scala @@ -5,7 +5,7 @@ package akka.stream.alpakka.googlecloud.bigquery.e2e.javadsl import akka.stream.alpakka.googlecloud.bigquery.e2e.scaladsl; -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ abstract class EndToEndHelper extends scaladsl.EndToEndHelper { diff --git a/google-cloud-pub-sub-grpc/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/grpc/scaladsl/GooglePubSub.scala b/google-cloud-pub-sub-grpc/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/grpc/scaladsl/GooglePubSub.scala index dd42482edf..f09b4198d3 100644 --- a/google-cloud-pub-sub-grpc/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/grpc/scaladsl/GooglePubSub.scala +++ b/google-cloud-pub-sub-grpc/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/grpc/scaladsl/GooglePubSub.scala @@ -5,13 +5,13 @@ package akka.stream.alpakka.googlecloud.pubsub.grpc.scaladsl import akka.actor.Cancellable -import akka.dispatch.ExecutionContexts import akka.stream.{Attributes, Materializer} import akka.stream.scaladsl.{Flow, Keep, Sink, Source} import akka.{Done, NotUsed} import com.google.pubsub.v1.pubsub._ import scala.concurrent.duration._ +import scala.concurrent.ExecutionContext import scala.concurrent.{Future, Promise} /** @@ -67,7 +67,7 @@ object GooglePubSub { .mapConcat(_.receivedMessages.toVector) .mapMaterializedValue(_ => cancellable.future) } - .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) + .mapMaterializedValue(_.flatMap(identity)(ExecutionContext.parasitic)) /** * Create a source that emits messages for a given subscription using a synchronous PullRequest. @@ -92,7 +92,7 @@ object GooglePubSub { .mapConcat(_.receivedMessages.toVector) .mapMaterializedValue(_ => cancellable.future) } - .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) + .mapMaterializedValue(_.flatMap(identity)(ExecutionContext.parasitic)) /** * Create a flow that accepts consumed message acknowledgements. 
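The `ExecutionContexts.parasitic` removals here and in the surrounding files all substitute `scala.concurrent.ExecutionContext.parasitic`, the standard-library equivalent (Scala 2.13+) that runs callbacks synchronously on the calling thread. A sketch of the materialized-value flattening it is used for above (the `flattenMat` helper is invented):

```scala
import scala.concurrent.{ExecutionContext, Future}

import akka.stream.scaladsl.Source

object ParasiticSketch {
  // Flatten a nested Future materialized value without hopping threads:
  // ExecutionContext.parasitic (stdlib) replaces akka.dispatch.ExecutionContexts.parasitic.
  def flattenMat[Out, T](src: Source[Out, Future[Future[T]]]): Source[Out, Future[T]] =
    src.mapMaterializedValue(_.flatMap(identity)(ExecutionContext.parasitic))
}
```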
@@ -124,7 +124,7 @@ object GooglePubSub { .mapAsyncUnordered(parallelism)(subscriber(mat, attr).client.acknowledge) .toMat(Sink.ignore)(Keep.right) } - .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) + .mapMaterializedValue(_.flatMap(identity)(ExecutionContext.parasitic)) } private def publisher(mat: Materializer, attr: Attributes) = diff --git a/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/impl/PubSubApi.scala b/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/impl/PubSubApi.scala index b6e05be7a1..87586fe341 100644 --- a/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/impl/PubSubApi.scala +++ b/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/impl/PubSubApi.scala @@ -6,7 +6,6 @@ package akka.stream.alpakka.googlecloud.pubsub.impl import akka.actor.ActorSystem import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.Http.HostConnectionPool import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._ import akka.http.scaladsl.marshalling.Marshal @@ -21,9 +20,10 @@ import akka.stream.scaladsl.{Flow, FlowWithContext} import akka.{Done, NotUsed} import spray.json.DefaultJsonProtocol._ import spray.json._ - import java.time.Instant + import scala.collection.immutable +import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.util.Try @@ -248,7 +248,7 @@ private[pubsub] trait PubSubApi { .to[RequestEntity] .map { entity => HttpRequest(POST, url, entity = entity) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } .via(pool[PublishResponse, T](parallelism, host)) .map(_.get) diff --git a/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/javadsl/GooglePubSub.scala b/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/javadsl/GooglePubSub.scala index 74071b97d8..69b1efc813 100644 --- a/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/javadsl/GooglePubSub.scala +++ b/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/javadsl/GooglePubSub.scala @@ -11,9 +11,9 @@ import akka.stream.javadsl.{Flow, FlowWithContext, Sink, Source} import akka.{Done, NotUsed} import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.Future -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Java DSL for Google Pub/Sub @@ -104,6 +104,6 @@ object GooglePubSub { def acknowledge(subscription: String, config: PubSubConfig): Sink[AcknowledgeRequest, CompletionStage[Done]] = GPubSub .acknowledge(subscription, config) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/model.scala b/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/model.scala index a5f1a16c6d..044c9dc7c7 100644 --- a/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/model.scala +++ b/google-cloud-pub-sub/src/main/scala/akka/stream/alpakka/googlecloud/pubsub/model.scala @@ -12,7 +12,7 @@ import akka.stream.alpakka.google.auth.ServiceAccountCredentials import scala.annotation.nowarn import scala.collection.immutable -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * @param projectId (deprecated) the project Id in the 
google account diff --git a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/FailedUpload.scala b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/FailedUpload.scala index 13ed61a653..d348accb1b 100644 --- a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/FailedUpload.scala +++ b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/FailedUpload.scala @@ -5,7 +5,7 @@ package akka.stream.alpakka.googlecloud.storage import scala.collection.immutable.Seq -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ final class FailedUpload private ( val reasons: Seq[Throwable] diff --git a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/Owner.scala b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/Owner.scala index 9213af0c73..60be1663c8 100644 --- a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/Owner.scala +++ b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/Owner.scala @@ -5,14 +5,14 @@ package akka.stream.alpakka.googlecloud.storage import java.util.Optional -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ final class Owner private (entity: String, entityId: Option[String]) { def withEntity(entity: String): Owner = copy(entity = entity) def withEntityId(entityId: String): Owner = copy(entityId = Option(entityId)) /** Java API */ - def getEntityId: Optional[String] = entityId.asJava + def getEntityId: Optional[String] = entityId.toJava private def copy(entity: String = entity, entityId: Option[String] = entityId): Owner = new Owner(entity, entityId) @@ -29,5 +29,5 @@ object Owner { /** Java API */ def create(entity: String, entityId: Optional[String]): Owner = - new Owner(entity, entityId.asScala) + new Owner(entity, entityId.toScala) } diff --git a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/StorageObject.scala b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/StorageObject.scala index c11f2b18b7..45f5a371da 100644 --- a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/StorageObject.scala +++ b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/StorageObject.scala @@ -8,8 +8,8 @@ import java.time.OffsetDateTime import java.util.Optional import akka.http.scaladsl.model.ContentType -import scala.compat.java8.OptionConverters._ -import scala.collection.JavaConverters._ +import scala.jdk.OptionConverters._ +import scala.jdk.CollectionConverters._ /** * Represents an object within Google Cloud Storage. 
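Note the asymmetry across the new `scala.jdk` packages: `CollectionConverters` keeps the familiar `asScala`/`asJava` method names (only the import moves from `scala.collection.JavaConverters`), whereas `OptionConverters` renames them to `toScala`/`toJava`. The `StorageObject` getters below mix both, roughly like this invented example:

```scala
import java.util.Optional

import scala.jdk.CollectionConverters._ // method names unchanged: asScala/asJava
import scala.jdk.OptionConverters._     // methods renamed: toScala/toJava

final class SketchObject(metadata: Option[Map[String, String]]) {
  // Inner Map converted with asJava (collections), outer Option with toJava (options)
  def getMetadata: Optional[java.util.Map[String, String]] = metadata.map(_.asJava).toJava
}
```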
@@ -89,24 +89,24 @@ final class StorageObject private ( /** Java API */ def getContentType: akka.http.javadsl.model.ContentType = contentType.asInstanceOf[ContentType] - def getTimeDeleted: Optional[OffsetDateTime] = timeDeleted.asJava - def getContentDisposition: Optional[String] = contentDisposition.asJava - def getContentEncoding: Optional[String] = contentEncoding.asJava - def getContentLanguage: Optional[String] = contentLanguage.asJava - def getTemporaryHold: Optional[Boolean] = temporaryHold.asJava - def getEventBasedHold: Optional[Boolean] = eventBasedHold.asJava - def getRetentionExpirationTime: Optional[OffsetDateTime] = retentionExpirationTime.asJava - def getCacheControl: Optional[String] = cacheControl.asJava - def getMetadata: Optional[java.util.Map[String, String]] = metadata.map(_.asJava).asJava - def getComponentCount: Optional[Integer] = componentCount.map(Int.box).asJava - def getKmsKeyName: Optional[String] = kmsKeyName.asJava - def getCustomerEncryption: Optional[CustomerEncryption] = customerEncryption.asJava - def getOwner: Optional[Owner] = owner.asJava - def getAcl: Optional[java.util.List[ObjectAccessControls]] = acl.map(_.asJava).asJava - def getCustomTime: Optional[OffsetDateTime] = customTime.asJava - def getMaybeMd5Hash: Optional[String] = maybeMd5Hash.asJava - def getMaybeCrc32c: Optional[String] = maybeCrc32c.asJava - def getMaybeStorageClass: Optional[String] = maybeStorageClass.asJava + def getTimeDeleted: Optional[OffsetDateTime] = timeDeleted.toJava + def getContentDisposition: Optional[String] = contentDisposition.toJava + def getContentEncoding: Optional[String] = contentEncoding.toJava + def getContentLanguage: Optional[String] = contentLanguage.toJava + def getTemporaryHold: Optional[Boolean] = temporaryHold.toJava + def getEventBasedHold: Optional[Boolean] = eventBasedHold.toJava + def getRetentionExpirationTime: Optional[OffsetDateTime] = retentionExpirationTime.toJava + def getCacheControl: Optional[String] = cacheControl.toJava + def getMetadata: Optional[java.util.Map[String, String]] = metadata.map(_.asJava).toJava + def getComponentCount: Optional[Integer] = componentCount.map(Int.box).toJava + def getKmsKeyName: Optional[String] = kmsKeyName.toJava + def getCustomerEncryption: Optional[CustomerEncryption] = customerEncryption.toJava + def getOwner: Optional[Owner] = owner.toJava + def getAcl: Optional[java.util.List[ObjectAccessControls]] = acl.map(_.asJava).toJava + def getCustomTime: Optional[OffsetDateTime] = customTime.toJava + def getMaybeMd5Hash: Optional[String] = maybeMd5Hash.toJava + def getMaybeCrc32c: Optional[String] = maybeCrc32c.toJava + def getMaybeStorageClass: Optional[String] = maybeStorageClass.toJava def withKind(value: String): StorageObject = copy(kind = value) def withId(value: String): StorageObject = copy(id = value) @@ -463,24 +463,24 @@ object StorageObject { selfLink, updated, timeCreated, - timeDeleted.asScala, + timeDeleted.toScala, storageClass, Option(storageClass), - contentDisposition.asScala, - contentEncoding.asScala, - contentLanguage.asScala, + contentDisposition.toScala, + contentEncoding.toScala, + contentLanguage.toScala, metageneration, - temporaryHold.asScala, - eventBasedHold.asScala, - retentionExpirationTime.asScala, + temporaryHold.toScala, + eventBasedHold.toScala, + retentionExpirationTime.toScala, timeStorageClassUpdated, - cacheControl.asScala, - customTime.asScala, - metadata.asScala, - componentCount.asScala, - kmsKeyName.asScala, - customerEncryption.asScala, - owner.asScala, - acl.asScala 
+ cacheControl.toScala, + customTime.toScala, + metadata.toScala, + componentCount.toScala, + kmsKeyName.toScala, + customerEncryption.toScala, + owner.toScala, + acl.toScala ) } diff --git a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/impl/GCStorageStream.scala b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/impl/GCStorageStream.scala index c17861a2b9..27cc43e6f7 100644 --- a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/impl/GCStorageStream.scala +++ b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/impl/GCStorageStream.scala @@ -5,8 +5,6 @@ package akka.stream.alpakka.googlecloud.storage.impl import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts -import akka.dispatch.ExecutionContexts.parasitic import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._ import akka.http.scaladsl.marshalling.Marshal import akka.http.scaladsl.model.HttpMethods.{DELETE, POST} @@ -27,6 +25,7 @@ import akka.{Done, NotUsed} import spray.json._ import scala.annotation.nowarn +import scala.concurrent.ExecutionContext import scala.concurrent.Future @InternalApi private[storage] object GCStorageStream { @@ -45,7 +44,7 @@ import scala.concurrent.Future val uri = Uri(gcsSettings.endpointUrl) .withPath(Path(gcsSettings.basePath) / "b") .withQuery(Query("project" -> settings.projectId)) - implicit val ec = parasitic + implicit val ec = ExecutionContext.parasitic val request = Marshal(BucketInfo(bucketName, location)).to[RequestEntity].map { entity => HttpRequest(POST, uri, entity = entity) } @@ -210,7 +209,7 @@ import scala.concurrent.Future case Some(resource) => Future.successful(resource) case None => Future.failed(new RuntimeException("Storage object is missing")) } - )(ExecutionContexts.parasitic) + )(ExecutionContext.parasitic) ) } @@ -224,7 +223,7 @@ import scala.concurrent.Future Source.lazyFuture { () => request.flatMap { request => GoogleHttp()(mat.system).singleAuthenticatedRequest[T](request) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } } .mapMaterializedValue(_ => NotUsed) diff --git a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/javadsl/GCStorage.scala b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/javadsl/GCStorage.scala index 237fea7e0c..4a3b632647 100644 --- a/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/javadsl/GCStorage.scala +++ b/google-cloud-storage/src/main/scala/akka/stream/alpakka/googlecloud/storage/javadsl/GCStorage.scala @@ -17,9 +17,9 @@ import akka.stream.{Attributes, Materializer} import akka.util.ByteString import akka.{Done, NotUsed} -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters._ +import scala.jdk.OptionConverters._ /** * Java API @@ -45,8 +45,8 @@ object GCStorage { attributes: Attributes): CompletionStage[Optional[Bucket]] = GCStorageStream .getBucket(bucketName)(materializer, attributes) - .map(_.asJava)(materializer.executionContext) - .toJava + .map(_.toJava)(materializer.executionContext) + .asJava /** * Gets information on a bucket @@ -61,8 +61,8 @@ object GCStorage { def getBucket(bucketName: String, system: ActorSystem, attributes: Attributes): CompletionStage[Optional[Bucket]] = GCStorageStream .getBucket(bucketName)(Materializer.matFromSystem(system), 
attributes) - .map(_.asJava)(system.dispatcher) - .toJava + .map(_.toJava)(system.dispatcher) + .asJava /** * Gets information on a bucket @@ -73,7 +73,7 @@ object GCStorage { * @return a `Source` containing `Bucket` if it exists */ def getBucketSource(bucketName: String): Source[Optional[Bucket], NotUsed] = - GCStorageStream.getBucketSource(bucketName).map(_.asJava).asJava + GCStorageStream.getBucketSource(bucketName).map(_.toJava).asJava /** * Creates a new bucket @@ -90,7 +90,7 @@ object GCStorage { location: String, materializer: Materializer, attributes: Attributes): CompletionStage[Bucket] = - GCStorageStream.createBucket(bucketName, location)(materializer, attributes).toJava + GCStorageStream.createBucket(bucketName, location)(materializer, attributes).asJava /** * Creates a new bucket @@ -105,7 +105,7 @@ object GCStorage { location: String, system: ActorSystem, attributes: Attributes): CompletionStage[Bucket] = - GCStorageStream.createBucket(bucketName, location)(Materializer.matFromSystem(system), attributes).toJava + GCStorageStream.createBucket(bucketName, location)(Materializer.matFromSystem(system), attributes).asJava /** * Creates a new bucket @@ -130,7 +130,7 @@ object GCStorage { */ @deprecated("pass in the actor system instead of the materializer", "3.0.0") def deleteBucket(bucketName: String, materializer: Materializer, attributes: Attributes): CompletionStage[Done] = - GCStorageStream.deleteBucket(bucketName)(materializer, attributes).toJava + GCStorageStream.deleteBucket(bucketName)(materializer, attributes).asJava /** * Deletes bucket @@ -141,7 +141,7 @@ object GCStorage { * @return a `CompletionStage` of `Done` on successful deletion */ def deleteBucket(bucketName: String, system: ActorSystem, attributes: Attributes): CompletionStage[Done] = - GCStorageStream.deleteBucket(bucketName)(Materializer.matFromSystem(system), attributes).toJava + GCStorageStream.deleteBucket(bucketName)(Materializer.matFromSystem(system), attributes).asJava /** * Deletes bucket @@ -164,7 +164,7 @@ object GCStorage { * @return a `Source` containing `StorageObject` if it exists */ def getObject(bucket: String, objectName: String): Source[Optional[StorageObject], NotUsed] = - GCStorageStream.getObject(bucket, objectName).map(_.asJava).asJava + GCStorageStream.getObject(bucket, objectName).map(_.toJava).asJava /** * Get storage object @@ -177,7 +177,7 @@ object GCStorage { * @return a `Source` containing `StorageObject` if it exists */ def getObject(bucket: String, objectName: String, generation: Long): Source[Optional[StorageObject], NotUsed] = - GCStorageStream.getObject(bucket, objectName, Option(generation)).map(_.asJava).asJava + GCStorageStream.getObject(bucket, objectName, Option(generation)).map(_.toJava).asJava /** * Deletes object in bucket @@ -251,7 +251,7 @@ object GCStorage { * Otherwise [[scala.Option Option]] will contain a source of object's data. */ def download(bucket: String, objectName: String): Source[Optional[Source[ByteString, NotUsed]], NotUsed] = - GCStorageStream.download(bucket, objectName).map(_.map(_.asJava).asJava).asJava + GCStorageStream.download(bucket, objectName).map(_.map(_.asJava).toJava).asJava /** * Downloads object from bucket. 
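Note that the future converters rename in the opposite direction: scala.compat.java8.FutureConverters offered toJava/toScala, whereas scala.jdk.FutureConverters offers asJava/asScala, which accounts for every mapMaterializedValue change in these javadsl wrappers. A minimal sketch with hypothetical method names:

import java.util.concurrent.CompletionStage
import scala.concurrent.Future
import scala.jdk.FutureConverters._

// Future -> CompletionStage now uses asJava (was .toJava)
def exposeToJava(result: Future[String]): CompletionStage[String] = result.asJava
// CompletionStage -> Future now uses asScala (was .toScala)
def adoptFromJava(stage: CompletionStage[String]): Future[String] = stage.asScala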
@@ -267,7 +267,7 @@ object GCStorage { def download(bucket: String, objectName: String, generation: Long): Source[Optional[Source[ByteString, NotUsed]], NotUsed] = - GCStorageStream.download(bucket, objectName, Option(generation)).map(_.map(_.asJava).asJava).asJava + GCStorageStream.download(bucket, objectName, Option(generation)).map(_.map(_.asJava).toJava).asJava /** * Uploads object, use this for small files and `resumableUpload` for big ones @@ -341,7 +341,7 @@ object GCStorage { chunkSize, metadata.map(_.asScala.toMap)) .asJava - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) } /** @@ -360,7 +360,7 @@ object GCStorage { GCStorageStream .resumableUpload(bucket, objectName, contentType.asInstanceOf[ScalaContentType]) .asJava - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) /** * Rewrites object to wanted destination by making multiple requests. @@ -381,7 +381,7 @@ object GCStorage { .fromGraph( GCStorageStream.rewrite(sourceBucket, sourceObjectName, destinationBucket, destinationObjectName) ) - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) /** * Deletes folder and its content. diff --git a/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCSExtSpec.scala b/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCSExtSpec.scala index d0267fe6a9..6386655da4 100644 --- a/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCSExtSpec.scala +++ b/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCSExtSpec.scala @@ -10,7 +10,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class GCSExtSpec extends AnyFlatSpec with Matchers with LogCapturing { "GCSExt" should "reuse application config from actor system" in { diff --git a/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCSSettingsSpec.scala b/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCSSettingsSpec.scala index bbc5a0664c..32620f1f6c 100644 --- a/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCSSettingsSpec.scala +++ b/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCSSettingsSpec.scala @@ -9,7 +9,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class GCSSettingsSpec extends AnyFlatSpec with Matchers with LogCapturing { "GCSSettings" should "create settings from application config" in { diff --git a/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCStorageExtSpec.scala b/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCStorageExtSpec.scala index b70a3f4974..4dbabe3889 100644 --- a/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCStorageExtSpec.scala +++ b/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCStorageExtSpec.scala @@ -11,7 +11,7 @@ import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import scala.annotation.nowarn -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class GCStorageExtSpec extends AnyFlatSpec with 
Matchers with LogCapturing { "GCStorageExt" should "reuse application config from actor system" in { diff --git a/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCStorageSettingsSpec.scala b/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCStorageSettingsSpec.scala index 0f442de276..b0f1daaf0a 100644 --- a/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCStorageSettingsSpec.scala +++ b/google-cloud-storage/src/test/scala/akka/stream/alpakka/googlecloud/storage/GCStorageSettingsSpec.scala @@ -10,7 +10,7 @@ import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import scala.annotation.nowarn -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class GCStorageSettingsSpec extends AnyFlatSpec with Matchers with LogCapturing { "GCStorageSettings" should "create settings from application config" in { diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/GoogleSettings.scala b/google-common/src/main/scala/akka/stream/alpakka/google/GoogleSettings.scala index e922d2bc86..c10a0617db 100644 --- a/google-common/src/main/scala/akka/stream/alpakka/google/GoogleSettings.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/GoogleSettings.scala @@ -15,12 +15,12 @@ import akka.http.{javadsl => jh} import akka.stream.alpakka.google.auth.Credentials import akka.stream.alpakka.google.http.{ForwardProxyHttpsContext, ForwardProxyPoolSettings} import akka.stream.alpakka.google.implicits._ -import akka.util.JavaDurationConverters._ import com.typesafe.config.Config import java.time import java.util.Optional -import scala.compat.java8.OptionConverters._ +import scala.jdk.DurationConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.duration._ object GoogleSettings { @@ -122,7 +122,7 @@ object RequestSettings { chunkSize: Int, retrySettings: RetrySettings, forwardProxy: Optional[ForwardProxy]) = - apply(userIp.asScala, quotaUser.asScala, prettyPrint, chunkSize, retrySettings, forwardProxy.asScala) + apply(userIp.toScala, quotaUser.toScala, prettyPrint, chunkSize, retrySettings, forwardProxy.toScala) } final case class RequestSettings @InternalApi private ( @@ -139,8 +139,8 @@ final case class RequestSettings @InternalApi private ( "Chunk size must be a multiple of 256 KiB" ) - def getUserIp = userIp.asJava - def getQuotaUser = quotaUser.asJava + def getUserIp = userIp.toJava + def getQuotaUser = quotaUser.toJava def getPrettyPrint = prettyPrint def getUploadChunkSize = uploadChunkSize def getRetrySettings = retrySettings @@ -149,11 +149,11 @@ final case class RequestSettings @InternalApi private ( def withUserIp(userIp: Option[String]) = copy(userIp = userIp) def withUserIp(userIp: Optional[String]) = - copy(userIp = userIp.asScala) + copy(userIp = userIp.toScala) def withQuotaUser(quotaUser: Option[String]) = copy(quotaUser = quotaUser) def withQuotaUser(quotaUser: Optional[String]) = - copy(quotaUser = quotaUser.asScala) + copy(quotaUser = quotaUser.toScala) def withPrettyPrint(prettyPrint: Boolean) = copy(prettyPrint = prettyPrint) def withUploadChunkSize(uploadChunkSize: Int) = @@ -163,7 +163,7 @@ final case class RequestSettings @InternalApi private ( def withForwardProxy(forwardProxy: Option[ForwardProxy]) = copy(forwardProxy = forwardProxy) def withForwardProxy(forwardProxy: Optional[ForwardProxy]) = - copy(forwardProxy = forwardProxy.asScala) + copy(forwardProxy = forwardProxy.toScala) // Cache query string 
private[google] def query = @@ -177,8 +177,8 @@ object RetrySettings { def apply(config: Config): RetrySettings = { RetrySettings( config.getInt("max-retries"), - config.getDuration("min-backoff").asScala, - config.getDuration("max-backoff").asScala, + config.getDuration("min-backoff").toScala, + config.getDuration("max-backoff").toScala, config.getDouble("random-factor") ) } @@ -188,8 +188,8 @@ object RetrySettings { def create(maxRetries: Int, minBackoff: time.Duration, maxBackoff: time.Duration, randomFactor: Double) = apply( maxRetries, - minBackoff.asScala, - maxBackoff.asScala, + minBackoff.toScala, + maxBackoff.toScala, randomFactor ) } @@ -199,8 +199,8 @@ final case class RetrySettings @InternalApi private (maxRetries: Int, maxBackoff: FiniteDuration, randomFactor: Double) { def getMaxRetries = maxRetries - def getMinBackoff = minBackoff.asJava - def getMaxBackoff = maxBackoff.asJava + def getMinBackoff = minBackoff.toJava + def getMaxBackoff = maxBackoff.toJava def getRandomFactor = randomFactor def withMaxRetries(maxRetries: Int) = @@ -208,11 +208,11 @@ final case class RetrySettings @InternalApi private (maxRetries: Int, def withMinBackoff(minBackoff: FiniteDuration) = copy(minBackoff = minBackoff) def withMinBackoff(minBackoff: time.Duration) = - copy(minBackoff = minBackoff.asScala) + copy(minBackoff = minBackoff.toScala) def withMaxBackoff(maxBackoff: FiniteDuration) = copy(maxBackoff = maxBackoff) def withMaxBackoff(maxBackoff: time.Duration) = - copy(maxBackoff = maxBackoff.asScala) + copy(maxBackoff = maxBackoff.toScala) def withRandomFactor(randomFactor: Double) = copy(randomFactor = randomFactor) } @@ -258,7 +258,7 @@ object ForwardProxy { credentials: Optional[jm.headers.BasicHttpCredentials], trustPem: Optional[String], system: ClassicActorSystemProvider) = - apply(scheme, host, port, credentials.asScala.map(_.asInstanceOf[BasicHttpCredentials]), trustPem.asScala)(system) + apply(scheme, host, port, credentials.toScala.map(_.asInstanceOf[BasicHttpCredentials]), trustPem.toScala)(system) def create(connectionContext: jh.HttpConnectionContext, poolSettings: jh.settings.ConnectionPoolSettings) = apply(connectionContext.asInstanceOf[HttpsConnectionContext], poolSettings.asInstanceOf[ConnectionPoolSettings]) diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/PaginatedRequest.scala b/google-common/src/main/scala/akka/stream/alpakka/google/PaginatedRequest.scala index de917d7fb5..d9538f4d83 100644 --- a/google-common/src/main/scala/akka/stream/alpakka/google/PaginatedRequest.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/PaginatedRequest.scala @@ -6,7 +6,6 @@ package akka.stream.alpakka.google import akka.actor.ActorSystem import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.model.HttpMethods.GET import akka.http.scaladsl.model.HttpRequest import akka.http.scaladsl.model.Uri.Query @@ -16,6 +15,7 @@ import akka.stream.alpakka.google.scaladsl.Paginated import akka.stream.scaladsl.Source import akka.{Done, NotUsed} +import scala.concurrent.ExecutionContext import scala.concurrent.Future @InternalApi @@ -59,7 +59,7 @@ private[alpakka] object PaginatedRequest { .pageToken(out) .fold[Either[Done, Option[String]]](Left(Done))(pageToken => Right(Some(pageToken))) Some((nextPageToken, out)) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } } .mapMaterializedValue(_ => NotUsed) diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/auth/Credentials.scala 
b/google-common/src/main/scala/akka/stream/alpakka/google/auth/Credentials.scala index 179bcab1d8..f9b52b38c4 100644 --- a/google-common/src/main/scala/akka/stream/alpakka/google/auth/Credentials.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/auth/Credentials.scala @@ -9,13 +9,14 @@ import akka.annotation.DoNotInherit import akka.event.Logging import akka.http.scaladsl.model.headers.HttpCredentials import akka.stream.alpakka.google.RequestSettings -import akka.util.JavaDurationConverters._ import com.google.auth.{Credentials => GoogleCredentials} import com.typesafe.config.Config import java.util.concurrent.Executor + import scala.collection.immutable.ListMap import scala.concurrent.{Await, ExecutionContext, Future} +import scala.jdk.DurationConverters._ import scala.util.control.NonFatal object Credentials { @@ -63,7 +64,7 @@ object Credentials { ServiceAccountCredentials(c.getConfig("service-account")) private def parseComputeEngine(c: Config)(implicit system: ClassicActorSystemProvider) = - Await.result(ComputeEngineCredentials(), c.getDuration("compute-engine.timeout").asScala) + Await.result(ComputeEngineCredentials(), c.getDuration("compute-engine.timeout").toScala) private def parseUserAccess(c: Config)(implicit system: ClassicActorSystemProvider) = UserAccessCredentials(c.getConfig("user-access")) diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/auth/OAuth2Credentials.scala b/google-common/src/main/scala/akka/stream/alpakka/google/auth/OAuth2Credentials.scala index b020af6be7..23b342538d 100644 --- a/google-common/src/main/scala/akka/stream/alpakka/google/auth/OAuth2Credentials.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/auth/OAuth2Credentials.scala @@ -5,7 +5,6 @@ package akka.stream.alpakka.google.auth import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.model.headers.OAuth2BearerToken import akka.stream.alpakka.google.RequestSettings import akka.stream.alpakka.google.auth.OAuth2Credentials.{ForceRefresh, TokenRequest} @@ -63,9 +62,9 @@ private[auth] abstract class OAuth2Credentials(val projectId: String)(implicit m .andThen { case response => promise.complete(response.map(t => OAuth2BearerToken(t.token))) - }(ExecutionContexts.parasitic) - .map(Some(_))(ExecutionContexts.parasitic) - .recover { case _ => None }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) + .map(Some(_))(ExecutionContext.parasitic) + .recover { case _ => None }(ExecutionContext.parasitic) case (_, ForceRefresh) => Future.successful(None) } diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/auth/ServiceAccountCredentials.scala b/google-common/src/main/scala/akka/stream/alpakka/google/auth/ServiceAccountCredentials.scala index 68dc3516ea..4982002ea8 100644 --- a/google-common/src/main/scala/akka/stream/alpakka/google/auth/ServiceAccountCredentials.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/auth/ServiceAccountCredentials.scala @@ -13,7 +13,7 @@ import spray.json.DefaultJsonProtocol._ import spray.json.{JsonParser, RootJsonFormat} import java.time.Clock -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.Future import scala.io.Source diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/http/GoogleHttp.scala b/google-common/src/main/scala/akka/stream/alpakka/google/http/GoogleHttp.scala index 1f803a8f2f..d71c259f86 100644 --- 
a/google-common/src/main/scala/akka/stream/alpakka/google/http/GoogleHttp.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/http/GoogleHttp.scala @@ -6,7 +6,6 @@ package akka.stream.alpakka.google.http import akka.actor.{ActorSystem, ClassicActorSystemProvider, Scheduler} import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.Http.HostConnectionPool import akka.http.scaladsl.model.headers.Authorization import akka.http.scaladsl.model.{HttpRequest, HttpResponse} @@ -57,7 +56,7 @@ private[alpakka] final class GoogleHttp private (val http: HttpExt) extends AnyV implicit settings: RequestSettings, um: FromResponseUnmarshaller[T] ): Future[T] = Retry(settings.retrySettings) { - singleRawRequest(request).flatMap(Unmarshal(_).to[T])(ExecutionContexts.parasitic) + singleRawRequest(request).flatMap(Unmarshal(_).to[T])(ExecutionContext.parasitic) } /** @@ -69,7 +68,7 @@ private[alpakka] final class GoogleHttp private (val http: HttpExt) extends AnyV um: FromResponseUnmarshaller[T] ): Future[T] = Retry(settings.requestSettings.retrySettings) { implicit val requestSettings: RequestSettings = settings.requestSettings - addAuth(request).flatMap(singleRequest(_))(ExecutionContexts.parasitic) + addAuth(request).flatMap(singleRequest(_))(ExecutionContext.parasitic) } /** @@ -131,8 +130,8 @@ private[alpakka] final class GoogleHttp private (val http: HttpExt) extends AnyV case (res, ctx) => Future .fromTry(res) - .flatMap(Unmarshal(_).to[T])(ExecutionContexts.parasitic) - .transform(Success(_))(ExecutionContexts.parasitic) + .flatMap(Unmarshal(_).to[T])(ExecutionContext.parasitic) + .transform(Success(_))(ExecutionContext.parasitic) .zip(Future.successful(ctx)) } @@ -165,6 +164,6 @@ private[alpakka] final class GoogleHttp private (val http: HttpExt) extends AnyV .get() .map { token => request.addHeader(Authorization(token)) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } } diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/javadsl/Google.scala b/google-common/src/main/scala/akka/stream/alpakka/google/javadsl/Google.scala index b1991fc4d3..a8b5f74c25 100644 --- a/google-common/src/main/scala/akka/stream/alpakka/google/javadsl/Google.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/javadsl/Google.scala @@ -15,7 +15,7 @@ import akka.stream.javadsl.{Sink, Source} import akka.util.ByteString import java.util.concurrent.CompletionStage -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.language.implicitConversions /** @@ -37,7 +37,7 @@ private[alpakka] trait Google { unmarshaller: Unmarshaller[HttpResponse, T], settings: GoogleSettings, system: ClassicActorSystemProvider): CompletionStage[T] = - ScalaGoogle.singleRequest[T](request)(unmarshaller.asScala, system, settings).toJava + ScalaGoogle.singleRequest[T](request)(unmarshaller.asScala, system, settings).asJava /** * Makes a series of requests to page through a resource. Authentication is handled automatically. 
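akka.dispatch.ExecutionContexts.parasitic predates Scala 2.13; scala.concurrent.ExecutionContext.parasitic is the standard-library equivalent, so the Akka-internal alias can be dropped wherever it was used. It runs callbacks synchronously on the thread that completes the future, which is only appropriate for cheap, non-blocking transformations like the unmarshalling steps above. A minimal sketch with hypothetical names:

import scala.concurrent.{ExecutionContext, Future}

def contentLength(response: Future[String]): Future[Int] =
  // cheap, non-blocking transformation: safe on the completing thread
  response.map(_.length)(ExecutionContext.parasitic)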
@@ -66,7 +66,7 @@ private[alpakka] trait Google { request: HttpRequest, unmarshaller: Unmarshaller[HttpResponse, Out] ): Sink[ByteString, CompletionStage[Out]] = - ScalaGoogle.resumableUpload(request)(unmarshaller.asScala).mapMaterializedValue(_.toJava).asJava + ScalaGoogle.resumableUpload(request)(unmarshaller.asScala).mapMaterializedValue(_.asJava).asJava private implicit def requestAsScala(request: HttpRequest): sm.HttpRequest = request.asInstanceOf[sm.HttpRequest] } diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/javadsl/Paginated.scala b/google-common/src/main/scala/akka/stream/alpakka/google/javadsl/Paginated.scala index 6fd3f8a513..b8e5f7ef71 100644 --- a/google-common/src/main/scala/akka/stream/alpakka/google/javadsl/Paginated.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/javadsl/Paginated.scala @@ -7,7 +7,7 @@ package akka.stream.alpakka.google.javadsl import akka.stream.alpakka.google.scaladsl import java.util -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ /** * Models a paginated resource @@ -22,6 +22,6 @@ trait Paginated { private[alpakka] object Paginated { implicit object paginatedIsPaginated extends scaladsl.Paginated[Paginated] { - override def pageToken(paginated: Paginated): Option[String] = paginated.getPageToken.asScala + override def pageToken(paginated: Paginated): Option[String] = paginated.getPageToken.toScala } } diff --git a/google-common/src/main/scala/akka/stream/alpakka/google/util/Retry.scala b/google-common/src/main/scala/akka/stream/alpakka/google/util/Retry.scala index 436a167195..ff50bd6114 100644 --- a/google-common/src/main/scala/akka/stream/alpakka/google/util/Retry.scala +++ b/google-common/src/main/scala/akka/stream/alpakka/google/util/Retry.scala @@ -6,7 +6,6 @@ package akka.stream.alpakka.google.util import akka.actor.Scheduler import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.pattern import akka.stream.alpakka.google.RetrySettings import akka.stream.scaladsl.{Flow, RetryFlow} @@ -49,14 +48,14 @@ object Retry { import settings._ val futureBuilder = () => future - .map(Success(_))(ExecutionContexts.parasitic) + .map(Success(_))(ExecutionContext.parasitic) .recover { case Retry(ex) => throw ex case ex => Failure(ex) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) pattern .retry(futureBuilder, maxRetries, minBackoff, maxBackoff, randomFactor) - .flatMap(Future.fromTry)(ExecutionContexts.parasitic) + .flatMap(Future.fromTry)(ExecutionContext.parasitic) } def flow[In, Out, Mat](retrySettings: RetrySettings)(flow: Flow[In, Out, Mat]): Flow[In, Out, Mat] = diff --git a/google-fcm/src/main/scala/akka/stream/alpakka/google/firebase/fcm/FcmSettings.scala b/google-fcm/src/main/scala/akka/stream/alpakka/google/firebase/fcm/FcmSettings.scala index bf0f63d350..a90e498e53 100644 --- a/google-fcm/src/main/scala/akka/stream/alpakka/google/firebase/fcm/FcmSettings.scala +++ b/google-fcm/src/main/scala/akka/stream/alpakka/google/firebase/fcm/FcmSettings.scala @@ -10,7 +10,7 @@ import akka.stream.alpakka.google.{ForwardProxy => CommonForwardProxy} import java.util.Objects import scala.annotation.nowarn -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ @nowarn("msg=deprecated") final class FcmSettings private ( @@ -234,9 +234,9 @@ final class ForwardProxy private (val host: String, def getPort: Int = port /** Java API */ - def getCredentials: java.util.Optional[ForwardProxyCredentials] = credentials.asJava 
+ def getCredentials: java.util.Optional[ForwardProxyCredentials] = credentials.toJava - def getForwardProxyTrustPem: java.util.Optional[ForwardProxyTrustPem] = trustPem.asJava + def getForwardProxyTrustPem: java.util.Optional[ForwardProxyTrustPem] = trustPem.toJava def withHost(host: String) = copy(host = host) def withPort(port: Int) = copy(port = port) diff --git a/hbase/src/main/scala/akka/stream/alpakka/hbase/HTableSettings.scala b/hbase/src/main/scala/akka/stream/alpakka/hbase/HTableSettings.scala index 733b838620..0d7766c4fd 100644 --- a/hbase/src/main/scala/akka/stream/alpakka/hbase/HTableSettings.scala +++ b/hbase/src/main/scala/akka/stream/alpakka/hbase/HTableSettings.scala @@ -9,8 +9,8 @@ import org.apache.hadoop.hbase.TableName import org.apache.hadoop.hbase.client.Mutation import scala.collection.immutable -import scala.collection.JavaConverters._ -import scala.compat.java8.FunctionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.FunctionConverters._ final class HTableSettings[T] private (val conf: Configuration, val tableName: TableName, diff --git a/hbase/src/main/scala/akka/stream/alpakka/hbase/javadsl/HTableStage.scala b/hbase/src/main/scala/akka/stream/alpakka/hbase/javadsl/HTableStage.scala index 3861974371..b9b0b54cc4 100644 --- a/hbase/src/main/scala/akka/stream/alpakka/hbase/javadsl/HTableStage.scala +++ b/hbase/src/main/scala/akka/stream/alpakka/hbase/javadsl/HTableStage.scala @@ -12,7 +12,7 @@ import akka.stream.scaladsl.{Flow, Keep, Sink, Source} import akka.{Done, NotUsed} import org.apache.hadoop.hbase.client.{Result, Scan} -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ object HTableStage { @@ -21,7 +21,7 @@ object HTableStage { * HBase mutations for every incoming element are derived from the converter functions defined in the config. */ def sink[A](config: HTableSettings[A]): akka.stream.javadsl.Sink[A, CompletionStage[Done]] = - Flow[A].via(flow(config)).toMat(Sink.ignore)(Keep.right).mapMaterializedValue(toJava).asJava + Flow[A].via(flow(config)).toMat(Sink.ignore)(Keep.right).mapMaterializedValue(_.asJava).asJava /** * Writes incoming element to HBase. 
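The HBase settings migration swaps scala.compat.java8.FunctionConverters for scala.jdk.FunctionConverters, which keeps the asJava/asScala extension methods and picks the most specific java.util.function interface available. A minimal sketch with hypothetical values:

import scala.jdk.FunctionConverters._

val isEven: Int => Boolean = _ % 2 == 0
// asJava selects the specialized interface, here IntPredicate
val javaPredicate: java.util.function.IntPredicate = isEven.asJava
// asScala converts a java.util.function interface back to a Scala function
val backAgain: Int => Boolean = javaPredicate.asScala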
diff --git a/hdfs/src/main/scala/akka/stream/alpakka/hdfs/javadsl/HdfsSource.scala b/hdfs/src/main/scala/akka/stream/alpakka/hdfs/javadsl/HdfsSource.scala index 694b35d914..1c08f45912 100644 --- a/hdfs/src/main/scala/akka/stream/alpakka/hdfs/javadsl/HdfsSource.scala +++ b/hdfs/src/main/scala/akka/stream/alpakka/hdfs/javadsl/HdfsSource.scala @@ -15,7 +15,7 @@ import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.io.Writable import org.apache.hadoop.io.compress.CompressionCodec -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ object HdfsSource { @@ -29,7 +29,7 @@ object HdfsSource { fs: FileSystem, path: Path ): javadsl.Source[ByteString, CompletionStage[IOResult]] = - ScalaHdfsSource.data(fs, path).mapMaterializedValue(_.toJava).asJava + ScalaHdfsSource.data(fs, path).mapMaterializedValue(_.asJava).asJava /** * Java API: creates a [[Source]] that consumes as [[ByteString]] @@ -43,7 +43,7 @@ object HdfsSource { fs: FileSystem, path: Path, chunkSize: Int ): javadsl.Source[ByteString, CompletionStage[IOResult]] = - ScalaHdfsSource.data(fs, path, chunkSize).mapMaterializedValue(_.toJava).asJava + ScalaHdfsSource.data(fs, path, chunkSize).mapMaterializedValue(_.asJava).asJava /** * Java API: creates a [[Source]] that consumes as [[ByteString]] @@ -57,7 +57,7 @@ object HdfsSource { fs: FileSystem, path: Path, codec: CompressionCodec ): javadsl.Source[ByteString, CompletionStage[IOResult]] = - ScalaHdfsSource.compressed(fs, path, codec).mapMaterializedValue(_.toJava).asJava + ScalaHdfsSource.compressed(fs, path, codec).mapMaterializedValue(_.asJava).asJava /** * Java API: creates a [[Source]] that consumes as [[ByteString]] @@ -73,7 +73,7 @@ object HdfsSource { fs: FileSystem, path: Path, codec: CompressionCodec, chunkSize: Int = 8192 ): javadsl.Source[ByteString, CompletionStage[IOResult]] = - ScalaHdfsSource.compressed(fs, path, codec, chunkSize).mapMaterializedValue(_.toJava).asJava + ScalaHdfsSource.compressed(fs, path, codec, chunkSize).mapMaterializedValue(_.asJava).asJava /** * Java API: creates a [[Source]] that consumes as [[(K, V)]] diff --git a/hdfs/src/test/scala/akka/stream/alpakka/hdfs/util/TestUtils.scala b/hdfs/src/test/scala/akka/stream/alpakka/hdfs/util/TestUtils.scala index 42f914216c..7b410d9e9d 100644 --- a/hdfs/src/test/scala/akka/stream/alpakka/hdfs/util/TestUtils.scala +++ b/hdfs/src/test/scala/akka/stream/alpakka/hdfs/util/TestUtils.scala @@ -178,7 +178,7 @@ object JavaTestUtils extends TestUtils { import org.junit.Assert._ - import scala.collection.JavaConverters._ + import scala.jdk.CollectionConverters._ val books: util.List[ByteString] = ScalaTestUtils.books.asJava diff --git a/influxdb/src/main/scala/akka/stream/alpakka/influxdb/impl/AlpakkaResultMapperHelper.scala b/influxdb/src/main/scala/akka/stream/alpakka/influxdb/impl/AlpakkaResultMapperHelper.scala index 045314feb6..f0c0b861e5 100644 --- a/influxdb/src/main/scala/akka/stream/alpakka/influxdb/impl/AlpakkaResultMapperHelper.scala +++ b/influxdb/src/main/scala/akka/stream/alpakka/influxdb/impl/AlpakkaResultMapperHelper.scala @@ -17,7 +17,7 @@ import akka.annotation.InternalApi import org.influxdb.InfluxDBMapperException import org.influxdb.dto.Point -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Internal API.
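The collection-converter migration is the most mechanical of the set: scala.jdk.CollectionConverters keeps the asScala/asJava method names of the deprecated scala.collection.JavaConverters, so only the import changes. A minimal sketch with hypothetical values:

import scala.jdk.CollectionConverters._

val scalaBooks = List("book-1", "book-2")
// same method names as under the old JavaConverters import
val javaBooks: java.util.List[String] = scalaBooks.asJava
val roundTrip: Seq[String] = javaBooks.asScala.toSeq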
diff --git a/influxdb/src/main/scala/akka/stream/alpakka/influxdb/impl/InfluxDbSourceStage.scala b/influxdb/src/main/scala/akka/stream/alpakka/influxdb/impl/InfluxDbSourceStage.scala index 5318af0978..3eff57d175 100644 --- a/influxdb/src/main/scala/akka/stream/alpakka/influxdb/impl/InfluxDbSourceStage.scala +++ b/influxdb/src/main/scala/akka/stream/alpakka/influxdb/impl/InfluxDbSourceStage.scala @@ -11,7 +11,7 @@ import akka.stream.stage.{GraphStage, GraphStageLogic, OutHandler} import org.influxdb.{InfluxDB, InfluxDBException} import org.influxdb.dto.{Query, QueryResult} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * INTERNAL API diff --git a/influxdb/src/main/scala/akka/stream/alpakka/influxdb/javadsl/InfluxDbFlow.scala b/influxdb/src/main/scala/akka/stream/alpakka/influxdb/javadsl/InfluxDbFlow.scala index de19f90bea..5bdc6216f3 100644 --- a/influxdb/src/main/scala/akka/stream/alpakka/influxdb/javadsl/InfluxDbFlow.scala +++ b/influxdb/src/main/scala/akka/stream/alpakka/influxdb/javadsl/InfluxDbFlow.scala @@ -12,7 +12,7 @@ import akka.stream.javadsl.Flow import akka.stream.alpakka.influxdb.scaladsl import org.influxdb.dto.Point -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * API may change. diff --git a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala index c48739c24c..f01afdbd65 100644 --- a/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala +++ b/influxdb/src/test/scala/docs/scaladsl/InfluxDbSpec.scala @@ -18,7 +18,7 @@ import akka.testkit.TestKit import docs.javadsl.TestUtils._ import akka.stream.scaladsl.Sink -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import docs.javadsl.TestConstants.{INFLUXDB_URL, PASSWORD, USERNAME} import org.scalatest.matchers.must.Matchers import org.scalatest.wordspec.AnyWordSpec diff --git a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/IronMqSettings.scala b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/IronMqSettings.scala index d90f7db52f..5d229a23a8 100644 --- a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/IronMqSettings.scala +++ b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/IronMqSettings.scala @@ -10,7 +10,7 @@ import akka.stream.alpakka.ironmq.IronMqSettings.ConsumerSettings import com.typesafe.config.Config import scala.concurrent.duration.FiniteDuration -import akka.util.JavaDurationConverters._ +import scala.jdk.DurationConverters._ /** * IronMQ settings. To a detailed documentation please refer to the reference.conf. @@ -78,7 +78,7 @@ object IronMqSettings { copy(fetchInterval = value) /** Java API: The interval of time between each poll loop. */ - def withFetchInterval(value: java.time.Duration): ConsumerSettings = copy(fetchInterval = value.asScala) + def withFetchInterval(value: java.time.Duration): ConsumerSettings = copy(fetchInterval = value.toScala) /** Scala API: * The amount of time the consumer will wait for the messages to be available on the queue. The IronMQ time unit is @@ -90,7 +90,7 @@ object IronMqSettings { * The amount of time the consumer will wait for the messages to be available on the queue. The IronMQ time unit is * the second so any other value is approximated to the second. 
*/ - def withPollTimeout(value: java.time.Duration): ConsumerSettings = copy(pollTimeout = value.asScala) + def withPollTimeout(value: java.time.Duration): ConsumerSettings = copy(pollTimeout = value.toScala) /** Scala API: * The amount of time the consumer will reserve the message for. It should be higher than the time needed to @@ -105,7 +105,7 @@ * process the message, otherwise the same message will be processed multiple times. Again the IronMq time unit is * the second. */ - def withReservationTimeout(value: java.time.Duration): ConsumerSettings = copy(reservationTimeout = value.asScala) + def withReservationTimeout(value: java.time.Duration): ConsumerSettings = copy(reservationTimeout = value.toScala) private def copy( bufferMinSize: Int = bufferMinSize, @@ -135,9 +135,9 @@ object IronMqSettings { def apply(config: Config): ConsumerSettings = { val bufferMinSize: Int = config.getInt("buffer-min-size") val bufferMaxSize: Int = config.getInt("buffer-max-size") - val fetchInterval: FiniteDuration = config.getDuration("fetch-interval").asScala - val pollTimeout: FiniteDuration = config.getDuration("poll-timeout").asScala - val reservationTimeout: FiniteDuration = config.getDuration("reservation-timeout").asScala + val fetchInterval: FiniteDuration = config.getDuration("fetch-interval").toScala + val pollTimeout: FiniteDuration = config.getDuration("poll-timeout").toScala + val reservationTimeout: FiniteDuration = config.getDuration("reservation-timeout").toScala new ConsumerSettings(bufferMinSize, bufferMaxSize, fetchInterval, pollTimeout, reservationTimeout) } } diff --git a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/domain.scala b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/domain.scala index 6d6fc0e6d1..6e784873ec 100644 --- a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/domain.scala +++ b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/domain.scala @@ -5,7 +5,7 @@ package akka.stream.alpakka.ironmq import scala.concurrent.duration.{Duration, FiniteDuration} -import akka.util.JavaDurationConverters._ +import scala.jdk.DurationConverters._ case class PushMessage(body: String, delay: FiniteDuration = Duration.Zero) @@ -14,7 +14,7 @@ object PushMessage { def create(body: String): PushMessage = PushMessage(body) def create(body: String, duration: java.time.Duration): PushMessage = - PushMessage(body, duration.asScala) + PushMessage(body, duration.toScala) } /** diff --git a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/javadsl/IronMqProducer.scala b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/javadsl/IronMqProducer.scala index 6ceed33e53..e55b344da4 100644 --- a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/javadsl/IronMqProducer.scala +++ b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/javadsl/IronMqProducer.scala @@ -13,7 +13,7 @@ import akka.stream.javadsl.{Flow, Sink} import akka.stream.scaladsl.{Keep, Flow => ScalaFlow} import akka.stream.alpakka.ironmq.scaladsl.{IronMqProducer => ScalaIronMqProducer} -import scala.compat.java8.FutureConverters +import scala.jdk.FutureConverters object IronMqProducer { @@ -27,7 +27,7 @@ object IronMqProducer { .asInstanceOf[Flow[PushMessage, String, NotUsed]] def sink(queueName: String, settings: IronMqSettings): Sink[PushMessage, CompletionStage[Done]] = - ScalaIronMqProducer.sink(queueName, settings).mapMaterializedValue(_.toJava).asJava + ScalaIronMqProducer.sink(queueName, settings).mapMaterializedValue(_.asJava).asJava def atLeastOnceFlow[C1 <: Committable]( queueName: String, diff --git
a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/javadsl/package.scala b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/javadsl/package.scala index 4f90a36586..7fb043dec7 100644 --- a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/javadsl/package.scala +++ b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/javadsl/package.scala @@ -12,7 +12,7 @@ import akka.stream.alpakka.ironmq.scaladsl.{ CommittableMessage => ScalaCommittableMessage } -import scala.compat.java8.FutureConverters +import scala.jdk.FutureConverters import scala.concurrent.Future /** @@ -25,26 +25,26 @@ package object javadsl { private[javadsl] implicit class RichScalaCommittableMessage(cm: ScalaCommittableMessage) { def asJava: CommittableMessage = new CommittableMessage { override def message: Message = cm.message - override def commit(): CompletionStage[Done] = cm.commit().toJava + override def commit(): CompletionStage[Done] = cm.commit().asJava } } private[javadsl] implicit class RichScalaCommittable(cm: ScalaCommittable) { def asJava: Committable = new Committable { - override def commit(): CompletionStage[Done] = cm.commit().toJava + override def commit(): CompletionStage[Done] = cm.commit().asJava } } private[javadsl] implicit class RichCommittableMessage(cm: CommittableMessage) { def asScala: ScalaCommittableMessage = new ScalaCommittableMessage { override def message: Message = cm.message - override def commit(): Future[Done] = cm.commit().toScala + override def commit(): Future[Done] = cm.commit().asScala } } private[javadsl] implicit class RichCommittable(cm: Committable) { def asScala: ScalaCommittable = new ScalaCommittable { - override def commit(): Future[Done] = cm.commit().toScala + override def commit(): Future[Done] = cm.commit().asScala } } diff --git a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqConsumer.scala b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqConsumer.scala index 4fc49f242f..9f07081c30 100644 --- a/ironmq/src/main/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqConsumer.scala +++ b/ironmq/src/main/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqConsumer.scala @@ -5,16 +5,17 @@ package akka.stream.alpakka.ironmq.scaladsl import akka.NotUsed -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.ironmq._ import akka.stream.alpakka.ironmq.impl.IronMqPullStage import akka.stream.scaladsl._ +import scala.concurrent.ExecutionContext + object IronMqConsumer { def atMostOnceSource(queueName: String, settings: IronMqSettings): Source[Message, NotUsed] = Source.fromGraph(new IronMqPullStage(queueName, settings)).mapAsync(1) { cm => - cm.commit().map(_ => cm.message)(ExecutionContexts.parasitic) + cm.commit().map(_ => cm.message)(ExecutionContext.parasitic) } def atLeastOnceSource[K, V](queueName: String, settings: IronMqSettings): Source[CommittableMessage, NotUsed] = diff --git a/ironmq/src/test/java/akka/stream/alpakka/ironmq/UnitTest.java b/ironmq/src/test/java/akka/stream/alpakka/ironmq/UnitTest.java index 400f531aab..9fd0e431f2 100644 --- a/ironmq/src/test/java/akka/stream/alpakka/ironmq/UnitTest.java +++ b/ironmq/src/test/java/akka/stream/alpakka/ironmq/UnitTest.java @@ -15,14 +15,13 @@ import org.junit.Before; import org.junit.Rule; +import scala.jdk.javaapi.CollectionConverters; +import scala.jdk.javaapi.FutureConverters; import java.util.List; import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.IntStream; -import static scala.collection.JavaConverters.*; -import static 
scala.compat.java8.FutureConverters.*; - public abstract class UnitTest { @Rule public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4(); @@ -77,7 +76,7 @@ protected String givenQueue() { protected String givenQueue(String name) { try { - return toJava(ironMqClient.createQueue(name, system.dispatcher())) + return FutureConverters.asJava(ironMqClient.createQueue(name, system.dispatcher())) .toCompletableFuture() .get(); } catch (Exception e) { @@ -93,10 +92,10 @@ protected Message.Ids givenMessages(String queueName, int n) { .collect(Collectors.toList()); try { - return toJava( + return FutureConverters.asJava( ironMqClient.pushMessages( queueName, - asScalaBufferConverter(messages).asScala().toSeq(), + CollectionConverters.asScala(messages).toSeq(), system.dispatcher())) .toCompletableFuture() .get(); diff --git a/ironmq/src/test/scala/akka/stream/alpakka/ironmq/impl/IronMqPushStageSpec.scala b/ironmq/src/test/scala/akka/stream/alpakka/ironmq/impl/IronMqPushStageSpec.scala index 0d22622bd6..d092d4ee1c 100644 --- a/ironmq/src/test/scala/akka/stream/alpakka/ironmq/impl/IronMqPushStageSpec.scala +++ b/ironmq/src/test/scala/akka/stream/alpakka/ironmq/impl/IronMqPushStageSpec.scala @@ -4,7 +4,6 @@ package akka.stream.alpakka.ironmq.impl -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.ironmq.{IronMqSettings, IronMqSpec, PushMessage} import akka.stream.scaladsl._ import akka.stream.testkit.scaladsl.StreamTestKit.assertAllStagesStopped @@ -13,7 +12,7 @@ import scala.concurrent.ExecutionContext class IronMqPushStageSpec extends IronMqSpec { - implicit val ec: ExecutionContext = ExecutionContexts.global() + implicit val ec: ExecutionContext = ExecutionContext.global "IronMqPushMessageStage" should { "push messages to the queue" in assertAllStagesStopped { diff --git a/ironmq/src/test/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqConsumerSpec.scala b/ironmq/src/test/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqConsumerSpec.scala index 71c9feec49..b211c48c77 100644 --- a/ironmq/src/test/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqConsumerSpec.scala +++ b/ironmq/src/test/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqConsumerSpec.scala @@ -5,7 +5,6 @@ package akka.stream.alpakka.ironmq.scaladsl import akka.NotUsed -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.ironmq.{IronMqSettings, IronMqSpec, PushMessage} import akka.stream.scaladsl.{Sink, Source} import com.typesafe.config.{Config, ConfigFactory} @@ -16,7 +15,7 @@ import scala.concurrent.ExecutionContext class IronMqConsumerSpec extends IronMqSpec with ParallelTestExecution { - implicit val ec: ExecutionContext = ExecutionContexts.global() + implicit val ec: ExecutionContext = ExecutionContext.global val messages: Source[PushMessage, NotUsed] = Source.fromIterator(() => Iterator.from(0)).map(i => PushMessage(s"test-$i")) diff --git a/ironmq/src/test/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqProducerSpec.scala b/ironmq/src/test/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqProducerSpec.scala index e48ec57549..886606ddaa 100644 --- a/ironmq/src/test/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqProducerSpec.scala +++ b/ironmq/src/test/scala/akka/stream/alpakka/ironmq/scaladsl/IronMqProducerSpec.scala @@ -4,7 +4,6 @@ package akka.stream.alpakka.ironmq.scaladsl -import akka.dispatch.ExecutionContexts import akka.stream.alpakka.ironmq.{IronMqSettings, IronMqSpec, PushMessage} import akka.stream.scaladsl.{Flow, Sink, Source} import akka.{Done, NotUsed} @@ -18,7 +17,7 @@ class 
IronMqProducerSpec extends IronMqSpec { val messages: Source[PushMessage, NotUsed] = Source.fromIterator(() => Iterator.from(0)).map(i => PushMessage(s"test-$i")) - implicit val ec: ExecutionContext = ExecutionContexts.global() + implicit val ec: ExecutionContext = ExecutionContext.global "producerSink" should { "publish messages on IronMq" in assertAllStagesStopped { diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/ConnectionRetrySettings.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/ConnectionRetrySettings.scala index a3c16b49b2..f30b4c203c 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/ConnectionRetrySettings.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/ConnectionRetrySettings.scala @@ -5,7 +5,7 @@ package akka.stream.alpakka.jakartajms import akka.actor.{ActorSystem, ClassicActorSystemProvider} -import akka.util.JavaDurationConverters._ +import scala.jdk.DurationConverters._ import com.typesafe.config.Config import scala.concurrent.duration._ @@ -28,14 +28,14 @@ final class ConnectionRetrySettings private ( copy(connectTimeout = value) /** Java API: Time allowed to establish and start a connection. */ - def withConnectTimeout(value: java.time.Duration): ConnectionRetrySettings = copy(connectTimeout = value.asScala) + def withConnectTimeout(value: java.time.Duration): ConnectionRetrySettings = copy(connectTimeout = value.toScala) /** Wait time before retrying the first time. */ def withInitialRetry(value: scala.concurrent.duration.FiniteDuration): ConnectionRetrySettings = copy(initialRetry = value) /** Java API: Wait time before retrying the first time. */ - def withInitialRetry(value: java.time.Duration): ConnectionRetrySettings = copy(initialRetry = value.asScala) + def withInitialRetry(value: java.time.Duration): ConnectionRetrySettings = copy(initialRetry = value.toScala) /** Back-off factor for subsequent retries. */ def withBackoffFactor(value: Double): ConnectionRetrySettings = copy(backoffFactor = value) @@ -45,7 +45,7 @@ final class ConnectionRetrySettings private ( copy(maxBackoff = value) /** Java API: Maximum back-off time allowed, after which all retries will happen after this delay. */ - def withMaxBackoff(value: java.time.Duration): ConnectionRetrySettings = copy(maxBackoff = value.asScala) + def withMaxBackoff(value: java.time.Duration): ConnectionRetrySettings = copy(maxBackoff = value.toScala) /** Maximum number of retries allowed. */ def withMaxRetries(value: Int): ConnectionRetrySettings = copy(maxRetries = value) @@ -90,10 +90,10 @@ object ConnectionRetrySettings { * Reads from the given config. 
*/ def apply(c: Config): ConnectionRetrySettings = { - val connectTimeout = c.getDuration("connect-timeout").asScala - val initialRetry = c.getDuration("initial-retry").asScala + val connectTimeout = c.getDuration("connect-timeout").toScala + val initialRetry = c.getDuration("initial-retry").toScala val backoffFactor = c.getDouble("backoff-factor") - val maxBackoff = c.getDuration("max-backoff").asScala + val maxBackoff = c.getDuration("max-backoff").toScala val maxRetries = if (c.getString("max-retries") == "infinite") infiniteRetries else c.getInt("max-retries") new ConnectionRetrySettings( connectTimeout, diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/Destinations.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/Destinations.scala index 0a634b54c1..2daccd2a1b 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/Destinations.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/Destinations.scala @@ -5,7 +5,7 @@ package akka.stream.alpakka.jakartajms import jakarta.jms -import scala.compat.java8.FunctionConverters._ +import scala.jdk.FunctionConverters._ /** * A destination to send to/receive from. diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsConsumerSettings.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsConsumerSettings.scala index 42783bc699..be1510680a 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsConsumerSettings.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsConsumerSettings.scala @@ -5,10 +5,10 @@ package akka.stream.alpakka.jakartajms import akka.actor.{ActorSystem, ClassicActorSystemProvider} -import akka.util.JavaDurationConverters._ import com.typesafe.config.{Config, ConfigValueType} import scala.concurrent.duration.FiniteDuration +import scala.jdk.DurationConverters._ /** * Settings for [[akka.stream.alpakka.jakartajms.scaladsl.JmsConsumer]] and [[akka.stream.alpakka.jakartajms.javadsl.JmsConsumer]]. @@ -74,7 +74,7 @@ final class JmsConsumerSettings private ( def withAckTimeout(value: scala.concurrent.duration.Duration): JmsConsumerSettings = copy(ackTimeout = value) /** Java API: Timeout for acknowledge. (Used by TX consumers.) */ - def withAckTimeout(value: java.time.Duration): JmsConsumerSettings = copy(ackTimeout = value.asScala) + def withAckTimeout(value: java.time.Duration): JmsConsumerSettings = copy(ackTimeout = value.toScala) /** Max interval before sending queued acknowledges back to the broker. (Used by AckSources.) */ def withMaxAckInterval(value: scala.concurrent.duration.FiniteDuration): JmsConsumerSettings = @@ -82,7 +82,7 @@ final class JmsConsumerSettings private ( /** Java API: Max interval before sending queued acknowledges back to the broker. (Used by AckSources.) */ def withMaxAckInterval(value: java.time.Duration): JmsConsumerSettings = - copy(maxAckInterval = Option(value.asScala)) + copy(maxAckInterval = Option(value.toScala)) /** Max number of acks queued by AckSource before they are sent to broker. 
(Unless MaxAckInterval is specified) */ def withMaxPendingAcks(value: Int): JmsConsumerSettings = copy(maxPendingAcks = value) @@ -99,7 +99,7 @@ final class JmsConsumerSettings private ( /** Java API: Timeout for connection status subscriber */ def withConnectionStatusSubscriptionTimeout(value: java.time.Duration): JmsConsumerSettings = - copy(connectionStatusSubscriptionTimeout = value.asScala) + copy(connectionStatusSubscriptionTimeout = value.toScala) private def copy( connectionFactory: jakarta.jms.ConnectionFactory = connectionFactory, @@ -176,12 +176,12 @@ object JmsConsumerSettings { val bufferSize = c.getInt("buffer-size") val selector = getStringOption("selector") val acknowledgeMode = getOption("acknowledge-mode", c => AcknowledgeMode.from(c.getString("acknowledge-mode"))) - val ackTimeout = c.getDuration("ack-timeout").asScala - val maxAckIntervalDuration = getOption("max-ack-interval", config => config.getDuration("max-ack-interval").asScala) + val ackTimeout = c.getDuration("ack-timeout").toScala + val maxAckIntervalDuration = getOption("max-ack-interval", config => config.getDuration("max-ack-interval").toScala) val maxAckInterval = maxAckIntervalDuration.map(duration => FiniteDuration(duration.length, duration.unit)) val maxPendingAcks = c.getInt("max-pending-acks") val failStreamOnAckTimeout = c.getBoolean("fail-stream-on-ack-timeout") - val connectionStatusSubscriptionTimeout = c.getDuration("connection-status-subscription-timeout").asScala + val connectionStatusSubscriptionTimeout = c.getDuration("connection-status-subscription-timeout").toScala new JmsConsumerSettings( connectionFactory, connectionRetrySettings, diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsMessages.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsMessages.scala index fe0e51af6e..66990eb898 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsMessages.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsMessages.scala @@ -9,8 +9,8 @@ import jakarta.jms import akka.NotUsed import akka.stream.alpakka.jakartajms.impl.JmsMessageReader._ import akka.util.ByteString -import scala.collection.JavaConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ /** * Base interface for messages handled by JmsProducers. Sub-classes support pass-through or use [[akka.NotUsed]] as type for pass-through. @@ -37,7 +37,7 @@ sealed trait JmsEnvelope[+PassThrough] { /** * Java API. 
*/ - def getDestination: java.util.Optional[Destination] = destination.asJava + def getDestination: java.util.Optional[Destination] = destination.toJava def passThrough: PassThrough diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsProducerSettings.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsProducerSettings.scala index 86289794ac..c64bcfaf94 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsProducerSettings.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/JmsProducerSettings.scala @@ -5,10 +5,10 @@ package akka.stream.alpakka.jakartajms import akka.actor.{ActorSystem, ClassicActorSystemProvider} -import akka.util.JavaDurationConverters._ import com.typesafe.config.{Config, ConfigValueType} import scala.concurrent.duration.FiniteDuration +import scala.jdk.DurationConverters._ /** * Settings for [[akka.stream.alpakka.jakartajms.scaladsl.JmsProducer]] and [[akka.stream.alpakka.jakartajms.javadsl.JmsProducer]]. @@ -64,7 +64,7 @@ final class JmsProducerSettings private ( * Java API: Time messages should be kept on the JMS broker. This setting can be overridden on * individual messages. If not set, messages will never expire. */ - def withTimeToLive(value: java.time.Duration): JmsProducerSettings = copy(timeToLive = Option(value).map(_.asScala)) + def withTimeToLive(value: java.time.Duration): JmsProducerSettings = copy(timeToLive = Option(value).map(_.toScala)) /** Timeout for connection status subscriber */ def withConnectionStatusSubscriptionTimeout(value: FiniteDuration): JmsProducerSettings = @@ -72,7 +72,7 @@ final class JmsProducerSettings private ( /** Java API: Timeout for connection status subscriber */ def withConnectionStatusSubscriptionTimeout(value: java.time.Duration): JmsProducerSettings = - copy(connectionStatusSubscriptionTimeout = value.asScala) + copy(connectionStatusSubscriptionTimeout = value.toScala) private def copy( connectionFactory: jakarta.jms.ConnectionFactory = connectionFactory, @@ -128,8 +128,8 @@ object JmsProducerSettings { val sendRetrySettings = SendRetrySettings(c.getConfig("send-retry")) val credentials = getOption("credentials", c => Credentials(c.getConfig("credentials"))) val sessionCount = c.getInt("session-count") - val timeToLive = getOption("time-to-live", _.getDuration("time-to-live").asScala) - val connectionStatusSubscriptionTimeout = c.getDuration("connection-status-subscription-timeout").asScala + val timeToLive = getOption("time-to-live", _.getDuration("time-to-live").toScala) + val connectionStatusSubscriptionTimeout = c.getDuration("connection-status-subscription-timeout").toScala new JmsProducerSettings( connectionFactory, connectionRetrySettings, diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/SendRetrySettings.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/SendRetrySettings.scala index 667a6e02ef..63a2a5c0ad 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/SendRetrySettings.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/SendRetrySettings.scala @@ -8,7 +8,7 @@ import akka.actor.{ActorSystem, ClassicActorSystemProvider} import com.typesafe.config.Config import scala.concurrent.duration._ -import akka.util.JavaDurationConverters._ +import scala.jdk.DurationConverters._ /** * When a connection to a broker starts failing, sending JMS messages will also fail. 
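A note on the Optional bridge above: scala.compat.java8.OptionConverters and scala.jdk.OptionConverters expose the same conversions, but the scala.jdk names are direction-explicit, so asJava/asScala on options become toJava/toScala. A minimal sketch of the renamed calls, with illustrative values that are not taken from this patch:

import java.util.Optional
import scala.jdk.OptionConverters._

object OptionBridgeSketch extends App {
  val destination: Option[String] = Some("queue-1")
  // Option#toJava replaces the old OptionConverters asJava
  val javaSide: Optional[String] = destination.toJava
  // Optional#toScala replaces the old asScala
  val roundTripped: Option[String] = javaSide.toScala
  println(roundTripped)
}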
@@ -23,7 +23,7 @@ final class SendRetrySettings private (val initialRetry: scala.concurrent.durati def withInitialRetry(value: scala.concurrent.duration.FiniteDuration): SendRetrySettings = copy(initialRetry = value) /** Java API: Wait time before retrying the first time. */ - def withInitialRetry(value: java.time.Duration): SendRetrySettings = copy(initialRetry = value.asScala) + def withInitialRetry(value: java.time.Duration): SendRetrySettings = copy(initialRetry = value.toScala) /** Back-off factor for subsequent retries */ def withBackoffFactor(value: Double): SendRetrySettings = copy(backoffFactor = value) @@ -32,7 +32,7 @@ final class SendRetrySettings private (val initialRetry: scala.concurrent.durati def withMaxBackoff(value: scala.concurrent.duration.FiniteDuration): SendRetrySettings = copy(maxBackoff = value) /** Java API: Maximum back-off time allowed, after which all retries will happen after this delay. */ - def withMaxBackoff(value: java.time.Duration): SendRetrySettings = copy(maxBackoff = value.asScala) + def withMaxBackoff(value: java.time.Duration): SendRetrySettings = copy(maxBackoff = value.toScala) /** Maximum number of retries allowed. */ def withMaxRetries(value: Int): SendRetrySettings = copy(maxRetries = value) @@ -74,9 +74,9 @@ object SendRetrySettings { * Reads from the given config. */ def apply(c: Config): SendRetrySettings = { - val initialRetry = c.getDuration("initial-retry").asScala + val initialRetry = c.getDuration("initial-retry").toScala val backoffFactor = c.getDouble("backoff-factor") - val maxBackoff = c.getDuration("max-backoff").asScala + val maxBackoff = c.getDuration("max-backoff").toScala val maxRetries = if (c.getString("max-retries") == "infinite") infiniteRetries else c.getInt("max-retries") new SendRetrySettings( initialRetry, diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/impl/JmsConnector.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/impl/JmsConnector.scala index 1a9cdd62f2..db45e02be1 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/impl/JmsConnector.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/impl/JmsConnector.scala @@ -9,7 +9,6 @@ import java.util.concurrent.atomic.AtomicReference import akka.{Done, NotUsed} import akka.actor.ActorSystem import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.pattern.after import akka.stream.alpakka.jakartajms._ import akka.stream.alpakka.jakartajms.impl.InternalConnectionState._ @@ -246,7 +245,7 @@ private[jakartajms] trait JmsConnector[S <: JmsSession] extends TimerGraphStageL protected def initSessionAsync(attempt: Int = 0, backoffMaxed: Boolean = false): Unit = { val allSessions = openSessions(attempt, backoffMaxed) - allSessions.failed.foreach(connectionFailedCB.invoke)(ExecutionContexts.parasitic) + allSessions.failed.foreach(connectionFailedCB.invoke)(ExecutionContext.parasitic) // wait for all sessions to successfully initialize before invoking the onSession callback. // reduces flakiness (start, consume, then crash) at the cost of increased latency of startup. 
allSessions.foreach(_.foreach(onSession.invoke)) @@ -329,7 +328,7 @@ private[jakartajms] trait JmsConnector[S <: JmsSession] extends TimerGraphStageL for (_ <- 0 until jmsSettings.sessionCount) yield Future(createSession(connection, destination.create)) Future.sequence(sessionFutures) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } private def openConnection(attempt: Int, backoffMaxed: Boolean): Future[jms.Connection] = { @@ -392,7 +391,7 @@ private[jakartajms] trait JmsConnector[S <: JmsSession] extends TimerGraphStageL } } - Future.firstCompletedOf(Iterator(connectionFuture, timeoutFuture))(ExecutionContexts.parasitic) + Future.firstCompletedOf(Iterator(connectionFuture, timeoutFuture))(ExecutionContext.parasitic) } } diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/impl/JmsMessageReader.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/impl/JmsMessageReader.scala index 8979abd6b8..7aef8f07af 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/impl/JmsMessageReader.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/impl/JmsMessageReader.scala @@ -10,7 +10,7 @@ import akka.annotation.InternalApi import akka.stream.alpakka.jakartajms._ import akka.util.ByteString import scala.annotation.tailrec -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ @InternalApi private[jakartajms] object JmsMessageReader { diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/javadsl/JmsConsumer.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/javadsl/JmsConsumer.scala index 829430529f..176a26b395 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/javadsl/JmsConsumer.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/javadsl/JmsConsumer.scala @@ -9,7 +9,7 @@ import akka.NotUsed import akka.stream.alpakka.jakartajms._ import akka.stream.javadsl.Source -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Factory methods to create JMS consumers. diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/javadsl/JmsProducer.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/javadsl/JmsProducer.scala index 507e6dea28..4c0f514ea1 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/javadsl/JmsProducer.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/javadsl/JmsProducer.scala @@ -12,8 +12,8 @@ import akka.stream.scaladsl.{Flow, Keep} import akka.util.ByteString import akka.{Done, NotUsed} -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters._ /** * Factory methods to create JMS producers. 
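On the ExecutionContexts.parasitic replacements in the JmsConnector hunks above: Scala 2.13 ships the same synchronous, calling-thread executor as scala.concurrent.ExecutionContext.parasitic, so Akka's internal akka.dispatch.ExecutionContexts copy is no longer needed. It is only appropriate for cheap, non-blocking callbacks such as forwarding a failure into a stage callback. A self-contained sketch of the new spelling:

import scala.concurrent.{ExecutionContext, Future}

object ParasiticSketch extends App {
  val attempt = Future.failed[Int](new IllegalStateException("connection lost"))
  // parasitic runs the callback synchronously on the completing thread;
  // use it only for trivial, non-blocking work like invoking a stage callback
  attempt.failed.foreach(t => println(s"failed with: ${t.getMessage}"))(ExecutionContext.parasitic)
}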
@@ -50,7 +50,7 @@ object JmsProducer { ): akka.stream.javadsl.Sink[R, CompletionStage[Done]] = akka.stream.alpakka.jakartajms.scaladsl.JmsProducer .sink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -59,7 +59,7 @@ object JmsProducer { def textSink(settings: JmsProducerSettings): akka.stream.javadsl.Sink[String, CompletionStage[Done]] = akka.stream.alpakka.jakartajms.scaladsl.JmsProducer .textSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -68,7 +68,7 @@ object JmsProducer { def bytesSink(settings: JmsProducerSettings): akka.stream.javadsl.Sink[Array[Byte], CompletionStage[Done]] = akka.stream.alpakka.jakartajms.scaladsl.JmsProducer .bytesSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -77,7 +77,7 @@ object JmsProducer { def byteStringSink(settings: JmsProducerSettings): akka.stream.javadsl.Sink[ByteString, CompletionStage[Done]] = akka.stream.alpakka.jakartajms.scaladsl.JmsProducer .byteStringSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -90,7 +90,7 @@ object JmsProducer { val scalaSink = akka.stream.alpakka.jakartajms.scaladsl.JmsProducer .mapSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) val javaToScalaConversion = Flow.fromFunction((javaMap: java.util.Map[String, Any]) => javaMap.asScala.toMap) javaToScalaConversion.toMat(scalaSink)(Keep.right).asJava @@ -104,7 +104,7 @@ object JmsProducer { ): akka.stream.javadsl.Sink[java.io.Serializable, CompletionStage[Done]] = akka.stream.alpakka.jakartajms.scaladsl.JmsProducer .objectSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava private def toProducerStatus(scalaStatus: scaladsl.JmsProducerStatus) = new JmsProducerStatus { diff --git a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/scaladsl/JmsConsumer.scala b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/scaladsl/JmsConsumer.scala index 3ea8d7a99c..ab96abe2c1 100644 --- a/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/scaladsl/JmsConsumer.scala +++ b/jakarta-jms/src/main/scala/akka/stream/alpakka/jakartajms/scaladsl/JmsConsumer.scala @@ -10,7 +10,7 @@ import akka.stream.alpakka.jakartajms.impl._ import akka.stream.scaladsl.Source import jakarta.jms -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Factory methods to create JMS consumers. 
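The javadsl producer methods above all repeat one pattern: the scaladsl sink materializes a scala.concurrent.Future[Done], and the Java API exposes it as a java.util.concurrent.CompletionStage[Done]. With scala.jdk.FutureConverters that conversion is the asJava extension method rather than the old FutureConverters.toJava function value. A sketch of the pattern, with the stream shape simplified to Sink.ignore:

import java.util.concurrent.CompletionStage
import akka.Done
import akka.stream.scaladsl.Sink
import scala.concurrent.Future
import scala.jdk.FutureConverters._

object MatValueSketch {
  val scalaSink: Sink[String, Future[Done]] = Sink.ignore
  // Future#asJava converts the materialized value for Java callers
  val javaSink: akka.stream.javadsl.Sink[String, CompletionStage[Done]] =
    scalaSink.mapMaterializedValue(_.asJava).asJava
}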
diff --git a/jakarta-jms/src/test/scala/akka/stream/alpakka/jakartajms/impl/SoftReferenceCacheSpec.scala b/jakarta-jms/src/test/scala/akka/stream/alpakka/jakartajms/impl/SoftReferenceCacheSpec.scala index 9d73ac4f24..73292f976a 100644 --- a/jakarta-jms/src/test/scala/akka/stream/alpakka/jakartajms/impl/SoftReferenceCacheSpec.scala +++ b/jakarta-jms/src/test/scala/akka/stream/alpakka/jakartajms/impl/SoftReferenceCacheSpec.scala @@ -103,7 +103,7 @@ class SoftReferenceCacheSpec extends AnyWordSpec with Matchers { } state.counter = count state - }.foreach(enqueue)(akka.dispatch.ExecutionContexts.parasitic) + }.foreach(enqueue)(ExecutionContext.parasitic) } } } diff --git a/jakarta-jms/src/test/scala/jakartajmstestkit/JmsQueue.scala b/jakarta-jms/src/test/scala/jakartajmstestkit/JmsQueue.scala index 0f814dc604..256e2d99ac 100644 --- a/jakarta-jms/src/test/scala/jakartajmstestkit/JmsQueue.scala +++ b/jakarta-jms/src/test/scala/jakartajmstestkit/JmsQueue.scala @@ -8,7 +8,7 @@ import java.util.{Collections, UUID} import jakarta.jms.{ConnectionFactory, QueueConnectionFactory, TextMessage} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.util.Try /** diff --git a/jakarta-jms/src/test/scala/jakartajmstestkit/JmsTopic.scala b/jakarta-jms/src/test/scala/jakartajmstestkit/JmsTopic.scala index d82e61a48e..7d04673346 100644 --- a/jakarta-jms/src/test/scala/jakartajmstestkit/JmsTopic.scala +++ b/jakarta-jms/src/test/scala/jakartajmstestkit/JmsTopic.scala @@ -8,7 +8,7 @@ import java.util.UUID import jakarta.jms.{ConnectionFactory, Message, MessageListener, TextMessage, TopicConnectionFactory} import scala.util.Try import scala.collection.mutable.ListBuffer -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * This testkit was copied from https://github.com/sullis/jms-testkit with modifications diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/ConnectionRetrySettings.scala b/jms/src/main/scala/akka/stream/alpakka/jms/ConnectionRetrySettings.scala index 66b9c9cc92..f8c6ddf045 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/ConnectionRetrySettings.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/ConnectionRetrySettings.scala @@ -5,10 +5,10 @@ package akka.stream.alpakka.jms import akka.actor.{ActorSystem, ClassicActorSystemProvider} -import akka.util.JavaDurationConverters._ import com.typesafe.config.Config import scala.concurrent.duration._ +import scala.jdk.DurationConverters._ /** * When a connection to a broker cannot be established and errors out, or is timing out being established or @@ -28,14 +28,14 @@ final class ConnectionRetrySettings private ( copy(connectTimeout = value) /** Java API: Time allowed to establish and start a connection. */ - def withConnectTimeout(value: java.time.Duration): ConnectionRetrySettings = copy(connectTimeout = value.asScala) + def withConnectTimeout(value: java.time.Duration): ConnectionRetrySettings = copy(connectTimeout = value.toScala) /** Wait time before retrying the first time. */ def withInitialRetry(value: scala.concurrent.duration.FiniteDuration): ConnectionRetrySettings = copy(initialRetry = value) /** Java API: Wait time before retrying the first time. */ - def withInitialRetry(value: java.time.Duration): ConnectionRetrySettings = copy(initialRetry = value.asScala) + def withInitialRetry(value: java.time.Duration): ConnectionRetrySettings = copy(initialRetry = value.toScala) /** Back-off factor for subsequent retries. 
*/ def withBackoffFactor(value: Double): ConnectionRetrySettings = copy(backoffFactor = value) @@ -45,7 +45,7 @@ final class ConnectionRetrySettings private ( copy(maxBackoff = value) /** Java API: Maximum back-off time allowed, after which all retries will happen after this delay. */ - def withMaxBackoff(value: java.time.Duration): ConnectionRetrySettings = copy(maxBackoff = value.asScala) + def withMaxBackoff(value: java.time.Duration): ConnectionRetrySettings = copy(maxBackoff = value.toScala) /** Maximum number of retries allowed. */ def withMaxRetries(value: Int): ConnectionRetrySettings = copy(maxRetries = value) @@ -90,10 +90,10 @@ object ConnectionRetrySettings { * Reads from the given config. */ def apply(c: Config): ConnectionRetrySettings = { - val connectTimeout = c.getDuration("connect-timeout").asScala - val initialRetry = c.getDuration("initial-retry").asScala + val connectTimeout = c.getDuration("connect-timeout").toScala + val initialRetry = c.getDuration("initial-retry").toScala val backoffFactor = c.getDouble("backoff-factor") - val maxBackoff = c.getDuration("max-backoff").asScala + val maxBackoff = c.getDuration("max-backoff").toScala val maxRetries = if (c.getString("max-retries") == "infinite") infiniteRetries else c.getInt("max-retries") new ConnectionRetrySettings( connectTimeout, diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/Destinations.scala b/jms/src/main/scala/akka/stream/alpakka/jms/Destinations.scala index 6220cf1f2a..9902dcac22 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/Destinations.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/Destinations.scala @@ -5,7 +5,7 @@ package akka.stream.alpakka.jms import javax.jms -import scala.compat.java8.FunctionConverters._ +import scala.jdk.FunctionConverters._ /** * A destination to send to/receive from. diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/JmsConsumerSettings.scala b/jms/src/main/scala/akka/stream/alpakka/jms/JmsConsumerSettings.scala index a8e0afe1db..899396784e 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/JmsConsumerSettings.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/JmsConsumerSettings.scala @@ -5,10 +5,10 @@ package akka.stream.alpakka.jms import akka.actor.{ActorSystem, ClassicActorSystemProvider} -import akka.util.JavaDurationConverters._ import com.typesafe.config.{Config, ConfigValueType} import scala.concurrent.duration.FiniteDuration +import scala.jdk.DurationConverters._ /** * Settings for [[akka.stream.alpakka.jms.scaladsl.JmsConsumer]] and [[akka.stream.alpakka.jms.javadsl.JmsConsumer]]. @@ -74,7 +74,7 @@ final class JmsConsumerSettings private ( def withAckTimeout(value: scala.concurrent.duration.Duration): JmsConsumerSettings = copy(ackTimeout = value) /** Java API: Timeout for acknowledge. (Used by TX consumers.) */ - def withAckTimeout(value: java.time.Duration): JmsConsumerSettings = copy(ackTimeout = value.asScala) + def withAckTimeout(value: java.time.Duration): JmsConsumerSettings = copy(ackTimeout = value.toScala) /** Max interval before sending queued acknowledges back to the broker. (Used by AckSources.) */ def withMaxAckInterval(value: scala.concurrent.duration.FiniteDuration): JmsConsumerSettings = @@ -82,7 +82,7 @@ final class JmsConsumerSettings private ( /** Java API: Max interval before sending queued acknowledges back to the broker. (Used by AckSources.) 
*/ def withMaxAckInterval(value: java.time.Duration): JmsConsumerSettings = - copy(maxAckInterval = Option(value.asScala)) + copy(maxAckInterval = Option(value.toScala)) /** Max number of acks queued by AckSource before they are sent to broker. (Unless MaxAckInterval is specified) */ def withMaxPendingAcks(value: Int): JmsConsumerSettings = copy(maxPendingAcks = value) @@ -99,7 +99,7 @@ final class JmsConsumerSettings private ( /** Java API: Timeout for connection status subscriber */ def withConnectionStatusSubscriptionTimeout(value: java.time.Duration): JmsConsumerSettings = - copy(connectionStatusSubscriptionTimeout = value.asScala) + copy(connectionStatusSubscriptionTimeout = value.toScala) private def copy( connectionFactory: javax.jms.ConnectionFactory = connectionFactory, @@ -176,12 +176,12 @@ object JmsConsumerSettings { val bufferSize = c.getInt("buffer-size") val selector = getStringOption("selector") val acknowledgeMode = getOption("acknowledge-mode", c => AcknowledgeMode.from(c.getString("acknowledge-mode"))) - val ackTimeout = c.getDuration("ack-timeout").asScala - val maxAckIntervalDuration = getOption("max-ack-interval", config => config.getDuration("max-ack-interval").asScala) + val ackTimeout = c.getDuration("ack-timeout").toScala + val maxAckIntervalDuration = getOption("max-ack-interval", config => config.getDuration("max-ack-interval").toScala) val maxAckInterval = maxAckIntervalDuration.map(duration => FiniteDuration(duration.length, duration.unit)) val maxPendingAcks = c.getInt("max-pending-acks") val failStreamOnAckTimeout = c.getBoolean("fail-stream-on-ack-timeout") - val connectionStatusSubscriptionTimeout = c.getDuration("connection-status-subscription-timeout").asScala + val connectionStatusSubscriptionTimeout = c.getDuration("connection-status-subscription-timeout").toScala new JmsConsumerSettings( connectionFactory, connectionRetrySettings, diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/JmsMessages.scala b/jms/src/main/scala/akka/stream/alpakka/jms/JmsMessages.scala index a6fd022239..54119f4dde 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/JmsMessages.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/JmsMessages.scala @@ -9,8 +9,8 @@ import javax.jms import akka.NotUsed import akka.stream.alpakka.jms.impl.JmsMessageReader._ import akka.util.ByteString -import scala.collection.JavaConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ /** * Base interface for messages handled by JmsProducers. Sub-classes support pass-through or use [[akka.NotUsed]] as type for pass-through. @@ -37,7 +37,7 @@ sealed trait JmsEnvelope[+PassThrough] { /** * Java API. 
*/ - def getDestination: java.util.Optional[Destination] = destination.asJava + def getDestination: java.util.Optional[Destination] = destination.toJava def passThrough: PassThrough diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/JmsProducerSettings.scala b/jms/src/main/scala/akka/stream/alpakka/jms/JmsProducerSettings.scala index eb9c94c67a..5f82703478 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/JmsProducerSettings.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/JmsProducerSettings.scala @@ -5,10 +5,10 @@ package akka.stream.alpakka.jms import akka.actor.{ActorSystem, ClassicActorSystemProvider} -import akka.util.JavaDurationConverters._ import com.typesafe.config.{Config, ConfigValueType} import scala.concurrent.duration.FiniteDuration +import scala.jdk.DurationConverters._ /** * Settings for [[akka.stream.alpakka.jms.scaladsl.JmsProducer]] and [[akka.stream.alpakka.jms.javadsl.JmsProducer]]. @@ -64,7 +64,7 @@ final class JmsProducerSettings private ( * Java API: Time messages should be kept on the JMS broker. This setting can be overridden on * individual messages. If not set, messages will never expire. */ - def withTimeToLive(value: java.time.Duration): JmsProducerSettings = copy(timeToLive = Option(value).map(_.asScala)) + def withTimeToLive(value: java.time.Duration): JmsProducerSettings = copy(timeToLive = Option(value).map(_.toScala)) /** Timeout for connection status subscriber */ def withConnectionStatusSubscriptionTimeout(value: FiniteDuration): JmsProducerSettings = @@ -72,7 +72,7 @@ final class JmsProducerSettings private ( /** Java API: Timeout for connection status subscriber */ def withConnectionStatusSubscriptionTimeout(value: java.time.Duration): JmsProducerSettings = - copy(connectionStatusSubscriptionTimeout = value.asScala) + copy(connectionStatusSubscriptionTimeout = value.toScala) private def copy( connectionFactory: javax.jms.ConnectionFactory = connectionFactory, @@ -128,8 +128,8 @@ object JmsProducerSettings { val sendRetrySettings = SendRetrySettings(c.getConfig("send-retry")) val credentials = getOption("credentials", c => Credentials(c.getConfig("credentials"))) val sessionCount = c.getInt("session-count") - val timeToLive = getOption("time-to-live", _.getDuration("time-to-live").asScala) - val connectionStatusSubscriptionTimeout = c.getDuration("connection-status-subscription-timeout").asScala + val timeToLive = getOption("time-to-live", _.getDuration("time-to-live").toScala) + val connectionStatusSubscriptionTimeout = c.getDuration("connection-status-subscription-timeout").toScala new JmsProducerSettings( connectionFactory, connectionRetrySettings, diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/SendRetrySettings.scala b/jms/src/main/scala/akka/stream/alpakka/jms/SendRetrySettings.scala index 3c7196b876..8c86826916 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/SendRetrySettings.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/SendRetrySettings.scala @@ -8,7 +8,7 @@ import akka.actor.{ActorSystem, ClassicActorSystemProvider} import com.typesafe.config.Config import scala.concurrent.duration._ -import akka.util.JavaDurationConverters._ +import scala.jdk.DurationConverters._ /** * When a connection to a broker starts failing, sending JMS messages will also fail. 
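All of the settings companions in this patch read timeouts the same way: Typesafe Config's getDuration returns a java.time.Duration, and scala.jdk.DurationConverters turns it into a FiniteDuration with toScala, where the removed akka.util.JavaDurationConverters spelled this asScala. A minimal sketch against a hypothetical config string, not the real reference.conf:

import com.typesafe.config.ConfigFactory
import scala.concurrent.duration.FiniteDuration
import scala.jdk.DurationConverters._

object DurationConfigSketch extends App {
  val c = ConfigFactory.parseString("initial-retry = 100ms")
  // java.time.Duration -> FiniteDuration via the Scala 2.13 converter
  val initialRetry: FiniteDuration = c.getDuration("initial-retry").toScala
  println(initialRetry) // 100 milliseconds
}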
@@ -23,7 +23,7 @@ final class SendRetrySettings private (val initialRetry: scala.concurrent.durati def withInitialRetry(value: scala.concurrent.duration.FiniteDuration): SendRetrySettings = copy(initialRetry = value) /** Java API: Wait time before retrying the first time. */ - def withInitialRetry(value: java.time.Duration): SendRetrySettings = copy(initialRetry = value.asScala) + def withInitialRetry(value: java.time.Duration): SendRetrySettings = copy(initialRetry = value.toScala) /** Back-off factor for subsequent retries */ def withBackoffFactor(value: Double): SendRetrySettings = copy(backoffFactor = value) @@ -32,7 +32,7 @@ final class SendRetrySettings private (val initialRetry: scala.concurrent.durati def withMaxBackoff(value: scala.concurrent.duration.FiniteDuration): SendRetrySettings = copy(maxBackoff = value) /** Java API: Maximum back-off time allowed, after which all retries will happen after this delay. */ - def withMaxBackoff(value: java.time.Duration): SendRetrySettings = copy(maxBackoff = value.asScala) + def withMaxBackoff(value: java.time.Duration): SendRetrySettings = copy(maxBackoff = value.toScala) /** Maximum number of retries allowed. */ def withMaxRetries(value: Int): SendRetrySettings = copy(maxRetries = value) @@ -74,9 +74,9 @@ object SendRetrySettings { * Reads from the given config. */ def apply(c: Config): SendRetrySettings = { - val initialRetry = c.getDuration("initial-retry").asScala + val initialRetry = c.getDuration("initial-retry").toScala val backoffFactor = c.getDouble("backoff-factor") - val maxBackoff = c.getDuration("max-backoff").asScala + val maxBackoff = c.getDuration("max-backoff").toScala val maxRetries = if (c.getString("max-retries") == "infinite") infiniteRetries else c.getInt("max-retries") new SendRetrySettings( initialRetry, diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/impl/JmsConnector.scala b/jms/src/main/scala/akka/stream/alpakka/jms/impl/JmsConnector.scala index 5d85126cf6..6bb2f9aea4 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/impl/JmsConnector.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/impl/JmsConnector.scala @@ -9,7 +9,6 @@ import java.util.concurrent.atomic.AtomicReference import akka.{Done, NotUsed} import akka.actor.ActorSystem import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.pattern.after import akka.stream.alpakka.jms._ import akka.stream.alpakka.jms.impl.InternalConnectionState._ @@ -246,7 +245,7 @@ private[jms] trait JmsConnector[S <: JmsSession] extends TimerGraphStageLogic wi protected def initSessionAsync(attempt: Int = 0, backoffMaxed: Boolean = false): Unit = { val allSessions = openSessions(attempt, backoffMaxed) - allSessions.failed.foreach(connectionFailedCB.invoke)(ExecutionContexts.parasitic) + allSessions.failed.foreach(connectionFailedCB.invoke)(ExecutionContext.parasitic) // wait for all sessions to successfully initialize before invoking the onSession callback. // reduces flakiness (start, consume, then crash) at the cost of increased latency of startup. 
allSessions.foreach(_.foreach(onSession.invoke)) @@ -329,7 +328,7 @@ private[jms] trait JmsConnector[S <: JmsSession] extends TimerGraphStageLogic wi for (_ <- 0 until jmsSettings.sessionCount) yield Future(createSession(connection, destination.create)) Future.sequence(sessionFutures) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } private def openConnection(attempt: Int, backoffMaxed: Boolean): Future[jms.Connection] = { @@ -392,7 +391,7 @@ private[jms] trait JmsConnector[S <: JmsSession] extends TimerGraphStageLogic wi } } - Future.firstCompletedOf(Iterator(connectionFuture, timeoutFuture))(ExecutionContexts.parasitic) + Future.firstCompletedOf(Iterator(connectionFuture, timeoutFuture))(ExecutionContext.parasitic) } } diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/impl/JmsMessageReader.scala b/jms/src/main/scala/akka/stream/alpakka/jms/impl/JmsMessageReader.scala index 87b9af41c0..6d326ccb74 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/impl/JmsMessageReader.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/impl/JmsMessageReader.scala @@ -10,7 +10,7 @@ import akka.annotation.InternalApi import akka.stream.alpakka.jms._ import akka.util.ByteString import scala.annotation.tailrec -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ @InternalApi private[jms] object JmsMessageReader { diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/javadsl/JmsConsumer.scala b/jms/src/main/scala/akka/stream/alpakka/jms/javadsl/JmsConsumer.scala index 73236884f7..4cb9d3f46c 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/javadsl/JmsConsumer.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/javadsl/JmsConsumer.scala @@ -9,7 +9,7 @@ import akka.NotUsed import akka.stream.alpakka.jms._ import akka.stream.javadsl.Source -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Factory methods to create JMS consumers. diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/javadsl/JmsProducer.scala b/jms/src/main/scala/akka/stream/alpakka/jms/javadsl/JmsProducer.scala index 7e4302abbd..0af38c7c3d 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/javadsl/JmsProducer.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/javadsl/JmsProducer.scala @@ -12,8 +12,8 @@ import akka.stream.scaladsl.{Flow, Keep} import akka.util.ByteString import akka.{Done, NotUsed} -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters._ /** * Factory methods to create JMS producers. 
@@ -50,7 +50,7 @@ object JmsProducer { ): akka.stream.javadsl.Sink[R, CompletionStage[Done]] = akka.stream.alpakka.jms.scaladsl.JmsProducer .sink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -59,7 +59,7 @@ object JmsProducer { def textSink(settings: JmsProducerSettings): akka.stream.javadsl.Sink[String, CompletionStage[Done]] = akka.stream.alpakka.jms.scaladsl.JmsProducer .textSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -68,7 +68,7 @@ object JmsProducer { def bytesSink(settings: JmsProducerSettings): akka.stream.javadsl.Sink[Array[Byte], CompletionStage[Done]] = akka.stream.alpakka.jms.scaladsl.JmsProducer .bytesSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -77,7 +77,7 @@ object JmsProducer { def byteStringSink(settings: JmsProducerSettings): akka.stream.javadsl.Sink[ByteString, CompletionStage[Done]] = akka.stream.alpakka.jms.scaladsl.JmsProducer .byteStringSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -90,7 +90,7 @@ object JmsProducer { val scalaSink = akka.stream.alpakka.jms.scaladsl.JmsProducer .mapSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) val javaToScalaConversion = Flow.fromFunction((javaMap: java.util.Map[String, Any]) => javaMap.asScala.toMap) javaToScalaConversion.toMat(scalaSink)(Keep.right).asJava @@ -104,7 +104,7 @@ object JmsProducer { ): akka.stream.javadsl.Sink[java.io.Serializable, CompletionStage[Done]] = akka.stream.alpakka.jms.scaladsl.JmsProducer .objectSink(settings) - .mapMaterializedValue(FutureConverters.toJava) + .mapMaterializedValue(_.asJava) .asJava private def toProducerStatus(scalaStatus: scaladsl.JmsProducerStatus) = new JmsProducerStatus { diff --git a/jms/src/main/scala/akka/stream/alpakka/jms/scaladsl/JmsConsumer.scala b/jms/src/main/scala/akka/stream/alpakka/jms/scaladsl/JmsConsumer.scala index 6925d030db..f5c3bce5ff 100644 --- a/jms/src/main/scala/akka/stream/alpakka/jms/scaladsl/JmsConsumer.scala +++ b/jms/src/main/scala/akka/stream/alpakka/jms/scaladsl/JmsConsumer.scala @@ -10,7 +10,7 @@ import akka.stream.alpakka.jms.impl._ import akka.stream.scaladsl.Source import javax.jms -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Factory methods to create JMS consumers. 
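scala.jdk.CollectionConverters is the drop-in successor to scala.collection.JavaConverters: asScala and asJava keep their names and semantics, only the import moves, which is why most hunks in this diff touch nothing but the import line. The mapSink bridge above shows the common shape, converting an incoming java.util.Map into an immutable Scala Map. In isolation:

import scala.jdk.CollectionConverters._

object CollectionBridgeSketch extends App {
  val javaMap = new java.util.HashMap[String, Any]()
  javaMap.put("key", "value")
  // same asScala as before, now sourced from scala.jdk
  val scalaMap: Map[String, Any] = javaMap.asScala.toMap
  // and asJava for handing collections back to Java APIs
  val backAgain: java.util.Map[String, Any] = scalaMap.asJava
  println(scalaMap)
}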
diff --git a/jms/src/test/scala/akka/stream/alpakka/jms/impl/SoftReferenceCacheSpec.scala b/jms/src/test/scala/akka/stream/alpakka/jms/impl/SoftReferenceCacheSpec.scala index cbaa47f9a5..367ae75b9e 100644 --- a/jms/src/test/scala/akka/stream/alpakka/jms/impl/SoftReferenceCacheSpec.scala +++ b/jms/src/test/scala/akka/stream/alpakka/jms/impl/SoftReferenceCacheSpec.scala @@ -103,7 +103,7 @@ class SoftReferenceCacheSpec extends AnyWordSpec with Matchers { } state.counter = count state - }.foreach(enqueue)(akka.dispatch.ExecutionContexts.parasitic) + }.foreach(enqueue)(ExecutionContext.parasitic) } } } diff --git a/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala b/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala index 7cd223ad2a..e773e7fb39 100644 --- a/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala +++ b/jms/src/test/scala/docs/scaladsl/JmsConnectorsSpec.scala @@ -24,7 +24,7 @@ import org.mockito.invocation.InvocationOnMock import org.mockito.stubbing.Answer import scala.annotation.tailrec -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable import scala.collection.mutable import scala.concurrent.Future diff --git a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/ShardSettings.scala b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/ShardSettings.scala index 82e193b3fe..5d54d09d2c 100644 --- a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/ShardSettings.scala +++ b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/ShardSettings.scala @@ -7,7 +7,7 @@ package akka.stream.alpakka.kinesis import software.amazon.awssdk.services.kinesis.model.ShardIteratorType import scala.concurrent.duration._ -import akka.util.JavaDurationConverters._ +import scala.jdk.DurationConverters._ final class ShardSettings private ( val streamName: String, @@ -47,7 +47,7 @@ final class ShardSettings private ( copy(refreshInterval = value) /** Java API */ - def withRefreshInterval(value: java.time.Duration): ShardSettings = copy(refreshInterval = value.asScala) + def withRefreshInterval(value: java.time.Duration): ShardSettings = copy(refreshInterval = value.toScala) def withLimit(value: Int): ShardSettings = copy(limit = value) private def copy( diff --git a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/impl/KinesisSourceStage.scala b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/impl/KinesisSourceStage.scala index bbd6422167..564010f574 100644 --- a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/impl/KinesisSourceStage.scala +++ b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/impl/KinesisSourceStage.scala @@ -6,7 +6,6 @@ package akka.stream.alpakka.kinesis.impl import akka.actor.ActorRef import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts.parasitic import akka.stream.alpakka.kinesis.{ShardSettings, KinesisErrors => Errors} import akka.stream.stage.GraphStageLogic.StageActor import akka.stream.stage._ @@ -15,11 +14,11 @@ import software.amazon.awssdk.services.kinesis.KinesisAsyncClient import software.amazon.awssdk.services.kinesis.model._ import scala.collection.mutable -import scala.collection.JavaConverters._ +import scala.concurrent.ExecutionContext +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters._ import scala.util.{Failure, Success, Try} -import scala.compat.java8.FutureConverters._ - /** * Internal API */ @@ -145,8 +144,8 @@ private[kinesis] class KinesisSourceStage(shardSettings: ShardSettings, amazonKi .getRecords( 
GetRecordsRequest.builder().limit(limit).shardIterator(currentShardIterator).build() ) - .toScala - .onComplete(handleGetRecords)(parasitic) + .asScala + .onComplete(handleGetRecords)(ExecutionContext.parasitic) private[this] def requestShardIterator(): Unit = { val request = Function @@ -171,8 +170,8 @@ private[kinesis] class KinesisSourceStage(shardSettings: ShardSettings, amazonKi amazonKinesisAsync .getShardIterator(request) - .toScala - .onComplete(handleGetShardIterator)(parasitic) + .asScala + .onComplete(handleGetShardIterator)(ExecutionContext.parasitic) } } diff --git a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/impl/ShardProcessor.scala b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/impl/ShardProcessor.scala index 43b6bfd798..7a70aa2e1d 100644 --- a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/impl/ShardProcessor.scala +++ b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/impl/ShardProcessor.scala @@ -14,7 +14,7 @@ import software.amazon.kinesis.lifecycle.events._ import software.amazon.kinesis.processor.{RecordProcessorCheckpointer, ShardRecordProcessor} import software.amazon.kinesis.retrieval.KinesisClientRecord -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ @InternalApi private[kinesis] class ShardProcessor( diff --git a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/javadsl/KinesisSchedulerSource.scala b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/javadsl/KinesisSchedulerSource.scala index 432d29d3ff..050e2bf6fc 100644 --- a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/javadsl/KinesisSchedulerSource.scala +++ b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/javadsl/KinesisSchedulerSource.scala @@ -13,7 +13,7 @@ import software.amazon.kinesis.coordinator.Scheduler import software.amazon.kinesis.processor.ShardRecordProcessorFactory import software.amazon.kinesis.retrieval.KinesisClientRecord -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.Future object KinesisSchedulerSource { @@ -28,7 +28,7 @@ object KinesisSchedulerSource { ): Source[CommittableRecord, CompletionStage[Scheduler]] = scaladsl.KinesisSchedulerSource .apply(schedulerBuilder.build, settings) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava def createSharded( diff --git a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/javadsl/KinesisSource.scala b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/javadsl/KinesisSource.scala index c0a9d8e9ad..bb63028a58 100644 --- a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/javadsl/KinesisSource.scala +++ b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/javadsl/KinesisSource.scala @@ -10,7 +10,7 @@ import akka.stream.javadsl.Source import software.amazon.awssdk.services.kinesis.KinesisAsyncClient import software.amazon.awssdk.services.kinesis.model.Record -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ object KinesisSource { diff --git a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/scaladsl/KinesisFlow.scala b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/scaladsl/KinesisFlow.scala index babe55179d..a24292d82f 100644 --- a/kinesis/src/main/scala/akka/stream/alpakka/kinesis/scaladsl/KinesisFlow.scala +++ b/kinesis/src/main/scala/akka/stream/alpakka/kinesis/scaladsl/KinesisFlow.scala @@ -5,9 +5,9 @@ package akka.stream.alpakka.kinesis.scaladsl import java.nio.ByteBuffer + import akka.NotUsed import akka.annotation.InternalApi -import 
akka.dispatch.ExecutionContexts.parasitic import akka.stream.ThrottleMode import akka.stream.alpakka.kinesis.KinesisFlowSettings import akka.stream.alpakka.kinesis.KinesisErrors.FailurePublishingRecords @@ -21,11 +21,11 @@ import software.amazon.awssdk.services.kinesis.model.{ PutRecordsResponse, PutRecordsResultEntry } - -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable.Queue import scala.concurrent.duration._ -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ +import scala.concurrent.ExecutionContext import scala.util.{Failure, Success, Try} object KinesisFlow { @@ -93,8 +93,8 @@ object KinesisFlow { .putRecords( PutRecordsRequest.builder().streamName(streamName).records(entries.map(_._1).asJavaCollection).build ) - .toScala - .transform(handleBatch(entries))(parasitic) + .asScala + .transform(handleBatch(entries))(ExecutionContext.parasitic) ) .mapConcat(identity) } diff --git a/kinesis/src/main/scala/akka/stream/alpakka/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala b/kinesis/src/main/scala/akka/stream/alpakka/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala index 4b2b028dd7..116ed284ec 100644 --- a/kinesis/src/main/scala/akka/stream/alpakka/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala +++ b/kinesis/src/main/scala/akka/stream/alpakka/kinesisfirehose/scaladsl/KinesisFirehoseFlow.scala @@ -5,7 +5,6 @@ package akka.stream.alpakka.kinesisfirehose.scaladsl import akka.NotUsed -import akka.dispatch.ExecutionContexts.parasitic import akka.stream.ThrottleMode import akka.stream.alpakka.kinesisfirehose.KinesisFirehoseFlowSettings import akka.stream.alpakka.kinesisfirehose.KinesisFirehoseErrors.FailurePublishingRecords @@ -13,11 +12,11 @@ import akka.stream.scaladsl.Flow import software.amazon.awssdk.services.firehose.FirehoseAsyncClient import software.amazon.awssdk.services.firehose.model.{PutRecordBatchRequest, PutRecordBatchResponseEntry, Record} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable.Queue import scala.concurrent.duration._ - -import scala.compat.java8.FutureConverters._ +import scala.concurrent.ExecutionContext +import scala.jdk.FutureConverters._ object KinesisFirehoseFlow { def apply(streamName: String, settings: KinesisFirehoseFlowSettings = KinesisFirehoseFlowSettings.Defaults)( @@ -37,8 +36,8 @@ object KinesisFirehoseFlow { .records(records.asJavaCollection) .build() ) - .toScala - .transform(identity, FailurePublishingRecords(_))(parasitic) + .asScala + .transform(identity, FailurePublishingRecords(_))(ExecutionContext.parasitic) ) .mapConcat(_.requestResponses.asScala.toIndexedSeq) diff --git a/kinesis/src/test/scala/akka/stream/alpakka/kinesis/KinesisFlowSpec.scala b/kinesis/src/test/scala/akka/stream/alpakka/kinesis/KinesisFlowSpec.scala index 2c813b23b7..eed4124762 100644 --- a/kinesis/src/test/scala/akka/stream/alpakka/kinesis/KinesisFlowSpec.scala +++ b/kinesis/src/test/scala/akka/stream/alpakka/kinesis/KinesisFlowSpec.scala @@ -23,7 +23,7 @@ import org.scalatest.wordspec.AnyWordSpec import org.scalatest.matchers.should.Matchers import software.amazon.awssdk.core.SdkBytes import software.amazon.awssdk.services.kinesis.model._ -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class KinesisFlowSpec extends AnyWordSpec with Matchers with KinesisMock with LogCapturing { diff --git 
a/kinesis/src/test/scala/akka/stream/alpakka/kinesis/KinesisSchedulerSourceSpec.scala b/kinesis/src/test/scala/akka/stream/alpakka/kinesis/KinesisSchedulerSourceSpec.scala index 856ab4fb22..520c41c6b0 100644 --- a/kinesis/src/test/scala/akka/stream/alpakka/kinesis/KinesisSchedulerSourceSpec.scala +++ b/kinesis/src/test/scala/akka/stream/alpakka/kinesis/KinesisSchedulerSourceSpec.scala @@ -37,7 +37,7 @@ import software.amazon.kinesis.processor.{ import software.amazon.kinesis.retrieval.KinesisClientRecord import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.Random diff --git a/kinesis/src/test/scala/akka/stream/alpakka/kinesisfirehose/KinesisFirehoseFlowSpec.scala b/kinesis/src/test/scala/akka/stream/alpakka/kinesisfirehose/KinesisFirehoseFlowSpec.scala index b7e3865ef6..e3e4902181 100644 --- a/kinesis/src/test/scala/akka/stream/alpakka/kinesisfirehose/KinesisFirehoseFlowSpec.scala +++ b/kinesis/src/test/scala/akka/stream/alpakka/kinesisfirehose/KinesisFirehoseFlowSpec.scala @@ -22,7 +22,7 @@ import org.scalatest.matchers.should.Matchers import software.amazon.awssdk.core.SdkBytes import software.amazon.awssdk.services.firehose.model._ -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class KinesisFirehoseFlowSpec extends AnyWordSpec with Matchers with KinesisFirehoseMock with LogCapturing { diff --git a/kudu/src/main/scala/akka/stream/alpakka/kudu/KuduTableSettings.scala b/kudu/src/main/scala/akka/stream/alpakka/kudu/KuduTableSettings.scala index 9a62b78921..aa36ac913c 100644 --- a/kudu/src/main/scala/akka/stream/alpakka/kudu/KuduTableSettings.scala +++ b/kudu/src/main/scala/akka/stream/alpakka/kudu/KuduTableSettings.scala @@ -5,7 +5,7 @@ package akka.stream.alpakka.kudu import org.apache.kudu.client.PartialRow -import scala.compat.java8.FunctionConverters._ +import scala.jdk.FunctionConverters._ final class KuduTableSettings[T] private (val tableName: String, val schema: org.apache.kudu.Schema, diff --git a/kudu/src/main/scala/akka/stream/alpakka/kudu/impl/KuduFlowStage.scala b/kudu/src/main/scala/akka/stream/alpakka/kudu/impl/KuduFlowStage.scala index a1f898896a..ad26864c01 100644 --- a/kudu/src/main/scala/akka/stream/alpakka/kudu/impl/KuduFlowStage.scala +++ b/kudu/src/main/scala/akka/stream/alpakka/kudu/impl/KuduFlowStage.scala @@ -12,7 +12,7 @@ import org.apache.kudu.Schema import org.apache.kudu.Type._ import org.apache.kudu.client.{KuduClient, KuduTable, PartialRow} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.util.control.NonFatal /** diff --git a/kudu/src/test/scala/docs/scaladsl/KuduTableSpec.scala b/kudu/src/test/scala/docs/scaladsl/KuduTableSpec.scala index af02cabb0a..7b9d54e9d8 100644 --- a/kudu/src/test/scala/docs/scaladsl/KuduTableSpec.scala +++ b/kudu/src/test/scala/docs/scaladsl/KuduTableSpec.scala @@ -16,7 +16,7 @@ import org.apache.kudu.{ColumnSchema, Schema, Type} import org.scalatest.concurrent.ScalaFutures import org.scalatest.BeforeAndAfterAll -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.Future import scala.concurrent.duration._ import org.scalatest.matchers.should.Matchers diff --git a/mongodb/src/main/scala/akka/stream/alpakka/mongodb/javadsl/MongoFlow.scala b/mongodb/src/main/scala/akka/stream/alpakka/mongodb/javadsl/MongoFlow.scala index 
702d68d251..c772df42ec 100644 --- a/mongodb/src/main/scala/akka/stream/alpakka/mongodb/javadsl/MongoFlow.scala +++ b/mongodb/src/main/scala/akka/stream/alpakka/mongodb/javadsl/MongoFlow.scala @@ -19,7 +19,7 @@ import com.mongodb.client.result.{DeleteResult, UpdateResult} import com.mongodb.reactivestreams.client.MongoCollection import org.bson.conversions.Bson -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ object MongoFlow { diff --git a/mongodb/src/main/scala/akka/stream/alpakka/mongodb/scaladsl/MongoFlow.scala b/mongodb/src/main/scala/akka/stream/alpakka/mongodb/scaladsl/MongoFlow.scala index 6795f6e496..946afcf688 100644 --- a/mongodb/src/main/scala/akka/stream/alpakka/mongodb/scaladsl/MongoFlow.scala +++ b/mongodb/src/main/scala/akka/stream/alpakka/mongodb/scaladsl/MongoFlow.scala @@ -13,7 +13,7 @@ import com.mongodb.client.result.{DeleteResult, UpdateResult} import com.mongodb.reactivestreams.client.MongoCollection import org.bson.conversions.Bson -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ object MongoFlow { diff --git a/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala b/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala index 76fb0e1bf9..1a5b3a1e1b 100644 --- a/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala +++ b/mongodb/src/test/scala/docs/scaladsl/MongoSinkSpec.scala @@ -19,7 +19,7 @@ import org.mongodb.scala.bson.codecs.Macros._ import org.scalatest._ import org.scalatest.concurrent.ScalaFutures -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.duration._ import org.scalatest.matchers.must.Matchers import org.scalatest.wordspec.AnyWordSpec diff --git a/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala b/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala index a41c445603..6ea1ff0571 100644 --- a/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala +++ b/mongodb/src/test/scala/docs/scaladsl/MongoSourceSpec.scala @@ -15,7 +15,7 @@ import org.bson.Document import org.scalatest._ import org.scalatest.concurrent.ScalaFutures -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable.Seq import scala.concurrent._ import scala.concurrent.duration._ diff --git a/mqtt-streaming/src/main/scala/akka/stream/alpakka/mqtt/streaming/MqttSessionSettings.scala b/mqtt-streaming/src/main/scala/akka/stream/alpakka/mqtt/streaming/MqttSessionSettings.scala index 4604445b1e..54435c6868 100644 --- a/mqtt-streaming/src/main/scala/akka/stream/alpakka/mqtt/streaming/MqttSessionSettings.scala +++ b/mqtt-streaming/src/main/scala/akka/stream/alpakka/mqtt/streaming/MqttSessionSettings.scala @@ -50,7 +50,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, require(maxPacketSize >= 0 && maxPacketSize <= (1 << 28), s"maxPacketSize of $maxPacketSize must be positive and less than ${1 << 28}") - import akka.util.JavaDurationConverters._ + import scala.jdk.DurationConverters._ /** * Just for clients - the number of commands that can be buffered while connected to a server. Defaults @@ -92,7 +92,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * 5 minutes. 
*/ def withReceiveConnectTimeout(receiveConnectTimeout: Duration): MqttSessionSettings = - copy(receiveConnectTimeout = receiveConnectTimeout.asScala) + copy(receiveConnectTimeout = receiveConnectTimeout.toScala) /** * For clients, the amount of time to wait for a server to ack a connection. For servers, the amount of time @@ -108,7 +108,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * to wait before receiving an ack command locally in reply to a connect event. Defaults to 30 seconds. */ def withReceiveConnAckTimeout(receiveConnAckTimeout: Duration): MqttSessionSettings = - copy(receiveConnAckTimeout = receiveConnAckTimeout.asScala) + copy(receiveConnAckTimeout = receiveConnAckTimeout.toScala) /** * For producers of PUBLISH, the amount of time to wait to ack/receive a QoS 1/2 publish before retrying with @@ -124,7 +124,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * the DUP flag set. Defaults to 0 seconds, which means republishing only occurs on reconnect. */ def withProducerPubAckRecTimeout(producerPubAckRecTimeout: Duration): MqttSessionSettings = - copy(producerPubAckRecTimeout = producerPubAckRecTimeout.asScala) + copy(producerPubAckRecTimeout = producerPubAckRecTimeout.toScala) /** * For producers of PUBLISH, the amount of time to wait for a server to complete a QoS 2 publish before retrying @@ -140,7 +140,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * with another PUBREL. Defaults to 0 seconds, which means republishing only occurs on reconnect. */ def withProducerPubCompTimeout(producerPubCompTimeout: Duration): MqttSessionSettings = - copy(producerPubCompTimeout = producerPubCompTimeout.asScala) + copy(producerPubCompTimeout = producerPubCompTimeout.toScala) /** * For consumers of PUBLISH, the amount of time to wait before receiving an ack/receive command locally in reply @@ -156,7 +156,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * to a QoS 1/2 publish event before failing. Defaults to 30 seconds. */ def withConsumerPubAckRecTimeout(consumerPubAckRecTimeout: Duration): MqttSessionSettings = - copy(consumerPubAckRecTimeout = consumerPubAckRecTimeout.asScala) + copy(consumerPubAckRecTimeout = consumerPubAckRecTimeout.toScala) /** * For consumers of PUBLISH, the amount of time to wait before receiving a complete command locally in reply to a @@ -172,7 +172,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * QoS 2 publish event before failing. Defaults to 30 seconds. */ def withConsumerPubCompTimeout(consumerPubCompTimeout: Duration): MqttSessionSettings = - copy(consumerPubCompTimeout = consumerPubCompTimeout.asScala) + copy(consumerPubCompTimeout = consumerPubCompTimeout.toScala) /** * For consumers of PUBLISH, the amount of time to wait for a server to release a QoS 2 publish before failing. @@ -188,7 +188,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * Defaults to 30 seconds. */ def withConsumerPubRelTimeout(consumerPubRelTimeout: Duration): MqttSessionSettings = - copy(consumerPubRelTimeout = consumerPubRelTimeout.asScala) + copy(consumerPubRelTimeout = consumerPubRelTimeout.toScala) /** * For clients, the amount of time to wait for a server to ack a subscribe. For servers, the amount of time @@ -204,7 +204,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * to wait before receiving an ack command locally in reply to a subscribe event. Defaults to 30 seconds. 
*/ def withReceiveSubAckTimeout(receiveSubAckTimeout: Duration): MqttSessionSettings = - copy(receiveSubAckTimeout = receiveSubAckTimeout.asScala) + copy(receiveSubAckTimeout = receiveSubAckTimeout.toScala) /** * For clients, the amount of time to wait for a server to ack a unsubscribe. For servers, the amount of time @@ -220,7 +220,7 @@ final class MqttSessionSettings private (val maxPacketSize: Int = 4096, * to wait before receiving an ack command locally in reply to a unsubscribe event. Defaults to 30 seconds. */ def withReceiveUnsubAckTimeout(receiveUnsubAckTimeout: Duration): MqttSessionSettings = - copy(receiveUnsubAckTimeout = receiveUnsubAckTimeout.asScala) + copy(receiveUnsubAckTimeout = receiveUnsubAckTimeout.toScala) /** * The maximum number of client termination event observers permitted. Defaults to 100 which should be diff --git a/mqtt-streaming/src/main/scala/akka/stream/alpakka/mqtt/streaming/model.scala b/mqtt-streaming/src/main/scala/akka/stream/alpakka/mqtt/streaming/model.scala index 49d5cb6c87..559cf34e83 100644 --- a/mqtt-streaming/src/main/scala/akka/stream/alpakka/mqtt/streaming/model.scala +++ b/mqtt-streaming/src/main/scala/akka/stream/alpakka/mqtt/streaming/model.scala @@ -16,8 +16,8 @@ import akka.util.{ByteIterator, ByteString, ByteStringBuilder} import scala.annotation.tailrec import scala.concurrent.duration._ -import scala.collection.JavaConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.{ExecutionContext, Promise} /** @@ -1119,13 +1119,13 @@ final case class Command[A](command: ControlPacket, completed: Option[Promise[Do def this(command: ControlPacket, completed: Optional[CompletionStage[Done]], carry: Optional[A]) = this( command, - completed.asScala.map { f => + completed.toScala.map { f => val p = Promise[Done]() p.future .foreach(f.toCompletableFuture.complete)(ExecutionContext.fromExecutorService(ForkJoinPool.commonPool())) p }, - carry.asScala + carry.toScala ) /** @@ -1184,7 +1184,7 @@ final case class Event[A](event: ControlPacket, carry: Option[A]) { * @param carry The data to carry though */ def this(event: ControlPacket, carry: Optional[A]) = - this(event, carry.asScala) + this(event, carry.toScala) /** * Receive an event from a MQTT session diff --git a/mqtt-streaming/src/test/java/docs/javadsl/MqttFlowTest.java b/mqtt-streaming/src/test/java/docs/javadsl/MqttFlowTest.java index 5fe75776c4..4ff71c190d 100644 --- a/mqtt-streaming/src/test/java/docs/javadsl/MqttFlowTest.java +++ b/mqtt-streaming/src/test/java/docs/javadsl/MqttFlowTest.java @@ -31,8 +31,9 @@ import akka.testkit.javadsl.TestKit; import akka.util.ByteString; import org.junit.*; + import scala.Tuple2; -import scala.collection.JavaConverters; +import scala.jdk.javaapi.CollectionConverters; import java.util.Collection; import java.util.List; @@ -185,8 +186,7 @@ public void establishServerBidirectionalConnectionAndSubscribeToATopic() } else if (cp instanceof Subscribe) { Subscribe subscribe = (Subscribe) cp; Collection<Tuple2<String, ControlPacketFlags>> topicFilters = - JavaConverters.asJavaCollectionConverter(subscribe.topicFilters()) - .asJavaCollection(); + CollectionConverters.asJava(subscribe.topicFilters()); List<Integer> flags = topicFilters.stream() .map(x -> x._2().underlying()) diff --git a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttFlow.scala b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttFlow.scala index 9f2fa521ff..5570ac6d82 100644 ---
a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttFlow.scala +++ b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttFlow.scala @@ -10,7 +10,7 @@ import akka.Done import akka.stream.alpakka.mqtt._ import akka.stream.javadsl.Flow -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ /** * Java API @@ -33,7 +33,7 @@ object MqttFlow { defaultQos: MqttQoS): Flow[MqttMessage, MqttMessage, CompletionStage[Done]] = scaladsl.MqttFlow .atMostOnce(settings, subscriptions, bufferSize, defaultQos) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -53,7 +53,7 @@ object MqttFlow { scaladsl.MqttFlow .atLeastOnce(settings, subscriptions, bufferSize, defaultQos) .map(MqttMessageWithAck.toJava) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -73,6 +73,6 @@ object MqttFlow { scaladsl.MqttFlow .atLeastOnceWithAckForJava(settings, subscriptions, bufferSize, defaultQos) .map(MqttMessageWithAck.toJava) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttMessageWithAck.scala b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttMessageWithAck.scala index d8c20ecea1..87e4c7b766 100644 --- a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttMessageWithAck.scala +++ b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttMessageWithAck.scala @@ -11,7 +11,7 @@ import akka.annotation.InternalApi import akka.stream.alpakka.mqtt.MqttMessage import akka.stream.alpakka.mqtt.scaladsl -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ /** * Java API @@ -40,7 +40,7 @@ sealed trait MqttMessageWithAck { private[javadsl] object MqttMessageWithAck { def toJava(cm: scaladsl.MqttMessageWithAck): MqttMessageWithAck = new MqttMessageWithAck { override val message: MqttMessage = cm.message - override def ack(): CompletionStage[Done] = cm.ack().toJava + override def ack(): CompletionStage[Done] = cm.ack().asJava } } diff --git a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttSource.scala b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttSource.scala index e825c6b785..5279071c68 100644 --- a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttSource.scala +++ b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/javadsl/MqttSource.scala @@ -10,7 +10,7 @@ import akka.Done import akka.stream.alpakka.mqtt._ import akka.stream.javadsl.Source -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ /** * Java API @@ -31,7 +31,7 @@ object MqttSource { bufferSize: Int): Source[MqttMessage, CompletionStage[Done]] = scaladsl.MqttSource .atMostOnce(settings, subscriptions, bufferSize) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -47,6 +47,6 @@ object MqttSource { scaladsl.MqttSource .atLeastOnce(settings, subscriptions, bufferSize) .map(MqttMessageWithAck.toJava) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/scaladsl/MqttMessageWithAck.scala b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/scaladsl/MqttMessageWithAck.scala index 864a6ff25a..68c5350f1e 100644 --- a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/scaladsl/MqttMessageWithAck.scala +++ b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/scaladsl/MqttMessageWithAck.scala @@ -8,7 +8,7 @@ import akka.Done import akka.annotation.InternalApi import 
akka.stream.alpakka.mqtt.MqttMessage -import scala.compat.java8.FutureConverters +import scala.jdk.FutureConverters._ import scala.concurrent.Future /** @@ -43,6 +43,6 @@ private[scaladsl] object MqttMessageWithAck { * * @return a future indicating if the acknowledge reached MQTT */ - override def ack(): Future[Done] = FutureConverters.toScala(e.ack()) + override def ack(): Future[Done] = e.ack().asScala } } diff --git a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/settings.scala b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/settings.scala index 0d8524c712..3eed1e3da6 100644 --- a/mqtt/src/main/scala/akka/stream/alpakka/mqtt/settings.scala +++ b/mqtt/src/main/scala/akka/stream/alpakka/mqtt/settings.scala @@ -4,12 +4,12 @@ package akka.stream.alpakka.mqtt -import akka.util.JavaDurationConverters._ import org.eclipse.paho.client.mqttv3.{MqttClientPersistence, MqttConnectOptions} -import scala.collection.JavaConverters._ -import scala.collection.immutable import scala.collection.immutable.Map +import scala.collection.immutable +import scala.jdk.CollectionConverters._ +import scala.jdk.DurationConverters._ import scala.concurrent.duration.{FiniteDuration, _} /** @@ -184,7 +184,7 @@ final class MqttConnectionSettings private (val broker: String, /** Java API */ def withKeepAliveInterval(value: java.time.Duration): MqttConnectionSettings = withKeepAliveInterval( - value.asScala + value.toScala ) /** Scala API */ @@ -194,7 +194,7 @@ final class MqttConnectionSettings private (val broker: String, /** Java API */ def withConnectionTimeout(value: java.time.Duration): MqttConnectionSettings = withConnectionTimeout( - value.asScala + value.toScala ) /** Scala API */ @@ -204,7 +204,7 @@ final class MqttConnectionSettings private (val broker: String, /** Java API */ def withDisconnectQuiesceTimeout(value: java.time.Duration): MqttConnectionSettings = withDisconnectQuiesceTimeout( - value.asScala + value.toScala ) /** Scala API */ @@ -214,7 +214,7 @@ final class MqttConnectionSettings private (val broker: String, /** Java API */ def withDisconnectTimeout(value: java.time.Duration): MqttConnectionSettings = withDisconnectTimeout( - value.asScala + value.toScala ) def withMaxInFlight(value: Int): MqttConnectionSettings = copy(maxInFlight = value) def withMqttVersion(value: Int): MqttConnectionSettings = copy(mqttVersion = value) diff --git a/pravega/src/main/java/akka/stream/alpakka/pravega/javadsl/Pravega.java b/pravega/src/main/java/akka/stream/alpakka/pravega/javadsl/Pravega.java index 62307199f7..4ce8fccbdc 100644 --- a/pravega/src/main/java/akka/stream/alpakka/pravega/javadsl/Pravega.java +++ b/pravega/src/main/java/akka/stream/alpakka/pravega/javadsl/Pravega.java @@ -19,9 +19,10 @@ import io.pravega.client.ClientConfig; import io.pravega.client.stream.ReaderGroup; -import scala.compat.java8.FutureConverters; import java.util.concurrent.CompletionStage; +import scala.jdk.javaapi.FutureConverters; + import akka.stream.alpakka.pravega.impl.PravegaFlow; import akka.stream.alpakka.pravega.impl.PravegaSource; @@ -40,7 +41,7 @@ public static PravegaReaderGroupManager readerGroup(String scope, ClientConfig c public static <V> Source<PravegaEvent<V>, CompletionStage<Done>> source( ReaderGroup readerGroup, ReaderSettings<V> readerSettings) { return Source.fromGraph(new PravegaSource<>(readerGroup, readerSettings)) - .mapMaterializedValue(FutureConverters::toJava); + .mapMaterializedValue(FutureConverters::asJava); } /** Incoming messages are written to Pravega stream and emitted unchanged.
*/ diff --git a/pravega/src/main/java/akka/stream/alpakka/pravega/javadsl/PravegaTable.java b/pravega/src/main/java/akka/stream/alpakka/pravega/javadsl/PravegaTable.java index 4dc1f4ac86..ea3cbd6fea 100644 --- a/pravega/src/main/java/akka/stream/alpakka/pravega/javadsl/PravegaTable.java +++ b/pravega/src/main/java/akka/stream/alpakka/pravega/javadsl/PravegaTable.java @@ -17,7 +17,6 @@ import akka.stream.javadsl.Sink; import akka.stream.javadsl.Source; -import scala.compat.java8.FutureConverters; import java.util.Optional; import java.util.concurrent.CompletionStage; import java.util.function.Function; @@ -25,8 +24,9 @@ import io.pravega.client.tables.TableKey; -import scala.compat.java8.functionConverterImpls.FromJavaFunction; -import scala.compat.java8.OptionConverters; +import scala.jdk.javaapi.FutureConverters; +import scala.jdk.javaapi.FunctionConverters; +import scala.jdk.javaapi.OptionConverters; import scala.Option; @@ -59,12 +59,12 @@ public static <K, V> Sink<KVPair<K, V>, CompletionStage<Done>> sink( public static <K, V> Source<TableEntry<V>, CompletionStage<Done>> source( String scope, String tableName, TableReaderSettings<K, V> tableReaderSettings) { return Source.fromGraph(new PravegaTableSource<K, V>(scope, tableName, tableReaderSettings)) - .mapMaterializedValue(FutureConverters::toJava); + .mapMaterializedValue(FutureConverters::asJava); } /** A flow from key to an Option[value]. */ public static <K, V> Flow<K, Optional<V>, NotUsed> readFlow( String scope, String tableName, TableSettings<K, V> tableSettings) { return Flow.fromGraph(new PravegaTableReadFlow<K, V>(scope, tableName, tableSettings)) - .map(o -> OptionConverters.toJava(o)); + .map(OptionConverters::toJava); } } diff --git a/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaFlow.scala b/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaFlow.scala index ffd2d2bba1..b29e07ffe6 100644 --- a/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaFlow.scala +++ b/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaFlow.scala @@ -12,7 +12,7 @@ import akka.stream.{Attributes, FlowShape, Inlet, Outlet} import io.pravega.client.stream.EventStreamWriter import scala.util.control.NonFatal -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.ExecutionContext.Implicits.global import akka.stream.alpakka.pravega.WriterSettings @@ -55,7 +55,7 @@ import scala.util.{Failure, Success, Try} } def handleSentEvent(completableFuture: CompletableFuture[Void], msg: A): Unit = - completableFuture.toScala.onComplete { t => + completableFuture.asScala.onComplete { t => semaphore.acquire() asyncPushback.invoke((t, msg)) } diff --git a/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaTableReadFlow.scala b/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaTableReadFlow.scala index 98a127406c..5bfee77e20 100644 --- a/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaTableReadFlow.scala +++ b/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaTableReadFlow.scala @@ -12,7 +12,7 @@ import akka.stream.stage.{AsyncCallback, GraphStage, GraphStageLogic, InHandler, import akka.stream.{Attributes, FlowShape, Inlet, Outlet} import scala.util.control.NonFatal -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.ExecutionContext.Implicits.global import akka.stream.alpakka.pravega.TableSettings @@ -83,7 +83,7 @@ import scala.util.Success } def handleSentEvent(completableFuture: CompletableFuture[TableEntry]): Unit = -
completableFuture.toScala.onComplete { t => + completableFuture.asScala.onComplete { t => asyncMessageSendCallback.invokeWithFeedback((t)) } diff --git a/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaTableWriteFlow.scala b/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaTableWriteFlow.scala index 57aa14c9b2..8f337b7eef 100644 --- a/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaTableWriteFlow.scala +++ b/pravega/src/main/scala/akka/stream/alpakka/pravega/impl/PravegaTableWriteFlow.scala @@ -11,7 +11,7 @@ import akka.stream.stage.{AsyncCallback, GraphStage, GraphStageLogic, InHandler, import akka.stream.{Attributes, FlowShape, Inlet, Outlet} import scala.util.control.NonFatal -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.ExecutionContext.Implicits.global import akka.stream.alpakka.pravega.TableWriterSettings @@ -83,7 +83,7 @@ import io.pravega.client.tables.TableKey } def handleSentEvent(completableFuture: CompletableFuture[Version], msg: KVPair): Unit = - completableFuture.toScala.onComplete { t => + completableFuture.asScala.onComplete { t => asyncPushback.invokeWithFeedback((t, msg)) } diff --git a/project/Dependencies.scala b/project/Dependencies.scala index fade866823..728357ca4f 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -10,7 +10,7 @@ object Dependencies { val Scala2Versions = Seq(Scala213) val ScalaVersions = Dependencies.Scala2Versions :+ Dependencies.Scala3 - val AkkaVersion = "2.9.3" + val AkkaVersion = "2.10.0-M1" val AkkaBinaryVersion = "2.9" val InfluxDBJavaVersion = "2.15" @@ -22,7 +22,7 @@ object Dependencies { // sync ignore prefix in scripts/link-validator.conf#L30 val AkkaHttpVersion = "10.6.3" val AkkaHttpBinaryVersion = "10.6" - val AlpakkaKafkaVersion = "6.0.0" + val AlpakkaKafkaVersion = "7.0.0-M1" val ScalaTestVersion = "3.2.18" val TestContainersScalaTestVersion = "0.40.3" // pulls Testcontainers 1.16.2 val mockitoVersion = "4.11.0" // check also https://github.com/scalatest/scalatestplus-mockito/releases @@ -35,13 +35,13 @@ object Dependencies { val JwtScalaVersion = "9.4.6" // https://github.com/akka/akka/blob/main/project/Dependencies.scala#L20 - val slf4jVersion = "1.7.36" + val slf4jVersion = "2.0.16" val log4jOverSlf4jVersion = slf4jVersion val jclOverSlf4jVersion = slf4jVersion - // Akka 2.9 expects Slf4j 1.x + // Akka 2.10 expects SLF4J 2.x // https://github.com/akka/akka/blob/main/project/Dependencies.scala#L28 - val LogbackWithSlf4jV1 = "1.2.13" + val LogbackWithSlf4jV1 = "1.5.7" val wiremock = ("com.github.tomakehurst" % "wiremock" % "3.0.1" % Test).exclude("org.slf4j", "slf4j-api") val Common = Seq( @@ -75,7 +75,7 @@ object Dependencies { // CVE issues https://github.com/FasterXML/jackson-databind/issues?utf8=%E2%9C%93&q=+label%3ACVE // This should align with the Jackson minor version used in Akka // https://github.com/akka/akka/blob/main/project/Dependencies.scala#L31 - val JacksonVersion = "2.15.4" + val JacksonVersion = "2.17.2" val JacksonDatabindVersion = JacksonVersion val JacksonDatabindDependencies = Seq( "com.fasterxml.jackson.core" % "jackson-core" % JacksonVersion, diff --git a/project/plugins.sbt b/project/plugins.sbt index d475410dae..cd986dde0b 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -15,6 +15,6 @@ addSbtPlugin("com.github.sbt" % "sbt-unidoc" % "0.5.0") addSbtPlugin("com.thoughtworks.sbt-api-mappings" % "sbt-api-mappings" % "3.0.2") addSbtPlugin("com.github.sbt" % "sbt-site-paradox" % "1.5.0") // Akka gRPC -- sync
with version in Dependencies.scala:22 -addSbtPlugin("com.lightbend.akka.grpc" % "sbt-akka-grpc" % "2.4.3") +addSbtPlugin("com.lightbend.akka.grpc" % "sbt-akka-grpc" % "2.5.0-M1") // templating addSbtPlugin("com.github.sbt" % "sbt-boilerplate" % "0.7.0") diff --git a/reference/src/main/scala/akka/stream/alpakka/reference/javadsl/Reference.scala b/reference/src/main/scala/akka/stream/alpakka/reference/javadsl/Reference.scala index 1b4a2a8b1a..8d2a57a978 100644 --- a/reference/src/main/scala/akka/stream/alpakka/reference/javadsl/Reference.scala +++ b/reference/src/main/scala/akka/stream/alpakka/reference/javadsl/Reference.scala @@ -21,8 +21,8 @@ object Reference { * Call Scala source factory and convert both: the source and materialized values to Java classes. */ def source(settings: SourceSettings): Source[ReferenceReadResult, CompletionStage[Done]] = { - import scala.compat.java8.FutureConverters._ - scaladsl.Reference.source(settings).mapMaterializedValue(_.toJava).asJava + import scala.jdk.FutureConverters._ + scaladsl.Reference.source(settings).mapMaterializedValue(_.asJava).asJava } /** diff --git a/reference/src/main/scala/akka/stream/alpakka/reference/model.scala b/reference/src/main/scala/akka/stream/alpakka/reference/model.scala index 2eff177d5e..266997937c 100644 --- a/reference/src/main/scala/akka/stream/alpakka/reference/model.scala +++ b/reference/src/main/scala/akka/stream/alpakka/reference/model.scala @@ -10,8 +10,8 @@ import akka.annotation.InternalApi import akka.util.ByteString import scala.collection.immutable -import scala.collection.JavaConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ import scala.util.{Success, Try} /** @@ -44,7 +44,7 @@ final class ReferenceReadResult @InternalApi private[reference] ( * otherwise return empty Optional. */ def getBytesRead(): OptionalInt = - bytesRead.toOption.asPrimitive + bytesRead.toOption.toJavaPrimitive /** * Java API @@ -53,7 +53,7 @@ final class ReferenceReadResult @InternalApi private[reference] ( * otherwise return empty Optional. */ def getBytesReadFailure(): Optional[Throwable] = - bytesRead.failed.toOption.asJava + bytesRead.failed.toOption.toJava override def toString: String = s"ReferenceReadMessage(data=$data, bytesRead=$bytesRead)" diff --git a/reference/src/main/scala/akka/stream/alpakka/reference/settings.scala b/reference/src/main/scala/akka/stream/alpakka/reference/settings.scala index 29a5d9b4eb..a7681271b1 100644 --- a/reference/src/main/scala/akka/stream/alpakka/reference/settings.scala +++ b/reference/src/main/scala/akka/stream/alpakka/reference/settings.scala @@ -9,8 +9,8 @@ import java.time.{Duration => JavaDuration} import java.util.Optional import java.util.function.Predicate -import scala.compat.java8.FunctionConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.FunctionConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.duration._ /** @@ -60,7 +60,7 @@ final class SourceSettings private ( * A separate getter for Java API that converts Scala Option to Java Optional. 
*/ def getTraceId(): Optional[String] = - traceId.asJava + traceId.toJava /** * Java API diff --git a/reference/src/main/scala/akka/stream/alpakka/reference/testkit/MessageFactory.scala b/reference/src/main/scala/akka/stream/alpakka/reference/testkit/MessageFactory.scala index 0d7c97308d..dee413070a 100644 --- a/reference/src/main/scala/akka/stream/alpakka/reference/testkit/MessageFactory.scala +++ b/reference/src/main/scala/akka/stream/alpakka/reference/testkit/MessageFactory.scala @@ -8,7 +8,7 @@ import akka.stream.alpakka.reference.{ReferenceReadResult, ReferenceWriteMessage import akka.util.ByteString import scala.collection.immutable -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.util.{Failure, Success, Try} @ApiMayChange diff --git a/s3/src/main/scala/akka/stream/alpakka/s3/S3Headers.scala b/s3/src/main/scala/akka/stream/alpakka/s3/S3Headers.scala index aba85da44c..2a340c25eb 100644 --- a/s3/src/main/scala/akka/stream/alpakka/s3/S3Headers.scala +++ b/s3/src/main/scala/akka/stream/alpakka/s3/S3Headers.scala @@ -13,7 +13,7 @@ import akka.stream.alpakka.s3.headers.{CannedAcl, ServerSideEncryption, StorageC import akka.stream.alpakka.s3.impl.S3Request import scala.collection.immutable.Seq -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ final class MetaHeaders private (val metaHeaders: Map[String, String]) { diff --git a/s3/src/main/scala/akka/stream/alpakka/s3/impl/DiskBuffer.scala b/s3/src/main/scala/akka/stream/alpakka/s3/impl/DiskBuffer.scala index 0c7b8cdab0..42c9fd66f3 100644 --- a/s3/src/main/scala/akka/stream/alpakka/s3/impl/DiskBuffer.scala +++ b/s3/src/main/scala/akka/stream/alpakka/s3/impl/DiskBuffer.scala @@ -7,10 +7,11 @@ package akka.stream.alpakka.s3.impl import java.io.{File, FileOutputStream} import java.nio.BufferOverflowException import java.nio.file.Files +import java.nio.file.Path import java.util.concurrent.atomic.AtomicInteger +import akka.annotation.InternalApi import akka.NotUsed -import akka.dispatch.ExecutionContexts import akka.stream.ActorAttributes import akka.stream.Attributes import akka.stream.FlowShape @@ -22,9 +23,8 @@ import akka.stream.stage.GraphStageLogic import akka.stream.stage.InHandler import akka.stream.stage.OutHandler import akka.util.ByteString -import java.nio.file.Path -import akka.annotation.InternalApi +import scala.concurrent.ExecutionContext /** * Internal Api @@ -92,7 +92,7 @@ import akka.annotation.InternalApi f.onComplete { _ => path.delete() - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) NotUsed } emit(out, DiskChunk(src, length), () => completeStage()) diff --git a/s3/src/main/scala/akka/stream/alpakka/s3/impl/S3Stream.scala b/s3/src/main/scala/akka/stream/alpakka/s3/impl/S3Stream.scala index e4b2dedf2a..4415d157b4 100644 --- a/s3/src/main/scala/akka/stream/alpakka/s3/impl/S3Stream.scala +++ b/s3/src/main/scala/akka/stream/alpakka/s3/impl/S3Stream.scala @@ -6,10 +6,11 @@ package akka.stream.alpakka.s3.impl import java.net.InetSocketAddress import java.time.{Instant, ZoneOffset, ZonedDateTime} + import scala.annotation.nowarn + import akka.actor.ActorSystem import akka.annotation.InternalApi -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.Http.OutgoingConnection import akka.http.scaladsl.model.StatusCodes.{NoContent, NotFound, OK} import akka.http.scaladsl.model.headers._ @@ -24,9 +25,11 @@ import akka.stream.scaladsl.{Flow, Keep, RetryFlow, RunnableGraph, Sink, Source, import akka.stream.{Attributes, Materializer} 
import akka.util.ByteString import akka.{Done, NotUsed} + import software.amazon.awssdk.regions.Region import scala.collection.immutable +import scala.concurrent.ExecutionContext import scala.concurrent.{Future, Promise} import scala.util.{Failure, Success, Try} @@ -229,7 +232,7 @@ import scala.util.{Failure, Success, Try} } .mapMaterializedValue(_ => objectMetadataMat.future) } - .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) + .mapMaterializedValue(_.flatMap(identity)(ExecutionContext.parasitic)) } /** @@ -340,7 +343,7 @@ import scala.util.{Failure, Success, Try} HttpRequests.listBuckets(s3Headers.headers) ).map { (res: ListBucketsResult) => res.buckets - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } .flatMapConcat(results => Source(results)) } @@ -599,7 +602,7 @@ import scala.util.{Failure, Success, Try} } } case HttpResponse(NotFound, _, entity, _) => - Source.future(entity.discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) + Source.future(entity.discardBytes().future().map(_ => None)(ExecutionContext.parasitic)) case response: HttpResponse => Source.future { unmarshalError(response.status, response.entity) @@ -624,7 +627,7 @@ import scala.util.{Failure, Success, Try} issueRequest(s3Location, HttpMethods.DELETE, versionId = versionId, s3Headers = headers)(mat, attr) .flatMapConcat { case HttpResponse(NoContent, _, entity, _) => - Source.future(entity.discardBytes().future().map(_ => Done)(ExecutionContexts.parasitic)) + Source.future(entity.discardBytes().future().map(_ => Done)(ExecutionContext.parasitic)) case response: HttpResponse => Source.future { unmarshalError(response.status, response.entity) @@ -747,7 +750,7 @@ import scala.util.{Failure, Success, Try} val maybeRegionPayload = region match { case Region.US_EAST_1 => None case region => - Some(HttpRequests.createBucketRegionPayload(region)(ExecutionContexts.parasitic)) + Some(HttpRequests.createBucketRegionPayload(region)(ExecutionContext.parasitic)) } s3ManagementRequest[Done]( @@ -1329,7 +1332,7 @@ import scala.util.{Failure, Success, Try} case ((response, (upload, index)), allContext) => handleChunkResponse(response, upload, index, conf.multipartUploadSettings.retrySettings).map { result => (result, allContext) - }(ExecutionContexts.parasitic) + }(ExecutionContext.parasitic) } .alsoTo(chunkUploadSink) .map { case (result, _) => result } @@ -1428,7 +1431,7 @@ import scala.util.{Failure, Success, Try} } .mapMaterializedValue(_.map(r => MultipartUploadResult(r.location, r.bucket, r.key, r.eTag, r.versionId))) } - .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) + .mapMaterializedValue(_.flatMap(identity)(ExecutionContext.parasitic)) private def signAndGetAs[T]( request: HttpRequest diff --git a/s3/src/main/scala/akka/stream/alpakka/s3/javadsl/S3.scala b/s3/src/main/scala/akka/stream/alpakka/s3/javadsl/S3.scala index 7d5c4b4fc8..20a9b29172 100644 --- a/s3/src/main/scala/akka/stream/alpakka/s3/javadsl/S3.scala +++ b/s3/src/main/scala/akka/stream/alpakka/s3/javadsl/S3.scala @@ -21,9 +21,9 @@ import akka.stream.javadsl.{RunnableGraph, Sink, Source} import akka.stream.scaladsl.SourceToCompletionStage import akka.util.ByteString -import scala.collection.JavaConverters._ -import scala.compat.java8.OptionConverters._ -import scala.compat.java8.FutureConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ +import scala.jdk.FutureConverters._ /** * Java API @@ -323,13 +323,13 @@ object S3 { contentLength: Long): 
Source[ObjectMetadata, NotUsed] = putObject(bucket, key, data, contentLength, ContentTypes.APPLICATION_OCTET_STREAM) - private def toJava[M]( + private def asJava[M]( download: akka.stream.scaladsl.Source[Option[ (akka.stream.scaladsl.Source[ByteString, M], ObjectMetadata) ], NotUsed] ): Source[Optional[JPair[Source[ByteString, M], ObjectMetadata]], NotUsed] = download.map { - _.map { case (stream, meta) => JPair(stream.asJava, meta) }.asJava + _.map { case (stream, meta) => JPair(stream.asJava, meta) }.toJava }.asJava /** @@ -342,7 +342,7 @@ object S3 { @deprecated("Use S3.getObject instead", "4.0.0") def download(bucket: String, key: String): Source[Optional[JPair[Source[ByteString, NotUsed], ObjectMetadata]], NotUsed] = - toJava(S3Stream.download(S3Location(bucket, key), None, None, S3Headers.empty)) + asJava(S3Stream.download(S3Location(bucket, key), None, None, S3Headers.empty)) /** * Downloads a S3 Object @@ -373,7 +373,7 @@ object S3 { key: String, range: ByteRange): Source[Optional[JPair[Source[ByteString, NotUsed], ObjectMetadata]], NotUsed] = { val scalaRange = range.asInstanceOf[ScalaByteRange] - toJava(S3Stream.download(S3Location(bucket, key), Some(scalaRange), None, S3Headers.empty)) + asJava(S3Stream.download(S3Location(bucket, key), Some(scalaRange), None, S3Headers.empty)) } /** @@ -428,7 +428,7 @@ object S3 { key: String, s3Headers: S3Headers ): Source[Optional[JPair[Source[ByteString, NotUsed], ObjectMetadata]], NotUsed] = - toJava( + asJava( S3Stream.download(S3Location(bucket, key), None, None, s3Headers) ) @@ -449,7 +449,7 @@ object S3 { s3Headers: S3Headers ): Source[Optional[JPair[Source[ByteString, NotUsed], ObjectMetadata]], NotUsed] = { val scalaRange = range.asInstanceOf[ScalaByteRange] - toJava( + asJava( S3Stream.download(S3Location(bucket, key), Some(scalaRange), None, s3Headers) ) } @@ -473,7 +473,7 @@ object S3 { s3Headers: S3Headers ): Source[Optional[JPair[Source[ByteString, NotUsed], ObjectMetadata]], NotUsed] = { val scalaRange = range.asInstanceOf[ScalaByteRange] - toJava( + asJava( S3Stream.download(S3Location(bucket, key), Option(scalaRange), Option(versionId.orElse(null)), s3Headers) ) } @@ -670,7 +670,7 @@ object S3 { prefix: Optional[String], s3Headers: S3Headers): Source[ListBucketResultContents, NotUsed] = S3Stream - .listBucket(bucket, prefix.asScala, s3Headers) + .listBucket(bucket, prefix.toScala, s3Headers) .asJava /** @@ -690,7 +690,7 @@ object S3 { delimiter: String, prefix: Optional[String]): Source[ListBucketResultContents, NotUsed] = scaladsl.S3 - .listBucket(bucket, delimiter, prefix.asScala) + .listBucket(bucket, delimiter, prefix.toScala) .asJava /** @@ -712,7 +712,7 @@ object S3 { prefix: Optional[String], s3Headers: S3Headers): Source[ListBucketResultContents, NotUsed] = scaladsl.S3 - .listBucket(bucket, delimiter, prefix.asScala, s3Headers) + .listBucket(bucket, delimiter, prefix.toScala, s3Headers) .asJava /** @@ -738,7 +738,7 @@ object S3 { ): Source[akka.japi.Pair[java.util.List[ListBucketResultContents], java.util.List[ListBucketResultCommonPrefixes]], NotUsed] = S3Stream - .listBucketAndCommonPrefixes(bucket, delimiter, prefix.asScala, s3Headers) + .listBucketAndCommonPrefixes(bucket, delimiter, prefix.toScala, s3Headers) .map { case (contents, commonPrefixes) => akka.japi.Pair(contents.asJava, commonPrefixes.asJava) } @@ -765,7 +765,7 @@ object S3 { def listMultipartUpload(bucket: String, prefix: Optional[String], s3Headers: S3Headers): Source[ListMultipartUploadResultUploads, NotUsed] = - 
scaladsl.S3.listMultipartUpload(bucket, prefix.asScala, s3Headers).asJava + scaladsl.S3.listMultipartUpload(bucket, prefix.toScala, s3Headers).asJava /** * Will return in progress or aborted multipart uploads with optional prefix and delimiter. This will automatically page through all keys with the given parameters. @@ -783,7 +783,7 @@ object S3 { s3Headers: S3Headers = S3Headers.empty ): Source[akka.japi.Pair[java.util.List[ListMultipartUploadResultUploads], java.util.List[CommonPrefixes]], NotUsed] = S3Stream - .listMultipartUploadAndCommonPrefixes(bucket, delimiter, prefix.asScala, s3Headers) + .listMultipartUploadAndCommonPrefixes(bucket, delimiter, prefix.toScala, s3Headers) .map { case (uploads, commonPrefixes) => akka.japi.Pair(uploads.asJava, commonPrefixes.asJava) } @@ -828,7 +828,7 @@ object S3 { prefix: Optional[String] ): Source[akka.japi.Pair[java.util.List[ListObjectVersionsResultVersions], java.util.List[DeleteMarkers]], NotUsed] = S3Stream - .listObjectVersions(bucket, prefix.asScala, S3Headers.empty) + .listObjectVersions(bucket, prefix.toScala, S3Headers.empty) .map { case (versions, markers) => akka.japi.Pair(versions.asJava, markers.asJava) } @@ -849,7 +849,7 @@ object S3 { s3Headers: S3Headers ): Source[akka.japi.Pair[java.util.List[ListObjectVersionsResultVersions], java.util.List[DeleteMarkers]], NotUsed] = S3Stream - .listObjectVersions(bucket, prefix.asScala, s3Headers) + .listObjectVersions(bucket, prefix.toScala, s3Headers) .map { case (versions, markers) => akka.japi.Pair(versions.asJava, markers.asJava) } @@ -872,7 +872,7 @@ object S3 { s3Headers: S3Headers ): Source[akka.japi.Pair[java.util.List[ListObjectVersionsResultVersions], java.util.List[DeleteMarkers]], NotUsed] = S3Stream - .listObjectVersionsAndCommonPrefixes(bucket, delimiter, prefix.asScala, s3Headers) + .listObjectVersionsAndCommonPrefixes(bucket, delimiter, prefix.toScala, s3Headers) .map { case (versions, markers, _) => akka.japi.Pair(versions.asJava, markers.asJava) @@ -918,7 +918,7 @@ object S3 { s3Headers: S3Headers): Sink[ByteString, CompletionStage[MultipartUploadResult]] = S3Stream .multipartUpload(S3Location(bucket, key), contentType.asInstanceOf[ScalaContentType], s3Headers) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -986,7 +986,7 @@ object S3 { s3Headers ) .contramap[JPair[ByteString, C]](_.toScala) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -1072,7 +1072,7 @@ object S3 { previousParts.asScala.toList, contentType.asInstanceOf[ScalaContentType], s3Headers) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } @@ -1164,7 +1164,7 @@ object S3 { s3Headers ) .contramap[JPair[ByteString, C]](_.toScala) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } @@ -1284,7 +1284,7 @@ object S3 { SystemMaterializer(system).materializer, attributes ) - .toJava + .asJava /** * Copy a S3 Object by making multiple requests. @@ -1316,7 +1316,7 @@ object S3 { s3Headers ) } - .mapMaterializedValue(func(_.toJava)) + .mapMaterializedValue(func(_.asJava)) /** * Copy a S3 Object by making multiple requests. 
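Every _.toJava to _.asJava change in this file is the same mechanical rename: scala.jdk.FutureConverters in the standard library calls its extension methods asJava/asScala where scala-java8-compat's FutureConverters called them toJava/toScala. A minimal standalone sketch of the pattern, using hypothetical values:

    import java.util.concurrent.CompletionStage
    import scala.concurrent.Future
    import scala.jdk.FutureConverters._ // was: scala.compat.java8.FutureConverters._

    val mat: Future[Int] = Future.successful(1)  // hypothetical materialized value
    val stage: CompletionStage[Int] = mat.asJava // was: mat.toJava
    val back: Future[Int] = stage.asScala        // was: stage.toScala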
@@ -1438,7 +1438,7 @@ object S3 { system: ClassicActorSystemProvider, attributes: Attributes, s3Headers: S3Headers): CompletionStage[Done] = - S3Stream.makeBucket(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).toJava + S3Stream.makeBucket(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).asJava /** * Create new bucket with a given name @@ -1489,7 +1489,7 @@ object S3 { system: ClassicActorSystemProvider, attributes: Attributes, s3Headers: S3Headers): CompletionStage[Done] = - S3Stream.deleteBucket(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).toJava + S3Stream.deleteBucket(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).asJava /** * Delete bucket with a given name @@ -1551,7 +1551,7 @@ object S3 { system: ClassicActorSystemProvider, attributes: Attributes, s3Headers: S3Headers): CompletionStage[BucketAccess] = - S3Stream.checkIfBucketExists(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).toJava + S3Stream.checkIfBucketExists(bucketName, s3Headers)(SystemMaterializer(system).materializer, attributes).asJava /** * Checks whether the bucket exists and user has rights to perform ListBucket operation @@ -1618,7 +1618,7 @@ object S3 { )(implicit system: ClassicActorSystemProvider, attributes: Attributes): CompletionStage[Done] = S3Stream .deleteUpload(bucketName, key, uploadId, s3Headers)(SystemMaterializer(system).materializer, attributes) - .toJava + .asJava /** * Delete all existing parts for a specific upload diff --git a/s3/src/main/scala/akka/stream/alpakka/s3/model.scala b/s3/src/main/scala/akka/stream/alpakka/s3/model.scala index 6fd5e071b9..4e0542bc77 100644 --- a/s3/src/main/scala/akka/stream/alpakka/s3/model.scala +++ b/s3/src/main/scala/akka/stream/alpakka/s3/model.scala @@ -13,8 +13,8 @@ import akka.stream.alpakka.s3.AccessStyle.PathAccessStyle import scala.annotation.nowarn import scala.collection.immutable.Seq import scala.collection.immutable -import scala.collection.JavaConverters._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ final class MultipartUpload private (val bucket: String, val key: String, val uploadId: String) { @@ -208,7 +208,7 @@ final class MultipartUploadResult private ( def getETag: String = eTag /** Java API */ - def getVersionId: Optional[String] = versionId.asJava + def getVersionId: Optional[String] = versionId.toJava def withLocation(value: Uri): MultipartUploadResult = copy(location = value) def withBucket(value: String): MultipartUploadResult = copy(bucket = value) @@ -297,7 +297,7 @@ object MultipartUploadResult { bucket, key, eTag, - versionId.asScala + versionId.toScala ) } @@ -360,10 +360,10 @@ final class ListMultipartUploadResultUploads private (val key: String, def getUploadId: String = uploadId /** Java API */ - def getInitiator: Optional[AWSIdentity] = initiator.asJava + def getInitiator: Optional[AWSIdentity] = initiator.toJava /** Java API */ - def getOwner: Optional[AWSIdentity] = owner.asJava + def getOwner: Optional[AWSIdentity] = owner.toJava /** Java API */ def getStorageClass: String = storageClass @@ -437,7 +437,7 @@ object ListMultipartUploadResultUploads { owner: Optional[AWSIdentity], storageClass: String, initiated: Instant): ListMultipartUploadResultUploads = - apply(key, uploadId, initiator.asScala, owner.asScala, storageClass, initiated) + apply(key, uploadId, initiator.toScala, owner.toScala, storageClass,
initiated) } final class ListObjectVersionsResultVersions private (val eTag: String, @@ -462,7 +462,7 @@ final class ListObjectVersionsResultVersions private (val eTag: String, def getLastModified: Instant = lastModified /** Java API */ - def getOwner: Optional[AWSIdentity] = owner.asJava + def getOwner: Optional[AWSIdentity] = owner.toJava /** Java API */ def getSize: Long = size @@ -471,7 +471,7 @@ final class ListObjectVersionsResultVersions private (val eTag: String, def getStorageClass: String = storageClass /** Java API */ - def getVersionId: Optional[String] = versionId.asJava + def getVersionId: Optional[String] = versionId.toJava def withETag(value: String): ListObjectVersionsResultVersions = copy(eTag = value) @@ -574,7 +574,7 @@ object ListObjectVersionsResultVersions { size: Long, storageClass: String, versionId: Optional[String]): ListObjectVersionsResultVersions = - apply(eTag, isLatest, key, lastModified, owner.asScala, size, storageClass, versionId.asScala) + apply(eTag, isLatest, key, lastModified, owner.toScala, size, storageClass, versionId.toScala) } final class DeleteMarkers private (val isLatest: Boolean, @@ -593,10 +593,10 @@ final class DeleteMarkers private (val isLatest: Boolean, def getLastModified: Instant = lastModified /** Java API */ - def getOwner: Optional[AWSIdentity] = owner.asJava + def getOwner: Optional[AWSIdentity] = owner.toJava /** Java API */ - def getVersionId: Optional[String] = versionId.asJava + def getVersionId: Optional[String] = versionId.toJava def withIsLatest(value: Boolean): DeleteMarkers = copy(isLatest = value) @@ -669,7 +669,7 @@ object DeleteMarkers { lastModified: Instant, owner: Optional[AWSIdentity], versionId: Optional[String]): DeleteMarkers = - apply(isLatest, key, lastModified, owner.asScala, versionId.asScala) + apply(isLatest, key, lastModified, owner.toScala, versionId.toScala) } final class CommonPrefixes private (val prefix: String) { @@ -1129,7 +1129,7 @@ final class ObjectMetadata private ( * as calculated by Amazon S3. */ lazy val getETag: Optional[String] = - eTag.asJava + eTag.toJava /** *

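The Optional-returning getters in this model file all change the same way: scala.jdk.OptionConverters names the Option-to-Optional extension methods toJava/toScala (scala-java8-compat used asJava/asScala), and toJavaPrimitive replaces asPrimitive for the unboxed variants such as OptionalInt, as seen in the reference module earlier in this diff. A minimal standalone sketch, using hypothetical values:

    import java.util.{Optional, OptionalInt}
    import scala.jdk.OptionConverters._ // was: scala.compat.java8.OptionConverters._

    val versionId: Option[String] = Some("v1")             // hypothetical value
    val getVersionId: Optional[String] = versionId.toJava  // was: versionId.asJava
    val roundTrip: Option[String] = getVersionId.toScala   // was: getVersionId.asScala

    val bytesRead: Option[Int] = Some(42)                      // hypothetical value
    val getBytesRead: OptionalInt = bytesRead.toJavaPrimitive  // was: bytesRead.asPrimitive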
@@ -1239,7 +1239,7 @@ final class ObjectMetadata private ( * @see ObjectMetadata#setContentType(String) */ def getContentType: Optional[String] = - contentType.asJava + contentType.toJava /** * Gets the value of the Last-Modified header, indicating the date @@ -1279,7 +1279,7 @@ final class ObjectMetadata private ( * Gets the optional Cache-Control header */ def getCacheControl: Optional[String] = - cacheControl.asJava + cacheControl.toJava /** * Gets the value of the version id header. The version id will only be available @@ -1299,7 +1299,7 @@ final class ObjectMetadata private ( * * @return optional version id of the object */ - def getVersionId: Optional[String] = versionId.asJava + def getVersionId: Optional[String] = versionId.toJava } object ObjectMetadata { diff --git a/s3/src/main/scala/akka/stream/alpakka/s3/settings.scala b/s3/src/main/scala/akka/stream/alpakka/s3/settings.scala index 9aca148df4..4d80abffb4 100644 --- a/s3/src/main/scala/akka/stream/alpakka/s3/settings.scala +++ b/s3/src/main/scala/akka/stream/alpakka/s3/settings.scala @@ -18,7 +18,7 @@ import software.amazon.awssdk.auth.credentials._ import software.amazon.awssdk.regions.Region import software.amazon.awssdk.regions.providers._ -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ import scala.concurrent.duration._ import scala.util.Try @@ -135,7 +135,7 @@ final class ForwardProxy private (val scheme: String, def getPort: Int = port /** Java API */ - def getCredentials: java.util.Optional[ForwardProxyCredentials] = credentials.asJava + def getCredentials: java.util.Optional[ForwardProxyCredentials] = credentials.toJava def withScheme(value: String): ForwardProxy = copy(scheme = value) def withHost(host: String): ForwardProxy = copy(host = host) @@ -176,7 +176,7 @@ object ForwardProxy { /** Java API */ def create(host: String, port: Int, credentials: Optional[ForwardProxyCredentials]): ForwardProxy = - apply(host, port, credentials.asScala) + apply(host, port, credentials.toScala) /** Use an HTTP proxy. 
*/ def http(host: String, port: Int): ForwardProxy = new ForwardProxy("http", host, port, credentials = None) @@ -369,13 +369,13 @@ final class S3Settings private ( def pathStyleAccess: Boolean = accessStyle == PathAccessStyle /** Java API */ - def getEndpointUrl: java.util.Optional[String] = endpointUrl.asJava + def getEndpointUrl: java.util.Optional[String] = endpointUrl.toJava /** Java API */ def getListBucketApiVersion: ApiVersion = listBucketApiVersion /** Java API */ - def getForwardProxy: java.util.Optional[ForwardProxy] = forwardProxy.asJava + def getForwardProxy: java.util.Optional[ForwardProxy] = forwardProxy.toJava /** Java API */ def getAccessStyle: AccessStyle = accessStyle @@ -707,7 +707,7 @@ sealed trait BufferType { def path: Option[Path] /** Java API */ - def getPath: java.util.Optional[Path] = path.asJava + def getPath: java.util.Optional[Path] = path.toJava } case object MemoryBufferType extends BufferType { diff --git a/s3/src/test/scala/akka/stream/alpakka/s3/impl/auth/SignerSpec.scala b/s3/src/test/scala/akka/stream/alpakka/s3/impl/auth/SignerSpec.scala index f78692f0d2..4d846b93de 100644 --- a/s3/src/test/scala/akka/stream/alpakka/s3/impl/auth/SignerSpec.scala +++ b/s3/src/test/scala/akka/stream/alpakka/s3/impl/auth/SignerSpec.scala @@ -21,7 +21,7 @@ import org.scalatest.time.{Millis, Seconds, Span} import software.amazon.awssdk.auth.credentials._ import software.amazon.awssdk.regions.Region -import scala.compat.java8.OptionConverters._ +import scala.jdk.OptionConverters._ class SignerSpec(_system: ActorSystem) extends TestKit(_system) @@ -138,7 +138,7 @@ class SignerSpec(_system: ActorSystem) val srFuture = Signer.signedRequest(req, signingKey(date), signAnonymousRequests = true).runWith(Sink.head) whenReady(srFuture) { signedRequest => - signedRequest.getHeader("Authorization").asScala.value shouldEqual RawHeader( + signedRequest.getHeader("Authorization").toScala.value shouldEqual RawHeader( "Authorization", "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-content-sha256;x-amz-date, Signature=dd479fa8a80364edf2119ec24bebde66712ee9c9cb2b0d92eb3ab9ccdc0c3947" ) diff --git a/s3/src/test/scala/akka/stream/alpakka/s3/scaladsl/S3ExtSpec.scala b/s3/src/test/scala/akka/stream/alpakka/s3/scaladsl/S3ExtSpec.scala index 81c6c3f2e2..1fc631d912 100644 --- a/s3/src/test/scala/akka/stream/alpakka/s3/scaladsl/S3ExtSpec.scala +++ b/s3/src/test/scala/akka/stream/alpakka/s3/scaladsl/S3ExtSpec.scala @@ -11,7 +11,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.flatspec.AnyFlatSpecLike import org.scalatest.matchers.should.Matchers -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ class S3ExtSpec extends AnyFlatSpecLike with Matchers { it should "reuse application config from actor system" in { diff --git a/s3/src/test/scala/akka/stream/alpakka/s3/scaladsl/S3IntegrationSpec.scala b/s3/src/test/scala/akka/stream/alpakka/s3/scaladsl/S3IntegrationSpec.scala index 8dec20b48f..a5ec7dd675 100644 --- a/s3/src/test/scala/akka/stream/alpakka/s3/scaladsl/S3IntegrationSpec.scala +++ b/s3/src/test/scala/akka/stream/alpakka/s3/scaladsl/S3IntegrationSpec.scala @@ -30,7 +30,7 @@ import scala.annotation.tailrec import scala.collection.immutable import scala.concurrent.duration._ import scala.concurrent.{Await, ExecutionContext, Future} -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ trait S3IntegrationSpec extends AnyFlatSpecLike diff --git 
a/slick/src/main/scala/akka/stream/alpakka/slick/javadsl/Slick.scala b/slick/src/main/scala/akka/stream/alpakka/slick/javadsl/Slick.scala index a8dc4c0b99..89915f3e00 100644 --- a/slick/src/main/scala/akka/stream/alpakka/slick/javadsl/Slick.scala +++ b/slick/src/main/scala/akka/stream/alpakka/slick/javadsl/Slick.scala @@ -21,8 +21,8 @@ import slick.jdbc.SQLActionBuilder import slick.jdbc.SetParameter import slick.jdbc.SimpleJdbcAction -import scala.compat.java8.FunctionConverters._ -import scala.compat.java8.FutureConverters._ +import scala.jdk.FunctionConverters._ +import scala.jdk.FutureConverters._ import scala.concurrent.ExecutionContext object Slick { @@ -305,7 +305,7 @@ object Slick { ): Sink[T, CompletionStage[Done]] = ScalaSlick .sink[T](parallelism, toDBIO(toStatement))(session) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -329,7 +329,7 @@ object Slick { ): Sink[T, CompletionStage[Done]] = ScalaSlick .sink[T](parallelism, toDBIO(toStatement))(session) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** diff --git a/sns/src/main/scala/akka/stream/alpakka/sns/scaladsl/SnsPublisher.scala b/sns/src/main/scala/akka/stream/alpakka/sns/scaladsl/SnsPublisher.scala index b26ad0c085..f38a41362a 100644 --- a/sns/src/main/scala/akka/stream/alpakka/sns/scaladsl/SnsPublisher.scala +++ b/sns/src/main/scala/akka/stream/alpakka/sns/scaladsl/SnsPublisher.scala @@ -11,7 +11,7 @@ import software.amazon.awssdk.services.sns.model.{PublishRequest, PublishRespons import scala.concurrent.Future -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ /** * Scala API @@ -47,7 +47,7 @@ object SnsPublisher { )(implicit snsClient: SnsAsyncClient): Flow[PublishRequest, PublishResponse, NotUsed] = { require(snsClient != null, "The `SnsAsyncClient` passed in may not be null.") Flow[PublishRequest] - .mapAsyncUnordered(settings.concurrency)(snsClient.publish(_).toScala) + .mapAsyncUnordered(settings.concurrency)(snsClient.publish(_).asScala) } /** diff --git a/solr/src/main/scala/akka/stream/alpakka/solr/SolrMessages.scala b/solr/src/main/scala/akka/stream/alpakka/solr/SolrMessages.scala index f17ea9fd2f..d91c9b26a7 100644 --- a/solr/src/main/scala/akka/stream/alpakka/solr/SolrMessages.scala +++ b/solr/src/main/scala/akka/stream/alpakka/solr/SolrMessages.scala @@ -7,7 +7,7 @@ package akka.stream.alpakka.solr import akka.NotUsed import akka.annotation.InternalApi -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ object WriteMessage { def createUpsertMessage[T](source: T): WriteMessage[T, NotUsed] = diff --git a/solr/src/main/scala/akka/stream/alpakka/solr/impl/SolrFlowStage.scala b/solr/src/main/scala/akka/stream/alpakka/solr/impl/SolrFlowStage.scala index 09602edae6..4497b906a9 100644 --- a/solr/src/main/scala/akka/stream/alpakka/solr/impl/SolrFlowStage.scala +++ b/solr/src/main/scala/akka/stream/alpakka/solr/impl/SolrFlowStage.scala @@ -19,7 +19,7 @@ import org.apache.solr.common.SolrInputDocument import scala.annotation.tailrec import scala.util.control.NonFatal import scala.collection.immutable -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Internal API diff --git a/solr/src/main/scala/akka/stream/alpakka/solr/javadsl/SolrFlow.scala b/solr/src/main/scala/akka/stream/alpakka/solr/javadsl/SolrFlow.scala index e6d6f9f049..5b12c22a75 100644 --- a/solr/src/main/scala/akka/stream/alpakka/solr/javadsl/SolrFlow.scala +++ 
b/solr/src/main/scala/akka/stream/alpakka/solr/javadsl/SolrFlow.scala @@ -13,7 +13,7 @@ import akka.stream.scaladsl.Flow import org.apache.solr.client.solrj.SolrClient import org.apache.solr.common.SolrInputDocument -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable /** diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/SqsAckGroupedSettings.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/SqsAckGroupedSettings.scala index 4d4d6e236d..d57fc3f617 100644 --- a/sqs/src/main/scala/akka/stream/alpakka/sqs/SqsAckGroupedSettings.scala +++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/SqsAckGroupedSettings.scala @@ -5,7 +5,7 @@ package akka.stream.alpakka.sqs import scala.concurrent.duration._ -import akka.util.JavaDurationConverters._ +import scala.jdk.DurationConverters._ final class SqsAckGroupedSettings private (val maxBatchSize: Int, val maxBatchWait: scala.concurrent.duration.FiniteDuration, @@ -74,7 +74,7 @@ object SqsAckGroupedSettings { concurrentRequests: Int ): SqsAckGroupedSettings = new SqsAckGroupedSettings( maxBatchSize, - maxBatchWait.asScala, + maxBatchWait.toScala, concurrentRequests ) } diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/SqsSourceSettings.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/SqsSourceSettings.scala index e46792bab5..0545a131f5 100644 --- a/sqs/src/main/scala/akka/stream/alpakka/sqs/SqsSourceSettings.scala +++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/SqsSourceSettings.scala @@ -9,7 +9,7 @@ import java.time.temporal.ChronoUnit import software.amazon.awssdk.services.sqs.model import scala.collection.immutable -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.concurrent.duration.FiniteDuration final class SqsSourceSettings private ( diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/impl/BalancingMapAsync.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/impl/BalancingMapAsync.scala index 34049e4214..28b8622682 100644 --- a/sqs/src/main/scala/akka/stream/alpakka/sqs/impl/BalancingMapAsync.scala +++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/impl/BalancingMapAsync.scala @@ -11,8 +11,8 @@ import akka.stream._ import akka.stream.impl.fusing.MapAsync import akka.stream.impl.{BoundedBuffer, Buffer, FixedSizeBuffer} import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} - import scala.annotation.tailrec +import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.util.control.NonFatal import scala.util.{Failure, Success} @@ -80,7 +80,7 @@ import scala.util.{Failure, Success} buffer.enqueue(holder) future.value match { - case None => future.onComplete(holder)(akka.dispatch.ExecutionContexts.parasitic) + case None => future.onComplete(holder)(ExecutionContext.parasitic) case Some(v) => // #20217 the future is already here, optimization: avoid scheduling it on the dispatcher and // run the logic directly on this thread diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsAckSink.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsAckSink.scala index 5a6f5e3ad8..5f0c0bd454 100644 --- a/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsAckSink.scala +++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsAckSink.scala @@ -11,7 +11,7 @@ import akka.stream.alpakka.sqs.{MessageAction, SqsAckGroupedSettings, SqsAckSett import akka.stream.javadsl.Sink import software.amazon.awssdk.services.sqs.SqsAsyncClient -import 
scala.compat.java8.FutureConverters.FutureOps +import scala.jdk.FutureConverters.FutureOps /** * Java API to create acknowledging sinks. @@ -26,7 +26,7 @@ object SqsAckSink { sqsClient: SqsAsyncClient): Sink[MessageAction, CompletionStage[Done]] = akka.stream.alpakka.sqs.scaladsl.SqsAckSink .apply(queueUrl, settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -37,6 +37,6 @@ object SqsAckSink { sqsClient: SqsAsyncClient): Sink[MessageAction, CompletionStage[Done]] = akka.stream.alpakka.sqs.scaladsl.SqsAckSink .grouped(queueUrl, settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsPublishFlow.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsPublishFlow.scala index 9c33eeeb77..a6ce6d5a38 100644 --- a/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsPublishFlow.scala +++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsPublishFlow.scala @@ -18,7 +18,7 @@ import akka.stream.scaladsl.{Flow => SFlow} import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model.SendMessageRequest -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ /** * Java API to create SQS flows. diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsPublishSink.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsPublishSink.scala index 2b16ddacfa..96984519e0 100644 --- a/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsPublishSink.scala +++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/javadsl/SqsPublishSink.scala @@ -13,8 +13,8 @@ import akka.stream.scaladsl.{Flow, Keep} import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model.SendMessageRequest -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters.FutureOps +import scala.jdk.CollectionConverters._ +import scala.jdk.FutureConverters.FutureOps /** * Java API to create SQS Sinks. 
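Unlike the future and option converters, the collection converter migration in the sinks below is import-only: scala.jdk.CollectionConverters kept the asScala/asJava method names of the deprecated scala.collection.JavaConverters, so call sites such as the batch sinks' .map(_.asScala) are untouched. A minimal standalone sketch, using hypothetical values:

    import scala.jdk.CollectionConverters._ // was: scala.collection.JavaConverters._

    val javaMessages: java.lang.Iterable[String] = java.util.List.of("m1", "m2") // hypothetical values
    val scalaMessages: List[String] = javaMessages.asScala.toList                // same method name as before
    val backToJava: java.util.List[String] = scalaMessages.asJava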
@@ -27,7 +27,7 @@ object SqsPublishSink { def create(queueUrl: String, settings: SqsPublishSettings, sqsClient: SqsAsyncClient): Sink[String, CompletionStage[Done]] = - scaladsl.SqsPublishSink.apply(queueUrl, settings)(sqsClient).mapMaterializedValue(_.toJava).asJava + scaladsl.SqsPublishSink.apply(queueUrl, settings)(sqsClient).mapMaterializedValue(_.asJava).asJava /** * creates a [[akka.stream.javadsl.Sink Sink]] to publish messages to a SQS queue using an [[software.amazon.awssdk.services.sqs.SqsAsyncClient SqsAsyncClient]] @@ -37,7 +37,7 @@ object SqsPublishSink { sqsClient: SqsAsyncClient): Sink[SendMessageRequest, CompletionStage[Done]] = scaladsl.SqsPublishSink .messageSink(queueUrl, settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -47,7 +47,7 @@ object SqsPublishSink { sqsClient: SqsAsyncClient): Sink[SendMessageRequest, CompletionStage[Done]] = scaladsl.SqsPublishSink .messageSink(settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -57,7 +57,7 @@ object SqsPublishSink { def grouped(queueUrl: String, settings: SqsPublishGroupedSettings, sqsClient: SqsAsyncClient): Sink[String, CompletionStage[Done]] = - scaladsl.SqsPublishSink.grouped(queueUrl, settings)(sqsClient).mapMaterializedValue(_.toJava).asJava + scaladsl.SqsPublishSink.grouped(queueUrl, settings)(sqsClient).mapMaterializedValue(_.asJava).asJava /** * creates a [[akka.stream.javadsl.Sink Sink]] that groups messages and publishes them in batches to a SQS queue using an [[software.amazon.awssdk.services.sqs.SqsAsyncClient SqsAsyncClient]] @@ -68,7 +68,7 @@ object SqsPublishSink { sqsClient: SqsAsyncClient): Sink[SendMessageRequest, CompletionStage[Done]] = scaladsl.SqsPublishSink .groupedMessageSink(queueUrl, settings)(sqsClient) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -81,7 +81,7 @@ object SqsPublishSink { Flow[java.lang.Iterable[String]] .map(_.asScala) .toMat(scaladsl.SqsPublishSink.batch(queueUrl, settings)(sqsClient))(Keep.right) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava /** @@ -95,6 +95,6 @@ object SqsPublishSink { Flow[java.lang.Iterable[SendMessageRequest]] .map(_.asScala) .toMat(scaladsl.SqsPublishSink.batchedMessageSink(queueUrl, settings)(sqsClient))(Keep.right) - .mapMaterializedValue(_.toJava) + .mapMaterializedValue(_.asJava) .asJava } diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsAckFlow.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsAckFlow.scala index 31ec9273db..c06f820030 100644 --- a/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsAckFlow.scala +++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsAckFlow.scala @@ -8,7 +8,6 @@ import java.util.concurrent.CompletionException import akka.NotUsed import akka.annotation.{ApiMayChange, InternalApi} -import akka.dispatch.ExecutionContexts.parasitic import akka.stream.FlowShape import akka.stream.alpakka.sqs.MessageAction._ import akka.stream.alpakka.sqs.SqsAckResult._ @@ -17,10 +16,10 @@ import akka.stream.alpakka.sqs._ import akka.stream.scaladsl.{Flow, GraphDSL, Merge, Partition} import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model._ - -import scala.collection.JavaConverters._ +import scala.jdk.CollectionConverters._ import scala.collection.immutable -import scala.compat.java8.FutureConverters._ +import scala.jdk.FutureConverters._ +import scala.concurrent.ExecutionContext 
import scala.concurrent.Future /** @@ -48,8 +47,8 @@ object SqsAckFlow { sqsClient .deleteMessage(request) - .toScala - .map(resp => new SqsDeleteResult(messageAction, resp))(parasitic) + .asScala + .map(resp => new SqsDeleteResult(messageAction, resp))(ExecutionContext.parasitic) case messageAction: MessageAction.ChangeMessageVisibility => val request = @@ -62,8 +61,8 @@ object SqsAckFlow { sqsClient .changeMessageVisibility(request) - .toScala - .map(resp => new SqsChangeMessageVisibilityResult(messageAction, resp))(parasitic) + .asScala + .map(resp => new SqsChangeMessageVisibilityResult(messageAction, resp))(ExecutionContext.parasitic) case messageAction: MessageAction.Ignore => Future.successful(new SqsIgnoreResult(messageAction)) @@ -128,7 +127,7 @@ object SqsAckFlow { case (actions: immutable.Seq[Delete], request) => sqsClient .deleteMessageBatch(request) - .toScala + .asScala .map { case response if response.failed().isEmpty => val responseMetadata = response.responseMetadata() @@ -145,13 +144,13 @@ object SqsAckFlow { numberOfMessages, s"Some messages are failed to delete. $nrOfFailedMessages of $numberOfMessages messages are failed" ) - }(parasitic) + }(ExecutionContext.parasitic) .recoverWith { case e: CompletionException => Future.failed(new SqsBatchException(request.entries().size(), e.getMessage, e.getCause)) case e => Future.failed(new SqsBatchException(request.entries().size(), e.getMessage, e)) - }(parasitic) + }(ExecutionContext.parasitic) } .mapConcat(identity) } @@ -182,7 +181,7 @@ object SqsAckFlow { case (actions, request) => sqsClient .changeMessageVisibilityBatch(request) - .toScala + .asScala .map { case response if response.failed().isEmpty => val responseMetadata = response.responseMetadata() @@ -199,13 +198,13 @@ object SqsAckFlow { numberOfMessages, s"Some messages are failed to change visibility. $nrOfFailedMessages of $numberOfMessages messages are failed" ) - }(parasitic) + }(ExecutionContext.parasitic) .recoverWith { case e: CompletionException => Future.failed(new SqsBatchException(request.entries().size(), e.getMessage, e.getCause)) case e => Future.failed(new SqsBatchException(request.entries().size(), e.getMessage, e)) - }(parasitic) + }(ExecutionContext.parasitic) } .mapConcat(identity) diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsPublishFlow.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsPublishFlow.scala index 6d2b43b520..e56e1a9cb3 100644 --- a/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsPublishFlow.scala +++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsPublishFlow.scala @@ -8,14 +8,14 @@ import java.util.concurrent.CompletionException import akka.NotUsed import akka.annotation.ApiMayChange -import akka.dispatch.ExecutionContexts.parasitic import akka.stream.alpakka.sqs.{SqsBatchException, _} import akka.stream.scaladsl.{Flow, Source} import software.amazon.awssdk.services.sqs.SqsAsyncClient import software.amazon.awssdk.services.sqs.model._ -import scala.collection.JavaConverters._ -import scala.compat.java8.FutureConverters._ +import scala.jdk.CollectionConverters._ +import scala.concurrent.ExecutionContext +import scala.jdk.FutureConverters._ /** * Scala API to create publishing SQS flows. 
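The executor swap below mirrors the rest of the patch: akka.dispatch.ExecutionContexts.parasitic is Akka-internal API that this upgrade drops, and scala.concurrent.ExecutionContext.parasitic (standard library, Scala 2.13+) is the replacement. Both run callbacks synchronously on the thread that completes the future, which is appropriate for the cheap result-wrapping maps in these flows. A minimal standalone sketch, with a hypothetical stand-in for an SDK response future:

    import scala.concurrent.{ExecutionContext, Future}

    val response: Future[Int] = Future.successful(200) // hypothetical stand-in for an SDK response
    // was: response.map(_.toString)(akka.dispatch.ExecutionContexts.parasitic)
    val wrapped: Future[String] = response.map(_.toString)(ExecutionContext.parasitic)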
diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsPublishFlow.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsPublishFlow.scala
index 6d2b43b520..e56e1a9cb3 100644
--- a/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsPublishFlow.scala
+++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsPublishFlow.scala
@@ -8,14 +8,14 @@ import java.util.concurrent.CompletionException
 
 import akka.NotUsed
 import akka.annotation.ApiMayChange
-import akka.dispatch.ExecutionContexts.parasitic
 import akka.stream.alpakka.sqs.{SqsBatchException, _}
 import akka.stream.scaladsl.{Flow, Source}
 import software.amazon.awssdk.services.sqs.SqsAsyncClient
 import software.amazon.awssdk.services.sqs.model._
 
-import scala.collection.JavaConverters._
-import scala.compat.java8.FutureConverters._
+import scala.jdk.CollectionConverters._
+import scala.concurrent.ExecutionContext
+import scala.jdk.FutureConverters._
 
 /**
  * Scala API to create publishing SQS flows.
@@ -52,8 +52,8 @@ object SqsPublishFlow {
       .mapAsync(settings.maxInFlight) { req =>
         sqsClient
           .sendMessage(req)
-          .toScala
-          .map(req -> _)(parasitic)
+          .asScala
+          .map(req -> _)(ExecutionContext.parasitic)
       }
       .map { case (request, response) => new SqsPublishResult(request, response) }
 }
@@ -103,7 +103,7 @@ object SqsPublishFlow {
       case (requests, batchRequest) =>
         sqsClient
           .sendMessageBatch(batchRequest)
-          .toScala
+          .asScala
           .map {
             case response if response.failed().isEmpty =>
               val responseMetadata = response.responseMetadata()
@@ -120,7 +120,7 @@ object SqsPublishFlow {
                 numberOfMessages,
                 s"Some messages are failed to send. $nrOfFailedMessages of $numberOfMessages messages are failed"
               )
-          }(parasitic)
+          }(ExecutionContext.parasitic)
       }
       .recoverWithRetries(1, {
         case e: CompletionException =>
diff --git a/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsSource.scala b/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsSource.scala
index ab74fa353b..c8ef8df269 100644
--- a/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsSource.scala
+++ b/sqs/src/main/scala/akka/stream/alpakka/sqs/scaladsl/SqsSource.scala
@@ -12,8 +12,8 @@ import akka.stream.scaladsl.{Flow, Source}
 import software.amazon.awssdk.services.sqs.SqsAsyncClient
 import software.amazon.awssdk.services.sqs.model._
 
-import scala.collection.JavaConverters._
-import scala.compat.java8.FutureConverters._
+import scala.jdk.CollectionConverters._
+import scala.jdk.FutureConverters._
 
 /**
  * Scala API to create SQS sources.
@@ -53,11 +53,11 @@
 
   private def resolveHandler(parallelism: Int)(implicit sqsClient: SqsAsyncClient) =
     if (parallelism == 1) {
-      Flow[ReceiveMessageRequest].mapAsyncUnordered(parallelism)(sqsClient.receiveMessage(_).toScala)
+      Flow[ReceiveMessageRequest].mapAsyncUnordered(parallelism)(sqsClient.receiveMessage(_).asScala)
     } else {
       BalancingMapAsync[ReceiveMessageRequest, ReceiveMessageResponse](
         parallelism,
-        sqsClient.receiveMessage(_).toScala,
+        sqsClient.receiveMessage(_).asScala,
         (response, _) => if (response.messages().isEmpty) 1 else parallelism
       )
     }
diff --git a/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/DefaultTestContext.scala b/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/DefaultTestContext.scala
index 70e33620e5..4b8575ba3d 100644
--- a/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/DefaultTestContext.scala
+++ b/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/DefaultTestContext.scala
@@ -22,7 +22,7 @@ import software.amazon.awssdk.regions.Region
 import software.amazon.awssdk.services.sqs.SqsAsyncClient
 import software.amazon.awssdk.services.sqs.model.CreateQueueRequest
 //#init-client
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 import scala.concurrent.duration._
 import scala.util.Random
 
diff --git a/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/SqsSourceMockSpec.scala b/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/SqsSourceMockSpec.scala
index 8fca967606..a282256e9e 100644
--- a/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/SqsSourceMockSpec.scala
+++ b/sqs/src/test/scala/akka/stream/alpakka/sqs/scaladsl/SqsSourceMockSpec.scala
@@ -18,7 +18,7 @@ import org.scalatest.matchers.should.Matchers
 import software.amazon.awssdk.services.sqs.SqsAsyncClient
 import software.amazon.awssdk.services.sqs.model.{Message, ReceiveMessageRequest, ReceiveMessageResponse}
 
-import scala.compat.java8.FutureConverters._
+import scala.jdk.FutureConverters._
 import scala.concurrent.Future
 import scala.concurrent.duration._
 
@@ -76,7 +76,7 @@ class SqsSourceMockSpec extends AnyFlatSpec with Matchers with DefaultTestContex
               .build()
           )
         }(system.dispatcher)
-          .toJava
+          .asJava
           .toCompletableFuture
     })
 
@@ -118,7 +118,7 @@ class SqsSourceMockSpec extends AnyFlatSpec with Matchers with DefaultTestContex
               .build()
           )
         }(system.dispatcher)
-          .toJava
+          .asJava
           .toCompletableFuture
       } else {
         CompletableFuture.completedFuture(
diff --git a/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala b/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala
index 13f1402f4c..5793aa7492 100644
--- a/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala
+++ b/sqs/src/test/scala/docs/scaladsl/SqsAckSpec.scala
@@ -21,7 +21,7 @@ import org.scalatest.matchers.should.Matchers
 import software.amazon.awssdk.services.sqs.SqsAsyncClient
 import software.amazon.awssdk.services.sqs.model._
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 import scala.concurrent.duration._
 
 class SqsAckSpec extends AnyFlatSpec with Matchers with DefaultTestContext with LogCapturing {
diff --git a/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala b/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala
index 240a0fa673..418b789fe8 100644
--- a/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala
+++ b/sqs/src/test/scala/docs/scaladsl/SqsPublishSpec.scala
@@ -16,7 +16,7 @@ import org.scalatest.matchers.should.Matchers
 import software.amazon.awssdk.services.sqs.SqsAsyncClient
 import software.amazon.awssdk.services.sqs.model.{Message, ReceiveMessageRequest, SendMessageRequest}
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 import scala.concurrent.duration._
 
 class SqsPublishSpec extends AnyFlatSpec with Matchers with DefaultTestContext with LogCapturing {
diff --git a/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala b/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala
index cd5abda96a..5ee25b2b15 100644
--- a/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala
+++ b/sqs/src/test/scala/docs/scaladsl/SqsSourceSpec.scala
@@ -29,7 +29,7 @@ import software.amazon.awssdk.services.sqs.model.{
   SendMessageRequest
 }
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 import scala.collection.immutable
 import scala.concurrent.Future
 import scala.concurrent.duration._
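Note: `scala.jdk.CollectionConverters._` is a drop-in replacement for the deprecated `scala.collection.JavaConverters._` import in all of the test sources above; the `asScala`/`asJava` extension methods keep their names. A minimal sketch (the values are illustrative, not from the patch):

  import scala.jdk.CollectionConverters._ // replaces scala.collection.JavaConverters._

  val javaList: java.util.List[String] = java.util.Arrays.asList("a", "b")
  val scalaView: Iterable[String] = javaList.asScala // same method name as before
  val roundTrip: java.util.List[String] = List("x", "y").asJava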
diff --git a/sse/src/main/scala/akka/stream/alpakka/sse/javadsl/EventSource.scala b/sse/src/main/scala/akka/stream/alpakka/sse/javadsl/EventSource.scala
index 98afd59e9f..a8f9493b81 100644
--- a/sse/src/main/scala/akka/stream/alpakka/sse/javadsl/EventSource.scala
+++ b/sse/src/main/scala/akka/stream/alpakka/sse/javadsl/EventSource.scala
@@ -17,8 +17,8 @@ import java.util.function.{Function => JFunction}
 
 import akka.actor.ClassicActorSystemProvider
 
-import scala.compat.java8.FutureConverters
-import scala.compat.java8.OptionConverters
+import scala.jdk.FutureConverters
+import scala.jdk.OptionConverters
 
 /**
  * This stream processing stage establishes a continuous source of server-sent events from the given URI.
@@ -82,8 +82,8 @@ object EventSource {
       scaladsl
         .EventSource(
           uri.asScala,
-          send(_).toScala.map(_.asInstanceOf[SHttpResponse])(system.classicSystem.dispatcher),
-          lastEventId.asScala
+          send(_).asScala.map(_.asInstanceOf[SHttpResponse])(system.classicSystem.dispatcher),
+          lastEventId.toScala
         )(system)
         .map(v => v: ServerSentEvent)
     eventSource.asJava
@@ -106,8 +106,8 @@ object EventSource {
       scaladsl
         .EventSource(
           uri.asScala,
-          send(_).toScala.map(_.asInstanceOf[SHttpResponse])(mat.executionContext),
-          lastEventId.asScala
+          send(_).asScala.map(_.asInstanceOf[SHttpResponse])(mat.executionContext),
+          lastEventId.toScala
         )(mat.system)
         .map(v => v: ServerSentEvent)
     eventSource.asJava
diff --git a/testkit/src/main/scala/akka/stream/alpakka/testkit/CapturingAppender.scala b/testkit/src/main/scala/akka/stream/alpakka/testkit/CapturingAppender.scala
index 02192edd13..47ab1ce0af 100644
--- a/testkit/src/main/scala/akka/stream/alpakka/testkit/CapturingAppender.scala
+++ b/testkit/src/main/scala/akka/stream/alpakka/testkit/CapturingAppender.scala
@@ -90,7 +90,7 @@ import org.slf4j.LoggerFactory
    * Also clears the buffer..
    */
   def flush(sourceActorSystem: Option[String]): Unit = synchronized {
-    import scala.collection.JavaConverters._
+    import scala.jdk.CollectionConverters._
     val logbackLogger = getLogbackLogger(classOf[CapturingAppender].getName + "Delegate")
     val appenders = logbackLogger.iteratorForAppenders().asScala.filterNot(_ == this).toList
     for (event <- buffer; appender <- appenders) {
diff --git a/text/src/test/scala/akka/stream/alpakka/text/scaladsl/CharsetCodingFlowsSpec.scala b/text/src/test/scala/akka/stream/alpakka/text/scaladsl/CharsetCodingFlowsSpec.scala
index a95e48dead..c2d494f63d 100644
--- a/text/src/test/scala/akka/stream/alpakka/text/scaladsl/CharsetCodingFlowsSpec.scala
+++ b/text/src/test/scala/akka/stream/alpakka/text/scaladsl/CharsetCodingFlowsSpec.scala
@@ -64,7 +64,7 @@ class CharsetCodingFlowsSpec
     import java.nio.charset.StandardCharsets
 
     // #encoding
-    import scala.collection.JavaConverters._
+    import scala.jdk.CollectionConverters._
     val targetFile = Paths.get("target/outdata.txt")
     val strings = System.getProperties.asScala.map(p => p._1 + " -> " + p._2).toList
     val stringSource: Source[String, _] = Source(strings)
diff --git a/text/src/test/scala/docs/scaladsl/CharsetCodingFlowsDoc.scala b/text/src/test/scala/docs/scaladsl/CharsetCodingFlowsDoc.scala
index 7b0cb2f469..9c844ececd 100644
--- a/text/src/test/scala/docs/scaladsl/CharsetCodingFlowsDoc.scala
+++ b/text/src/test/scala/docs/scaladsl/CharsetCodingFlowsDoc.scala
@@ -42,7 +42,7 @@ class CharsetCodingFlowsDoc
     import akka.stream.scaladsl.FileIO
 
     // #encoding
-    import scala.collection.JavaConverters._
+    import scala.jdk.CollectionConverters._
     val targetFile = Paths.get("target/outdata.txt")
     val strings = System.getProperties.asScala.map(p => p._1 + " -> " + p._2).toList
     // #encoding
diff --git a/udp/src/main/scala/akka/stream/alpakka/udp/javadsl/Udp.scala b/udp/src/main/scala/akka/stream/alpakka/udp/javadsl/Udp.scala
index 90fb5e6985..72f2c8cb29 100644
--- a/udp/src/main/scala/akka/stream/alpakka/udp/javadsl/Udp.scala
+++ b/udp/src/main/scala/akka/stream/alpakka/udp/javadsl/Udp.scala
@@ -13,9 +13,9 @@ import akka.actor.{ActorSystem, ClassicActorSystemProvider}
 import akka.stream.alpakka.udp.Datagram
 import akka.stream.javadsl.{Flow, Sink}
 import akka.stream.alpakka.udp.scaladsl
-import akka.util.ccompat.JavaConverters._
 
-import scala.compat.java8.FutureConverters._
+import scala.jdk.CollectionConverters._
+import scala.jdk.FutureConverters._
 
 object Udp {
   import java.lang.{Iterable => JIterable}
@@ -111,7 +111,7 @@ object Udp {
    */
   def bindFlow(localAddress: InetSocketAddress,
               system: ActorSystem): Flow[Datagram, Datagram, CompletionStage[InetSocketAddress]] =
-    scaladsl.Udp.bindFlow(localAddress)(system).mapMaterializedValue(_.toJava).asJava
+    scaladsl.Udp.bindFlow(localAddress)(system).mapMaterializedValue(_.asJava).asJava
 
   /**
    * Creates a flow that upon materialization binds to the given `localAddress`. All incoming
@@ -123,7 +123,7 @@ object Udp {
    */
   def bindFlow(localAddress: InetSocketAddress,
               system: ClassicActorSystemProvider): Flow[Datagram, Datagram, CompletionStage[InetSocketAddress]] =
-    scaladsl.Udp.bindFlow(localAddress)(system).mapMaterializedValue(_.toJava).asJava
+    scaladsl.Udp.bindFlow(localAddress)(system).mapMaterializedValue(_.asJava).asJava
 
   /**
    * Creates a flow that upon materialization binds to the given `localAddress`. All incoming
@@ -137,7 +137,7 @@ object Udp {
   def bindFlow(localAddress: InetSocketAddress,
               options: JIterable[SocketOption],
               system: ActorSystem): Flow[Datagram, Datagram, CompletionStage[InetSocketAddress]] =
-    scaladsl.Udp.bindFlow(localAddress, options.asScala.toIndexedSeq)(system).mapMaterializedValue(_.toJava).asJava
+    scaladsl.Udp.bindFlow(localAddress, options.asScala.toIndexedSeq)(system).mapMaterializedValue(_.asJava).asJava
 
   /**
    * Creates a flow that upon materialization binds to the given `localAddress`. All incoming
@@ -151,5 +151,5 @@ object Udp {
   def bindFlow(localAddress: InetSocketAddress,
               options: JIterable[SocketOption],
               system: ClassicActorSystemProvider): Flow[Datagram, Datagram, CompletionStage[InetSocketAddress]] =
-    scaladsl.Udp.bindFlow(localAddress, options.asScala.toIndexedSeq)(system).mapMaterializedValue(_.toJava).asJava
+    scaladsl.Udp.bindFlow(localAddress, options.asScala.toIndexedSeq)(system).mapMaterializedValue(_.asJava).asJava
 }
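Note: in Udp.scala the Akka-internal `akka.util.ccompat.JavaConverters._` shim is likewise replaced by `scala.jdk.CollectionConverters._`, and call sites such as `options.asScala.toIndexedSeq` compile unchanged. A minimal sketch of that `java.lang.Iterable` conversion (the helper name is hypothetical):

  import scala.jdk.CollectionConverters._ // replaces akka.util.ccompat.JavaConverters._

  // same call chain as in the bindFlow overloads above
  def toIndexed[A](options: java.lang.Iterable[A]): IndexedSeq[A] =
    options.asScala.toIndexedSeq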
diff --git a/unix-domain-socket/src/main/scala/akka/stream/alpakka/unixdomainsocket/javadsl/UnixDomainSocket.scala b/unix-domain-socket/src/main/scala/akka/stream/alpakka/unixdomainsocket/javadsl/UnixDomainSocket.scala
index 432da61f42..196080f908 100644
--- a/unix-domain-socket/src/main/scala/akka/stream/alpakka/unixdomainsocket/javadsl/UnixDomainSocket.scala
+++ b/unix-domain-socket/src/main/scala/akka/stream/alpakka/unixdomainsocket/javadsl/UnixDomainSocket.scala
@@ -9,14 +9,15 @@ import java.nio.file.Path
 import java.util.Optional
 import java.util.concurrent.CompletionStage
 
-import scala.compat.java8.OptionConverters._
-import scala.compat.java8.FutureConverters._
+import scala.jdk.OptionConverters._
+import scala.jdk.FutureConverters._
+import scala.concurrent.ExecutionContext
+
 import akka.NotUsed
 import akka.actor.{ClassicActorSystemProvider, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider}
 import akka.stream.javadsl.{Flow, Source}
 import akka.stream.Materializer
 import akka.util.ByteString
-
 import scala.concurrent.duration.Duration
 
 object UnixDomainSocket extends ExtensionId[UnixDomainSocket] with ExtensionIdProvider {
@@ -37,7 +38,7 @@ object UnixDomainSocket extends ExtensionId[UnixDomainSocket] with ExtensionIdPr
      *
      * The produced [[java.util.concurrent.CompletionStage]] is fulfilled when the unbinding has been completed.
      */
-    def unbind(): CompletionStage[Unit] = delegate.unbind().toJava
+    def unbind(): CompletionStage[Unit] = delegate.unbind().asJava
   }
 
 /**
@@ -106,7 +107,6 @@ object UnixDomainSocket extends ExtensionId[UnixDomainSocket] with ExtensionIdPr
 
 final class UnixDomainSocket(system: ExtendedActorSystem) extends akka.actor.Extension {
   import UnixDomainSocket._
-  import akka.dispatch.ExecutionContexts.parasitic
 
   private lazy val delegate: scaladsl.UnixDomainSocket =
     scaladsl.UnixDomainSocket.apply(system)
@@ -136,7 +136,7 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends akka.actor.Ext
       delegate
         .bind(path, backlog, halfClose)
         .map(new IncomingConnection(_))
-        .mapMaterializedValue(_.map(new ServerBinding(_))(parasitic).toJava)
+        .mapMaterializedValue(_.map(new ServerBinding(_))(ExecutionContext.parasitic).asJava)
     )
 
   /**
@@ -152,7 +152,7 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends akka.actor.Ext
       delegate
         .bind(path)
         .map(new IncomingConnection(_))
-        .mapMaterializedValue(_.map(new ServerBinding(_))(parasitic).toJava)
+        .mapMaterializedValue(_.map(new ServerBinding(_))(ExecutionContext.parasitic).asJava)
     )
 
   /**
@@ -182,8 +182,8 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends akka.actor.Ext
                          connectTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
     Flow.fromGraph(
       delegate
-        .outgoingConnection(remoteAddress, localAddress.asScala, halfClose, connectTimeout)
-        .mapMaterializedValue(_.map(new OutgoingConnection(_))(parasitic).toJava)
+        .outgoingConnection(remoteAddress, localAddress.toScala, halfClose, connectTimeout)
+        .mapMaterializedValue(_.map(new OutgoingConnection(_))(ExecutionContext.parasitic).asJava)
     )
 
   /**
@@ -200,7 +200,7 @@ final class UnixDomainSocket(system: ExtendedActorSystem) extends akka.actor.Ext
 
     Flow.fromGraph(
       delegate
        .outgoingConnection(new UnixSocketAddress(path))
-        .mapMaterializedValue(_.map(new OutgoingConnection(_))(parasitic).toJava)
+        .mapMaterializedValue(_.map(new OutgoingConnection(_))(ExecutionContext.parasitic).asJava)
     )
 }
diff --git a/xml/src/main/scala/akka/stream/alpakka/xml/javadsl/XmlParsing.scala b/xml/src/main/scala/akka/stream/alpakka/xml/javadsl/XmlParsing.scala
index ab9b0bd517..1dfe3e731d 100644
--- a/xml/src/main/scala/akka/stream/alpakka/xml/javadsl/XmlParsing.scala
+++ b/xml/src/main/scala/akka/stream/alpakka/xml/javadsl/XmlParsing.scala
@@ -13,7 +13,7 @@ import org.w3c.dom.Element
 
 import java.util.function.Consumer
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 object XmlParsing {
 
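Note: unlike the collection converters, `scala.jdk.OptionConverters._` renames the extension methods: java8-compat's `asScala`/`asJava` become `toScala`/`toJava`. That is why `localAddress.asScala` above becomes `localAddress.toScala`, and why every `Optional` bridge in model.scala below changes even though the semantics are identical. A minimal sketch (the values are illustrative):

  import java.util.Optional
  import scala.jdk.OptionConverters._ // replaces scala.compat.java8.OptionConverters._

  val fromJava: Option[String] = Optional.of("prefix").toScala // was: .asScala
  val toJavaSide: Optional[String] = Option("ns").toJava       // was: .asJava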
diff --git a/xml/src/main/scala/akka/stream/alpakka/xml/model.scala b/xml/src/main/scala/akka/stream/alpakka/xml/model.scala
index e1983546f5..f2f9174129 100644
--- a/xml/src/main/scala/akka/stream/alpakka/xml/model.scala
+++ b/xml/src/main/scala/akka/stream/alpakka/xml/model.scala
@@ -6,8 +6,8 @@ package akka.stream.alpakka.xml
 
 import java.util.Optional
 
-import scala.collection.JavaConverters._
-import scala.compat.java8.OptionConverters._
+import scala.jdk.CollectionConverters._
+import scala.jdk.OptionConverters._
 
 /**
  * XML parsing events emitted by the parser flow. These roughly correspond to Java XMLEvent types.
@@ -44,7 +44,7 @@ case object EndDocument extends ParseEvent {
 final case class Namespace(uri: String, prefix: Option[String] = None) {
 
   /** Java API */
-  def getPrefix(): java.util.Optional[String] = prefix.asJava
+  def getPrefix(): java.util.Optional[String] = prefix.toJava
 }
 
 object Namespace {
@@ -53,7 +53,7 @@ object Namespace {
    * Java API
    */
   def create(uri: String, prefix: Optional[String]) =
-    Namespace(uri, prefix.asScala)
+    Namespace(uri, prefix.toScala)
 }
 
@@ -63,10 +63,10 @@ final case class Attribute(name: String,
                            namespace: Option[String] = None) {
 
   /** Java API */
-  def getPrefix(): java.util.Optional[String] = prefix.asJava
+  def getPrefix(): java.util.Optional[String] = prefix.toJava
 
   /** Java API */
-  def getNamespace(): java.util.Optional[String] = namespace.asJava
+  def getNamespace(): java.util.Optional[String] = namespace.toJava
 }
 
 object Attribute {
@@ -75,7 +75,7 @@ object Attribute {
    * Java API
    */
   def create(name: String, value: String, prefix: Optional[String], namespace: Optional[String]) =
-    Attribute(name, value, prefix.asScala, namespace.asScala)
+    Attribute(name, value, prefix.toScala, namespace.toScala)
 
   /**
    * Java API
@@ -99,10 +99,10 @@ final case class StartElement(localName: String,
   def getAttributes(): java.util.Map[String, String] = attributes.asJava
 
   /** Java API */
-  def getPrefix(): java.util.Optional[String] = prefix.asJava
+  def getPrefix(): java.util.Optional[String] = prefix.toJava
 
   /** Java API */
-  def getNamespace(): java.util.Optional[String] = namespace.asJava
+  def getNamespace(): java.util.Optional[String] = namespace.toJava
 
   /** Java API */
   def getNamespaceCtx(): java.util.List[Namespace] = namespaceCtx.asJava
@@ -134,8 +134,8 @@ object StartElement {
                  namespaceCtx: java.util.List[Namespace]): StartElement =
     new StartElement(localName,
                      attributesList.asScala.toList,
-                     prefix.asScala,
-                     namespace.asScala,
+                     prefix.toScala,
+                     namespace.toScala,
                      namespaceCtx.asScala.toList)
 
   /**
@@ -145,7 +145,7 @@ object StartElement {
                  attributesList: java.util.List[Attribute],
                  prefix: Optional[String],
                  namespace: Optional[String]): StartElement =
-    new StartElement(localName, attributesList.asScala.toList, prefix.asScala, namespace.asScala, List.empty[Namespace])
+    new StartElement(localName, attributesList.asScala.toList, prefix.toScala, namespace.toScala, List.empty[Namespace])
 
   /**
    * Java API
@@ -195,10 +195,10 @@ final case class ProcessingInstruction(target: Option[String], data: Option[Stri
   val marker = ParseEventMarker.XMLProcessingInstruction
 
   /** Java API */
-  def getTarget(): java.util.Optional[String] = target.asJava
+  def getTarget(): java.util.Optional[String] = target.toJava
 
   /** Java API */
-  def getData(): java.util.Optional[String] = data.asJava
+  def getData(): java.util.Optional[String] = data.toJava
 }
 
 object ProcessingInstruction {
@@ -207,7 +207,7 @@ object ProcessingInstruction {
    * Java API
    */
   def create(target: Optional[String], data: Optional[String]) =
-    ProcessingInstruction(target.asScala, data.asScala)
+    ProcessingInstruction(target.toScala, data.toScala)
 }
 
 final case class Comment(text: String) extends ParseEvent {