diff --git a/example/src/main/scala/akka/persistence/cassandra/example/EventProcessorStream.scala b/example/src/main/scala/akka/persistence/cassandra/example/EventProcessorStream.scala
index 3a046213..75bdcdd3 100644
--- a/example/src/main/scala/akka/persistence/cassandra/example/EventProcessorStream.scala
+++ b/example/src/main/scala/akka/persistence/cassandra/example/EventProcessorStream.scala
@@ -35,7 +35,7 @@ class EventProcessorStream[Event: ClassTag](
       .withBackoff(minBackoff = 500.millis, maxBackoff = 20.seconds, randomFactor = 0.1) { () =>
         Source.futureSource {
           readOffset().map { offset =>
-            log.infoN("Starting stream for tag [{}] from offset [{}]", tag, offset)
+            log.info("Starting stream for tag [{}] from offset [{}]", tag, offset)
             processEventsByTag(offset, histogram)
             // groupedWithin can be used here to improve performance by reducing number of offset writes,
             // with the trade-off of possibility of more duplicate events when stream is restarted
@@ -57,7 +57,7 @@ class EventProcessorStream[Event: ClassTag](
         if (latency < histogram.getMaxValue) {
           histogram.recordValue(latency)
         }
-        log.debugN(
+        log.debug(
          "Tag {} Event {} persistenceId {}, sequenceNr {}. Latency {}",
          tag,
          event,
diff --git a/example/src/main/scala/akka/persistence/cassandra/example/LoadGenerator.scala b/example/src/main/scala/akka/persistence/cassandra/example/LoadGenerator.scala
index 76933dd3..d6f7a81f 100644
--- a/example/src/main/scala/akka/persistence/cassandra/example/LoadGenerator.scala
+++ b/example/src/main/scala/akka/persistence/cassandra/example/LoadGenerator.scala
@@ -6,14 +6,14 @@ import akka.cluster.sharding.typed.ShardingEnvelope
 import com.typesafe.config.Config
 
 import scala.concurrent.duration.FiniteDuration
+import scala.jdk.DurationConverters._
 import scala.util.Random
-import akka.util.JavaDurationConverters._
 
 object LoadGenerator {
 
   object Settings {
     def apply(config: Config): Settings = {
-      Settings(config.getInt("persistence-ids"), config.getDuration("load-tick-duration").asScala)
+      Settings(config.getInt("persistence-ids"), config.getDuration("load-tick-duration").toScala)
     }
   }
 
diff --git a/example/src/main/scala/akka/persistence/cassandra/example/Main.scala b/example/src/main/scala/akka/persistence/cassandra/example/Main.scala
index f56a05bf..ebba4fe1 100644
--- a/example/src/main/scala/akka/persistence/cassandra/example/Main.scala
+++ b/example/src/main/scala/akka/persistence/cassandra/example/Main.scala
@@ -46,7 +46,7 @@ object Main {
 
       Behaviors.receiveMessage {
         case SelfUp(state) =>
-          ctx.log.infoN(
+          ctx.log.info(
             "Cluster member joined. Initializing persistent actors. Roles {}. Members {}",
             cluster.selfMember.roles,
             state.members)
diff --git a/example/src/main/scala/akka/persistence/cassandra/example/Reporter.scala b/example/src/main/scala/akka/persistence/cassandra/example/Reporter.scala
index a8925f3b..f9df8409 100644
--- a/example/src/main/scala/akka/persistence/cassandra/example/Reporter.scala
+++ b/example/src/main/scala/akka/persistence/cassandra/example/Reporter.scala
@@ -12,7 +12,7 @@ object Reporter {
       topic ! Topic.Subscribe(ctx.self)
       Behaviors.receiveMessage[ReadSideMetrics] {
         case ReadSideMetrics(count, max, p99, p50) =>
-          ctx.log.infoN("Read side Count: {} Max: {} p99: {} p50: {}", count, max, p99, p50)
+          ctx.log.info("Read side Count: {} Max: {} p99: {} p50: {}", count, max, p99, p50)
           Behaviors.same
       }
     }
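
Reviewer note: a minimal, standalone sketch (not part of the patch; the object name is made up) of the standard-library API the LoadGenerator change migrates to. `scala.jdk.DurationConverters` (Scala 2.13+) provides `.toScala`/`.toJava` extension methods, taking over from the Akka-internal `akka.util.JavaDurationConverters` and its `.asScala`:

```scala
import java.time.{ Duration => JavaDuration }

import scala.concurrent.duration.FiniteDuration
import scala.jdk.DurationConverters._

// Hypothetical example object, not part of the diff above.
object DurationConversionSketch extends App {
  // The kind of value Config.getDuration("load-tick-duration") returns: a java.time.Duration.
  val javaDuration: JavaDuration = JavaDuration.ofMillis(500)

  // .toScala converts java.time.Duration to scala.concurrent.duration.FiniteDuration,
  // which is what the example's Settings case class expects.
  val scalaDuration: FiniteDuration = javaDuration.toScala

  // .toJava converts back for APIs that expect java.time.Duration.
  val roundTripped: JavaDuration = scalaDuration.toJava

  println(s"scala: $scalaDuration, java: $roundTripped")
}
```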