From b2ff1544d45422aaf5d0972232c96be87584e533 Mon Sep 17 00:00:00 2001 From: nayaghma Date: Tue, 7 Jul 2020 13:20:30 -0500 Subject: [PATCH 01/29] sync with upstream --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index afa40cd21..b8bf93097 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ target/ scalafmt-output.xml dependency-reduced-pom.xml metastore_db +.DS_Store From 0c09744e2a1a0f97bffcd2dd08bdc5489f0eebcd Mon Sep 17 00:00:00 2001 From: Navid Yaghmazadeh Date: Tue, 8 Dec 2020 14:53:06 -0800 Subject: [PATCH 02/29] fix slow partition adj when a new partition added --- .../eventhubs/PartitionsStatusTracker.scala | 72 ++++++++++--------- .../spark/sql/eventhubs/EventHubsSource.scala | 24 ++++--- 2 files changed, 56 insertions(+), 40 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala index bff298078..8d4b2134f 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala @@ -30,32 +30,32 @@ class PartitionsStatusTracker extends Logging { import PartitionsStatusTracker._ - // retrives the batchStatus object based on the local batchId + // retrieves the batchStatus object based on the local batchId private val batchesStatusList = mutable.Map[Long, BatchStatus]() /** - * retirives the local batchId for a pair of (NameAndPartition, SequenceNumber) - * it's useful to accss the right batch when a performance metric message is - * received for a parition-RequestSeqNo pair. + * retrieves the local batchId for a pair of (NameAndPartition, SequenceNumber) + * it's useful to access the right batch when a performance metric message is + * received for a partition-RequestSeqNo pair. * it's getting updated every time a batch is removed or added to the tracker */ private val partitionSeqNoPairToBatchIdMap = mutable.Map[String, Long]() /** * Add a batch to the tracker by creating a BatchStatus object and adding it to the map. - * Also, we add mappings from each partition-startSeqNo pair to the batchId in order to be able to retrive + * Also, we add mappings from each partition-startSeqNo pair to the batchId in order to be able to retrieve * the batchStatus object from the map when the performance metric message is received. - * Note that we ignore partitions with batchSize zero (startSeqNo == latestSeqNo) since we won't recieve + * Note that we ignore partitions with batchSize zero (startSeqNo == latestSeqNo) since we won't receive * any performance metric message for such partitions. */ - def addorUpdateBatch(batchId: Long, offsetRanges: Array[OffsetRange]): Unit = { + def addOrUpdateBatch(batchId: Long, offsetRanges: Array[OffsetRange]): Unit = { if (batchesStatusList.contains(batchId)) { // Batches are not supposed to be updated. Log an error if a batch is being updated logError( s"Batch with local batch id: $batchId already exists in the partition status tracker. Batches" + s"are not supposed to be updated in the partition status tracker.") } else { - // remove the oldest batch from the batchesStatusList to realse space for adding the new batch. + // remove the oldest batch from the batchesStatusList to release space for adding the new batch. 
val batchIdToRemove = batchId - PartitionsStatusTracker.TrackingBatchCount logDebug( s"Remove the batch ${if (batchIdToRemove >= 0) batchIdToRemove else None} from the tracker.") @@ -64,7 +64,7 @@ class PartitionsStatusTracker extends Logging { } } - // find partitions with a zero size batch.. No performance metric msg will be received for those partitions + // find partitions with a zero size batch. No performance metric msg will be received for those partitions. val isZeroSizeBatchPartition: Map[NameAndPartition, Boolean] = offsetRanges.map(range => (range.nameAndPartition, (range.fromSeqNo == range.untilSeqNo)))( breakOut) @@ -99,7 +99,7 @@ class PartitionsStatusTracker extends Logging { } // remove the mapping from partition-seqNo pair to the batchId (ignore partitions with empty batch size) val batchStatus = batchesStatusList(batchId) - batchStatus.paritionsStatusList + batchStatus.partitionsStatusList .filter(p => !p._2.emptyBatch) .values .foreach(ps => { @@ -128,7 +128,7 @@ class PartitionsStatusTracker extends Logging { } /** - * return the batch id for a given parition-RequestSeqNo pair. + * return the batch id for a given partition-RequestSeqNo pair. * if the batch doesn't exist in the tracker, return BATCH_NOT_FOUND */ private def getBatchIdForPartitionSeqNoPair(nAndP: NameAndPartition, @@ -138,7 +138,7 @@ class PartitionsStatusTracker extends Logging { } /** - * update the partition perforamcne in the underlying batch based on the information received + * update the partition performance in the underlying batch based on the information received * from the executor node. This is a best effort logic, so if the batch doesn't exist in the * tracker simply assumes this is an old performance metric and ignores it. * @@ -159,7 +159,7 @@ class PartitionsStatusTracker extends Logging { s"in the partition status tracker. Assume the message is for an old batch, so ignore it.") return } - // find the batch in batchesStatusList and update the partition performacne in the batch + // find the batch in batchesStatusList and update the partition performance in the batch // if it doesn't exist there should be an error adding/removing batches in the tracker if (!batchesStatusList.contains(batchId)) { throw new IllegalStateException( @@ -171,9 +171,9 @@ class PartitionsStatusTracker extends Logging { } /** - * Checks the latest batch with enough updates and retruns the perforamnce percentage for each partition as a - * value between [0-1] where 0 means the partition is not responding and 1 means it's working wihtout any - * perforamnce issue. This information can be used to adjust the batch size for each partition in the next batch. + * Checks the latest batch with enough updates and returns the performance percentage for each partition as a + * value between [0-1] where 0 means the partition is not responding and 1 means it's working without any + * performance issue. This information can be used to adjust the batch size for each partition in the next batch. */ def partitionsPerformancePercentage(): Option[Map[NameAndPartition, Double]] = { // if there is no batch in the tracker, return None @@ -200,14 +200,14 @@ class PartitionsStatusTracker extends Logging { } case Some(batch) => { logDebug( - s"Batch ${batch.batchId} is the latest batch with enough updates. Caculate and return its perforamnce.") + s"Batch ${batch.batchId} is the latest batch with enough updates. 
Calculate and return its performance.") val performancePercentages = batch.getPerformancePercentages PartitionsStatusTracker.throttlingStatusPlugin.foreach( _.onPartitionsPerformanceStatusUpdate( partitionContext, batch.batchId, - batch.paritionsStatusList.map(par => (par._1, par._2.batchSize))(breakOut), - batch.paritionsStatusList + batch.partitionsStatusList.map(par => (par._1, par._2.batchSize))(breakOut), + batch.partitionsStatusList .map(par => (par._1, par._2.batchReceiveTimeInMillis))(breakOut), performancePercentages ) @@ -222,7 +222,7 @@ class PartitionsStatusTracker extends Logging { * Clean up the tracker. This will be called when the source has been stopped */ def cleanUp() = { - batchesStatusList.map(b => b._2.paritionsStatusList.clear) + batchesStatusList.map(b => b._2.partitionsStatusList.clear) batchesStatusList.clear partitionSeqNoPairToBatchIdMap.clear } @@ -260,6 +260,14 @@ object PartitionsStatusTracker { yield (NameAndPartition(pContext.eventHubName, pid), 1.0))(breakOut)) } + def updateDefaultValuesInTracker(numOfPartitions: Int) = { + partitionsCount = numOfPartitions; + enoughUpdatesCount = (partitionsCount / 2) + 1 + defaultPartitionsPerformancePercentage = Some( + (for (pid <- 0 until partitionsCount) + yield (NameAndPartition(partitionContext.eventHubName, pid), 1.0))(breakOut)) + } + private def partitionSeqNoKey(nAndP: NameAndPartition, seqNo: SequenceNumber): String = s"(name=${nAndP.ehName},pid=${nAndP.partitionId},startSeqNo=$seqNo)".toLowerCase @@ -268,7 +276,7 @@ object PartitionsStatusTracker { private[eventhubs] class BatchStatus( val batchId: Long, - val paritionsStatusList: mutable.Map[NameAndPartition, PartitionStatus]) + val partitionsStatusList: mutable.Map[NameAndPartition, PartitionStatus]) extends Logging { private var hasEnoughUpdates: Boolean = false @@ -278,16 +286,16 @@ private[eventhubs] class BatchStatus( def updatePartitionPerformance(nAndP: NameAndPartition, batchSize: Int, receiveTimeInMillis: Long): Unit = { - if (!paritionsStatusList.contains(nAndP)) { + if (!partitionsStatusList.contains(nAndP)) { throw new IllegalStateException( s"Partition $nAndP doesn't exist in the batch status for batchId $batchId. 
This is an illegal state that shouldn't happen.") } - paritionsStatusList(nAndP).updatePerformanceMetrics(batchSize, receiveTimeInMillis) + partitionsStatusList(nAndP).updatePerformanceMetrics(batchSize, receiveTimeInMillis) } def receivedEnoughUpdates: Boolean = { if (!hasEnoughUpdates) { - hasEnoughUpdates = paritionsStatusList.values + hasEnoughUpdates = partitionsStatusList.values .filter(par => par.hasBeenUpdated) .size >= PartitionsStatusTracker.enoughUpdatesCount } @@ -300,8 +308,8 @@ private[eventhubs] class BatchStatus( case None => { // just use partitions which have batchSize > 0 and have been updated logInfo( - s"Calculate partition performacne percenatges for batch = $batchId with partitions status = $paritionsStatusList") - val partitionsTimePerEvent = paritionsStatusList + s"Calculate partition performance percentages for batch = $batchId with partitions status = $partitionsStatusList") + val partitionsTimePerEvent = partitionsStatusList .filter(p => (p._2.hasBeenUpdated && !p._2.emptyBatch)) .values .map(ps => ps.timePerEventInMillis) @@ -313,7 +321,7 @@ private[eventhubs] class BatchStatus( s"so return None ") None } else if (allPartitionsFinishedWithinAcceptableTime) { - logInfo(s"All partitions are within the range of normal perforamnce because " + + logInfo(s"All partitions are within the range of normal performance because " + s"their receive time was less than ${PartitionsStatusTracker.acceptableBatchReceiveTimeInMs}.") PartitionsStatusTracker.defaultPartitionsPerformancePercentage } else { @@ -331,12 +339,12 @@ private[eventhubs] class BatchStatus( s" for updated partitions in the batch $batchId.") // update performance metrics in each paritition and return that mapping - paritionsStatusList.foreach(par => + partitionsStatusList.foreach(par => par._2.updatePerformancePercentage(avgTimePerEvent, stdDevTimePerEvent)) val ppp: Map[NameAndPartition, Double] = - paritionsStatusList.map(par => (par._1, par._2.performancePercentage))(breakOut) + partitionsStatusList.map(par => (par._1, par._2.performancePercentage))(breakOut) // if all partitions have been updated, save the result in performancePercentages - if (paritionsStatusList.values + if (partitionsStatusList.values .filter(ps => ps.hasBeenUpdated) .size == PartitionsStatusTracker.partitionsCount) { performancePercentages = Some(ppp) @@ -351,7 +359,7 @@ private[eventhubs] class BatchStatus( * its portion of events. If all of partitions are within this time frame it means none of those is slow. 
*/ private def allPartitionsFinishedWithinAcceptableTime: Boolean = { - val updatedPartitionsTime = paritionsStatusList + val updatedPartitionsTime = partitionsStatusList .filter(p => (p._2.hasBeenUpdated && !p._2.emptyBatch)) .values .map(ps => ps.batchReceiveTimeInMillis) @@ -364,7 +372,7 @@ private[eventhubs] class BatchStatus( } override def toString: String = { - s"BatchStatus(localBatchId=$batchId, PartitionsStatus=${paritionsStatusList.values.toString()})" + s"BatchStatus(localBatchId=$batchId, PartitionsStatus=${partitionsStatusList.values.toString()})" } } diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index 3d8205790..3887a5bd1 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -111,9 +111,16 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, var partitionsThrottleFactor: mutable.Map[NameAndPartition, Double] = (for (pid <- 0 until partitionCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) - val defaultPartitionsPerformancePercentage: Map[NameAndPartition, Double] = + var defaultPartitionsPerformancePercentage: Map[NameAndPartition, Double] = (for (pid <- 0 until partitionCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) + private def updatePartitionCountInPartitionsStatusTracker(numberOfPartitions: Int) = { + logInfo(s"Update the partitionCount to ${numberOfPartitions} in the PartitionsStatusTracker.") + PartitionsStatusTracker.updateDefaultValuesInTracker(numberOfPartitions) + defaultPartitionsPerformancePercentage = + (for (pid <- 0 until numberOfPartitions) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) + } + private lazy val initialPartitionSeqNos = { val metadataLog = new HDFSMetadataLog[EventHubsSourceOffset](sqlContext.sparkSession, metadataPath) { @@ -293,18 +300,18 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, .get(nameAndPartition) .map { size => val begin = from.getOrElse(nameAndPartition, fromNew(nameAndPartition)) - // adjust performance performance pewrcentages to use as much as events possible in the batch - val perforamnceFactor: Double = if (slowPartitionAdjustment) { - partitionsPerformancePercentage(nameAndPartition) + // adjust performance percentages to use as much as events possible in the batch + val performanceFactor: Double = if (slowPartitionAdjustment) { + partitionsPerformancePercentage.getOrElse(nameAndPartition, 1.0) } else 1.0 if (slowPartitionAdjustment) { - partitionsThrottleFactor(nameAndPartition) = perforamnceFactor + partitionsThrottleFactor(nameAndPartition) = performanceFactor logInfo( s"Slow partition adjustment is on, so prorate amount for $nameAndPartition will be adjusted by" + - s" the perfromanceFactor = $perforamnceFactor") + s" the performanceFactor = $performanceFactor") } - val prorate = limit * (size / total) * perforamnceFactor + val prorate = limit * (size / total) * performanceFactor logDebug(s"rateLimit $nameAndPartition prorated amount is $prorate") // Don't completely starve small partitions val off = begin + (if (prorate < 1) Math.ceil(prorate) else Math.floor(prorate)).toLong @@ -353,6 +360,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, val startingSeqNos = if (prevOffsets.size < untilSeqNos.size) { logInfo( s"Number of partitions has increased from 
${prevOffsets.size} to ${untilSeqNos.size}") + updatePartitionCountInPartitionsStatusTracker(partitionCount) val defaultSeqNos = ehClient .translate(ehConf, partitionCount) .map { @@ -425,7 +433,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, localBatchId += 1 logDebug( s"Slow partition adjustment is on, add the current batch $localBatchId to the tracker.") - partitionsStatusTracker.addorUpdateBatch(localBatchId, offsetRanges) + partitionsStatusTracker.addOrUpdateBatch(localBatchId, offsetRanges) } /** From 7c9e7e3e543fba29c1058e768e4c9a479e37f665 Mon Sep 17 00:00:00 2001 From: nyaghma Date: Tue, 14 Jul 2020 14:56:06 -0500 Subject: [PATCH 03/29] Spark3 compatibility (#520) --- .../spark/sql/eventhubs/EventHubsSource.scala | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index 3f9fd8c60..bcde2aca3 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -102,7 +102,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, val indexOfNewLine = content.indexOf("\n") if (indexOfNewLine > 0) { val version = - parseVersion(content.substring(0, indexOfNewLine), VERSION) + parseLogVersion(content.substring(0, indexOfNewLine), VERSION) EventHubsSourceOffset(SerializedOffset(content.substring(indexOfNewLine + 1))) } else { throw new IllegalStateException("Log file was malformed.") @@ -111,6 +111,31 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, EventHubsSourceOffset(SerializedOffset(content)) // Spark 2.1 log file } } + + private def parseLogVersion(text: String, maxSupportedVersion: Int): Int = { + if (text.length > 0 && text(0) == 'v') { + val version = + try { + text.substring(1, text.length).toInt + } catch { + case _: NumberFormatException => + throw new IllegalStateException(s"Log file was malformed: failed to read correct log " + + s"version from $text.") + } + if (version > 0) { + if (version > maxSupportedVersion) { + throw new IllegalStateException(s"UnsupportedLogVersion: maximum supported log version " + + s"is v${maxSupportedVersion}, but encountered v$version. The log file was produced " + + s"by a newer version of Spark and cannot be read by this version. 
Please upgrade.") + } else { + return version + } + } + } + // reaching here means we failed to read the correct log version + throw new IllegalStateException(s"Log file was malformed: failed to read correct log " + + s"version from $text.") + } } metadataLog From efecbffaf76a950475732d2b9ece47abaea2d56e Mon Sep 17 00:00:00 2001 From: nyaghma Date: Fri, 24 Jul 2020 12:58:49 -0500 Subject: [PATCH 04/29] Update option keys (#521) --- .../main/scala/org/apache/spark/eventhubs/EventHubsConf.scala | 1 + .../org/apache/spark/sql/eventhubs/EventHubsSource.scala | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala index 985e93cf0..266b45b28 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala @@ -564,6 +564,7 @@ object EventHubsConf extends Logging { val ThreadPoolSizeKey = "eventhubs.threadPoolSize" val UseExclusiveReceiverKey = "eventhubs.useExclusiveReceiver" val MaxEventsPerTriggerKey = "maxEventsPerTrigger" + val MaxEventsPerTriggerKeyAlias = "eventhubs.maxEventsPerTrigger" val UseSimulatedClientKey = "useSimulatedClient" val MetricPluginKey = "eventhubs.metricPlugin" val PartitionPreferredLocationStrategyKey = "partitionPreferredLocationStrategy" diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index bcde2aca3..fe87ce5b7 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -80,7 +80,9 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, private val sc = sqlContext.sparkContext private val maxOffsetsPerTrigger: Option[Long] = - Option(parameters.get(MaxEventsPerTriggerKey).map(_.toLong).getOrElse(partitionCount * 1000)) + Option(parameters.get(MaxEventsPerTriggerKey).map(_.toLong).getOrElse( + parameters.get(MaxEventsPerTriggerKeyAlias).map(_.toLong).getOrElse( + partitionCount * 1000))) private lazy val initialPartitionSeqNos = { val metadataLog = From b14632db87c5d33f0db82a67e4af0a380d5fdad7 Mon Sep 17 00:00:00 2001 From: nyaghma Date: Fri, 24 Jul 2020 15:14:38 -0500 Subject: [PATCH 05/29] Slow partitions adjustment (#512) --- .DS_Store | Bin 0 -> 6148 bytes core/.DS_Store | Bin 0 -> 6148 bytes .../spark/eventhubs/EventHubsConf.scala | 54 ++- .../PartitionPerformanceReceiver.scala | 69 +++ .../eventhubs/PartitionsStatusTracker.scala | 410 ++++++++++++++++++ .../client/CachedEventHubsReceiver.scala | 39 +- .../org/apache/spark/eventhubs/package.scala | 2 + .../utils/SimpleThrottlingStatusPlugin.scala | 46 ++ .../SimulatedPartitionStatusTracker.scala | 47 ++ .../utils/ThrottlingStatusPlugin.scala | 36 ++ .../spark/sql/eventhubs/EventHubsSource.scala | 80 +++- .../eventhubs/EventHubsSourceProvider.scala | 3 +- .../spark/eventhubs/EventHubsConfSuite.scala | 37 +- .../utils/ThrottlingStatusPluginMock.scala | 38 ++ .../sql/eventhubs/EventHubsSinkSuite.scala | 3 +- .../sql/eventhubs/EventHubsSourceSuite.scala | 306 ++++++++++++- 16 files changed, 1145 insertions(+), 25 deletions(-) create mode 100644 .DS_Store create mode 100644 core/.DS_Store create mode 100644 core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala create mode 100644 
core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala create mode 100644 core/src/main/scala/org/apache/spark/eventhubs/utils/SimpleThrottlingStatusPlugin.scala create mode 100644 core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedPartitionStatusTracker.scala create mode 100644 core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala create mode 100644 core/src/test/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPluginMock.scala diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..0367a9ad152b5d9111589eee96b07a817f5a1036 GIT binary patch literal 6148 zcmeHK!A=4(5N!dqV2m70G@g3#hRFgRxR`7Z58h1H=s^wa3Xx54OJH%Mh}r+pKk@_o z9cS7?BxvHv7@0|?Z#tc6)3=*W%NXPJQM=4okufGf5i@ycz7QNoU673RAaa}|NQV8O zrvm7|Eku*!FEW7NF2`<|VguIs{QZJ{FC4~>^UgEb+{)^j5Cu`#C|rA8m3XlqkA`)B zaEZNBKkf$BK2xeUv;E*a>?FS_j%lOB05R}W2Jn24pop%;)Sx~(puy`S zj^_|jz{a-(qO|B*Obx;a2sf#KCY9SK1~=*8mp0C|m>M+cjO&$Q9=kHPFBGm<2fx(e zjJpP@B?gFrc?PnkTg3W*{QdiXK8bq705R~d7~r{v*QmpmY;9fG9M)P1+5$zvxYXb* k1q@w^Ar?#V4yYFJOEdsoi>X2IfY3!i(?AU|@T&}b038fb2LJ#7 literal 0 HcmV?d00001 diff --git a/core/.DS_Store b/core/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..e5d4565f5f75c2f1566478aed1500cbb5404ee9a GIT binary patch literal 6148 zcmeHK!A=4(5N!dmV2m70@Ystt4xk3Sm~0Ra-b~i$K@IGN4Q#UR5_Xa3irLT5Kk^U! z9cS7?LePU3V`L_ozUg$PP2X-hEn|$gC;bLv6~>qVMa-3;`9g3UbwzT*C2z6T+<*M8WKrtN4F_G2}Vtp|iC ziBh*xP1DwKO*WddnoJu9&3a89wGL;qlBn)CPR@Jxk5B3I?B!Jx%i$kXvSV-#Z(uC# z@xdO4K@<+aKhK%R5)uQ%05Px*44AV|Y^}p)X^+GJG4LY>@O+S;YL14Y5OOyeR2 j3{{FD7EAFSs1oo?GypAwnMUw{&_zJeKn*eQs| Duration.parse(str)) + } + + def setThrottlingStatusPlugin(throttlingStatusPlugin: ThrottlingStatusPlugin): EventHubsConf = { + set(ThrottlingStatusPluginKey, throttlingStatusPlugin.getClass.getName) + } + + def throttlingStatusPlugin(): Option[ThrottlingStatusPlugin] = { + self.get(ThrottlingStatusPluginKey) map (className => { + Class.forName(className).newInstance().asInstanceOf[ThrottlingStatusPlugin] + }) + } + /** * Set the size of thread pool. * Default: [[DefaultUseExclusiveReceiver]] @@ -568,6 +615,9 @@ object EventHubsConf extends Logging { val UseSimulatedClientKey = "useSimulatedClient" val MetricPluginKey = "eventhubs.metricPlugin" val PartitionPreferredLocationStrategyKey = "partitionPreferredLocationStrategy" + val SlowPartitionAdjustmentKey = "eventhubs.slowPartitionAdjustment" + val ThrottlingStatusPluginKey = "eventhubs.throttlingStatusPlugin" + val MaxAcceptableBatchReceiveTimeKey = "eventhubs.maxAcceptableBatchReceiveTime" /** Creates an EventHubsConf */ def apply(connectionString: String) = new EventHubsConf(connectionString) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala new file mode 100644 index 000000000..ff03fe5f3 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.eventhubs + +import java.time.Duration + +import org.apache.spark.internal.Logging +import org.apache.spark.rpc.{ RpcEndpoint, RpcEnv } +import org.apache.spark.SparkContext + +private[spark] class PartitionPerformanceReceiver(override val rpcEnv: RpcEnv, + val statusTracker: PartitionsStatusTracker) + extends RpcEndpoint + with Logging { + + override def onStart(): Unit = { + logInfo("Start PartitionPerformanceReceiver RPC endpoint") + } + + override def receive: PartialFunction[Any, Unit] = { + case ppm: PartitionPerformanceMetric => { + logDebug(s"Received PartitionPerformanceMetric $ppm") + statusTracker.updatePartitionPerformance(ppm.nAndP, + ppm.requestSeqNo, + ppm.batchSize, + ppm.receiveTimeInMillis) + } + case _ => { + logError(s"Received an unknown message in PartitionPerformanceReceiver. It's not acceptable!") + } + } + + override def onStop(): Unit = { + logInfo("Stop PartitionPerformanceReceiver RPC endpoint") + } +} + +case class PartitionPerformanceMetric(val nAndP: NameAndPartition, + val taskContextSlim: TaskContextSlim, + //val executorId: String, + //val taskId: Long, + val requestSeqNo: SequenceNumber, + val batchSize: Int, + val receiveTimeInMillis: Long) { + + override def toString: String = { + s"PartitionPerformanceMetric received from task: $taskContextSlim for partition: $nAndP, requestSeqNo: $requestSeqNo. " + + s"Batch size: $batchSize, elapsed time: $receiveTimeInMillis (ms)." + } +} + +private[spark] object PartitionPerformanceReceiver { + val ENDPOINT_NAME = "PartitionPerformanceReceiver" +} diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala new file mode 100644 index 000000000..dd8fe2ee1 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala @@ -0,0 +1,410 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.eventhubs + +import java.util.logging.Logger + +import scala.collection.mutable +import scala.collection.breakOut +import org.apache.spark.eventhubs.rdd.OffsetRange +import org.apache.spark.eventhubs.utils.ThrottlingStatusPlugin +import org.apache.spark.internal.Logging + +class PartitionsStatusTracker extends Logging { + + import PartitionsStatusTracker._ + + // retrives the batchStatus object based on the local batchId + private val batchesStatusList = mutable.Map[Long, BatchStatus]() + + /** + * retirives the local batchId for a pair of (NameAndPartition, SequenceNumber) + * it's useful to accss the right batch when a performance metric message is + * received for a parition-RequestSeqNo pair. + * it's getting updated every time a batch is removed or added to the tracker + */ + private val partitionSeqNoPairToBatchIdMap = mutable.Map[String, Long]() + + /** + * Add a batch to the tracker by creating a BatchStatus object and adding it to the map. + * Also, we add mappings from each partition-startSeqNo pair to the batchId in order to be able to retrive + * the batchStatus object from the map when the performance metric message is received. + * Note that we ignore partitions with batchSize zero (startSeqNo == latestSeqNo) since we won't recieve + * any performance metric message for such partitions. + */ + def addorUpdateBatch(batchId: Long, offsetRanges: Array[OffsetRange]): Unit = { + if (batchesStatusList.contains(batchId)) { + // Batches are not supposed to be updated. Log an error if a batch is being updated + logError( + s"Batch with local batch id: $batchId already exists in the partition status tracker. Batches" + + s"are not supposed to be updated in the partition status tracker.") + } else { + // remove the oldest batch from the batchesStatusList to realse space for adding the new batch. + val batchIdToRemove = batchId - PartitionsStatusTracker.TrackingBatchCount + logDebug( + s"Remove the batch ${if (batchIdToRemove >= 0) batchIdToRemove else None} from the tracker.") + if (batchIdToRemove >= 0) { + removeBatch(batchIdToRemove) + } + } + + // find partitions with a zero size batch.. No performance metric msg will be received for those partitions + val isZeroSizeBatchPartition: Map[NameAndPartition, Boolean] = + offsetRanges.map(range => (range.nameAndPartition, (range.fromSeqNo == range.untilSeqNo)))( + breakOut) + + // create the batchStatus tracker and add it to the map + batchesStatusList(batchId) = new BatchStatus(batchId, offsetRanges.map(range => { + val np = range.nameAndPartition + (np, new PartitionStatus(np, range.fromSeqNo, isZeroSizeBatchPartition(np))) + })(breakOut)) + + // add the mapping from partition-startSeqNo pair to the batchId ... ignore partitions with zero batch size + offsetRanges + .filter(r => !isZeroSizeBatchPartition(r.nameAndPartition)) + .foreach(range => { + val key = partitionSeqNoKey(range.nameAndPartition, range.fromSeqNo) + addPartitionSeqNoToBatchIdMapping(key, batchId) + }) + } + + /** + * Remove a batch from the tracker by finding and deleting the batchStatus object from the map. + * Also, we remove mappings from each partition-startSeqNo pair to the batchId since this batch is old + * and should not be updated even if we receive a performacne metric message for it later. + * Not that we ignore partitions with batchSize zero (PartitionStatus.emptyBatch) since those haven't been + * added to the mapping when we added the batch to the tracker. 
[See addBatch] + */ + private def removeBatch(batchId: Long): Unit = { + if (!batchesStatusList.contains(batchId)) { + logInfo( + s"Batch with local batchId = $batchId doesn't exist in the batch status tracker, so it can't be removed.") + return + } + // remove the mapping from partition-seqNo pair to the batchId (ignore partitions with empty batch size) + val batchStatus = batchesStatusList(batchId) + batchStatus.paritionsStatusList + .filter(p => !p._2.emptyBatch) + .values + .foreach(ps => { + val key = partitionSeqNoKey(ps.nAndP, ps.requestSeqNo) + removePartitionSeqNoToBatchIdMapping(key) + }) + // remove the batchStatus tracker from the map + batchesStatusList.remove(batchId) + } + + private def addPartitionSeqNoToBatchIdMapping(key: String, batchId: Long): Unit = { + if (partitionSeqNoPairToBatchIdMap.contains(key)) { + throw new IllegalStateException( + s"Partition-startSeqNo pair $key is already mapped to the batchId = " + + s"${partitionSeqNoPairToBatchIdMap.get(key)}, so cant be reassigned to batchId = $batchId") + } + partitionSeqNoPairToBatchIdMap(key) = batchId + } + + private def removePartitionSeqNoToBatchIdMapping(key: String): Unit = { + if (!partitionSeqNoPairToBatchIdMap.contains(key)) { + throw new IllegalStateException( + s"Partition-startSeqNo pair $key doesn't exist in the partitionSeqNoPairToBatchIdMap, so it can't be removed.") + } + partitionSeqNoPairToBatchIdMap.remove(key) + } + + /** + * return the batch id for a given parition-RequestSeqNo pair. + * if the batch doesn't exist in the tracker, return BATCH_NOT_FOUND + */ + private def getBatchIdForPartitionSeqNoPair(nAndP: NameAndPartition, + seqNo: SequenceNumber): Long = { + val key = partitionSeqNoKey(nAndP, seqNo) + partitionSeqNoPairToBatchIdMap.getOrElse(key, BatchNotFound) + } + + /** + * update the partition perforamcne in the underlying batch based on the information received + * from the executor node. This is a best effort logic, so if the batch doesn't exist in the + * tracker simply assumes this is an old performance metric and ignores it. + * + * @param nAndP Name and Id of the partition + * @param requestSeqNo requestSeqNo in the batch which help to identify the local batch id in combination with nAndP + * @param batchSize number of events received by this partition in the batch + * @param receiveTimeInMillis time (in MS) that took the partition to received the events in the batch + */ + def updatePartitionPerformance(nAndP: NameAndPartition, + requestSeqNo: SequenceNumber, + batchSize: Int, + receiveTimeInMillis: Long): Unit = { + // find the batchId based on partition-requestSeqNo pair in the partitionSeqNoPairToBatchIdMap ... ignore if it doesn't exist + val batchId = getBatchIdForPartitionSeqNoPair(nAndP, requestSeqNo) + if (batchId == BatchNotFound) { + logInfo( + s"Can't find the corresponding batchId for the partition-requestSeqNo pair ($nAndP, $requestSeqNo) " + + s"in the partition status tracker. 
Assume the message is for an old batch, so ignore it.") + return + } + // find the batch in batchesStatusList and update the partition performacne in the batch + // if it doesn't exist there should be an error adding/removing batches in the tracker + if (!batchesStatusList.contains(batchId)) { + throw new IllegalStateException( + s"Batch with local batch id = $batchId doesn't exist in the partition status tracker, while mapping " + + s"from a partition-seqNo to this batchId exists in the partition status tracker.") + } + val batchStatus = batchesStatusList(batchId) + batchStatus.updatePartitionPerformance(nAndP, batchSize, receiveTimeInMillis) + } + + /** + * Checks the latest batch with enough updates and retruns the perforamnce percentage for each partition as a + * value between [0-1] where 0 means the partition is not responding and 1 means it's working wihtout any + * perforamnce issue. This information can be used to adjust the batch size for each partition in the next batch. + */ + def partitionsPerformancePercentage(): Option[Map[NameAndPartition, Double]] = { + // if there is no batch in the tracker, return None + if (batchesStatusList.isEmpty) { + logDebug(s"There is no batch in the tracker, so return None") + None + } else { + // find the latest batch with enough updates + // In Scala 2.13 we can use: val latestUpdatedBatch = batchesStatusList.maxByOption(b => b._2.receivedEnoughUpdates) + implicit val ordering = new Ordering[(Long, BatchStatus)] { + override def compare(x: (Long, BatchStatus), y: (Long, BatchStatus)): Int = + (x._1 - y._1).toInt + } + val batchesWithEnoughUpdates = batchesStatusList.filter(b => b._2.receivedEnoughUpdates) + val latestUpdatedBatch: Option[BatchStatus] = + if (batchesWithEnoughUpdates.isEmpty) None else Some(batchesWithEnoughUpdates.max._2) + + latestUpdatedBatch match { + case None => { + logDebug( + s"No batch has ${PartitionsStatusTracker.enoughUpdatesCount} partitions with updates (enough updates), " + + s"so return None") + None + } + case Some(batch) => { + logDebug( + s"Batch ${batch.batchId} is the latest batch with enough updates. Caculate and return its perforamnce.") + val performancePercentages = batch.getPerformancePercentages + PartitionsStatusTracker.throttlingStatusPlugin.foreach( + _.onPartitionsPerformanceStatusUpdate( + batch.batchId, + batch.paritionsStatusList.map(par => (par._1, par._2.batchSize))(breakOut), + batch.paritionsStatusList + .map(par => (par._1, par._2.batchReceiveTimeInMillis))(breakOut), + performancePercentages + ) + ) + performancePercentages + } + } + } + } + + /** + * Clean up the tracker. 
This will be called when the source has been stopped
+   */
+  def cleanUp() = {
+    batchesStatusList.map(b => b._2.paritionsStatusList.clear)
+    batchesStatusList.clear
+    partitionSeqNoPairToBatchIdMap.clear
+  }
+
+  /**
+   * This method is being used for testing
+   */
+  def batchIdsInTracker: scala.collection.Set[Long] = {
+    this.batchesStatusList.keySet
+  }
+}
+
+object PartitionsStatusTracker {
+  private val _partitionsStatusTrackerInstance = new PartitionsStatusTracker
+  private val TrackingBatchCount = 3
+  val BatchNotFound: Long = -1
+  var acceptableBatchReceiveTimeInMs: Long = DefaultMaxAcceptableBatchReceiveTime.toMillis
+  var partitionsCount: Int = 1
+  var enoughUpdatesCount: Int = 1
+  var throttlingStatusPlugin: Option[ThrottlingStatusPlugin] = None
+  var defaultPartitionsPerformancePercentage: Option[Map[NameAndPartition, Double]] = None
+
+  def setDefaultValuesInTracker(numOfPartitions: Int,
+                                ehName: String,
+                                maxBatchReceiveTime: Long,
+                                throttlingSP: Option[ThrottlingStatusPlugin]) = {
+    partitionsCount = numOfPartitions
+    acceptableBatchReceiveTimeInMs = maxBatchReceiveTime
+    enoughUpdatesCount = (partitionsCount / 2) + 1
+    throttlingStatusPlugin = throttlingSP
+    defaultPartitionsPerformancePercentage = Some(
+      (for (pid <- 0 until partitionsCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut))
+  }
+
+  private def partitionSeqNoKey(nAndP: NameAndPartition, seqNo: SequenceNumber): String =
+    s"(name=${nAndP.ehName},pid=${nAndP.partitionId},startSeqNo=$seqNo)".toLowerCase
+
+  def getPartitionStatusTracker: PartitionsStatusTracker = _partitionsStatusTrackerInstance
+}
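[Editor's note: an illustrative sketch, not part of this commit. This is the scoring rule that
BatchStatus.getPerformancePercentages below implements, reduced to a self-contained snippet
with hypothetical ms-per-event values. A partition is marked slow only when its time per event
is more than one standard deviation above the mean; its share then shrinks to avg / timePerEvent,
while every other partition stays at 1.0.]

    object ScoringRuleSketch {
      def main(args: Array[String]): Unit = {
        // hypothetical partition -> ms/event measurements for one batch
        val timePerEvent = Map(0 -> 2.0, 1 -> 2.2, 2 -> 9.0, 3 -> 1.8)
        val avg = timePerEvent.values.sum / timePerEvent.size
        val stdDev = math.sqrt(
          timePerEvent.values.map(t => math.pow(t - avg, 2)).sum / timePerEvent.size)
        val percentages = timePerEvent.map {
          case (pid, t) => (pid, if (t > avg + stdDev) avg / t else 1.0)
        }
        // avg = 3.75, stdDev ~= 3.03: only partition 2 (9.0 ms/event) is beyond avg + stdDev
        println(percentages) // Map(0 -> 1.0, 1 -> 1.0, 2 -> 0.416..., 3 -> 1.0)
      }
    }

+
+private[eventhubs] class BatchStatus(
+    val batchId: Long,
+    val paritionsStatusList: mutable.Map[NameAndPartition, PartitionStatus])
+    extends Logging {
+
+  private var hasEnoughUpdates: Boolean = false
+
+  private var performancePercentages: Option[Map[NameAndPartition, Double]] = None
+
+  def updatePartitionPerformance(nAndP: NameAndPartition,
+                                 batchSize: Int,
+                                 receiveTimeInMillis: Long): Unit = {
+    if (!paritionsStatusList.contains(nAndP)) {
+      throw new IllegalStateException(
+        s"Partition $nAndP doesn't exist in the batch status for batchId $batchId. 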
This is an illegal state that shouldn't happen.") + } + paritionsStatusList(nAndP).updatePerformanceMetrics(batchSize, receiveTimeInMillis) + } + + def receivedEnoughUpdates: Boolean = { + if (!hasEnoughUpdates) { + hasEnoughUpdates = paritionsStatusList.values + .filter(par => par.hasBeenUpdated) + .size >= PartitionsStatusTracker.enoughUpdatesCount + } + hasEnoughUpdates + } + + def getPerformancePercentages: Option[Map[NameAndPartition, Double]] = + performancePercentages match { + case Some(completedPerformancePercentages) => performancePercentages + case None => { + // just use partitions which have batchSize > 0 and have been updated + logInfo( + s"Calculate partition performacne percenatges for batch = $batchId with partitions status = $paritionsStatusList") + val partitionsTimePerEvent = paritionsStatusList + .filter(p => (p._2.hasBeenUpdated && !p._2.emptyBatch)) + .values + .map(ps => ps.timePerEventInMillis) + + // check if there is no updated partition with batchSize > 0 + if (partitionsTimePerEvent.isEmpty) { + logInfo( + s"There is no updated partition with batchSize greater than 0 in batch $batchId, " + + s"so return None ") + None + } else if (allPartitionsFinishedWithinAcceptableTime) { + logInfo(s"All partitions are within the range of normal perforamnce because " + + s"their receive time was less than ${PartitionsStatusTracker.acceptableBatchReceiveTimeInMs}.") + PartitionsStatusTracker.defaultPartitionsPerformancePercentage + } else { + // calculate the standard deviation + val avgTimePerEvent + : Double = partitionsTimePerEvent.sum.toDouble / partitionsTimePerEvent.size + val stdDevTimePerEvent: Double = math.sqrt( + partitionsTimePerEvent + .map(_.toDouble) + .map(time => math.pow(time - avgTimePerEvent, 2)) + .sum / partitionsTimePerEvent.size) + // average + standard deviation can't go beyond the receiver timeout + logInfo( + s"Calculated the average time per event = $avgTimePerEvent and the standard deviation = $stdDevTimePerEvent" + + s" for updated partitions in the batch $batchId.") + + // update performance metrics in each paritition and return that mapping + paritionsStatusList.foreach(par => + par._2.updatePerformancePercentage(avgTimePerEvent, stdDevTimePerEvent)) + val ppp: Map[NameAndPartition, Double] = + paritionsStatusList.map(par => (par._1, par._2.performancePercentage))(breakOut) + // if all partitions have been updated, save the result in performancePercentages + if (paritionsStatusList.values + .filter(ps => ps.hasBeenUpdated) + .size == PartitionsStatusTracker.partitionsCount) { + performancePercentages = Some(ppp) + } + Some(ppp) + } + } + } + + /** + * Check if any partition takes more than PartitionsStatusTracker.acceptableBatchReceiveTimeImMs to receive + * its portion of events. If all of partitions are within this time frame it means none of those is slow. 
+ */ + private def allPartitionsFinishedWithinAcceptableTime: Boolean = { + val updatedPartitionsTime = paritionsStatusList + .filter(p => (p._2.hasBeenUpdated && !p._2.emptyBatch)) + .values + .map(ps => ps.batchReceiveTimeInMillis) + if (updatedPartitionsTime.isEmpty) + true + else { + val maxReceiveTime = updatedPartitionsTime.max + (maxReceiveTime < PartitionsStatusTracker.acceptableBatchReceiveTimeInMs) + } + } + + override def toString: String = { + s"BatchStatus(localBatchId=$batchId, PartitionsStatus=${paritionsStatusList.values.toString()})" + } +} + +private[eventhubs] class PartitionStatus(val nAndP: NameAndPartition, + val requestSeqNo: SequenceNumber, + val emptyBatch: Boolean) + extends Logging { + + // a partition with an empty batch (batchSize = 0) doesn't receive any update message from the executor + var hasBeenUpdated: Boolean = if (emptyBatch) true else false + + var performancePercentage: Double = 1 + + var batchSize: Int = if (emptyBatch) 0 else -1 + // total receive time for the batch in milli seconds + var batchReceiveTimeInMillis: Long = if (emptyBatch) 0 else -1 + + var timePerEventInMillis: Double = if (emptyBatch) 0 else -1 + + // Update the status of this partition with the received performance metrics + def updatePerformanceMetrics(bSize: Int, receiveTimeInMillis: Long): Any = { + this.batchSize = bSize + this.batchReceiveTimeInMillis = receiveTimeInMillis + this.hasBeenUpdated = true + if (batchSize != 0) + this.timePerEventInMillis = this.batchReceiveTimeInMillis.toDouble / this.batchSize + logDebug( + s"UpdatePerformanceMetrics for partition = $nAndP with request sequence number = $requestSeqNo contains" + + s" batchSize = $batchSize and total receive time(ms) = $batchReceiveTimeInMillis") + } + + def updatePerformancePercentage(averageTimePerEvent: Double, standardDeviation: Double): Unit = { + val averagePlusStdDev: Double = averageTimePerEvent + standardDeviation + if (!emptyBatch && hasBeenUpdated) { + if (timePerEventInMillis > averagePlusStdDev) { + performancePercentage = averageTimePerEvent / timePerEventInMillis + } + } + } + + override def toString: String = { + val partitionInfo: String = s"(${nAndP.ehName}/${nAndP.partitionId}/$requestSeqNo)" + if (hasBeenUpdated) + s"PartitionStatus[$partitionInfo -> (batchSize=$batchSize, time(ms)=$batchReceiveTimeInMillis, timePerEvent(ms)= $timePerEventInMillis)]" + else + s"PartitionStatus[$partitionInfo -> (No Update)]" + } +} diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala index aba1ddda3..96f9ca25a 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala @@ -29,9 +29,11 @@ import org.apache.spark.eventhubs.{ EventHubsConf, EventHubsUtils, NameAndPartition, - SequenceNumber + SequenceNumber, + PartitionPerformanceReceiver } import org.apache.spark.internal.Logging +import org.apache.spark.util.RpcUtils import scala.collection.JavaConverters._ import scala.concurrent.ExecutionContext.Implicits.global @@ -241,6 +243,17 @@ private[client] class CachedEventHubsReceiver private (ehConf: EventHubsConf, val (result, validate) = sorted.duplicate val elapsedTimeMs = TimeUnit.NANOSECONDS.toMillis(elapsedTimeNs) + + // if slowPartitionAdjustment is on, send the partition performance for this batch to the driver + if (ehConf.slowPartitionAdjustment) { + 
sendPartitionPerformanceToDriver(
+        PartitionPerformanceMetric(nAndP,
+                                   EventHubsUtils.getTaskContextSlim,
+                                   requestSeqNo,
+                                   batchCount,
+                                   elapsedTimeMs))
+    }
+
     if (metricPlugin.isDefined) {
       val (validateSize, batchSizeInBytes) = validate
@@ -279,6 +292,22 @@ private[client] class CachedEventHubsReceiver private (ehConf: EventHubsConf,
         throw e
     }
   }
+
+  // send the partition performance metric (elapsed time for receiving events in the batch) to the
+  // driver without waiting for any response.
+  private def sendPartitionPerformanceToDriver(partitionPerformance: PartitionPerformanceMetric) = {
+    logDebug(
+      s"(Task: ${EventHubsUtils.getTaskContextSlim}) sends PartitionPerformanceMetric: " +
+        s"$partitionPerformance to the driver.")
+    try {
+      CachedEventHubsReceiver.partitionPerformanceReceiverRef.send(partitionPerformance)
+    } catch {
+      case e: Exception =>
+        logError(
+          s"(Task: ${EventHubsUtils.getTaskContextSlim}) failed to send the RPC message containing " +
+            s"PartitionPerformanceMetric: $partitionPerformance to the driver.")
+    }
+  }
 }
 
 /**
@@ -288,10 +317,18 @@ private[spark] object CachedEventHubsReceiver extends CachedReceiver with Logging {
 
+  private val startReceiverTimeNs = System.nanoTime()
+
   type MutableMap[A, B] = scala.collection.mutable.HashMap[A, B]
 
   private[this] val receivers = new MutableMap[String, CachedEventHubsReceiver]()
 
+  // RPC endpoint for partition performance communication in the executor
+  val partitionPerformanceReceiverRef =
+    RpcUtils.makeDriverRef(PartitionPerformanceReceiver.ENDPOINT_NAME,
+                           SparkEnv.get.conf,
+                           SparkEnv.get.rpcEnv)
+
   private def key(ehConf: EventHubsConf, nAndP: NameAndPartition): String = {
     (ehConf.connectionString + ehConf.consumerGroup + nAndP.partitionId).toLowerCase
   }
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/package.scala b/core/src/main/scala/org/apache/spark/eventhubs/package.scala
index 9f3b6b01e..31a3f98d4 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/package.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/package.scala
@@ -40,6 +40,7 @@ package object eventhubs {
   val DefaultMaxSilentTime: Duration = EventHubClientOptions.SILENT_OFF
   val MinSilentTime: Duration = EventHubClientOptions.SILENT_MINIMUM
   val DefaultOperationTimeout: Duration = Duration.ofSeconds(300)
+  val DefaultMaxAcceptableBatchReceiveTime: Duration = Duration.ofSeconds(30)
   val DefaultConsumerGroup: String = EventHubClient.DEFAULT_CONSUMER_GROUP_NAME
   val PrefetchCountMinimum: Int = PartitionReceiver.MINIMUM_PREFETCH_COUNT
   val PrefetchCountMaximum: Int = PartitionReceiver.MAXIMUM_PREFETCH_COUNT
@@ -48,6 +49,7 @@ package object eventhubs {
   val DefaultUseSimulatedClient = "false"
   val DefaultPartitionPreferredLocationStrategy = "Hash"
   val DefaultUseExclusiveReceiver = "true"
+  val DefaultSlowPartitionAdjustment = "false"
   val StartingSequenceNumber = 0L
   val DefaultThreadPoolSize = 16
   val DefaultEpoch = 0L
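[Editor's note: an illustrative sketch, not part of this commit, of how the new options and
defaults above are meant to be wired together from a streaming query. The setters
`setSlowPartitionAdjustment` and `setMaxAcceptableBatchReceiveTime` are assumed from this
commit's EventHubsConf changes (their hunks are abbreviated earlier in the patch);
`connectionString` and `spark` are placeholders.]

    import java.time.Duration
    import org.apache.spark.eventhubs.EventHubsConf
    import org.apache.spark.eventhubs.utils.SimpleThrottlingStatusPlugin

    // opt in to slow-partition adjustment ("eventhubs.slowPartitionAdjustment") and allow
    // up to 30s of receive time per batch before a partition can be considered slow
    // ("eventhubs.maxAcceptableBatchReceiveTime")
    val ehConf = EventHubsConf(connectionString)
      .setSlowPartitionAdjustment(true)
      .setMaxAcceptableBatchReceiveTime(Duration.ofSeconds(30))
      .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin) // logs throttling decisions

    val stream = spark.readStream
      .format("eventhubs")
      .options(ehConf.toMap)
      .load()

diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimpleThrottlingStatusPlugin.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimpleThrottlingStatusPlugin.scala
new file mode 100644
index 000000000..b7d751489
--- /dev/null
+++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimpleThrottlingStatusPlugin.scala
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. 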
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.eventhubs.utils + +import org.apache.spark.eventhubs.NameAndPartition +import org.apache.spark.eventhubs.rdd.OffsetRange +import scala.collection.mutable +import org.apache.spark.internal.Logging + +class SimpleThrottlingStatusPlugin extends ThrottlingStatusPlugin with Logging { + + override def onBatchCreation( + nextBatchLocalId: Long, + nextBatchOffsetRanges: Array[OffsetRange], + partitionsThrottleFactor: mutable.Map[NameAndPartition, Double]): Unit = { + log.info( + s"New Batch with localId = $nextBatchLocalId has been created with start and end offsets:" + + s"${nextBatchOffsetRanges} and partitions throttle factors: ${partitionsThrottleFactor}") + } + + override def onPartitionsPerformanceStatusUpdate( + latestUpdatedBatchLocalId: Long, + partitionsBatchSizes: Map[NameAndPartition, Int], + partitionsBatchReceiveTimeMS: Map[NameAndPartition, Long], + partitionsPerformancePercentages: Option[Map[NameAndPartition, Double]]): Unit = { + log.info( + s"Latest updated batch with localId = $latestUpdatedBatchLocalId received these information:" + + s"Batch size: ${partitionsBatchSizes}, batch receive times in ms: ${partitionsBatchReceiveTimeMS}, " + + s"performance percentages: ${partitionsPerformancePercentages}") + } +} diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedPartitionStatusTracker.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedPartitionStatusTracker.scala new file mode 100644 index 000000000..ba4ef4e1c --- /dev/null +++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedPartitionStatusTracker.scala @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.eventhubs.utils + +import org.apache.spark.eventhubs.{ NameAndPartition, PartitionsStatusTracker, SequenceNumber } +import scala.collection.breakOut + +private[spark] object SimulatedPartitionStatusTracker { + val sourceTracker = PartitionsStatusTracker.getPartitionStatusTracker + + def updatePartitionPerformance(nAndP: NameAndPartition, + requestSeqNo: SequenceNumber, + batchSize: Int, + receiveTimeInMillis: Long): Unit = { + sourceTracker.updatePartitionPerformance(nAndP, requestSeqNo, batchSize, receiveTimeInMillis) + } + + def getPerformancePercentages: Map[NameAndPartition, Double] = { + + sourceTracker.partitionsPerformancePercentage match { + case Some(percentages) => (percentages.map(par => (par._1, roundDouble(par._2, 2))))(breakOut) + case None => Map[NameAndPartition, Double]() + } + } + + private def roundDouble(num: Double, precision: Int): Double = { + val scale = Math.pow(10, precision) + Math.round(num * scale) / scale + } + + def currentBatchIdsInTracker: scala.collection.Set[Long] = sourceTracker.batchIdsInTracker +} diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala new file mode 100644 index 000000000..03c4330fa --- /dev/null +++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.eventhubs.utils + +import org.apache.spark.eventhubs.NameAndPartition +import org.apache.spark.eventhubs.rdd.OffsetRange + +import scala.collection.mutable + +trait ThrottlingStatusPlugin extends Serializable { + + def onBatchCreation(nextBatchLocalId: Long, + nextBatchOffsetRanges: Array[OffsetRange], + partitionsThrottleFactor: mutable.Map[NameAndPartition, Double]): Unit + + def onPartitionsPerformanceStatusUpdate( + latestUpdatedBatchLocalId: Long, + partitionsBatchSizes: Map[NameAndPartition, Int], + partitionsBatchReceiveTimeMS: Map[NameAndPartition, Long], + partitionsPerformancePercentages: Option[Map[NameAndPartition, Double]]): Unit +} diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index fe87ce5b7..07df27d63 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -19,13 +19,20 @@ package org.apache.spark.sql.eventhubs import java.io._ import java.nio.charset.StandardCharsets +import java.time.Duration + +import scala.collection.breakOut +import scala.collection.mutable import org.apache.commons.io.IOUtils import org.apache.spark.SparkContext import org.apache.spark.eventhubs.rdd.{ EventHubsRDD, OffsetRange } +import org.apache.spark.eventhubs.utils.ThrottlingStatusPlugin import org.apache.spark.eventhubs.{ EventHubsConf, NameAndPartition, _ } import org.apache.spark.internal.Logging +import org.apache.spark.rpc.RpcEndpointRef import org.apache.spark.scheduler.ExecutorCacheTaskLocation +import org.apache.spark.SparkEnv import org.apache.spark.sql.execution.streaming.{ HDFSMetadataLog, Offset, @@ -84,6 +91,25 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, parameters.get(MaxEventsPerTriggerKeyAlias).map(_.toLong).getOrElse( partitionCount * 1000))) + // set slow partition adjustment flag and static values in the tracker + private val slowPartitionAdjustment: Boolean = + parameters.get(SlowPartitionAdjustmentKey).getOrElse(DefaultSlowPartitionAdjustment).toBoolean + + private lazy val throttlingStatusPlugin: Option[ThrottlingStatusPlugin] = + ehConf.throttlingStatusPlugin() + + PartitionsStatusTracker.setDefaultValuesInTracker( + partitionCount, + ehName, + ehConf.maxAcceptableBatchReceiveTime.getOrElse(DefaultMaxAcceptableBatchReceiveTime).toMillis, + throttlingStatusPlugin) + + var partitionsThrottleFactor: mutable.Map[NameAndPartition, Double] = + (for (pid <- 0 until partitionCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) + + val defaultPartitionsPerformancePercentage: Map[NameAndPartition, Double] = + (for (pid <- 0 until partitionCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) + private lazy val initialPartitionSeqNos = { val metadataLog = new HDFSMetadataLog[EventHubsSourceOffset](sqlContext.sparkSession, metadataPath) { @@ -225,6 +251,16 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, from: Map[NameAndPartition, SequenceNumber], until: Map[NameAndPartition, SequenceNumber], fromNew: Map[NameAndPartition, SequenceNumber]): Map[NameAndPartition, SequenceNumber] = { + + // if slowPartitionAdjustment is on, get the latest partition performance percentages + val partitionsPerformancePercentage: Map[NameAndPartition, Double] = + if (slowPartitionAdjustment) { + partitionsStatusTracker.partitionsPerformancePercentage.getOrElse( + 
defaultPartitionsPerformancePercentage)
+      } else {
+        defaultPartitionsPerformancePercentage
+      }
+
     val sizes = until.flatMap {
       case (nameAndPartition, end) =>
         // If begin isn't defined, something's wrong, but let alert logic in getBatch handle it
@@ -244,7 +280,18 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext,
           .get(nameAndPartition)
           .map { size =>
             val begin = from.getOrElse(nameAndPartition, fromNew(nameAndPartition))
-            val prorate = limit * (size / total)
+            // adjust the prorated amount by the partition's performance percentage so that
+            // the batch uses as many events as possible
+            val performanceFactor: Double = if (slowPartitionAdjustment) {
+              partitionsPerformancePercentage(nameAndPartition)
+            } else 1.0
+
+            if (slowPartitionAdjustment) {
+              partitionsThrottleFactor(nameAndPartition) = performanceFactor
+              logInfo(
+                s"Slow partition adjustment is on, so the prorate amount for $nameAndPartition will be adjusted by" +
+                  s" the performanceFactor = $performanceFactor")
+            }
+            val prorate = limit * (size / total) * performanceFactor
             logDebug(s"rateLimit $nameAndPartition prorated amount is $prorate")
             // Don't completely starve small partitions
             val off = begin + (if (prorate < 1) Math.ceil(prorate) else Math.floor(prorate)).toLong
@@ -333,6 +380,12 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext,
         }
     }.toArray
+    // if slowPartitionAdjustment is on, add the current batch to the performance tracker
+    if (slowPartitionAdjustment) {
+      addCurrentBatchToStatusTracker(offsetRanges)
+      throttlingStatusPlugin.foreach(
+        _.onBatchCreation(localBatchId, offsetRanges, partitionsThrottleFactor))
+    }
     val rdd =
       EventHubsSourceProvider.toInternalRow(new EventHubsRDD(sc, ehConf.trimmed, offsetRanges))
     logInfo(
@@ -341,10 +394,27 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext,
     sqlContext.internalCreateDataFrame(rdd, schema, isStreaming = true)
   }
+  /**
+   * Add the newly generated batch to the status tracker.
+   */
+  private def addCurrentBatchToStatusTracker(offsetRanges: Array[OffsetRange]) = {
+    localBatchId += 1
+    logDebug(
+      s"Slow partition adjustment is on, add the current batch $localBatchId to the tracker.")
+    partitionsStatusTracker.addOrUpdateBatch(localBatchId, offsetRanges)
+  }
+
   /**
    * Stop this source and any resources it has allocated
    */
   override def stop(): Unit = synchronized {
+    // if slowPartitionAdjustment is on, clean up the partition status tracker before closing
+    if (slowPartitionAdjustment) {
+      logDebug(
+        s"Slow partition adjustment is on, cleaning up the partition performance tracker before stopping.")
+      partitionsStatusTracker.cleanUp
+      localBatchId = -1
+    }
     ehClient.close()
   }
@@ -369,6 +439,14 @@ private[eventhubs] object EventHubsSource {
   private[eventhubs] val VERSION = 1
+  // RPC endpoint for partition performance communication in the driver
+  private var localBatchId = -1
+  val partitionsStatusTracker = PartitionsStatusTracker.getPartitionStatusTracker
+  val partitionPerformanceReceiver: PartitionPerformanceReceiver =
+    new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv, partitionsStatusTracker)
+  val partitionPerformanceReceiverRef: RpcEndpointRef = SparkEnv.get.rpcEnv
+    .setupEndpoint(PartitionPerformanceReceiver.ENDPOINT_NAME, partitionPerformanceReceiver)
+
   def getSortedExecutorList(sc: SparkContext): Array[String] = {
     val bm = sc.env.blockManager
     bm.master
diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala
index 33d72cb2d..230b2ee0b 100644
--- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala
+++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala
@@ -140,6 +140,7 @@ private[sql] class EventHubsSourceProvider
 }
 private[sql] object EventHubsSourceProvider extends Serializable {
+
   def eventHubsSchema: StructType = {
     StructType(
       Seq(
@@ -205,7 +206,7 @@ private[sql] object EventHubsSourceProvider extends Serializable {
             val arr = new Array[Byte](buf.remaining)
             buf.get(arr)
             arr.asInstanceOf[AnyRef]
-          case default => default
+          case default => default
         }
         .map { p =>
           p._2 match {
diff --git a/core/src/test/scala/org/apache/spark/eventhubs/EventHubsConfSuite.scala b/core/src/test/scala/org/apache/spark/eventhubs/EventHubsConfSuite.scala
index 10e40d684..c5b11fb7e 100644
--- a/core/src/test/scala/org/apache/spark/eventhubs/EventHubsConfSuite.scala
+++ b/core/src/test/scala/org/apache/spark/eventhubs/EventHubsConfSuite.scala
@@ -20,7 +20,11 @@ package org.apache.spark.eventhubs
 import java.time.Duration
 import java.util.NoSuchElementException
-import org.apache.spark.eventhubs.utils.{ EventHubsTestUtils, MetricPluginMock }
+import org.apache.spark.eventhubs.utils.{
+  EventHubsTestUtils,
+  MetricPluginMock,
+  ThrottlingStatusPluginMock
+}
 import org.json4s.NoTypeHints
 import org.json4s.jackson.Serialization
 import org.json4s.jackson.Serialization.{ read => sread }
 import org.json4s.jackson.Serialization.{ write => swrite }
@@ -219,6 +223,15 @@ class EventHubsConfSuite extends FunSuite with BeforeAndAfterAll {
     assert(idField.getInt(actualListener) == expectedListener.id)
   }
+  test("throttlingStatusPlugin set/get") {
+    val expectedListener = new ThrottlingStatusPluginMock
+    val conf = testUtils.getEventHubsConf().setThrottlingStatusPlugin(expectedListener)
+    val actualListener = conf.throttlingStatusPlugin.get
+    val idField = actualListener.getClass.getDeclaredField("id")
+    idField.setAccessible(true)
+    assert(idField.getInt(actualListener) ==
expectedListener.id)
+  }
+
   test("trimmedConfig") {
     val originalConf = testUtils
       .getEventHubsConf()
@@ -337,4 +350,26 @@ class EventHubsConfSuite extends FunSuite with BeforeAndAfterAll {
     eventHubConfig.setMaxSilentTime(Duration.ofMinutes(1))
     assert(eventHubConfig.maxSilentTime.get.toMinutes == 1)
   }
+
+  test("validate - slow partition adjustment config") {
+    val eventHubConfig = testUtils.getEventHubsConf()
+
+    // check the default value. It should be DefaultSlowPartitionAdjustment = false
+    assert(
+      eventHubConfig.slowPartitionAdjustment ==
+        DefaultSlowPartitionAdjustment.toBoolean)
+
+    val expectedSlowPartitionAdjustment = true
+    eventHubConfig.setSlowPartitionAdjustment(expectedSlowPartitionAdjustment)
+    val actualSlowPartitionAdjustment = eventHubConfig.slowPartitionAdjustment
+    assert(expectedSlowPartitionAdjustment == actualSlowPartitionAdjustment)
+  }
+
+  test("validate - max acceptable batch receive time config") {
+    val eventHubConfig = testUtils.getEventHubsConf()
+    val expectedTime = Duration.ofSeconds(20)
+    eventHubConfig.setMaxAcceptableBatchReceiveTime(expectedTime)
+    val actualTime = eventHubConfig.maxAcceptableBatchReceiveTime.get
+    assert(expectedTime == actualTime)
+  }
 }
diff --git a/core/src/test/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPluginMock.scala b/core/src/test/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPluginMock.scala
new file mode 100644
index 000000000..f08d0d6ef
--- /dev/null
+++ b/core/src/test/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPluginMock.scala
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.spark.eventhubs.utils + +import org.apache.spark.eventhubs.NameAndPartition +import org.apache.spark.eventhubs.rdd.OffsetRange +import scala.collection.mutable + +class ThrottlingStatusPluginMock extends ThrottlingStatusPlugin { + + val id = 1 + + override def onBatchCreation( + nextBatchLocalId: Long, + nextBatchOffsetRanges: Array[OffsetRange], + partitionsThrottleFactor: mutable.Map[NameAndPartition, Double]): Unit = {} + + override def onPartitionsPerformanceStatusUpdate( + latestUpdatedBatchLocalId: Long, + partitionsBatchSizes: Map[NameAndPartition, Int], + partitionsBatchReceiveTimeMS: Map[NameAndPartition, Long], + partitionsPerformancePercentages: Option[Map[NameAndPartition, Double]]): Unit = {} +} diff --git a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSinkSuite.scala b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSinkSuite.scala index af7f4b841..542186337 100644 --- a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSinkSuite.scala +++ b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSinkSuite.scala @@ -452,7 +452,8 @@ class EventHubsSinkSuite extends StreamTest with SharedSQLContext { try { ex = intercept[StreamingQueryException] { writer = createEventHubsWriter(input.toDF(), ehConf, properties = Some(targetProperties))( - "properties", "body") + "properties", + "body") input.addData("1", "2", "3", "4", "5") writer.processAllAvailable() } diff --git a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala index 9371c3edc..f12672db6 100644 --- a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala +++ b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala @@ -17,30 +17,19 @@ package org.apache.spark.sql.eventhubs -import java.io.{ BufferedWriter, FileInputStream, OutputStream, OutputStreamWriter } +import java.io.{BufferedWriter, FileInputStream, OutputStream, OutputStreamWriter} import java.nio.charset.StandardCharsets.UTF_8 +import java.time.Duration import java.util.concurrent.atomic.AtomicInteger -import org.apache.qpid.proton.amqp.{ - Binary, - Decimal128, - Decimal32, - Decimal64, - DescribedType, - Symbol, - UnknownDescribedType, - UnsignedByte, - UnsignedInteger, - UnsignedLong, - UnsignedShort -} -import org.apache.spark.eventhubs.utils.{ EventHubsTestUtils, SimulatedClient } -import org.apache.spark.eventhubs.{ EventHubsConf, EventPosition, NameAndPartition } +import org.apache.qpid.proton.amqp.{Binary, Decimal128, Decimal32, Decimal64, DescribedType, Symbol, UnknownDescribedType, UnsignedByte, UnsignedInteger, UnsignedLong, UnsignedShort} +import org.apache.spark.eventhubs.utils.{EventHubsTestUtils, SimpleThrottlingStatusPlugin, SimulatedClient, SimulatedPartitionStatusTracker} +import org.apache.spark.eventhubs.{EventHubsConf, EventPosition, NameAndPartition} import org.apache.spark.sql.Dataset import org.apache.spark.sql.execution.streaming._ -import org.apache.spark.sql.functions.{ count, window } +import org.apache.spark.sql.functions.{count, window} import org.apache.spark.sql.streaming.util.StreamManualClock -import org.apache.spark.sql.streaming.{ ProcessingTime, StreamTest } +import org.apache.spark.sql.streaming.{ProcessingTime, StreamTest} import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.util.Utils import org.json4s.NoTypeHints @@ -129,6 +118,13 @@ class EventHubsSourceSuite extends 
EventHubsSourceTest { private def getEventHubsConf(ehName: String): EventHubsConf = testUtils.getEventHubsConf(ehName) + case class PartitionsStatusTrackerUpdate(updates: List[(NameAndPartition, Long, Int, Long)]) extends ExternalAction { + override def runAction(): Unit = { + updates.foreach{ u => + SimulatedPartitionStatusTracker.updatePartitionPerformance(u._1, u._2, u._3, u._4)} + } + } + testWithUninterruptibleThread("deserialization of initial offset with Spark 2.1.0") { val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount) testUtils.populateUniformly(eventHub.name, 5000) @@ -706,4 +702,278 @@ class EventHubsSourceSuite extends EventHubsSourceTest { assert(row.getAs[Int]("count") === 1, s"Unexpected results: $row") query.stop() } + + test("setSlowPartitionAdjustment without any slow partition") { + val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount) + testUtils.populateUniformly(eventHub.name, 5000) + val partitions: List[NameAndPartition] = List(NameAndPartition(eventHub.name, 0), + NameAndPartition(eventHub.name, 1), + NameAndPartition(eventHub.name, 2), + NameAndPartition(eventHub.name, 3)) + + val parameters = + getEventHubsConf(eventHub.name) + .setMaxEventsPerTrigger(20) + .setSlowPartitionAdjustment(true) + .setMaxAcceptableBatchReceiveTime(Duration.ofMillis(4)) + .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin) + .setStartingPosition(EventPosition.fromSequenceNumber(0L)) + .toMap + + val reader = spark.readStream + .format("eventhubs") + .options(parameters) + + val eventhubs = reader + .load() + .select("body") + .as[String] + + val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt) + + val clock = new StreamManualClock + + val waitUntilBatchProcessed = AssertOnQuery { q => + eventually(Timeout(streamingTimeout)) { + if (q.exception.isEmpty) { + assert(clock.isStreamWaitingAt(clock.getTimeMillis())) + } + } + if (q.exception.isDefined) { + throw q.exception.get + } + true + } + + val noSlowPartition: Map[NameAndPartition, Double] = + Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0) + + testStream(mapped)( + StartStream(ProcessingTime(100), clock), + waitUntilBatchProcessed, + // we'll get 5 events per partition per trigger + Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), + CheckLastBatch(0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4), + PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 5, 9L), (partitions(1), 0L, 5, 11L), + (partitions(2), 0L, 5, 9L), (partitions(3), 0L, 5, 11L))), + //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty), + Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)), + AdvanceManualClock(100), + waitUntilBatchProcessed, + // the difference between max and min time per event is less than the acceptable time difference (1 MS) + // we should get 5 events per partition per trigger + Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), + CheckLastBatch(5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 8, 9), + PartitionsStatusTrackerUpdate(List( (partitions(0), 5L, 5, 16L), (partitions(1), 5L, 5, 13L), + (partitions(2), 5L, 5, 16L), (partitions(3), 5L, 5, 15L))), + Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)), + //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty), + AdvanceManualClock(100), + 
waitUntilBatchProcessed,
+      // the difference between max and min time per event is less than the acceptable time difference (1 MS)
+      // we should get 5 events per partition per trigger
+      Assert(Set[Long](0, 1, 2).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10, 11, 12, 13, 14),
+      // miss the performance update for this batch. Next round every partition is considered to run at normal speed
+      Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
+      AdvanceManualClock(100),
+      waitUntilBatchProcessed,
+      // we should get 5 events per partition per trigger
+      Assert(Set[Long](1, 2, 3).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15, 16, 17, 18, 19),
+      // get update for three partitions (missing partition 1)
+      PartitionsStatusTrackerUpdate(List( (partitions(0), 15L, 5, 55L),
+        (partitions(2), 15L, 5, 52L), (partitions(3), 15L, 5, 43L))),
+      Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
+      AdvanceManualClock(100),
+      waitUntilBatchProcessed,
+      // all partitions have receiveTimePerEvent <= avg + stdDev
+      // we should get 5 events per partition per trigger
+      Assert(Set[Long](2, 3, 4).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20, 21, 22, 23, 24),
+      StopStream,
+      StartStream(ProcessingTime(100), clock),
+      // get update for the last batch before stopping the stream.
It should be ignored because the tracker + // state should be clean at the start of the stream + PartitionsStatusTrackerUpdate(List( (partitions(0), 20L, 5, 100L), (partitions(1), 20L, 5, 13L), + (partitions(2), 20L, 5, 16L), (partitions(3), 20L, 5, 15L))), + Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty), + waitUntilBatchProcessed, + // last received status update should be ignored since it belongs to a batch before restarting the stream + // we should get 5 events per partition per trigger + Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), + CheckLastBatch(25, 26, 27, 28, 29, 25, 26, 27, 28, 29, 25, 26, 27, 28, 29, 25, 26, 27, 28, 29), + PartitionsStatusTrackerUpdate(List( (partitions(0), 25L, 5, 73L), (partitions(1), 25L, 5, 72L), + (partitions(2), 25L, 5, 66L), (partitions(3), 25L, 5, 73L))), + Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)), + //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty), + AdvanceManualClock(100), + waitUntilBatchProcessed, + // all partitions have receiveTimePerEvent <= avg + stdDev + // we should get 5 events per partition per trigger + Assert(Set[Long](0, 1, 2).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), + CheckLastBatch(30, 31, 32, 33, 34, 30, 31, 32, 33, 34, 30, 31, 32, 33, 34, 30, 31, 32, 33, 34) + ) + } + + test("setSlowPartitionAdjustment with slow partitions") { + val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount) + testUtils.populateUniformly(eventHub.name, 10000) + val partitions: List[NameAndPartition] = List(NameAndPartition(eventHub.name, 0), + NameAndPartition(eventHub.name, 1), + NameAndPartition(eventHub.name, 2), + NameAndPartition(eventHub.name, 3)) + + val parameters = + getEventHubsConf(eventHub.name) + .setMaxEventsPerTrigger(20) + .setSlowPartitionAdjustment(true) + .setMaxAcceptableBatchReceiveTime(Duration.ofMillis(3)) + .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin) + .setStartingPosition(EventPosition.fromSequenceNumber(0L)) + .toMap + + val reader = spark.readStream + .format("eventhubs") + .options(parameters) + + val eventhubs = reader + .load() + .select("body") + .as[String] + + val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt) + + val clock = new StreamManualClock + + val waitUntilBatchProcessed = AssertOnQuery { q => + eventually(Timeout(streamingTimeout)) { + if (q.exception.isEmpty) { + assert(clock.isStreamWaitingAt(clock.getTimeMillis())) + } + } + if (q.exception.isDefined) { + throw q.exception.get + } + true + } + + testStream(mapped)( + StartStream(ProcessingTime(100), clock), + waitUntilBatchProcessed, + // we'll get 5 events per partition per trigger + Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), + CheckLastBatch(0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4), + // for the next batch, let's make partition 2 slow + PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 5, 18L), (partitions(1), 0L, 5, 21L), + (partitions(2), 0L, 5, 42L), (partitions(3), 0L, 5, 25L))), + Assert(Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 0.63, partitions(3) -> 1.0) + .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)), + AdvanceManualClock(100), + waitUntilBatchProcessed, + // we should get 3 events for partition 2, 5 events for other partitions + Assert(Set[Long](0, 
1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 5, 6, 7, 8, 9),
+      // for the next batch, let's make partition 1 slow and recover partition 2 from being slow
+      PartitionsStatusTrackerUpdate(List( (partitions(0), 5L, 5, 18L), (partitions(1), 5L, 5, 163L),
+        (partitions(2), 5L, 3, 10L), (partitions(3), 5L, 5, 15L))),
+      Assert(Map(partitions(0) -> 1.0, partitions(1) -> 0.33, partitions(2) -> 1.0, partitions(3) -> 1.0)
+        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      AdvanceManualClock(100),
+      waitUntilBatchProcessed,
+      // we should get 4 events for partitions 0 and 3, 5 events for partition 2, and just 1 event for partition 1
+      // partitions 0 and 3 get 4 events because of the fewer number of events in those partitions (this is not related to the adjustment logic)
+      Assert(Set[Long](0, 1, 2).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(10, 11, 12, 13, 10, 8, 9, 10, 11, 12, 10, 11, 12, 13),
+      // for the next batch, let's only have 2 updates (one slow, one fast partition)
+      // since we don't have enough updated partitions, we should continue with the previous partition performance
+      PartitionsStatusTrackerUpdate(List( (partitions(0), 10L, 4, 13L), (partitions(3), 10L, 4, 168L))),
+      Assert(Map(partitions(0) -> 1.0, partitions(1) -> 0.33, partitions(2) -> 1.0, partitions(3) -> 1.0)
+        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      AdvanceManualClock(100),
+      waitUntilBatchProcessed,
+      // we should get 4 events for partitions 0 and 3, 5 events for partition 2, and just 1 event for partition 1
+      Assert(Set[Long](1, 2, 3).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(14, 15, 16, 17, 11, 13, 14, 15, 16, 17, 14, 15, 16, 17),
+      // let's get back to normal for all partitions
+      PartitionsStatusTrackerUpdate(List( (partitions(0), 14L, 4, 12L), (partitions(1), 11L, 1, 3L),
+        (partitions(2), 13L, 5, 14L), (partitions(3), 14L, 4, 11L))),
+      Assert( Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0)
+        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      AdvanceManualClock(100),
+      waitUntilBatchProcessed,
+      // all partitions have receiveTimePerEvent <= avg + stdDev
+      // Since partition 1 is behind, the prorate logic (independent of the slow-partition logic) tries to catch it up;
+      // therefore, partition 1 gets 5 events and the other partitions get 4 each
+      Assert(Set[Long](2, 3, 4).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(18, 19, 20, 21, 12, 13, 14, 15, 16, 18, 19, 20, 21, 18, 19, 20, 21)
+    )
+  }
+
+  test("setSlowPartitionAdjustment with more than one slow partition") {
+    val eventHub = testUtils.createEventHubs(newEventHubs(), 5)
+    testUtils.populateUniformly(eventHub.name, 1000)
+    val partitions: List[NameAndPartition] = List(NameAndPartition(eventHub.name, 0),
+                                                  NameAndPartition(eventHub.name, 1),
+                                                  NameAndPartition(eventHub.name, 2),
+                                                  NameAndPartition(eventHub.name, 3),
+                                                  NameAndPartition(eventHub.name, 4))
+
+    val parameters =
+      getEventHubsConf(eventHub.name)
+        .setMaxEventsPerTrigger(50)
+        .setSlowPartitionAdjustment(true)
+        .setMaxAcceptableBatchReceiveTime(Duration.ofMillis(4))
+        .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin)
+        .setStartingPosition(EventPosition.fromSequenceNumber(0L))
+        .toMap
+
+    val reader = spark.readStream
+      .format("eventhubs")
+      .options(parameters)
+
+    val eventhubs = 
reader
+      .load()
+      .select("body")
+      .as[String]
+
+    val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt)
+
+    val clock = new StreamManualClock
+
+    val waitUntilBatchProcessed = AssertOnQuery { q =>
+      eventually(Timeout(streamingTimeout)) {
+        if (q.exception.isEmpty) {
+          assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
+        }
+      }
+      if (q.exception.isDefined) {
+        throw q.exception.get
+      }
+      true
+    }
+
+    testStream(mapped)(
+      StartStream(ProcessingTime(100), clock),
+      waitUntilBatchProcessed,
+      // we'll get 10 events per partition per trigger
+      Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
+      // for the next batch, let's make partitions 0 and 4 slow
+      PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 10, 62L), (partitions(1), 0L, 10, 21L),
+        (partitions(2), 0L, 10, 20L), (partitions(3), 0L, 10, 40L), (partitions(4), 0L, 10, 65L))),
+      Assert(Map(partitions(0) -> 0.67, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0, partitions(4) -> 0.64)
+        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      AdvanceManualClock(100),
+      waitUntilBatchProcessed,
+      // we should get 10 events for partitions 1, 2, and 3, and 6 events for partitions 0 and 4
+      Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
+      CheckLastBatch(10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+        10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15)
+    )
+  }
+
 }
From 74485caaa58aca8d9b501d8cf77a9394766341db Mon Sep 17 00:00:00 2001
From: JamesBirdsall
Date: Wed, 29 Jul 2020 21:53:42 -0700
Subject: [PATCH 06/29] Do not retry receiver close, log and ignore transient errors. (#523)

---
 .../client/CachedEventHubsReceiver.scala      | 10 +++++--
 .../spark/eventhubs/utils/RetryUtils.scala    | 28 ++++++++++++++-----
 2 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
index 96f9ca25a..4a8008920 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
@@ -38,7 +38,7 @@ import org.apache.spark.util.RpcUtils
 import scala.collection.JavaConverters._
 import scala.concurrent.ExecutionContext.Implicits.global
 import scala.concurrent.duration._
-import scala.concurrent.{ Await, Awaitable, Future }
+import scala.concurrent.{ Await, Awaitable, Future, Promise }

 private[spark] trait CachedReceiver {
   private[eventhubs] def receive(ehConf: EventHubsConf,
@@ -130,7 +130,13 @@ private[client] class CachedEventHubsReceiver private (ehConf: EventHubsConf,
   }

   private def closeReceiver(): Future[Void] = {
-    retryJava(receiver.close(), "closing a receiver")
+    // Closing a PartitionReceiver is not a retryable operation: after the first call, it always
+    // returns the same CompletableFuture. Therefore, if it fails with a transient
+    // error, log and continue.
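+    // A pre-completed Promise supplies the Future[Void] passed as replaceTransientErrors,
+    // so a transient failure while closing resolves immediately instead of being retried.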
+    val dummyResult = Promise[Void]()
+    dummyResult.success(null)
+    retryJava(receiver.close(), "closing a receiver", replaceTransientErrors = dummyResult.future)
   }

   private def recreateReceiver(seqNo: SequenceNumber): Unit = {
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/RetryUtils.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/RetryUtils.scala
index 12ee42177..13b2e5320 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/utils/RetryUtils.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/RetryUtils.scala
@@ -79,14 +79,16 @@ private[spark] object RetryUtils extends Logging {
    * @param opName the name of the operation. This is to assist with logging.
    * @param maxRetry The number of times the operation will be retried.
    * @param delay The delay (in milliseconds) before the Future is run again.
+   * @param replaceTransientErrors If not null, a transient error is not retried and this Future is returned instead.
    * @tparam T the result type from the [[CompletableFuture]]
    * @return the [[Future]] returned by the async operation
    */
  final def retryJava[T](fn: => CompletableFuture[T],
                         opName: String,
                         maxRetry: Int = RetryCount,
-                        delay: Int = 10): Future[T] = {
-    retryScala(toScala(fn), opName, maxRetry, delay)
+                        delay: Int = 10,
+                        replaceTransientErrors: Future[T] = null): Future[T] = {
+    retryScala(toScala(fn), opName, maxRetry, delay, replaceTransientErrors)
   }

   /**
@@ -100,13 +102,15 @@ private[spark] object RetryUtils extends Logging {
    * @param opName the name of the operation. This is to assist with logging.
    * @param maxRetry The number of times the operation will be retried.
    * @param delay The delay (in milliseconds) before the Future is run again.
+   * @param replaceTransientErrors If not null, a transient error is not retried and this Future is returned instead.
   * @tparam T the result type from the [[Future]]
   * @return the [[Future]] returned by the async operation
   */
  final def retryScala[T](fn: => Future[T],
                          opName: String,
                          maxRetry: Int = RetryCount,
-                         delay: Int = 10): Future[T] = {
+                         delay: Int = 10,
+                         replaceTransientErrors: Future[T] = null): Future[T] = {
     def retryHelper(fn: => Future[T], retryCount: Int): Future[T] = {
       val taskId = EventHubsUtils.getTaskId
       fn.recoverWith {
@@ -115,8 +119,13 @@ private[spark] object RetryUtils extends Logging {
             logInfo(s"(TID $taskId) failure: $opName")
             throw eh
           }
-          logInfo(s"(TID $taskId) retrying $opName after $delay ms")
-          after(delay.milliseconds)(retryHelper(fn, retryCount + 1))
+          if (replaceTransientErrors != null) {
+            logInfo(s"(TID $taskId) ignoring transient failure in $opName")
+            replaceTransientErrors
+          } else {
+            logInfo(s"(TID $taskId) retrying $opName after $delay ms")
+            after(delay.milliseconds)(retryHelper(fn, retryCount + 1))
+          }
         case t: Throwable =>
           t.getCause match {
             case eh: EventHubException if eh.getIsTransient =>
@@ -124,8 +133,13 @@ private[spark] object RetryUtils extends Logging {
                 logInfo(s"(TID $taskId) failure: $opName")
                 throw eh
               }
-              logInfo(s"(TID $taskId) retrying $opName after $delay ms")
-              after(delay.milliseconds)(retryHelper(fn, retryCount + 1))
+              if (replaceTransientErrors != null) {
+                logInfo(s"(TID $taskId) ignoring transient failure in $opName")
+                replaceTransientErrors
+              } else {
+                logInfo(s"(TID $taskId) retrying $opName after $delay ms")
+                after(delay.milliseconds)(retryHelper(fn, retryCount + 1))
+              }
             case _ =>
               logInfo(s"(TID $taskId) failure: $opName")
               throw t
From c2cf8c70fd4d3f9d46cd4f2b8ba7212cd023c344 Mon Sep 17 00:00:00 2001
From: nyaghma
Date: Wed, 5 Aug 2020 11:53:00 -0700
Subject: [PATCH 07/29] Update FAQ (#527)

---
 FAQ.md | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/FAQ.md b/FAQ.md
index 4d847e921..e6827518b 100644
--- a/FAQ.md
+++ b/FAQ.md
@@ -21,6 +21,9 @@ your retention policy in Event Hubs, or both.
 It's also worth noting that you may see this more often in testing scenarios due to irregular send patterns.
 If that's the case, simply send fresh events to the Event Hubs and continue testing with those.

+You may also see this error if there are multiple receivers per consumer group-partition combo. For more information on
+how to handle this, please read the next question and answer.
+
 **Why am I getting a `ReceiverDisconnectedException`?**

 In version 2.3.2 and above, the connector uses epoch receivers from the Event Hubs Java client.
@@ -32,6 +35,13 @@ Now, if we open a new receiver, `receiverB`, for the same consumer group and par
 In order to avoid this issue, please have one consumer group per Spark application being run. In general,
 you should have a unique consumer group for each consuming application being run.

+Note that this error could happen if the same structured stream is accessed by multiple queries (writers).
+Spark will read from the input source and process the dataframe separately for each defined sink.
+This results in having multiple readers on the same consumer group-partition combo.
+In order to prevent this, you can create a separate reader for each writer using a separate consumer group, or
+use an intermediate Delta table if you are using Databricks.
+
+
 **Why am I getting events from the `EndofStream`, despite using `setStartingPositions`?**

 When you start reading events from Event Hubs, the initial starting positions must be determined.
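+For illustration, here is a minimal sketch of pinning explicit starting positions for a subset of
+partitions (the event hub name, partition ids, and positions below are placeholder values):
+
+```scala
+import org.apache.spark.eventhubs._
+
+val positions = Map(
+  NameAndPartition("my-event-hub", 0) -> EventPosition.fromSequenceNumber(100L),
+  NameAndPartition("my-event-hub", 1) -> EventPosition.fromStartOfStream
+)
+
+// connectionString is assumed to be defined elsewhere
+val ehConf = EventHubsConf(connectionString)
+  .setStartingPositions(positions)
+```
+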
@@ -47,5 +57,21 @@ So if you set the starting positions for a subset of partitions, the starting po
 set to the latest sequence numbers available in those partitions.

+**How can I fix the `"Send operation timed out"` error when I send a batch with a large number of events?**
+
+As a general guideline, we don't encourage sending a large number of events in a single batch. Large transfers are more
+vulnerable to being interrupted, which results in retrying the entire operation regardless of its partial completion.
+
+However, if you decide to send a large number of events within a single batch, you need to make sure that
+
+(I) You have enough Throughput Units to handle the transfer rate. You can use the **Auto-inflate** feature to automatically
+increase the number of throughput units to meet usage needs.
+
+(II) You have set the timeout value in a way that the send operation has enough time to complete the entire transfer.
+The send operation uses the **`receiverTimeout`** value as the amount of time it allows the operation to complete.
+Since a single batch is transferred by a single send operation, if the batch contains a large number of events, you
+have to adjust the `receiverTimeout` to give the send operation enough time to complete the entire transfer.
+
+
 **What else? If you have suggestions for this FAQ please share them on the [gitter chat](https://gitter.im/azure-event-hubs-spark/Lobby) or open an issue!**
\ No newline at end of file
From 5da86a3c81891bbb98e5aa64f2cc3ee0681e6479 Mon Sep 17 00:00:00 2001
From: SJ
Date: Wed, 5 Aug 2020 12:04:40 -0700
Subject: [PATCH 08/29] Update version number for new release (2.3.17) (#529)

---
 core/pom.xml                                                  | 2 +-
 core/src/main/scala/org/apache/spark/eventhubs/package.scala  | 2 +-
 pom.xml                                                       | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/core/pom.xml b/core/pom.xml
index 89d9c7241..382dd1f0f 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -23,7 +23,7 @@
     <groupId>com.microsoft.azure</groupId>
    <artifactId>azure-eventhubs-spark-parent_${scala.binary.version}</artifactId>
-    <version>2.3.16</version>
+    <version>2.3.17</version>
    <relativePath>../pom.xml</relativePath>
  </parent>
  <artifactId>azure-eventhubs-spark_${scala.binary.version}</artifactId>
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/package.scala b/core/src/main/scala/org/apache/spark/eventhubs/package.scala
index 31a3f98d4..f47b96546 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/package.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/package.scala
@@ -60,7 +60,7 @@ package object eventhubs {
   val EnqueuedTimeAnnotation = "x-opt-enqueued-time"
   val SequenceNumberAnnotation = "x-opt-sequence-number"

-  val SparkConnectorVersion = "2.3.16"
+  val SparkConnectorVersion = "2.3.17"

   type PartitionId = Int
   val PartitionId: Int.type = Int
diff --git a/pom.xml b/pom.xml
index 083cc17bf..fff3ac0c5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -25,7 +25,7 @@
  <groupId>com.microsoft.azure</groupId>
  <artifactId>azure-eventhubs-spark-parent_${scala.binary.version}</artifactId>
-  <version>2.3.16</version>
+  <version>2.3.17</version>
  <packaging>pom</packaging>
  <name>EventHubs+Spark Parent POM</name>
From 2251c24bddaff9206512778cd30aec6937ef57ba Mon Sep 17 00:00:00 2001
From: SJ
Date: Fri, 7 Aug 2020 12:25:11 -0700
Subject: [PATCH 09/29] Update documentation for the current release (2.3.17) (#530)

---
 .github/CONTRIBUTING.md                       |  2 +-
 README.md                                     | 24 ++++++++++---------
 docs/PySpark/structured-streaming-pyspark.md  |  8 +++----
 docs/spark-streaming-eventhubs-integration.md |  4 ++--
 ...uctured-streaming-eventhubs-integration.md |  8 +++----
 5 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 753f00c96..83a5c070e 100644
--- a/.github/CONTRIBUTING.md
+++ 
b/.github/CONTRIBUTING.md @@ -18,7 +18,7 @@ run all unit/integration tests and build a JAR. ### SBT Dependency - libraryDependencies += "com.microsoft.azure" %% "azure-eventhubs-spark" %% "2.3.16" + libraryDependencies += "com.microsoft.azure" %% "azure-eventhubs-spark" %% "2.3.17" ## Filing Issues diff --git a/README.md b/README.md index 2a9fef06e..e81fb5ae5 100644 --- a/README.md +++ b/README.md @@ -30,21 +30,23 @@ By making Event Hubs and Spark easier to use together, we hope this connector ma #### Spark |Spark Version|Package Name|Package Version| |-------------|------------|----------------| -|Spark 2.4|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.16%7Cjar)| -|Spark 2.4|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.16%7Cjar)| -|Spark 2.3|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.16%7Cjar)| +|Spark 3.0|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| +|Spark 2.4|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| +|Spark 2.4|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| +|Spark 2.3|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| |Spark 2.2|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.2.10-blue.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.2.10%7Cjar)| |Spark 2.1|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.2.10-blue.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.2.10%7Cjar)| #### Databricks |Databricks Runtime Version|Artifact Id|Package Version| |-------------|------------|----------------| -|Databricks Runtime 6.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.16%7Cjar)| -|Databricks Runtime 6.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.16%7Cjar)| -|Databricks Runtime 5.X|azure-eventhubs-spark_2.11|[![Maven 
Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.16%7Cjar)| -|Databricks Runtime 5.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.16%7Cjar)| -|Databricks Runtime 4.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.16%7Cjar)| -|Databricks Runtime 3.5|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.16-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.16%7Cjar)| +|Databricks Runtime 7.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| +|Databricks Runtime 6.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| +|Databricks Runtime 6.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| +|Databricks Runtime 5.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| +|Databricks Runtime 5.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| +|Databricks Runtime 4.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| +|Databricks Runtime 3.5|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| #### Roadmap @@ -59,13 +61,13 @@ For Scala/Java applications using SBT/Maven project definitions, link your appli groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.11 - version = 2.3.16 + version = 2.3.17 or groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.12 - version = 2.3.16 + version = 2.3.17 ### Documentation diff --git a/docs/PySpark/structured-streaming-pyspark.md b/docs/PySpark/structured-streaming-pyspark.md index 022356d8f..070d13c28 100644 --- a/docs/PySpark/structured-streaming-pyspark.md +++ b/docs/PySpark/structured-streaming-pyspark.md @@ -23,13 +23,13 @@ Structured streaming integration for Azure Event Hubs is ultimately run on the J ``` groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.11 - version = 2.3.16 + version = 2.3.17 or groupId = com.microsoft.azure artifactId = 
azure-eventhubs-spark_2.12 - version = 2.3.16 + version = 2.3.17 ``` For Python applications, you need to add this above library and its dependencies when deploying your application. @@ -397,11 +397,11 @@ AMQP types need to be handled explicitly by the connector. Below we list the AMQ As with any Spark applications, `spark-submit` is used to launch your application. `azure-eventhubs-spark_2.11` and its dependencies can be directly added to `spark-submit` using `--packages`, such as, - ./bin/spark-submit --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.16 ... + ./bin/spark-submit --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.17 ... For experimenting on `spark-shell`, you can also use `--packages` to add `azure-eventhubs-spark_2.11` and its dependencies directly, - ./bin/spark-shell --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.16 ... + ./bin/spark-shell --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.17 ... See [Application Submission Guide](https://spark.apache.org/docs/latest/submitting-applications.html) for more details about submitting applications with external dependencies. diff --git a/docs/spark-streaming-eventhubs-integration.md b/docs/spark-streaming-eventhubs-integration.md index 81646fbdf..3988459c3 100644 --- a/docs/spark-streaming-eventhubs-integration.md +++ b/docs/spark-streaming-eventhubs-integration.md @@ -23,13 +23,13 @@ For Scala/Java applications using SBT/Maven project definitions, link your appli ``` groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.11 - version = 2.3.16 + version = 2.3.17 or groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.12 - version = 2.3.16 + version = 2.3.17 ``` For Python applications, you need to add this above library and its dependencies when deploying your application. diff --git a/docs/structured-streaming-eventhubs-integration.md b/docs/structured-streaming-eventhubs-integration.md index e490fb92f..8b8d56c9f 100644 --- a/docs/structured-streaming-eventhubs-integration.md +++ b/docs/structured-streaming-eventhubs-integration.md @@ -23,13 +23,13 @@ For Scala/Java applications using SBT/Maven project definitions, link your appli ``` groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.11 - version = 2.3.16 + version = 2.3.17 or groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.12 - version = 2.3.16 + version = 2.3.17 ``` For Python applications, you need to add this above library and its dependencies when deploying your application. @@ -397,11 +397,11 @@ AMQP types need to be handled explicitly by the connector. Below we list the AMQ As with any Spark applications, `spark-submit` is used to launch your application. `azure-eventhubs-spark_2.11` and its dependencies can be directly added to `spark-submit` using `--packages`, such as, - ./bin/spark-submit --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.16 ... + ./bin/spark-submit --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.17 ... For experimenting on `spark-shell`, you can also use `--packages` to add `azure-eventhubs-spark_2.11` and its dependencies directly, - ./bin/spark-shell --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.16 ... + ./bin/spark-shell --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.17 ... See [Application Submission Guide](https://spark.apache.org/docs/latest/submitting-applications.html) for more details about submitting applications with external dependencies. 
From 3e251e664e5c4c212938aa38c3143b4276567319 Mon Sep 17 00:00:00 2001
From: Richard Du <69235163+duhuan@users.noreply.github.com>
Date: Thu, 1 Oct 2020 02:23:31 +0800
Subject: [PATCH 10/29] add AAD auth client to connect ehs (#535)

---
 .../eventhubs/ConnectionStringBuilder.scala   |  14 +++
 .../spark/eventhubs/EventHubsConf.scala       |  51 ++++++++-
 .../client/ClientConnectionPool.scala         |  32 ++++--
 .../org/apache/spark/eventhubs/package.scala  |   3 +
 .../utils/AadAuthenticationCallback.scala     |   7 ++
 .../eventhubs/utils/EventHubsTestUtils.scala  |  15 +--
 .../ConnectionStringBuilderSuite.scala        |  20 ++++
 .../spark/eventhubs/EventHubsConfSuite.scala  |  33 ++++--
 .../utils/AadAuthenticationCallbackMock.scala |  10 ++
 ...aad-authentication-to-connect-eventhubs.md | 107 ++++++++++++++++++
 pom.xml                                       |   5 +
 11 files changed, 268 insertions(+), 29 deletions(-)
 create mode 100644 core/src/main/scala/org/apache/spark/eventhubs/utils/AadAuthenticationCallback.scala
 create mode 100644 core/src/test/scala/org/apache/spark/eventhubs/utils/AadAuthenticationCallbackMock.scala
 create mode 100644 docs/use-aad-authentication-to-connect-eventhubs.md

diff --git a/core/src/main/scala/org/apache/spark/eventhubs/ConnectionStringBuilder.scala b/core/src/main/scala/org/apache/spark/eventhubs/ConnectionStringBuilder.scala
index af6ba7f0e..37c840f64 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/ConnectionStringBuilder.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/ConnectionStringBuilder.scala
@@ -240,6 +240,20 @@ class ConnectionStringBuilder private () {
     this
   }

+  /**
+   * Set a connection string for AAD authentication.
+   *
+   * @param endpoint the eventhubs instance endpoint
+   * @param eventHubName the eventhubs name
+   *
+   * @return the {@link ConnectionStringBuilder} being set.
+   */
+  def setAadAuthConnectionString(endpoint: URI, eventHubName: String): ConnectionStringBuilder = {
+    this
+      .setEndpoint(endpoint)
+      .setEventHubName(eventHubName)
+  }
+
   /**
    * Identical to [[build]].
*/
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala
index 9b3334707..03398e2f9 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala
@@ -20,9 +20,10 @@ package org.apache.spark.eventhubs
 import java.time.Duration
 import java.util.concurrent.ConcurrentHashMap

+import com.microsoft.azure.eventhubs.AzureActiveDirectoryTokenProvider.AuthenticationCallback
 import org.apache.spark.eventhubs.PartitionPreferredLocationStrategy.PartitionPreferredLocationStrategy
 import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
-import org.apache.spark.eventhubs.utils.{ MetricPlugin, ThrottlingStatusPlugin }
+import org.apache.spark.eventhubs.utils.{AadAuthenticationCallback, MetricPlugin, ThrottlingStatusPlugin}
 import org.apache.spark.internal.Logging
 import org.json4s.NoTypeHints
 import org.json4s.jackson.Serialization
@@ -53,6 +54,7 @@ final class EventHubsConf private (private val connectionStr: String)
     self =>

   import EventHubsConf._
+
   private val settings = new ConcurrentHashMap[String, String]()
   this.setConnectionString(connectionStr)

@@ -168,7 +170,9 @@
       MetricPluginKey,
       SlowPartitionAdjustmentKey,
       ThrottlingStatusPluginKey,
-      MaxAcceptableBatchReceiveTimeKey
+      MaxAcceptableBatchReceiveTimeKey,
+      UseAadAuthKey,
+      AadAuthCallbackKey
     ).map(_.toLowerCase).toSet

     val trimmedConfig = EventHubsConf(connectionString)
@@ -493,10 +497,10 @@
   }

   /** Set the max time that is acceptable for a partition to receive events in a single batch.
-   * This value is being used to identify slow partitions when the slowPartitionAdjustment is on.
-   * Only partitions that tale more than this time to receive thier portion of events in batch are considered
-   * as potential slow partitrions.
-   * Default: [[DefaultMaxAcceptableBatchReceiveTime]]
+   * This value is used to identify slow partitions when slowPartitionAdjustment is on.
+   * Only partitions that take more than this time to receive their portion of events in a batch are considered
+   * as potential slow partitions.
+   * Default: [[DefaultMaxAcceptableBatchReceiveTime]]
    *
    * @param d the new maximum acceptable time for a partition to receive events in a single batch
    * @return the updated [[EventHubsConf]] instance
@@ -561,6 +565,39 @@
       s"${PartitionPreferredLocationStrategy.values.mkString(",")}"))
   }

+  /**
+   * Use AAD auth to connect to Event Hubs instead of a connection string.
+   * This is internal; it is enabled by [[setAadAuthCallback]].
+   *
+   * Default: false
+   * @return the updated [[EventHubsConf]] instance
+   */
+  private def setUseAadAuth(b: Boolean): EventHubsConf = {
+    set(UseAadAuthKey, b)
+  }
+
+  def useAadAuth: Boolean = {
+    self.get(UseAadAuthKey).getOrElse(DefaultUseAadAuth).toBoolean
+  }
+
+  /**
+   * Set a callback class for AAD auth. The class should be Serializable and derived from
+   * org.apache.spark.eventhubs.utils.AadAuthenticationCallback.
+ * More info about this: https://docs.microsoft.com/en-us/azure/event-hubs/authorize-access-azure-active-directory + * + * @param callback The callback class which implements org.apache.spark.eventhubs.utils.AadAuthenticationCallback + * @return the updated [[EventHubsConf]] instance + */ + def setAadAuthCallback(callback: AadAuthenticationCallback): EventHubsConf = { + setUseAadAuth(true) + set(AadAuthCallbackKey, callback.getClass.getName) + } + + def aadAuthCallback(): Option[AadAuthenticationCallback] = { + self.get(AadAuthCallbackKey) map (className => { + Class.forName(className).newInstance().asInstanceOf[AadAuthenticationCallback] + }) + } + // The simulated client (and simulated eventhubs) will be used. These // can be found in EventHubsTestUtils. private[spark] def setUseSimulatedClient(b: Boolean): EventHubsConf = { @@ -618,6 +655,8 @@ object EventHubsConf extends Logging { val SlowPartitionAdjustmentKey = "eventhubs.slowPartitionAdjustment" val ThrottlingStatusPluginKey = "eventhubs.throttlingStatusPlugin" val MaxAcceptableBatchReceiveTimeKey = "eventhubs.maxAcceptableBatchReceiveTime" + val UseAadAuthKey = "eventhubs.useAadAuth" + val AadAuthCallbackKey = "eventhubs.aadAuthCallback" /** Creates an EventHubsConf */ def apply(connectionString: String) = new EventHubsConf(connectionString) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/ClientConnectionPool.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/ClientConnectionPool.scala index c6e82cf46..b4bb76a50 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/client/ClientConnectionPool.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/client/ClientConnectionPool.scala @@ -17,10 +17,11 @@ package org.apache.spark.eventhubs.client +import java.net.URI import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.{ ConcurrentLinkedQueue, Executors, ScheduledExecutorService } -import com.microsoft.azure.eventhubs.{ EventHubClient, RetryPolicy } +import com.microsoft.azure.eventhubs.{ EventHubClient, EventHubClientOptions, RetryPolicy } import org.apache.spark.eventhubs._ import org.apache.spark.internal.Logging @@ -55,12 +56,29 @@ private class ClientConnectionPool(val ehConf: EventHubsConf) extends Logging { EventHubsClient.userAgent = s"SparkConnector-$SparkConnectorVersion-[${ehConf.name}]-[$consumerGroup]" while (client == null) { - client = EventHubClient.createFromConnectionStringSync( - connStr.toString, - RetryPolicy.getDefault, - ClientThreadPool.get(ehConf), - null, - ehConf.maxSilentTime.getOrElse(DefaultMaxSilentTime)) + if (ehConf.useAadAuth) { + val ehClientOption: EventHubClientOptions = new EventHubClientOptions() + .setMaximumSilentTime(ehConf.maxSilentTime.getOrElse(DefaultMaxSilentTime)) + .setOperationTimeout(ehConf.receiverTimeout.getOrElse(DefaultReceiverTimeout)) + .setRetryPolicy(RetryPolicy.getDefault) + client = EventHubClient + .createWithAzureActiveDirectory( + connStr.getEndpoint, + ehConf.name, + ehConf.aadAuthCallback().get, + ehConf.aadAuthCallback().get.authority, + ClientThreadPool.get(ehConf), + ehClientOption + ) + .get() + } else { + client = EventHubClient.createFromConnectionStringSync( + connStr.toString, + RetryPolicy.getDefault, + ClientThreadPool.get(ehConf), + null, + ehConf.maxSilentTime.getOrElse(DefaultMaxSilentTime)) + } } } else { logInfo( diff --git a/core/src/main/scala/org/apache/spark/eventhubs/package.scala b/core/src/main/scala/org/apache/spark/eventhubs/package.scala index f47b96546..daefcfabf 100644 --- 
a/core/src/main/scala/org/apache/spark/eventhubs/package.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/package.scala
@@ -62,6 +62,8 @@ package object eventhubs {

   val SparkConnectorVersion = "2.3.17"

+  val DefaultUseAadAuth = "false"
+
   type PartitionId = Int
   val PartitionId: Int.type = Int

@@ -94,4 +96,5 @@ package object eventhubs {
     def toSequenceNumber: SequenceNumber = str.toLong
   }

+
 }
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/AadAuthenticationCallback.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/AadAuthenticationCallback.scala
new file mode 100644
index 000000000..229c91293
--- /dev/null
+++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/AadAuthenticationCallback.scala
@@ -0,0 +1,7 @@
+package org.apache.spark.eventhubs.utils
+
+import com.microsoft.azure.eventhubs.AzureActiveDirectoryTokenProvider.AuthenticationCallback
+
+trait AadAuthenticationCallback extends AuthenticationCallback with Serializable {
+  def authority: String
+}
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtils.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtils.scala
index aa65b2102..fe4e35b30 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtils.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtils.scala
@@ -56,9 +56,9 @@ private[spark] class EventHubsTestUtils {
   /**
    * Sends events to the specified simulated event hub.
    *
-   * @param ehName the event hub to send to
-   * @param partition the partition id that will receive the data
-   * @param data the data being sent
+   * @param ehName     the event hub to send to
+   * @param partition  the partition id that will receive the data
+   * @param data       the data being sent
    * @param properties additional application properties
    * @return
    */
@@ -98,7 +98,7 @@ private[spark] class EventHubsTestUtils {
   /**
    * Creates a [[SimulatedEventHubs]].
    *
-   * @param ehName the name of the simulated event hub
+   * @param ehName         the name of the simulated event hub
    * @param partitionCount the number of partitions in the simulated event hub
    * @return the newly created [[SimulatedEventHubs]]
    */
@@ -109,6 +109,7 @@ private[spark] class EventHubsTestUtils {

   /**
    * Destroys the event hub if it is present.
+   *
    * @param ehName the name of the simulated event hub to be destroyed.
    */
   def destroyEventHubs(ehName: String): Unit = {
@@ -161,9 +162,9 @@ private[spark] class EventHubsTestUtils {
    * counter. In this example, the payloads would be 0, 1, 2, 3,
    * 4, 5, 6, 7, 8, 9.
* - * @param ehName the simulated event hub to receive the events - * @param count the number of events to be generated for each - * partition + * @param ehName the simulated event hub to receive the events + * @param count the number of events to be generated for each + * partition * @param properties the [[ApplicationProperties]] to be inserted * to each event */ diff --git a/core/src/test/scala/org/apache/spark/eventhubs/ConnectionStringBuilderSuite.scala b/core/src/test/scala/org/apache/spark/eventhubs/ConnectionStringBuilderSuite.scala index 472ed01ee..409c70d9c 100644 --- a/core/src/test/scala/org/apache/spark/eventhubs/ConnectionStringBuilderSuite.scala +++ b/core/src/test/scala/org/apache/spark/eventhubs/ConnectionStringBuilderSuite.scala @@ -33,6 +33,11 @@ class ConnectionStringBuilderSuite extends FunSuite { assert(connStrBuilder.getOperationTimeout == CorrectOperationTimeout) } + private val validateConnStrForAadAuthBuilder = (connStrBuilder: ConnectionStringBuilder) => { + assert(connStrBuilder.getEventHubName == CorrectEntityPath) + assert(connStrBuilder.getEndpoint.getHost == CorrectEndpoint) + } + test("parse invalid connection string") { intercept[Exception] { ConnectionStringBuilder("something") @@ -50,6 +55,18 @@ class ConnectionStringBuilderSuite extends FunSuite { validateConnStrBuilder(connStrBuilder) } + test("parse valid connection string for AAD auth") { + val connStrBuilder = ConnectionStringBuilder(correctConnectionStringForAadAuth) + validateConnStrForAadAuthBuilder(connStrBuilder) + } + + test("setAadAuthConnectionString") { + val connStrBuilder = ConnectionStringBuilder(correctConnectionStringForAadAuth) + val secondConnStrBuilder = ConnectionStringBuilder() + secondConnStrBuilder.setAadAuthConnectionString(connStrBuilder.getEndpoint, connStrBuilder.getEventHubName) + validateConnStrForAadAuthBuilder(ConnectionStringBuilder(secondConnStrBuilder.build)) + } + test("exchange connection string across constructors") { val connStrBuilder = ConnectionStringBuilder(correctConnectionString) val secondConnStr = ConnectionStringBuilder() @@ -81,4 +98,7 @@ object ConnectionStringBuilderSuite { private val correctConnectionString = s"Endpoint=sb://$CorrectEndpoint;EntityPath=$CorrectEntityPath;SharedAccessKeyName=$CorrectKeyName;" + s"SharedAccessKey=$CorrectKey;OperationTimeout=$CorrectOperationTimeout;" + + private val correctConnectionStringForAadAuth = + s"Endpoint=sb://$CorrectEndpoint;EntityPath=$CorrectEntityPath;" } diff --git a/core/src/test/scala/org/apache/spark/eventhubs/EventHubsConfSuite.scala b/core/src/test/scala/org/apache/spark/eventhubs/EventHubsConfSuite.scala index c5b11fb7e..ca5e92b34 100644 --- a/core/src/test/scala/org/apache/spark/eventhubs/EventHubsConfSuite.scala +++ b/core/src/test/scala/org/apache/spark/eventhubs/EventHubsConfSuite.scala @@ -20,16 +20,12 @@ package org.apache.spark.eventhubs import java.time.Duration import java.util.NoSuchElementException -import org.apache.spark.eventhubs.utils.{ - EventHubsTestUtils, - MetricPluginMock, - ThrottlingStatusPluginMock -} +import org.apache.spark.eventhubs.utils.{AadAuthenticationCallbackMock, EventHubsTestUtils, MetricPluginMock, ThrottlingStatusPluginMock} import org.json4s.NoTypeHints import org.json4s.jackson.Serialization -import org.json4s.jackson.Serialization.{ read => sread } -import org.json4s.jackson.Serialization.{ write => swrite } -import org.scalatest.{ BeforeAndAfterAll, FunSuite } +import org.json4s.jackson.Serialization.{read => sread} +import 
org.json4s.jackson.Serialization.{write => swrite}
+import org.scalatest.{BeforeAndAfterAll, FunSuite}
 
 /**
  * Tests [[EventHubsConf]] for correctness.
@@ -99,6 +95,8 @@ class EventHubsConfSuite extends FunSuite with BeforeAndAfterAll {
     intercept[Exception] { map(OperationTimeoutKey) }
     intercept[Exception] { map(MaxEventsPerTriggerKey) }
     assert(map(UseSimulatedClientKey).toBoolean)
+    intercept[Exception] { map(UseAadAuthKey) }
+    intercept[Exception] { map(AadAuthCallbackKey) }
   }
 
   test("toConf") {
@@ -118,7 +116,9 @@
         StartingPositionsKey -> Serialization.write(expectedPositions.map {
           case (k, v) => k.toString -> v
         }),
-        MaxEventsPerTriggerKey -> 4.toString
+        MaxEventsPerTriggerKey -> 4.toString,
+        UseAadAuthKey -> "true",
+        AadAuthCallbackKey -> classOf[AadAuthenticationCallbackMock].getName
       ))
 
     val expectedConf = EventHubsConf(expectedConnStr)
@@ -126,6 +126,7 @@
       .setStartingPosition(expectedPosition)
       .setStartingPositions(expectedPositions)
       .setMaxEventsPerTrigger(4L)
+      .setAadAuthCallback(new AadAuthenticationCallbackMock())
 
     assert(expectedConf.equals(actualConf))
   }
@@ -247,6 +248,7 @@
       .setOperationTimeout(Duration.ofSeconds(10))
       .setThreadPoolSize(16)
       .setPrefetchCount(100)
+      .setAadAuthCallback(new AadAuthenticationCallbackMock())
       .setUseExclusiveReceiver(true)
 
     val newConf = originalConf.trimmed
@@ -268,6 +270,8 @@
     originalConf("eventhubs.useExclusiveReceiver")
     originalConf("maxEventsPerTrigger")
     originalConf("useSimulatedClient")
+    originalConf("eventhubs.useAadAuth")
+    originalConf("eventhubs.aadAuthCallback")
 
     // newConf should be trimmed
     newConf("eventhubs.connectionString")
@@ -372,4 +376,14 @@
     val actualTime = eventHubConfig.maxAcceptableBatchReceiveTime.get
     assert(expectedTime == actualTime)
   }
+
+  test("validate - AadAuthenticationCallback") {
+    val aadAuthCallback = new AadAuthenticationCallbackMock()
+    val eventHubConfig = testUtils.getEventHubsConf()
+      .setAadAuthCallback(aadAuthCallback)
+
+    val actualCallback = eventHubConfig.aadAuthCallback()
+    assert(eventHubConfig.useAadAuth)
+    assert(actualCallback.get.isInstanceOf[AadAuthenticationCallbackMock])
+  }
 }
diff --git a/core/src/test/scala/org/apache/spark/eventhubs/utils/AadAuthenticationCallbackMock.scala b/core/src/test/scala/org/apache/spark/eventhubs/utils/AadAuthenticationCallbackMock.scala
new file mode 100644
index 000000000..e93856b2c
--- /dev/null
+++ b/core/src/test/scala/org/apache/spark/eventhubs/utils/AadAuthenticationCallbackMock.scala
@@ -0,0 +1,10 @@
+package org.apache.spark.eventhubs.utils
+import java.util.concurrent.CompletableFuture
+
+class AadAuthenticationCallbackMock extends AadAuthenticationCallback {
+  override def acquireToken(s: String, s1: String, o: Any): CompletableFuture[String] = {
+    new CompletableFuture[String]() // never completed: the tests only round-trip the configuration
+  }
+
+  override def authority: String = "Fake-tenant-id"
+}
diff --git a/docs/use-aad-authentication-to-connect-eventhubs.md b/docs/use-aad-authentication-to-connect-eventhubs.md
new file mode 100644
index 000000000..343be29a1
--- /dev/null
+++ b/docs/use-aad-authentication-to-connect-eventhubs.md
@@ -0,0 +1,107 @@
+# Use AAD Authentication to Connect to Event Hubs
+This guide shows how to use AAD (Azure Active Directory)
+authentication to access Event Hubs.
+
+* [Use Service Principal + Secret to authorize](#use-service-principal--secret-to-authorize)
+* [Use Service Principal + Certificate to authorize](#use-service-principal--certificate-to-authorize)
+
+
+## Use Service Principal + Secret to authorize
+First, you need to create a callback class that extends
+`org.apache.spark.eventhubs.utils.AadAuthenticationCallback`:
+```scala
+import java.util.Collections
+import java.util.concurrent.CompletableFuture
+
+import com.microsoft.aad.msal4j._
+import org.apache.spark.eventhubs.utils.AadAuthenticationCallback
+
+case class AuthBySecretCallBack() extends AadAuthenticationCallback {
+
+  // Bridge Scala functions to java.util.function.Function for CompletableFuture.thenApply.
+  implicit def toJavaFunction[A, B](f: Function1[A, B]) = new java.util.function.Function[A, B] {
+    override def apply(a: A): B = f(a)
+  }
+
+  override def authority: String = "your-tenant-id"
+
+  val clientId: String = "your-client-id"
+  val clientSecret: String = "your-client-secret"
+
+  override def acquireToken(audience: String, authority: String, state: Any): CompletableFuture[String] = try {
+    val app = ConfidentialClientApplication
+      .builder(clientId, ClientCredentialFactory.createFromSecret(clientSecret))
+      .authority("https://login.microsoftonline.com/" + authority)
+      .build
+
+    val parameters = ClientCredentialParameters.builder(Collections.singleton(audience + ".default")).build
+
+    app.acquireToken(parameters).thenApply((result: IAuthenticationResult) => result.accessToken())
+  } catch {
+    case e: Exception =>
+      val failed = new CompletableFuture[String]
+      failed.completeExceptionally(e)
+      failed
+  }
+}
+```
+and then configure the connection to use AAD auth:
+```scala
+import java.net.URI
+
+import org.apache.spark.eventhubs.{ConnectionStringBuilder, EventHubsConf}
+
+val connectionString = ConnectionStringBuilder()
+  .setAadAuthConnectionString(new URI("your-ehs-endpoint"), "your-ehs-name")
+  .build
+val ehConf = EventHubsConf(connectionString)
+  .setConsumerGroup("consumerGroup")
+  .setAadAuthCallback(AuthBySecretCallBack())
+```
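+
+Once the callback is set, the resulting `ehConf` is used like any other `EventHubsConf`. The
+snippet below is a minimal sketch rather than part of the connector: it assumes an existing
+`SparkSession` named `spark`, and simply echoes the stream to the console.
+```scala
+// Read from the AAD-authenticated Event Hub using the ehConf built above.
+val df = spark.readStream
+  .format("eventhubs")
+  .options(ehConf.toMap)
+  .load()
+
+// Echo the raw events for a quick end-to-end check of the AAD setup.
+df.writeStream
+  .format("console")
+  .start()
+  .awaitTermination()
+```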
+
+
+## Use Service Principal + Certificate to authorize
+
+Alternatively, you can use a certificate to authorize your connections:
+
+```scala
+import java.io.{ByteArrayInputStream, File}
+import java.util.Collections
+import java.util.concurrent.CompletableFuture
+
+import com.microsoft.aad.msal4j.{ClientCredentialFactory, ClientCredentialParameters, ConfidentialClientApplication, IAuthenticationResult}
+import org.apache.commons.io.FileUtils
+import org.apache.spark.eventhubs.utils.AadAuthenticationCallback
+
+case class AuthByCertCallBack() extends AadAuthenticationCallback {
+
+  // Bridge Scala functions to java.util.function.Function for CompletableFuture.thenApply.
+  implicit def toJavaFunction[A, B](f: Function1[A, B]) = new java.util.function.Function[A, B] {
+    override def apply(a: A): B = f(a)
+  }
+
+  val clientId: String = "your-client-id"
+  val cert: Array[Byte] = FileUtils.readFileToByteArray(new File("your-cert-local-path"))
+  val certPassword: String = "password-of-your-cert"
+
+  override def authority: String = "your-tenant-id"
+
+  override def acquireToken(audience: String,
+                            authority: String,
+                            state: Any): CompletableFuture[String] =
+    try {
+      val app = ConfidentialClientApplication
+        .builder(clientId,
+                 ClientCredentialFactory.createFromCertificate(new ByteArrayInputStream(cert), certPassword))
+        .authority("https://login.microsoftonline.com/" + authority)
+        .build
+      val parameters =
+        ClientCredentialParameters.builder(Collections.singleton(audience + ".default")).build
+      app
+        .acquireToken(parameters)
+        .thenApply((result: IAuthenticationResult) => result.accessToken())
+    } catch {
+      case e: Exception =>
+        val failed = new CompletableFuture[String]
+        failed.completeExceptionally(e)
+        failed
+    }
+}
+```
+and then configure the connection to use AAD auth, exactly as in the secret-based example:
+```scala
+val connectionString = ConnectionStringBuilder()
+  .setAadAuthConnectionString(new URI("your-ehs-endpoint"), "your-ehs-name")
+  .build
+val ehConf = EventHubsConf(connectionString)
+  .setConsumerGroup("consumerGroup")
+  .setAadAuthCallback(AuthByCertCallBack())
+```
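+
+Either callback can also be exercised locally before submitting a Spark job, which surfaces
+credential problems much earlier than a failing executor would. The sketch below is an
+illustration, not part of the connector; it assumes `https://eventhubs.azure.net/` as the
+audience (the callback appends `.default` to the audience to form the requested scope).
+```scala
+// Hypothetical smoke test: acquire a single token directly through the callback.
+val callback = AuthByCertCallBack()
+val token = callback
+  .acquireToken("https://eventhubs.azure.net/", callback.authority, null)
+  .get() // block until MSAL completes the request
+assert(token.nonEmpty)
+```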
z##+_XO-FAOsEXE%t|5}+uO^w5F;+Q{u<&8#NkYmL0%9;V!wx2WoA5}Ts zjwPs>fG!h${*u7GU+M0ae{{5MU(UrO^7LQ@L=qomt_N!STP#`~?yF>Z@^Wi#JK;F6 z<!n(Kr7F*9CCp0w3+G>}|b<-1dO+O?PThXt>hv61sS z$07MC4R|vJdgJ63J=0tT?K%fWAswq+QeXL@oIDZZIS*!Mjj*-!2 z+%YosVq*!X32JT0B^A|eRJtNa(-99t>p2=I@7ULauo0hv`*b|lD34}EG7}Q5f1k&k+yn5{<^`F zHeP?pSJD<(nl|NPZ=v?_{JF@cCGkz8`IF+dO~56kCvMSprq%eI3YmW-GXq^U9w5t` z>-9n7@jvRXj42ibjZaaKmtNlL%en4km#1a%E^M%>cj%~Z3IBb^*j?(5%)V)!4Vh`T zqM{kuB5(ba9Il+6eIyP{;C7NZ>r74)>?f?L2jwW`@bb&1!?*g8t5Qn~pf%iguE6t| z=^NQs@89587I~WE{55+{ta(x# zJmms6jFB_$;Tqis4=a#9b0!aIqx0It(+AA#-Ua0Bp@+UN;m`RB_~QSFm6f<{Cmwg< zIYHMl3$_6I5-m`ziRPz9MQ6xSpF&9m%YfF5z`QV3q0dBTxwTB_&mrr|teWp7FMwN1ygw66tUb?uPsHL!6Hkqq;Dpcf@* zNtAn6i%7NX&1KZv>e#^y9Xpg(YfVmiJ|#AN7y8DfYrenZz-|1I(weW$K1V=yed5$z zuN}k&>rE)!0JV_R%m7+>$Udfnn$~eEWR7k-3(hWXn(QGpDh_VzF;<-*BLXRr@byH=L$j#g<}q~@^qkPw+k#g~3}Cx} zN|5R5{o!Wyjoe5nx(*g=vhd{Dy1!wMR>VG8>m&;pQ)o_6JUAAa54Ng|Ij4PpHR%vQQ5~fVbSUPdnBi z^d=L9PFxU}K^=Dv8~VoXLI@Pu9qptt5*G6Pkp4-(%e9LV?m&0n=={v}k3uJ}DnK8n z@RVv0WH6T#htbdAIb-rUkyj+(ic4{B_A%Gf8HkIJp+UKmh;LvN$QyNhUX#m-3RzwA7CFw9!X z^n$0VEoFrg<6;^;8_re_b|`tN8=I$<$g}0}dAzWtr<1kI5}{$A(P2JYk(Z^0O)+W% zdOY(DUqc2@Y1}>ZPAoSGH6Pt(G~XJylnBwDj~lk)cgjw#ipzX830Pc2Pmtv#cb@*Y zRli(K_$4HHhs5^Zr_X+C$5rq<(6@7(MwMe0v7RcQ>^^^0xQx%8ZYF%sP zUeCWQ`1yEOz&rKuyx95AwqbO?K!Kn4Q>CJrfLXRxc+-UHY8T6Pir7Wy&ms=1&NsAW zy{(<$c1_(FA6+B~rLf=^s%6NazsE2GH`c`j9Q}&^hPQQJf9F_!fgjb7Lz5TyBRw%} zS9>+uIVYz#f7G6zd`y!b%FPD}z}YDqMp!UXfhVVs^Q7_ZyCwpyFny!(tE+u&aCDq; z#KK*J0$4ohY&6bvmf9zW$VNOwg0?7!}x=8%2SQA*JADGDaM=_<5J1 zH&Wr^XM-od$KKXEI$;Ki{5)x1@pAr@ue1O8-1XU(vcm6dSd>ll#^vBClT+=kP1hfn zbYH@5*7-dyOLDtmw+vdoWKU|Vx_;GXNqq9LPzN^QLUc)8U5-h3;6>#0vt6}gJpohQ z>g}$-w^5{0(OjI`*_m#)G-qTqHPm&KWpB!P|A?YeXE~)8bgTY88l5%pStlCt@2N-` z*WR-lijPt=_LfM0^2C{_Sz&l7wU%Hw!;%%&^#oFQUsF7E=;-eeX0+f7@p!sw`_hl8 zcZJ`t2HdNAAEAvM%0aKetw}+;RbT&OL6!7vzlUy#H&giiuzQOr?DvD&?roL79h-=` zUw%A(b2OWn9!9~453Qu&?WE|}X?%TxFK8vGFQ#cTb~ra($!(JOdrVfg%du`5<}>S& zFdc(xJIiRVLY8F(34G|imTT@&^>CMCP!d%0Mc|-Z``SDstSfmY!ohfNSZNU}nKu#W zP7E9Wy_W8)Hy7lMb z41b*{vx~klNAfSxc8wYKshs-r#%R~KpmF&&us>lYHc3UR;j*R5nswFFVA;z3m>&E& z`6|0Y?z*~iqosH)ob(65Q0|zKy^J5o@WpMpe->dTVVGkE@+;4i^Tn{b0I6Y{ z{9JL@pL3YZZWbA6p@dP;NZQ-#a~0v1z%2IbuCbUr^$f#6nG-RWDs+RtS*-IUoP4ms z2@e|q0jp0O(~BXA=xd_^2HwgVlO~qpZfTmXp-Bs?4~DvaFqoRX?n?8NEjuGo8%8yg z?fn`I6X!o>75|Yzgs8{HCgFhc@yC)fp?MV$g*)DDK5)k!IJPa6gHTI&ybnBzAaj*t!*Svz{$yiU=m%Id#Dh%LaV zWZLlwJZi(++M`+D+DaUWhDT2iETw^ly8LNVg`4?P5#8}kep_L2Y?Eu{XXT*gV`uCsss322qt+2;gPMTml*J6o5P81Jga5o30h^uII&e^{C-x2xEkcC!fG z*6tluOx_ zxh_{MRh_o#2|3NsYcjMHF9h$?01;=ttq{t443ySO_pN3KPtT*vDG^0x3()` z?4l&S%7=Hb{@i&h>my%(qZPzk{eBjAoFwrNQtEW33Y|^s@>}yx{Psvhng75ty2JjR zQ~z(S?_l@u2}J&DfBD}I`V|@d|51PU*4M9Jf2gh=kQ2>^o!GNy4{^7WlG|u?;c zeWEAFFy!aMUCJg!NIn-=&^$2sQDVY5OZ!B%FJN{$(%GYPYaq2Xz7L>@fhZN{xHrY7 zo%Fol`b-g%ZSerSwme4+4p95|1lG5frbyK4;2eiym)w2EN%%x}i}m(hzX2_?Zl&1eo5(SB_uz4nEOx2S zQFjCzASti=)!P3|9O}kD+b2J{vCi4pR=&xv0w2gIL2w!z74)D%_VP);gKSn$X5^3U zf^pN0Uta}_UY*Qen(xqt(D1N;E29~~eJ{SF>4lH;w^oO4OJTQ}k$96;Xp?t=ZfS)k zitmQ@Zdna-e{?@0DPp|)YTdJxsoZ@ljfZUAY&?)(-E{PN^p6BJ{%!7A7eIKZK~5f7 zIi5d)p+6kP9!b=YFQdUTR(Qt7=^6Iy`eg6v_Vf z$);3N63&xo3mg6Ppuf)IJ#)CnVj=dYGL+ieDCplKSo|_N^Dk7e`LgM1PtwqF5kEe; z$T??rHmDd>MYQ%mEebny5R-NGt?4lPDSW*In;u=rK<4FCK*zt^x$^m&1Jp#3^S0vK z@qq5cqyu)~n7h(W=>WYRYvXUdZfgnO(CJ7 z^w;+fb<2nrIe#6Ndl9|HBwi`bxsa|InxNcR*042y_4D0x2{1_yz~F_Y$30bA*5USG zU)De^SN`waYK9Ki528eGb3NHq+!P6u8o?Q6=jmEu#Hf!SKMreyF(}Hhd$~VAD=N^AJYnGpA=L8eQ~tRNYjfLQ?&?tn=v-+q{V~l zlIkg%p{Zmdw{1+8*gtNvbb0)A#&)Col2Khcb6@MB)*FLvlB?@ln!^r73tQXtP7nB? 
z2{iq92AnIg5q>}AR-36)OFh|bVW#zC;lXs$yQ0Z6<+>mrC>_waLLKhnqSzLJaa04Z zaXb5MXOyhNZ3f42teivRk(SEJe+hXTfACEnN#tOE!^S#)YPg|JcqpfT$w>CIw6f|i zKWdq7e6TM=eD2o ziMoy%iOWlOmK}VC(&tiJqy5hhA4ir);Yv()d_X3%$zHUmpI&JVt!@as|K#Z3HBq(tabdy($g48%;$<42GD!|Dso$L9YW8ssg9=GlJQ;7yxq5kL7baq02bb*_r ztZkwRN1|~*pn@jT`BELl57D8I<8c#iYh6vYka=PXg0(e|HUKUioPBHSman`qE56FB z=$fLcW6u=Jb>5GJ+hH?8|aZgMPIR3aQ&T53Wb8VeVUte0ZCpK<}GP^-l zc-~0|qu}ABXx-CqM#X}s0f_}-&gno?PHtPkOcDA<`uDuAZ}GaIh&-?%#R@k5GO;1F zwYBY;Qh2X%{P=Z?XNd(Yr$~+?ZDo+!_BV=maIbZm?zg|rcITUukDorh?(LnM-g`n) zZ9CrN6IemMyxs&|HzyH29=Czn;S&X2cegbymUTlC{9n$bGX$CIbIYDrx-JRBYNq|y z0Q2|LMbKh5TyGR1jf6?=$-bkbIPs!s%~?8TLj-E1_f*kz__F)#Fyir(BzSK?;wFi0XjIQuHLU{~ zHEi+6iHYH&$`Xt?>{Q*LN!h`CBv+wn@u>0YPt(H22R!5A0Ji(+% z?i>=gHD0e?pY3)>d4sB;2JB@$vX1gkH4~K^?x(c}w_jZ!8O&K-z9ZhhQCF9dRYHoW z@R4$NO&;%8nPM2UBgYDgRA`57&rHSy&`jga9T)ngZ71@uG3TE+l!P$#t6?@>wcJr4 zpG{?z^sPmOqQWYKppq|Rz)+_0ZS+i-uQ9Fl0gFOep)1ID`ae5pgWh#*b*<)GKC~-O zlaK=FAEwuZxAY&+-%<-UqJZm4*wL{gvOEJmxtBc@1)uqbgw}_)r@)2BR?W9m~|TXvvLjkMgkV#%X4mK{tNBl{rNT*m6+^)Z`tz>9vqHElZt?C^2- z6Q8UN{HcqRwTtuW8U6^AY;#2mhV-)mn=6MPl`D}CNRokL)aV)3EH_|W4<>K)rNqbNDBPYFKpLVe!=KvJYXS0df|%F4n(0W6>| z)w30% ztB9G}zTAc^p*v^d;cLPk8lu&gDcQ<>wCLUVMn-x^#Zm+lnaILPg5tf%{f z`&bylKxJ+&KP3v?;^rv~NK_&&jhLH|5Tn{ncaq7%Ob+Z_IdzWa=HgPT<1VfJkh)tl zIOgxHf|}5XZMPEpVmIRS0?Y#wZ=Ck@^vF< zlHqd?w=^ITaHJ2IAgCa0T$3suM1qTJ(*|O<6}6C`dpr??B+6D6XrAyL&lIB;Qi#xF z+HX9qn~BwdSbId_@jIt0T8~bAq}U`&5v7rqc_$CQ~d?6bm&(P0Z&0&(#-FO1I2LAbr6H^26Xq- zi@f*4Eql$ZDlMdGU9e9{DZ-`VMQeg`6+QNWy&ngrdZOFT^$NJq$1Nt9#CI{n;^kp@ znEG^8_?-g6*wPE2Bg2+NU!FCmcRw_#oYrw+pMm_@!+iy48(+!?G8$+5BQ!Mj{{Rg7 z(kgj#XIFw~Xrb{UuobOE44_J9jOb?C@#STNBF4Z_t$_ipNMDKvPv2gl=&iFD& zr;Lf)8Iw2y+;40nf56D3KxthGaP0as{D zQV8_fD3qY+Qjtp@cXieHiuZw+Nk@JvNLyI``!rPUb94<)c0(=!h3GBynY&=h(dN0P z&~v|auL;SuKlEu80@*376F%->$E|)mX*?JEtTEg_t>l|kkm#Y-v^<2rccW6w=w16) zY*mU+2H+k)A{Ovo+t3!VqY_Y6ha9KLGisJrCcot`HTfXRokL$ZykZA1R{mA4_cLXi zmha6}kM4r+(mblBcIEi-<5z5K9PI2~_Nk>yZr{G0SYBR!Dmf)Z2q6#2(PA!Nz6^{W zsn^HL8xO~vKQEvw0=yJGX`9b}`TR(FY2=QSp=Z~N!xtBcMx4_ETk2B(e%}Lr7Jbz4 z%Im*!XQ2m2rJeJ+j@_?1;Dsy);8z;)0l%`#!tdKE0U4;kiqvsd?!J7kxnI0K>lGVn z#Q0lJyEA0nBF%r*QI-CED*9q1=UEJdQev}Vi!y>cT)1$*x35pQO6Hc%`mDE-WcFn% ztIp#VVPRpwT5B4Y`iBHwY?sWmL`Ky?_mTQknbS_N!wLv2ZZeH5#2xj6j%jF|@vhCv zn)B}RRr~YLv){jeC+^quo#@Us5r}Z67xUbn;eEnhY8NLJ6e33 z868_)HCS@pEbp1ebiY6$G1az!9{`Ue@k!*BS8r9Seoma2^-Z_gsplm1%RtuuRTWC~t zqv`3q)TYdLdyM}SVkpj4RaTB0CqYJ>t|0r}J7(e<%r;!?-uEvDHT*Ox*+RSO8Y)h8 zX_&m!k-SB?%@NZBCw+(WV8@Of z3u6AgRZsUV>wO+xJ7rvMa1m5)Ikfk#yhJ(jd)a}8$~4dZ%%6K5$nstlA`lo(kqzLw z+)Y_RbS&m?q`v=7KO9a_u268b-*ei&SjMj+4I0slWjN;frtUTBbMTh6RzE3BF~2c^ zH!7KjVP>PwSp;xJRJ>j~KD)HE`z`iSgU@Jd;>>}+v~n9QHmEfGI%ki6Pg+I3nN+~n zKSyK|ic}O@^bUqOTitBI+vUs9(ChAhw&+zpc95tPh7OC<9ck$e<7@`MBqFY|!`eWHtV+8l>-J%BF)Tm& z=UA%y%=_@E{%@|Mozue&pbeo0U!&2Syng7X`;x?~uWC~Ux$aS8`x^K&buJY?i76V+ zKi<;@QgBCDuLx;R4hX%YPGFy&(?|$0!@vaxPJX52H`bnPb7H1BezR~H(2c=ZXd&l1 z#RRtF;^?}!Z+owfDQ$&!@B5Be6d4Pi5oBCQs;xQJN6I;AP($6*p~(cmV!#l+GQwUC`f z5`_tMDW&xUo&d2IKWEU_7c;68J~6WC=#e9SWAkIhe;mWuB2}G*30~|Ri}AscV-nE* z_EBOYFf6RH59;OQd-F?Mn=r!WHTh{g^o-BH{7PlA zeu3Ec;aZMv@Q-uw$;)^fi_OM_mV^uVI4Pww6M4(dF2b}Snmg?r=+OuMHW*vUxy7i{ zl+%Mck%G7HY$oY4$~-R;-kkhYbKx(uKMitv&gLnUFZSbaG~iX6)V`7rfA{JO-)#R5 zBN~u+)BBP`ikJ75?4mqmx7*(CztWv zwym^0yWSkg9~JsAQyWsPEoLEL-@>eKiM#B;&~=w#$a3r zWR1N)E1H5=E{?M0Ybo(Ng(*wPstI=xCRk00ny#aTXm0~|6i;5D*~UYS1LLAt2wcy! 
zalQ9&kefT6;;^(nN@?18dTAJ$(XwL4|Y*1@7j zmn2FAP-rypIibCFN}W@fqI|L7M03H!Jv=*X+I{`PW1TJP zS|-B<+vbI6*L7Fbcnv~b3bYjI%WtoHmdIA+$m1a0OiZA95L(;~K^lZKEY-uJIlgyL zMsceGj*XGK=%C(|p)aNj0bVU>iH?kYktg}E#Lz?-L_eJT8o|MBCx$@I_O8Mt?(>jP z25r0wQdGk6@W*qY%F%7_R^e4J@{u581e*&3(U0PkJ^43=0q%l3Qk799aYrsztiR4dxN8R4+oUy=UlK1@ zUmRxI>F4?k+aGcO4ky55N@s2M8+BuenXW*Xk?~;*K5wQN?rfbI$CRVMPnAW>G#!Y(?A#8t{3a_3&RTl&c z3?-o5{I|HB+(G5lQRVQBz&IAPEk*yljtKen&zkW})|UPqF|tnKGRy6=>^uH_dA~Xd zw;zhxO#2_8xq00Fn&Ry7Ug#oh2-pJqe8c#H)fBOEn2Y2YS_weW8PwgL5Kn&p9=VGZ zr*-t`dCOESQg^lmiVaFE|5X!-Wlc9s8$-Ye<=I8Hcqx-*VXt#R?8=oZAts2B=HQ|v zK%M_4O9Tp;G=Pec^{CsJtd+YNi|$ z%lh`!D%GY+wXY5+v4NtIfvrMr+UQ9*Iwps0(#|NmV)W<6qx(5urUDqPVN?_jy(D1kvVqDf-vZCl<_|37H zP)2Nr^<0I>eX&E+F7w#hu~8H)?)>+PGM@uPv-?`OZCw zu(2X%o7x66MG^>GX_}i40{LK<()5=^Zx8baeDPgg8XHv$pL*HLViA;mk?X=DlawZ& zp?OxNI|X?45GyTcA^TKZLWkDNb=2XdM2XQb{=V?4YcSh;k+OGCTOtyo<$Bfy{IG>$ z_u^WEH>bkW`rOJ3AkNnH;r4cRfS8|`e35;{Ah<77HqHCK%oqH&8E+k}pv(b-;CiM) z?xR<#klF6+nW4+v!LCUP_S-5%!b=eEyt^G-Ek-^jv9p}?Iy<_)IZ+I6<#GE%Wo7^1 z%36qFXM|x|yf+z093y?^rn2>EDRW$34|el#1Hw_Pb#-wv=iR$^g&vNs9ac*R zC*n7Y_v0XrzEn?^(+L=<5STC4qv0{3sE$yk!DWgdYQy7IT^@i{VTTpv={L=PlV@`A zul(8XICaV{w8#lFujQE6o%C$K4ZVT|E6^}G0wH?Z}^+4Ay1{xb(@vdUe+R{duV zl6rA@(Ob^kjTs|R`klGt{Xp;J#+GO9*51!kB1mmMKt&J;zA7ernm((0&J|nb?b+(% zpji=OVV8Nh2rL(Q1`UZ&W08BBXG(TzUzs7F%pptCwi(3*nc@yxxAnb-ayaE{tkh z)2aeB+$)+L!evxTJ^hP9lFB#^vY>#rzK$n^qp9w0`_3#y_6>7k8`M~Cf5cPGU2_=z zOsLv!vBKjUyx~tBpAx_5KtT_r6hoK|JpJKQoO;}dhjp9w>{>yl=@#aumJgZ(^nt~c zy??~xDP$Q|{Dx)!M_OOl;H=-c&tYtJ@3#|ZSC^tD`t$eb8>p;dIUFz4TRe8DJsBwX zKr$jb5?IaxyW;wN1GX6S0MP3_uG^QHxp)w;LiPoVUBz$^ z_QhU3iWUNH3ojikJ60i>N55JRZXVs-e6}O4$@+_^1GlDpsIv!L1M~%~6)^`s-jlHP z|5yeXFu29{uUO>DmjYw?3AJW+G-=ctJK5rA% zcat<#sF%MiV^vW;T~s73uYWV4!d4$PWiSSyAZX*jE{96$1wH*(&- z@b4||1;^t*zPw_L&$?u*sTLA&u*jHn$n|i>hlMpl?$naNvv#TA#`s77ZBG`Hh@C~X zC!nQI1~6JMIiLs52IDpfkaDJ!dkmQFHkyPKmeBUggvXA386-qx?`Jg;wqjm*x9DLS zuDd@cVD60p027i9M*hNV+@@HAmLm4%>3O2&h?j7u_tuX*1+)#kEK*4!ZLQ=m%aCAx z$m~dqysqE#^k}t?ORy%fnTs?mnUd~UH!URh0OPg!LSec)uCrX2Wl<70#PqH?OM^IT z3tgtU1(*3dhbu|MiYUz7Kt>w>z+bgcR<6@+g2ax2=k`ufSDq6*`w06ybR;z~V5sx! zeSeIPG1WBb4BUNEz*>M@&%Ke*;@*#l1*Sn;{h_Jo21z|O>2{gVd5pe-Z%eYU zGf-v^Ztu@K`m7<@0Hpb$+2x7emPYp_Z@mFX=BnK%2+D>1XPpYU#BH<_A+*`G8bgV^ z!CC}wIygp0vRI85M=TV4Z)otx~wlS_m=z%q| z7D%`+z!p8un|nL>GZ7`?dWzt-o88Y3<-cyA1>@>lXctr5oiaY^{@f3;FLJOiJ_N8Y zj_~$a`!rl-KTagZc$^pj37H@XP`BOmG&H^3ze2~=)nfj3>%Uw>ztC=!*Hmp8aVEC5 zaaY5x3dYf&DH}PE&Um?YBrMfv z$Mn>^si%g!<=>7tG$;miY{M$?#TUo0vADu$*pkhT^efTPwS62XZ*Om){loA%QEfM! 
z2THUD5oMr601Mu+DpBF$;$Yc6{K@HgKHj^fyg3P(XVBQ$-YP^}LPd4(NQJbz`(!Sg z7Cx5jI7NkE!$sZkQbk(_Yr|O?&ACX%6u&)klO-)M9_lI)5oN7Qtt6#XaBk6*rGxAp zDCK5cr3jN~!pW`WT{rn(@izfeMFp3!TeHMwrV#_>-W@3hf)b$APqFy~FT5g)SG*cy zq1$HgH^Kp#NZ#YNZ*OVK4qowzS*wIC-T47hP%#S_bO^TdS~iYJ15huVhK5YzRhqpyWVITYhoRI+Xn0UXI& z4M7kpH%SKG`$j~tuQqAA*$Bi%i}aAG{&xza>9Fu6Eq*QRFkj*dKsr%tc(=9U?_MU0=ujW!#_91gztuCbu`-D=@T19#v7~TC{mtb2Z5;TOt*wnh^-WsfaX7k8MsUJ^=KGb$hp2ByU8-Q5adc7uN*V=QU( z^4;{uwNC6(VX;*xWN1!J8%O#I`QzHeWJe-A8wow>W@kn0_Fh2=@W89(YD8r=@E+F$3tw1ol?q;Jd_ zpTtK!j6kc9aSy;i(Mofoa_mHBSN-xWBR(;Y+iIYy08*ZHdZm!ws_9aB$tYs_s_?#z&fNzOj66rJFP=Yt+`zzD z*sZ%NOb3!PGfRZD;M}}CwWCMBIQt0cz(f9b8e~LnMMcxEvnwmF8zZ>={RQ^Psj2zGq6q`%4BNOQDH&&^i;MU{gf!v| zWs8-S6(J2P?Eb@t+u(ooC`O>(@i^u6*$2^V9A)S=Q&vF%^YrP{!sZM~HxDgCL+Ww# zLi;52hMnCHD}+UGa4@M-vm!k0rR?=H($Xqc-POUPy3m0Wk_S#-6VPrD7pP2qPLC zaad$z+B+(V_mlC<2Ptol7!8cNPqzX+AKr@i*ws2L^|=tLVbZpkdR`>@F zK9W`qTU!qHEnVwTGAuPq7aI~COi~CeRn|Fq@+8orDOQl7tiknqhjde@S2l~cRaH59 z)L>M|yjHy}elSefLGtfN(fq}0{?*~UCn2kL=#V+eFubsqs2NxeIWuc$Yh_hbJFM9x zG&(c>?Cd8gmb&}OW1|Pm#Xj`<^(uOV0RQnNZdj&RhsIS0<4ZD+HJp4m>8n@s(zcX&`b!;I zob0Gd5bF1hE zy7$?2fw2qeGY7i$aQ9-`on8$_XfJi@bX6ZGVDqT;dIOucy6KoYc#^lKOJ(e;$NFnJ zIGv1|_ZoMi3i^eG{LoP|TED}*x+l6169u`~Bs^xJy$$l*_@>+KmG!i_#)sg0B#bqPQW7(?u(h*(vVM>MBiZW7^@I243y) zL|-Xw7xlyRIye^W|2cD}swE-FVPIk(QGA!1Az&sxPZ}Jx(*_qZb|q2Kh>Ws;lxYQ~ zv3NoDu6IOk+vA+FV841>(hqffI)gH`cHRfN3K{549O8GYU+l~%JD(q)9|i2_j&N4? z3LPhg$9g$G_?SrA8`Y134@X_NXLg!&IO)v0^nluLsLj;It6%e<0?J^vWiHZ|e)7La zLMUoH8LHB)boWZEH@an2yu1+)dL=em?-N`sH$7GFa0zV3xRV&JiG?^X@iI=_Oz3<3 z2p00<$L6nq!4E7ipe!InwQ8EAQO4D(r&z zCz@Kx-|{S)%%4+l;e=Z137@g7BT&sfTrS;m)q5l!GE!J~-28%DEy%56nE& z)^iI&+6$SqXB*groZ2rW4$uv<*yU$@gt!lJ1sY{*p?hA*n^sEQ=@%`tTZH8jpBo`m)!?f*9vzN0dXUHc<=B|%ff3`kx!Ji3$CHfZ_`?W} z&?oFN6-?Tqe2&C(-{mf-BeW5_XJ@~E3fMvH&oBrL8}v@C2{rTcZ$Hzj6Nh*vPg4*? 
z30~3*DpJQ9$PL@hwq+V4`g1Ny!Aqianetmi`b(TQ!?@?}8XFix$Qt=8kJHXQ8}uIa*Zz`%x(dYhb)Cx~2YNb|4CotHwYN=K8d9t}t&pP>{bEw2GdW}#mWFgn2aG|;4*5E1R zQ?O}=Y|mP z#1)AZds=tOJSNpWc5rZ4noX*fYX-Brudk2(+CGYRBC{iA{EI7Zt<(RHD$Rm*@WgdQT6OxlHoolDcnF$enL>=rvT_XQ7k(0IJYa z^hZteX6SXq^n-}BlWJ=1{^0z)c#|X-BD`@y5k57I6OxQ!3_Jn#YS%Ple6I!$&OJOg zFX0RB>^?;Nzu5cEXgJ%pT_I9prjZI?v-gj^lbRr(-_m1$sNu((JF2<`Pq@Y8qCt z#NuLpZ>lWIQ{w)%?v~N|d90&>SRb8!Goo0a=H^zRqoXq|v37bl)~s3*m2w+lnUyt8SEzeKRRsC)?xu)pRU+%gyAzCbznW~BRx7#nN&BGznIqGen?w8 z9gm0q*%n?kNo(0~1C#b;a=apZn%`&^V*cSVu%%PPW_{Yy72GF)W9eb_?mobfZ%?UC zweo5fG^*?JHJT_HX*u05rIGa3M2MFF)mOc{dT2o5(RzMjVnFQ+l_U-;|}sZ zE1T4%&4snK;@qdtHm?nwCH0P;pd|5slHqHOoAnH|R&%SutCP&*G{GFOrmv~*|Jdt5 zj1@W{jM$gbYnVko9ViB-E|?W%6(5uZ(z3Eri&T%wJcd+0$A-2XRh_F(j$*txrc(nG z4a;PIhSaV*8a^f`swZ~(WE^?~_u!Jy+({#*p`%Gs8%Lw1k%We}a0%y@q4}!Ht>Ddb z1&m6l12_fHtVxAw#Q`)9jgew5;M|a(Nd*JvMq4ftS?>ONxR8J^i92c8QkXuHn0;Ow# zjr;JaSxJBeSHsQKH0Do^I|c+uMpZrQnjw^c?nVzlwWe|T{x zqaOo@20Q!H3e8*sIQ=;6C4V$LhgctioAAL+HDg;cg4Qvfg?KBxeaGZ%sp#Wyn*Gzy zvPU^f*=mBG85jU&J=>k_>kxHb>yX9aID8KitghEOlM~b)8@OMdRiE#aJ9Eviqli^E zPAvyKi|P$#Avk*9^Q<0S>M0((yu?W77go(=&~Q$w7m?C;CU)r~6gZ0{EWkXC0pF!A zH^JIVB9*9_p(7&YU-G`ESQYMs6HDsWkJ7z+ZsDt{1@J7Gt?>Z$F-7+kNXPLr(TmFz zBfIQ)|BEHgnLV;rb6~cQ+gGQPk~uDx&>zac?_^4%@^*M>t0s&z9zDwHZJHyW3E0q| zVUP%Od;Er0Zfm-xwPCwyqdjR#Ccb96gca{;)N12y!hTFc`IbN4V)L}=1dD7rOB9ki zN^Hs}m^z+*KI*WGJkm3L^axScQcKHsODf;o=gilZ2#BSYcta@g!H)YI4}KDVm6U$Q zxzAQvmKFt-TKgu}WF)Wn0-wRuXDVQ$M+^a(x}>U+u)sg;fM` zcAFi2M316gC&?m$nq_HakR-V?J4MZL2STEL?GlM5zx-FYA33^(v|pNT6A{HIJwa1E zQ+u`;BULzijO&v%uIj1(i;El9g|XM`VfXD}GfWlO9Y1Js#p)N%j*a_IHJ1SbvMo6e zh-SsIssr(l*;qMXst$%*PVG_+Z8k`|0LlPNJ~Gg;^UJ{_I9{KgueKM9rLI>=^aiR@ zqE&f52<_5ypgtF16;x)JHlM+zmb99NJ3+dK+Jyo;fZJsz_3PKK2)B}vT;81meJqb$c3=GEOTvuAh4q&m{NKuWzF%$Kp^d4hBQ=_|?!Uqe} z$Fdz}yPt)&hmg@6qaQUXgGlltnNMr_95ZK{AMZ=!HCsrhyuNpjbnO-)az3`X57_~T zzTuLQ0(Z^xn)>W73R~z=kyuk@uO^>^!|l#umtiZgulLsUk6y~o>LY;ZAU*`hR;p&P znB)`QpFl*L01<8Y9!>n^$TQ`9tC?gCwVGcxcJ?G^>_~IAL&G(VyGzebc4()$?5>5N z@B56!?xXD(O*7ORmeog zKz|)n7KfCG<~skH7MzKJ#4wsf|4Jjlf-Y(M8otZPTc`cw_8 zLy?M=Cfr{Wd`Yy0ZafM>NDY=gM!1KKWayAP3@fVPN zLeV=$^2wg2a*kZ+;mFM~Gw<|DIP2zG*ZQs}TibeLjDNJc+~HhkFHJU$sA-_DX>qzM zHgtUu=)END7et(Pc)WR2vEMG4wR!dA+78clzPk0$%W-6X`Sh?#A+o+ig70HN^K`@V z=49!XvI=YoazKG&H!FirRyQq!1!$|undlEq$#;33GktpI$Da>XXVkFqeXJy}Uq5_hse92R2NlxyTxOG|PE8+Ig-Lhl(pFQ_ zzo^{(c2GkT0`7BcnecVkY%R-Z@~7PI&tx0BKyew`G}`N2^zN2V#>8n^)%*ZL{pqz^ z!iu+U-P*owl$MUCxb%n1ipJRBc?;&$!(vBGO${+LOpzM9FQvgxpuy_uCKZ*NmJW0x zXBGt?sfn*3nzx5hjrUvt_To|$#YF7ohSuVDHO(`3j4W*SU;X9W&_vurxtcH^Yg>k0 zLn8N^M9#yp1%I!jIrj9u-|oJI22IMIk(0=l{gZ})gH)vlfy9g)HKCU2lo~)iriqxK zy?NX?3u{bsKOR__DZA{{X@z$NdqxG0r3StA6+1mp%+9YS6h)4;B-no=X7%o{|5vyv z1v-y}K}x)mv+H~`cYMb@_vfJdMl`L3_*7~Wr=yG|X6VDb>Q{ZEz4^s=)?c9@h#(|NQ{%o-TF0~R*nILD`fu0`^h*m58{|W_w*iGV0x6o&8(z`ZmYuaT zT^THE66l+yQAqb;a-oo(hzYP(x~Z}m1O;AVYrQg!%t!j)iq&@)7Zucy@k3`6| zW*Y6tg5EQe?Ewgw#8iD!SE--`p2hoWYUJ4E`Y{%H(?>+jRxn!SOQr*<^8RU+H%@E zVc9@xlV;NOw@e{fTw@ImQsB0&&uW}2psB`h-PE{3@>QERVj2T_@A3v7y0CL_bmAN*Y^Aqt6&ien={DAMDsjMAOX&vYEB%UUP8E|1WaeYx&mKR_k0UIZW1ZM?VKNN~D>?|gsyz5*E!sQn%0 zWtL1%!>9PJ9AqHQ@GV=s-P;-=kp!#0YSIB|)+1=oSJT%|Q|I!IL-fs3=fZVzXCl0e zK%nv=$CWnQh|7#-KC`sU#y1%?FNcC%=YFM1m2sCha#t1#m8t=r7SVDq$-zLQ)^mF+ z+P~8@VtHQX`wC;iq{J530v47zibyi$fXc>J&F9gf&%9Q;9j$2Nf3mD?EasWN^xX zR=e$hu99Hv<;IiNB1#>ItTnFH0q#?!u-RbP!xYTE92`H5g%s}Y_?4n+*T3aEjK&`g z?{b_O4EY5<$vi$xo~i1eP7*ZIP1L-_w|MGuN-SidIS9rz%obArG3S4z2;Xe(D$&M+ zqkVsVLi(o^6!6%l5SqbYPs6nHC|Lla1l~z(rYhzdi=VU%suEbY2BHM}YvBy`rNK;n z{~iUpj~CWe)r*?eQ5h`$3uYL1SmQ zeA%T&(Vy%X(f!j)CHQ7kmSd(7eUeuTtIvs%s_{3gNjjmI4ep)SRz8k0?d_+&H>uux 
zQTbARO%>-Gl;|wKQ6W1Nbh1#NRtaDGS6omIr7VZ4l@7J>wX8}{*lB{zWcP~d7gKkE zYQ<@$L9B6&>e$t$$N5$%TL9}O$`r5i9M5avZ#fZE>k~HD^PTT4Aw%I{wjql=^LEi1 zZ^il)?a6G2CThxC>>zY-@{IO`|A2TdE7Odchz0voN&QrDfK|5z9U6taC+s0>WMT;u zje(A#V~TMEjLqEf2-6&U3_ZwiTCEB9r2f*$SZ69EqbpeBDz-?Bk`l)DhwX zb&oLjt~J_R=kKAfby8;;H1vWjYBL`+DP^V}(EuP(u-lkCq~TWwD+>0K0qxz*s zRpoc==KQB*iV61>)o}FD*A82B&DqZvK9w)lUTu@`+VpN|chCSz&ul6bRGpC?*>XHm zs^NRRqnEXmSDV#;B`&=)8l7@Fz7Q)lMs3t5clL|cbJM`v{HH)50PC-h7Kc=8q)Xc4 zn(H$~&xL>)-cyfDoe=oWQKgmXY`L@b8)B3_-!n`iVgWphrDL*=kRg6;QKpskTSjVi z{_8a3eM0kAj1(fDc1u|k9#$LDxIE`ru*NGq)eNS5F;x`XR65dF&?}4kHegj4)IU+p zemH~Im_Ro5ngI?6<8D@As$TAJP1n(b7xtb9K?ikPHdB0xJL4bR4-{(&p@XU7^(%lx zePgQUv^!F$glJ)Gpf!hC!kGv*{#5YXnjA?v!v~W#rtkOx88QlOwJ~blf>%Z^J@T|K zly)%ayrofjE!o8FUBgB7wSy^V@B9;f94csgs%i)D(U4d{mzOg-d`tyez@-dgiG;s7 zwv&8f#qJyZ$jHR8n60dJCZWVX2)ZA*Zn-g>plLYkgzMoG5uRPy8{hd%x=`J>`EY{P zop1BcOe%Ch*Y1Y-@$tt;yGQfMPLf+^-x`{mY_3Ny@s8{=T8*WcIHC*gg>^T5mY8WT`lF(|2+RX4qF=KP1+cN zmMx8$`GyI8lo;Y|bQXj^Xdwc!ebB?V$i03SipxZfi}$^naBHxQOHpyn)PGfNJmL#>dyD@3c_*Q<6u2}bKu5_Y}IKA&EirlQ#bJ)zS6km zU5KOa+-dsN6B-j-Amk2p1fVE~fyN#NeNkL#YLf17xI0OxJkCmL=C0TY&3b$qxIUKH zIWAf4WSRj)-6lhs~u45c&wdq;4y!YPZLTIyX*!cN0;j#@hvxIjzAhV3NA ze*1Q6JR6>dRkYUKw&PVd^e$S!gd(f3^IRRSV zZP`Tt|3#@~`s=TETun#FPO_79gVS+B6Q*>Mo3+1r_~}zY*M$jWLXAIP$pR_5C^z@3 z`XhHD*8D8n*r~2P9riJ){$Gf}5g@Ha80l0o%JyZ*O!>NH8`EfZ>e-TRGw~bD^=9$P zocwHy27RKoHhY8~3wWQ;MTJd^C$#bPr!ai+mmA&uUQ=DhaV|3kM9uZE+{@y2gzUm9xS~kT|PmVN=z+M^Z%aT7{FCF zahKjPi}gsiCsi_2c=X2+FlyL4T!?MpwF9Xo@ejPddKMJRf^E2T1(RO4+&d)j?RlV3 zRpOhwlSV<-nm^=D_lAHKjiA&EKB&yBbGHqbH(mfQH-GoPQiR^b)!>4RBbkL{xncn% zW}`>|cD{<(lPzx8otSN=(&5^t-hv+Ep4RG7;gb()0;aZ|M~OiV{6?5>SADYW4c~8^ z9plHI;+j&TL)FxX6MAiKIsMICtia@ z4}ZVBrUDdM1Deq+ZJkmP-h@^&^K|me=etiQV0TSLJ1qu-rioXaQbH4qXVN2`fjxvd$k&W2&?1qmMNfz7OY?8 zT=`La#bInSM6?nnCoC*;`or;C=k) zQOMAX0?OrLMDHbko}si7*lgEcYFi3JzSs8jM7o1as~IqjO|5$RsmH+hCljKAJ2rKm z_bT7@YVMTc^?I3i@GymtO__Rc{PxLiMQg^j^5EV;;+`wHLv=B)BoPyQaFDwMFp#PM zUAk>q&}9Iv%b32Z2UHS+IkB?q(vK)Gm%Z9#ythrbUD%(QhpMM5k9>W#H~YcwfB`p< zqwEPt?YQ4MYkE(`b^u(AI+z&Z)Ms_3p=L<>wG97r-wYYvjEszJ>8ec{CaucWp!b(z z0Ji$Hz|lktWzm);?ahlZP4;-y*LBT%lh1MAGa{q_G1HI+U^ZcCTHN26iYjA00VrY1 zm_7;&GyKoYo$=ZGZ2WXF(2RA`}M$Aq7uB2=9(_LYNuVq}Zi|yS5;2=avJ2||ADsJrGDs(4@p|Zjy zXf2Gg7y9WomHL)}MLnbF)X>LKWK~t7BjpyfR>pZ&I={b*3c1B$qNW>N16o_j@Gt3z zy0sv}tRf(aVP=}Eeq|Nnyxg6`()G#^`VxLYfO>JP{T1wh?2&P86555|r3`O- zlp%8>|D47)^dLcVD6LIP+)prB%}#bE9Ri_m;anfOaZK2 z(x@=BSs%>}&n#)7MsmWyI6q_PA}33S5!E4atYy3R0=l#bM}Hx!1XTizyHQ3W{WhE5 z?XdjFPADfMt5eMlFZe&-@+%GIDD`ThBs9HYL$8r4%SX{8dq*RsK}!s|mqLv}GT zfJ)xjwR%ziAjphE&MXr%`+RM7j`4}#BkAK}Q5^ISfNX-Auyy!-SyIfw7he_R59 z8$Gi2r5gTC7Mr~DkKrKjoB!QX_y3&t^}lM~`~Uyb-*4p6{|yWA+Ka=^|FowDW3%Pw zy`8JfL3&f!9f0wVb*f+5AP(C)c?|w7oCiMhOCn}U-q_14rrCXp&2I-~JDu=>zwBIO z@2z2U*lKxtcrdlMh$9t#c|~q3@lV)kC3~^jsi~-pI&;)&iUl0_iM5y3)*3iBJ=FYg zFUosa;EukQL4jbp6z{!zPlo!^k+fn?Zb|?ASF$RJ4I;ouCCNl{q@jj~b;Zu_ruqBJ zuCWr-Z=8!R(m!cE5G9$>Oj_1A&>A zM@+15ZK!APUmt?(Wj*HjA72tOvi{qX0iWRQ5sZLy(7(QVF9CIhXTA|L_&@KjdEH?3 zuU{Y|i~e7~z~+C0Hu`Vt5wiaWEIR(La1i;R<%24Xm;QX&(my7=h> z>#ztRAstJla!b8+r>)%Kmc~_$s{j0plW!v;RIP27c*OW{C>iL-LAJ)zz$Egu^qg>-yyv3YfaHy!n^_5rffp+=3U#dT8-l5*9?nK|^ygQA~2>kbFf?{#T zS(`QHL*qF`(+s>~(okdJaFAoa9gVE_tNQ)TO~DH<58&ICr68|%6Pg~}8d5|f4-(4% z#Mh!66tPFc^jBnro}n%RWVxVsgI=ipE3dSmMjX@a$XNYn3XPT@=n!Ov0hX`gFJMVPiZNu`cd!Q~IW070_fk7ikK%R*s|84C> zCGPR-r60d4wsW`h{rR2awwkJ{*3AnQ?;rh(d@lcCqQWW7tC<+lf%k0zz4yAqsmHN@ zE8l4d30qVQ{t(9sbs7xuWYsNq|4`3?%i6Z?O!u(rNkOaMDvrNkHSE0Ov(Gx%33&6v zm7TmYm`=T8B#YBZ-6LpgQEoe8>Wx}Mwmlw@{-hiKGPrT+nDzf9pv3P7Y1^r8;2k7> zy#Qj7Rml;Q&}5UctwrTaxk?+SC7~gs9khu)&(kg~ZtE0l96Z9v39I`zT+=`KIl%JJ 
zs)CMg9=JW=x*tmUmZ8@@GJX%pI24#U{v^R+Vj;RFe-6W9r z_HAo+;|1hXW2=UZ!)RGS?w*i?6lz&xyvjENH0{2=R^7Z0JE(Z%Th zlM#lCii=I$Nxxpz7YC{}sm9cz1|y;dBDxU8_G2cwKyk1Ml8C;qote?X;ftDw!6fw$s^(hOFirdZwQHN23fG%9SQc^;Sr;> zrTC{Nbf|LOx7kYBE$rAlmDetG87%I@$OLv4ZD8ENZYM6>=&E9qT2|xGWCtaimt1HgO4y70{}a4(;$K)i#>4%GSB}Y~(97w}d^s zJv=cDyp_v(05i=?a7rE?!Lhn zX@36`2%|J%gXXBecgxGlJN*a5G-s6YSP2Vw&^&BrIWu7WmM!sMs(B+f7$~#0PunTF zdU}S!9t!m*JDngZ|CZB`iL>(i`p508w3Ku&Y)muQt2xqI(;z-QVmA9Ud+J|{g6nHr zyn}Wd=S~Z-CSsh6E-m8$-}<53@173##t*6-ESC?Xo*f-u zfB`q8Jy(qIqCu3zRhDSgWTemN&gB9H8n(B0Kr{GEZ3`Ft3G@jHI19C_C9O5C4I!3D zK^E1r{-@mgIA-(FVcrkJdG6dYJF5%z#6rH|2IejB?|P))jFCAJu!~pZYR_wKl@k=? z*EMuaQB_mZh3XW#u0*?!U7F->xHfybLcNZ0I^zY4LOr^57e9WNbRG?JQa#34VV$;2 z8WolrM2oQHTkCp$!>02!5+(T>d+aDdt4{m9;Q=Sm%L5xi%qj6_;@Gh5)a7-Zpo-Cr zoohLXKHEyE9s>+wlm!!p`Z+xdl#?ZOJ8n&T>$kb*nCR(qz_2kGDTJbaGd^+}{JXaN zoKmoN%E+8lSWx!us#Bto7FH}F5wLQ~6Jp>e-xSZs?mIZ^Tgy0F~2j(!+a^BIczg2gh}JQBcN*Zz@N zb4UL*{W!?;m7&H>-C9Pv^q223|@^FhaU}1cRGaVAp z9~g#9O=tzNNozc|eU`JvgHhSd$jtd@?F_R!(-2cKN|S!d)pidyoyg1s7MM_P&pK7E z`fIi*b|^kQ-;5QMueetKYTyOdTO%2v`RKYmnu*u--Fyx}3&R;U5>z7dfqK zuTsOiF1>3Rogv+&et~7Xm>T1&t~&cOWg{K)`QBew|Gr5kRmaRJB!)vGYtzaQ)P>bg z8JVsi(?_*jN~WyrQi}+*Dymwdwefv~zF$ciE&9xdG~Q1ADt(|f3DsPQ&!^T z1Publ0*W39obcI>2Llli2Q&|~BfL5vxIT#3&r_Dd228`jI4*ssjvOJ%LWc1>*sBpa-w}Et)v;dqfu7d`LM#MMN+=eixD5pSRBXuNn8kxhTavFfhcvf${uCG0Q^5Ax)b z`hippv(TRGcYU4xU+EbGVP}1A}y~@I0bx`DLu!_m`;}4p*X$ay0P?9!zk%a(|LkL0LRkat*nxT4*TKUI3V*ayDa{EfIh_E z_MB+c3RJyThdMLLPxZ+{_NA^N$<*^_F8|$I+Qu$lLnQJjaC6cnu%;Be`uG=PCl^Xu z)=>2+^o35L+3GTl<&r4s&FFvvLtC5HuYVkfc@Gumt?bBp3XJ+ z$Xuf-6}6P%Nh&>4kpSBpP{7ZdG&0`V^&a?D!3!?;n0kHn+as5GcgI=8+8vUSc4ME{ zSR^*6h)|_Uhux5BMm}imZ|}mF2guqo#iKe(P#2$$eNaf3+b#ptpy1~`xzcg?_TmY6t&w6X3?v~ zs>h9f&CF?QQZCq74FP=BT_FQT6|qZe?FP-HCgtF821##K++uAw2!HMFXgJiZ^PF2zDKZv@UTVD$exC?J5WgCQ9C2F(}v zCAGr_R6sFREVe2-mJ!)HI*e(l*{?CWT;5M1;m%K0b_FO>Rg{$SWIsMMrz}cxn9K51 zMSt^7eyYqFUzM}LlNXg<5VsNo%=wEOE*4BFOC8uRPzx3HZCkI;iy9lD?hC9*n6e?C z4b|>;NZ4OdhcT57g;-+~%Dg{ZTHMlgv`%SI1VL&?^R7WBUTRC)?p)kwod%kNJN=C- zD?FA<{wyi(J0VHw1EwA?zWS!_z2i$n!~^?QAoLsvv4ce5GSEMBb$NM}etbvu0{PDf z$ki-lEPNY^F)qNh_7YWFakdqkq1Ay~00{m~h(*avFGq z?zxHj`C~!Eu1h?%Mj=|(=QNN}FhJQVHq()*hu45z*7UKEw7Cq?!xJ(9U3{@(zu(9O zGHKBpMm`&wExDU;Ul_7r^9)*y5Bfvg7;WCHeLQ=X8NO*)yx7GF6nTU2 z2+6)igBs?X zNW?xmX1XbkmGUxapO66cd{5e`WQ+q&!EBRu<`qyKzq$QVU4%~9aaVXb`H#V}r;Nv! zSd+t*3izeFQUnRnSWZs88{26I(eUQM<+}lRx*HbS(%;2OzwzL%uQhn(Fx+5Uh-wS8 zzcJ8U?kqW|5_^DVNAcL+&MPpWK4|9w-P3u{>w%e{x4L=)E(h`i<5BO2;-D`Oe54de!E!rQFg18!Cgo12#VXCxCnhEv+y*3;=g74%Sn$=pM$Uf#9mEIVTU8 z?UwiT8+he6e&07V8TE>r^eLWemtny66Ue>t^znC;oGdG-n>eE)*<^#D+(0}>={wuE0ls*6E;qIOG649Bgb zV(^;&kZ+fUOZ1v|XmB$gVzH~y-$(wE>1U*_wPFJ3Zcbn*Y*w$1f|;LxafUZL&%gn&NV~q?;Vbsn3Tq`?TY!2A*H_<1XFbjMy#? 
z4SZ5$wd`Z9fz`c9%Zy+w_H7NtH=?pGLwUZ_RKE!0wj27qJ96CU4t_Cwhnr?AnQ=l8 zXe272rCM)J(=@JPQWu(cE&MyPB5q4QY0KBt>g>I>P4{Wb_@I~0S^$yX9jXT0;C%K| z>3{B>N1P0V!G)2dxlx*{U=w2kIj zRrR!?pv0u3PSJ#^T9Y5WwYon<6Ows~8%^REts@F};GrQcE{WgBgKmJX5-yVvsliZe zU-(;|d}Jn2o;I=sN4Ryy#l^E8R({H)Bo~;oZEd+?enw}pu=><+D5E+yP0|H=b6F}F z@fj9RWl5*Ul9n5B5lf0^Si`PZLVlm7!y31`RzD)Y=K2s4$x^%GYnb_9M|AtMHAbdg zfdnBzJ_FpObpqX!=#lr|Q@U2)uM0l%|JD0Yh|h0JA^O|IJn|4G&SY!$>8gH@mL;d} z_JjkzgkrMM1%?}=L^h{mTf~uf$d?Z!)Be;jP9o`@iP3|(e5C1(`rFz zX8dEPLJUygiTqMf&jb&tUr?YuZ?pUFC2+`onr?8`Q^Y+|C7ub*4Bt6)3Iw?c@Fcq% z-R})|GiJJPe(TwDL1~2_-?=H=LRuxbudh%p)16x7Yq7zN)P}{SZp5t)i}Sg`{?Pks zJjZVs#|yRM(R;UsQeAsrvKNC=efrkj+}G>QPyL!#WjxkWA(bz3 z`u)s?Kb_1(PpcQ+C45Id|9)zegHw;$0zZET>Olf?V(u4%;JF;(R`Lqohzb^46fdyL zn;_vs!)7J{QOBwnN`YI4eAKbJ_5r{ZKjlakbnHW5d6KeoqdbBk(djyomW5Vsh z@H%RSCZH9thI>FxMLP)EPNZj^><^7{?!~x3Zgep<9+X5Rb&u8N0QI`Px9(-O+Y*pA zp^Pji8L2vy{EWu!zR}4eHQh#wL_fBPVnJ)}X|5nIGqzPx6O|lc@ULuC#M>)qn-bT9 ziGv>+aQ_AM{fWB8gw!dlR1|c+j@f=t2{O&_3I2Bic#W89DTvryP(5mEVPyIO{tUu) zO;5Txp(Sv0!=H9vxr=&*y7yiO!b$em3v}&^;5)r*&Vmml?(nQ!TBW0#(NCW-0=Xoy zrWY%~9E5t&4-yiJKPKq58%00Fy`O9)=dh26&G*$N`<%inz&kWQhJy^Gari(Eu&qvgx~FaxBUlLXxDHK zi@toXe=^Go3OKN6H#-Q9Xj=CZ+vm6L!TMehXO(&+S(=0ho;0?(Aep~b*YWnP2Zejc zqdFFHkJ;;tTg^YaF;C~ZQyrpIbSymLRS`ge8}U@dlic=~1|-^Eb?qZECVh~Reo6#K zP4;rMEO0>(9_*Y&SA*{FKS_5g#H|l!f!OY=%5+2x{zl+Ll50pu@!6vSg9R@qSy>nS zi|kI6Qhi3AIn?E}2V$%;s|5`Te+m?V55L}P*Uuo{!69pByVhk)09xqKN8TJDiMqN~ zxK$m7NvKYgttQA`6#LKa_fz2SpjR;4rl>&$j6u0bdw(`1r!6A0|NNpB$zb@tsNmIY zC8lLi_8ntw(OyE&@gTyUb;=EQ-R4vttl+B~)in>%O?tn{@w)|^SbNxo6hPl^`ShK_ zw(bX$xSFn+?kg&*t)}>NBum?ec!tFI!b6Nmer)A%jD1%bi|xf;GBR5feWyES9Zk@y zb4q!_W}g4;C8^GM-f!I}b71|xB{wLI=fpCq=_3!SVuA1HPDE0{sizznA+KU~ve?DL)HqyrqokBcNWRW&9y+o_C!k#{y1xamS5TBW^ zk=5GHCM5W}pg0$4rN(_R&3z2}4JU3iH=k^oVl>YoYWZ*2`hBZjpElYvlzp(wKFU!9 z{k6-|vw;66P2KjmnC{|H$5C%>&WGi+(x1SpWEbrduJgkrd8e)-xC;A$0MOLQ8_89B z2Gw;doFCLlco9;0ZEby=%XDBsdzc0h$fVWj*nH|Ai5 z3v}#~NMt+HT5CQgtw%7T?rNjDBGmOB1*C%FrFb}HOn3CC3jyYS&GA?_-5a-x2F0lnTn#`|!}TKf7>2OU z1+@Wd@~6usGI=%N>a5TM0i*0|Qu+nI>*y~11OP?k+Gj11U3gMRdf&WFr|Z3GmjrDT zSLRJi${3WMT*p{MNDze8n>`8v{|uS)Vb5$H(9gM}bs{T<%SF^4fPm}&O2qk9-`s^H zu%#itycS91%Hsnyh0aTQ=^EAau21Jw3QOkJ!*bAmq&#n32U_Oa=L3(+cXcqZIge#I zP)w%fyoufDpKa@s^f3rijxF39zoE3uu5U7XHIE@vt8>t?z3^;#HIf0<2E4|1Di90H z`Yy;IV_a(1YmM}T(W(!Ugw^c5YrNZ&R~U^);X^Ssv(U_Rwc$Kzta@US_f7OyccH=D zNMtSV=ZUc2$=%+pGH$o!fHrUwHACDhV365Gv1_oLKaQEUMR`)=uZ)N>{&UDG(Eu3l z#8tO@3=OciB}zNiK6%!wtJ5_xGWdDg)?Du}l~zR3KPQb79^YN&WXLMrKhznO|I z!nW>qAWnPn^No7Zqewp(EMu1B#-YCPcqk5dAOY*62CS#Y8t49aUs<6;p6WDM=J)a=vlui>~M79k$8}_@f$CW z;j2ZryblXhHE-+r#Ee8qmAojrr5}&>Nvs7N?eFueDi@}rp6M&O)n`k3cmUse6Qee= z&(9jTNXL2i{?i}-`>eUE27e4*2rTav=-S~b9jN5Z-CXvOkWI?&I>SReQp~7N- z_f*5_KigF@^52O2pM1v+B+bMyO24z+;IU+L=})Da8e<$K+v|b=n?)Wb>9zQJw6RKl zvh0~vOsT%X_;*g^zRGFhSm67?H_`p8?hmJ)eTY!L#y(W2kJFh)aP~<kTj@fOkE&pASow zg(1_(Kgs?+)1HF-{Hr6(d~_0%gH+cd!;9Q4bFZBNy5@5tPnKiPe3fLGPr2ll5!zk# z8ZeJ7f2+9l?PhVAdw2n%c7Wyn-fe(hTU1am+(7c3Ay_cmcfZ>Q9vocV;MQSUyj#*> z5*K%lzOvT<6Ll4@(xuZ-hyJ7h-tpc+_s?8LF0uPiPv#i*2TVd}CPZ$Sb=4 zV8$LPT>qGx%WFMX1bCIUD^S{wCeol3$n>mPI2P>+$ql{1I|+8Ff6`m8FcXk@Lw@1+ z#QaZQf-O$#C`86@C*pV?O_@hr3uE&f>(!uD zpn2@B>bck%I{aq{cLb0MCMy~n$pwOU0~684$Smbi-sXz0rkwDIjlAL#ibN5Q*WsG% zISH7*yj6w{=b{OTH6B?{ZX2O$yL7m>o+?&&{gIAxrZ%r$gW9I=0qsE=2TJKA(2APO#xabK-ypNTVb21b0)W`ksyeNpSf$ zjqdgB{E4TT8f$X;_P%+b;0N6)z-_FbCw}~F%`qbMDkufiKFiSrF2g<*{p$eY6x*uG zA*+y+Zw-=^>{6+*8I<)oV$&-&%k5r(rHfSFtuNAuYIw?Q5?uAA?Jh9&x0|A(j!aK= z?L6RsZfCWfJvT{`yQzNH4lzczZ((e$vH4eO z@I%epK#oM9yZE}QR`)4E233j_*Ggpam^x!VmPiizzE40DMxoV)TMl0~C>jksfS?`< 
zjg7Ba%>a4mCGt1e{5s)P@R<@9HEt01X1V~sh0kA+oq0SK{mk`$F!r8tO{Lo#D04W< z85`pOqSD535D@_p1q{uO7y$w4Rgfml0HG(rVH6bs1?eS76_6kTfrJuNAkra}&{0|l zEkZ&kaNnIUbI$*F?}xiTiF@z2uJWvBt?V}*e0uhqTlq|A53JOum^f!%$gy`M`|r;O zVE#`VZ2Th5!#Y_ifTl-tb*pis0cl{CM==1OgmO`WzLr_mUyLlrSFb)ED}R4?eFE+? z@_s)kBmmHX!|(s+m=HPYG>xn*BVSz^7&n>`#jn?k0SDJ|zo!Vmq50=wfCMiW$e3NA z9wYriV`z={YF}987A}T#aIy2t==$x;gX)AYg2%*jj?BS>o-A;&wTs#qk$0l zP{G+V#^lexvg#M}7C2wk1rJ={I@M1Ftt(z*Do%GwAXo#g{~)1n{#=YSzz`0>;f=84oqNRM_DW2LbqYlA`a8epb z$?z?9iMqIkIWO#bfrVF=B0u-lzs^GiH0eFDXECG4NJlQd`KL`qZktAE+bBhAkHJj5YHhgcl zg?X3q$Kd=hQ4|fY%9=0)A67H~AGC=pG|dC&Q=}`tw<;eo^Zd>izC1qY*%bpF(!76P zN-dP-60Z**L4fTsD|yXo=YH@s1Ya<`cK{sDv^>bS&~*@7J)-makT~r9T7`;J;=z+z z2dJRvI4}LfT`QQM!JRuVbFiv95*|YnH9)d2l+CS<_%^r(ff6#eox|o&|0A36`)U7w|6BCi+DY-{iXZ8x(@!@c#P|-qIst$5?XQz(zRLN; zA5wB?UwEyg-GyPOwp&o5@h-&Zs!;}w={4NWuF)PjwiJM_UJwacb}6CdOa|6<@=fl8 z#wuo3PjPsmz*D=o>&Ns&!p@hXhH5Lm`eMxuw8DPQ114eo0}YX47B6{1R@}aiZvPrY zx}bo@5C6P~w_iEy2vq{v&L0$ml^%6BhYj$AM2yAir3v=HXYHHfapBkeruADOqjQ;T zWE=#jujB^Tb`9(`d~W5TUp_)FcewlqymioHPGUQ5Rv`3HqY%_BjQ5HN&xcR$J8Nvh z0%}Xu3o2O;X}b(;irZa7n*@nLxkRypLRNOPPlJLb&>J0#KSS+2WVM!0Z2X7}o8rtb z^$AdyzH~fhBF)lIb7h{u8H>{iU`Lb^Fq(vOhn)SDqWI>DwQ!t9%s7LY!dWadE!VrR zHgN|dz!}WkkG^Wcp#ZYNZX>@#jXX;})%tvVCU*kS)A40@l-HqA{!%`ofb5qz4!7TS z^gQDTwVv*M7_7kTFvTI>DrYo?oGhVm-UPZa?e=eXp4!0Gu1%-9=g63)iom|tyPcc_ zo%B}DGJ;Ov&0(6y3tThcjGFqh&|5xoe=70VRtE;4uZjNHVh0SdUmf0k>ksIy0=QP| zOT!uRBx3nT=I+pLfuX%OiC0&VSutPo5;yT68FD9n3|hTq$ED_Q`@WW+11!thuk5xZ z3AL4s9nn0gTUf$1DU(;AW|U1LQUy+mh_{aCqxjpnDvHdv{Mk3kfr$R9sjHe0Zg*2yLK& z6GuPbj-ekK1JaBB&P@L>-2Q8{2@^4?#Ouu!EnR3zF*X`1GP;GX=1L!DLauS|c>M+z zU|=pg;Gn|#=dj+XCIe?5}|DV@h6qk$@9@Lva zP*Z(dIcwAeiKKIkw!G{M;VGp$a;8iF*O~iTTz#Cp3bogmotYFDAxiLnhyz;;uBHgi zI|bynNUP&E;rUPN^*cRFp5d&*U7_3QYLM8LtrSTesmpkpy_lH)eBD$TSXdsONzRU! zLEA?iKce}@b@>t4BIt$|G~u%3+?Mb(_-w}NpVTG9*)k zg+z>vwJ2}ky@rrnsVTzD-!c*m?WeJ{tjPMJdt6V0q&}g(cl{_dxu#fXyBKh4Tv3hs`%3gAxLIGp)v`DQP~O6|u(_x@NSOZZcE2 zeL@oQ{u5_MgGWbDc8%V8laMqtbEUzoe~-I>luo3fuI5P_ib}vnqSLrNcI{qTICJP;hD+s#vYj zM~o0?bv>kwfNnXjagE5wAtP6UUf{oA9f04bN#mhiab*@R8#T98T^? zZ*N1dh5Iq^b>{GLQMBou)Rjkw{XC~VNx`5_C= z*`L>|CAL-CcA79`J@ne}8q;R`NW1#ZRfH^0$53{r;}p#9R=a8NC<(HM`W$P-<51x< z;ZeEpD{6D4aoF93{&i+1ze;`(e7z{T2zwrBK3BZy5paNKa_)sfiMKklc3Xd^V_FR- zE(e4R>{Il|eFMHnD^M&j8Qn|lm%-XWCG%c!J>$r&5Z z{BB0YXc`Ye!of5BYqo)0xuOwZx5*nt;o^@o=cpNHdb_PjS4C#(ivIKtmY ztbi;Lj~fhp&sz3oa$F=3)ji`j?;?27TMrHlyoaT&&7~(W>8q3L==hNoj#hPKyUY+Kf;-nzPJFJ>gluWtA8&3Bh zeuwP+P^;n8RN%@Fu-LU?0dD=bplLWKg2un%#70Y%2k)z{JaZ2;xxs#eSg#1I05`y8 z4*AcI&05Mpem%ATv3wfWHeD;`>QZM)^pQGSwh0LIZA<-$LC0U8!k#)UXHW<;Ix<J-@vi#^q&O-4-o|e7KwQD}~aVm9DzyKP| zO-b#%G*vFyb9XSIZvqOTE(T3_dBhqLXb;B;mkuht!x)K{2Qk!4R%=P#Wn9SjUL9!q zo98xaDl*i=K)#c@^~951|G`0}X+Wu)>VQ;D1Wr9>ds{6nF^he+hu*z=|MCTzcZu_( zjE+FyT~G6tYn<0S(tWX^X0W^N$Kzq9CHvi=ci~5FKc&y1hH57>!yQiYt+rguOTGZ3 z&U^FHVcM_9l&Auo#Bk5x|I@iWZ0)~*e!yLWg>5T;M!w3cB*0$vjVnKCn zy!p%uoT<{sS@BI82eNTy;^UQ_K0jZ{YP;x?aPABh;4f~xz`bd=e^z8XQtHU{a-7xJ z98mqN?bl&mg&IG8aII;+(+;L>R*=fonHgs zG-|Ng1gAZmQ0Y}D?t72pz%LI19F-oEq){LTfe|H!*)b=vtqmZkG2|(8bxUB=z%>h@ z!PM^do2@O#x^L9V{y4SG2Xz?$3L6UuRc~+upUW1Urik~)hAemNi>SP1M18-Bhg>Uc z>IYVC`mdXsCL--^c~PlK1(7>_o6G5i_g`?)i7kpVP@n#3GeYE1iXt@&F_L1s)D#|I zdFj!k-LV2Anl3;@1Jt88T<(d}=4{wMJ_ZS5ln~u1PyY7sJ;;b+hSJ5w-c)Oba{=}O zY}c~XZh(jjJkwNM;w`4Iw){%g#ox{xXS6BKbz>hq0B-MS+(Iu6S+=WuumP(z4OVLi zoy%QKBxk`V@-vd>e_T?9r!2shH*}_2-+=*UCt9uAF-_$)84-}Yd)M__? 
zp9#^myE06v7;oIuS?GFWixj#cTypqelwL&kGE%>@#P41=>og~n4M7_DUKt##4j7tgE~ z0CZGKjC~c`U5o6At3F@DdeVNg++%obWbW#~~j=4rT&eb;%pJ)a5d&Ji{99z2DWS~Nn@rgdCy&<)G=(C9Zt9ZErMDgILcSZ)<(j7rmrviAls|uP9LZ!>IgUWjk4QV`lNSs_KJ8^BVJj07O zBy#R4P>OLsMvSXA3Rt)MyCYvNgb?9}Y&m?qHCpnBytQbQ=Js{B;`)$D@5VQIU3O8! ze$Dng)Bh3o_r0&CSgGL)P>t?zk*7lCo$+YXCDwR=X&LP&F4UXVGuz{y7e2o>9vKlX zSM!Gk>vf5j)?iz+TsEQ9(_&|vZ5Q@<<-a>t{wU9MNFY{Ic8YqjVYGed#0cKI$WF;_TTlMQQ)&B5avt{C%$E^k4o|@|3;&1k zNZ%S_+r_WVRreIbN!bYY*ZO+5m`^6bigx`wQ&_d}A=82Ej|rUcG17Co<88fj7X?sF zVtt!9j2wEkBV}#qKJQo%*~6;k=7l?A5=_j}@-)TsGHbnek{iN*(1`632a?~P^-A`V z`wp5A*BwES`X6(Ifz&r0cT`b^`o(YVFrS-63|Vs7kUVLwzQuJCA{`3!f=I#Z|aHV2AomnWKJK zaKWC^0bpN!E^A^S6mvKpOBGHh6O`nyzAbT+JPyAF|uiQ%j z-BQU$l?&WR@56n_AcY2aIil|PBlO)mOI%Yfx?NTn%v#vSfCxFB8~O1li9)@di;qZ4 zoPBmeJyXl*hIo9p=7m##hgG$EA+fW_ZsA8Ma+CNt(20|MP(hIN6N5{2uyKXh`YvDTAd zMi8&Dk2UHyX};(GYR zce_DA!%3}#{E5~9>*bHHWB)NGi_k5xpx~QUFEMYG@YDSlHPl!U5M&BG_s@bt^YooF z$#j|oB*xrm_4%k|iyFf)*ohR;s0K*S;B2w9w7*kjJXQ@Ss-nVy{=XO97s-mANbBkuTcf|XXLoXgTbO+^b7R}ZM6_ z9=9GrW2|v?w5p49*Tui4*E?p^h@DSHA z_xV7kM|bEP!UK2GZy|NIX{KQ$*>$;tORw=L$pR}-%DbvjeJ-axKl~GK{`eO@mW-oG zX>k87t_#0h2`IP_-)~6(Qv(cZjQ@e5Wix3F(X<>If~wB{m4g8KN}-Mpy@`vsbHxHa zBVMIQZMssqyGao8?5o>WL0Y$1RsFFtm~%>ZAQ!lujEzS7zaeg^1m__f_U>7cOIed1 z_^j{Wp94Bd7*LcT;tzs0KxeL;A9$Dm5P*4GnL!FZ4-EP}pTZ^%^>5n+Xv^ADKJ`GA zYaH&js{{&(xi80%AxzID&_Y%q?ligMpg=NYt|9emUUk~#RZC@{5nOldk$aBO(9nEH zG_FHbmRGqw_#)6{NB_JYFys*K&c(R{d|=t!ekUCt2Ei1x(>w02Oa2ba0yiiBYtIMx zIS&;9##M$Ich*M9D}ieU(yIaLt3CDOYeBgE(EhSwha%kK2m~l)9;U26yil_hEbcdq zWN^*r3M84ywMFFiMu0-o6h7D(i5I;`w+sUvaVXk-Cfh9(522%>jaAIVVpqGGh{R@H z?%12I_ODEy4{xnB0R!QJ!5iga&&%yvip!h#;;vZi>ve2N&=ebGAqJT0+9T<5J8zJe9+WAUGM0Eb>d!zP`VqD%d*hOp-JH(Su#aL6p zYJdhaHMJ12@GNuineCMw@mjmDfE@U;4&&+j1((d(UMJQ&k9ABvl7Hy6YkX@tmk@Er zG|4$vvPb>ulf%S+0bjb4apbWbH?MJ<_+|vw&~}&ESOw%89DcPe>7we&uv3xUULGZ! 
zrf#B$x3F@<_MmT6jB%OU#QFb&92})d4mf zyfo_A)a_DCUZ|W@;Cb(76@xL)hk5=2UAGKk8ez8Xu3y)p~-VSM~Bsu`v+pG>ymhjI1@A2Ga`l zQPW?i5d%^nngmyRwe%pca7uU?Krbs|R)eb??nYHG$3tKAD<`d%mDn}9%4;#?Ek$Eh z=^vrEBxEK33!l3^S!w=0FVFmNzn@WkXwkifox#i#8u&R%^o&_i!wP2YhPdY9YbIA* zjW762ci5G>(18v`*fxBYEM8X@k4(Y|A;@Ikm-l&t1gYlva1Gi~Z zK#)MgQ=c)o$79P&d^k5}sdb26)&!!5g!3BI{J%-_QX5EZ{=;!8kcA$I&JI6v^NV5B z1Y-H|E?LQardAwXCfA^0BVw^KbDS|y=%=uG6Pd|qDr@c?cHReBHBa^$tP$E7Er`W@?H^bT}FFRC*jZ`CWc75+Zv=I)=N18w?d^?S44+ zaMjB5p7Q;BXRRo|2muoDp9D@v8gS@fV(8CcVhcku8$I`?5O5z37ju~mMO6o(%aEf}B=chGg*${^DXywMjLw5iD{BV&&z3{aZR4GDds7zSLcU z(6I~^2ouW|1YK2^IQ}o)p3{`qc-HUEB6eKao_-08=!c&yFF6|HJdbss9I0u}R~?9J zOB8~qq0UDYtcY+yLWctI(6+o4_bD8ZfKs*??h69p@81DiBxEyY`(5|P*J+9;OAHqf z>%N|F?Zdj~?;4;?fJ15nS0I`MxbLvJj+0R)+4pDG0J@XlqPq?VhuErDqOTq$s1pavvsU+#$yWHjQ(Q@ekXS9u8E{&Bq>H=42?lssLJ(2!?OCa))*fzn&_Uqbz946HoaBPn90bf;@Nah7Uq0@P8~=|> zaDp&W*`wCAoLBSDuBeYESOrcl&U9OFS#lVJvfe+Ll@c6wyVg6@8h3US>%N<0=1GMD z2Xp%5`a#5?Ju@@N=tetXw?Rr>cyGoLz#|jUkJqBqtWzQU zR5)P2v-`vi=m{V8Ur;<=H3d-R2i!+yuhl{KTc`Y@|ZF{ENasq&t`*perII z+tpr^js6ZmI1^46!eRSB;%;C6%0Rw_c#$qUix1eBz{=f@pzLMgqifR6B2`lWsl5dZ z-`}F|0*U^?1ytaDHHA#Mrw8773~#%^MQp%~cT*2AC{6tyXpc3+aquk$Y^g4LZ8Ldq z&oE@1bvRHiy$ga!#{rxaC3wpa`{VLJ=7Z`H_L7S>r&A!Pu*}$b#!1`(N1Q@SLr?Xu z^+{>O(&R`>)PO@g(WE=TV?T_q`^j@gT#Mhus>2v){=n|O=EWLy;QbfJraP;Hwx$MT z#)UQ6qesIhKXQ1tZV$Tw6IFB3b$itDOJ+L}J2EAwpKF0JLcxm^nLgD5C~~3eVE2 zBRQ-^wzX~kK|4Kd1o2G=MaW>(^+jCf+jj@_ z7yBoJ2Tm=f`|$n-VEl~Z?6EI=zU||{C=Cgg{u3kt&fk=HM`16|K0Y?9@hIEUed~vn z1=;D4*xeS~0OdxR0?j*j3PEDn2Fh#AX3v z28|jg#*~9d$MciWX!wT0>h<#1jK~EkbOrZ4SjoqMOrV(dz=nqgO${7}Z0|pG|6MsK z+-a%SlYc4Y*jwM0>=zF5PC_LG)N{z-vxQa( z%)PMZE+|3yR?ekN-$($v;*bYs3d+e65d)rS&14YFM^@wW8^?Q10SR($w=V*4cz2CY zk*ZX-XN%--koM=+_hSMT8md2>#?N;l=ZXDkfqxu#s!1YS`3nKXkU2+rFXt{- zHhPaQb(V}VqT1vge}Bq9Cr)1IRkZo%glC|a`liuCo0)^Ex@WiyK2%VW zer@B+n}C3TuRB3LXaxa8U3yw`C3R&e-$J2s`>gxBC<6A9WSpG)%bL-uvu8Futu3MQ z0G>A|!92wpWSAmjV0p(sb}HI$yax-FnGfG0kD%iC+^X{{C)N+X^%z1G|5)02K5cM} zjb5s>vVcQiZBD zb_%C~L;l|$V}VC)`)N>KJVj{#%yH(>3aVhNI_Tmah!kt_7P0s;ZfFnAYK|te<*Ij) ze-&_%&cYsP=au=~%GJ)35}u3Z#}cW&{Z(LrT)MXxlT3pLB{F6gHg6Q^rP^?r@kxM_ zYfmu;K1=EhyYjcZq?gPvEs+j4f^B!uH)?woiOBhAnsQmd=$6~r0CXTrjmgwUe!IUr z$u=-aH=orzHVM3VkevgI!(n00#)YZb4kp>*QO?2Z>j^>MF;m6d5FzAOuo)sc`>5#k zZ|@-^4fc^TqVwIHcwk!JtOYM0zP(H{Wo^o2dd<#ftWwdIyTb&uRPEm7udYZ$f2iU( zjlbmuNJ|BHB#&5)v#Qh{6sg$VZC9hFweEE8>HP7dZW{fWFLSU{YB!rk09N3`l`WT< zsHd6~SGWIQcq&M?Z|T zm{$BD&%X{Tq#9Ue%KMPMx(yPo2x=#d#8bwfADZ8#(4GUJ!32u^2gxSR!)+cOI}nv)`af zm(>9vG~tQQR_BMMbo)xAZD+d*;R=I@*7eVP5OLHw3*Qm2@;hq4T_Rv|P2o{BcojJb zbyYdBi;5r}J4KKU^lfdvR^v^zMtnV_zGUgtan9YYWoL0uzz~Cy%*_y-hOEdLOrny! zU(xW=*DbqKW3r)vb@a{E=AQ&yE}afcLwa!e#tu+YR(qy3 z-|E-+sI=HhDj@sh9Z$BwDlk<}Q5sN1!;8c=mxU)g$0Fo4CDb*yv)=*u@IUX04DGah z276L6mS0=~=4z!}6d5H^Wwu~F=pOD-??x+|AkY;U7TDI^;tw8KFxU^4`+5!jULJv! 
zIWbTMQmXb~HD3DH6d9W~&m}<)ae=XU%SUy|iYw@~vp~=tW{N;I6dRFrjH_KL z55g7}iwg<8n@^G^EqTT7o3 z2|@0_0NE*xWqK}1LI2$@P`1(_*T!B240w_#M6Oh)Q{4M6jQa=Hi##LB9ZlPVFTM@XKdeSXPFYFf=R62}GhqgPth7X0YDoO&sch7Ro^AVF(%wxXBp zCFjb8OSY=$sFBX1oR0|($cGpvpsJ{a`pDW7oy<%dZly8L2qtpsfNcjsYmi7)6ZIhf+7H_lEHmlwZxH@yi=m!WuLJpmtC&Ya& z1Eqn$1o=*z08!NKFZXmCEzeJIaKCT|b9TzcOILh1ZZZX>znA&EdisC^li_ohWb~Dp z1bg*`0H0}Hf55$L+EZDsa_bcF(7?q&)Z!p_H7_@V)tny!4A7Vp|ApOE>G3K2I_&Bs z-lw_J_t7HFez=u10q;D=?wm%H z{(n)cpG0bN&XI5m+cX^2u0t}aq1Z&dTJ3=8I-5yKF zY9z{o;^I_VNIIM7v$07Rvm)qlO{`x|0JtJmZj&*<5sLhd>ZWoB=AsyLwoPV#c%<9#1orQb@Upp z&_2GkQmEX~{>*&4F|uQ~450KTR3V7y4}|JDGs5Mc2Qa6|VH%GfLR2lN+U?cZ*9BQ# z8%m9CFfCL zDZ~7u5g_d$-R}RQ6#kR?TxGBI1vQO}Qtl_kU6?jT0|6T%n;E;cREYDlX8Jjp=8&V zLRcht*QC%$%wdT-P=>(_CW2G9%D34PAe^=}qKxj(CW%ob*{IjsZKqx6;5>LtSQeZ~ z(g&r2_@&W9tvk3|NmyPNbY$lG)}+Zc43x3^PrTL|uf*7(j8IwiY z6!+WZo^&VoGobGB+d+y%7K!~uVw^WOO%&A4HhV{DtoVyF&OO@PBGd7MZUU9_L~l!P zcEn@4@h1|I<1Abl?Ad%f_u7+KV+<@!(nRb653n=%c-Bq=Ub zFme9viwuxi3hBLl!kq5;BCgHkO&emavfUcN_C$+>iJw1ikHaTh1ut*k5ZV$bD z>DG6uZ28cySoWL~6S1$dW^s51Lur>y-&@Kw?1yO_E|V1Xmw$I1-s449oN#_ z(4QmD;?`ZwqdynZyK>1cxv6+HQTTXtEXk8Xp{DqcJ#S{{EyWfEOV+HndQNTP^8kNUz*S?*E+&}hs0NoL+yEsZZ=$yLFq>){Vs*OP_vdal8 zci}-FvKLBww8dPs#E{)fk|$Nq0EL&iB~^WUhf+W&4ZFRREg$R~@E@=nqy8%d+7juE zo>YAr=6n4S(h;6?f`OJfT&fz4VPePM#BAF?K@BFsgNQ%zls2&FdgMseg}b6K2%!UJyV3!LN2E7mxb?3or?-0cvj#%xhk@Eoa{24@dxSodGy6BER)w>ZU( z*8DMFfcK$O63*Sfx}ZDg)huh@lAy3o6nsBl+NIkZT9#*+i|-Qxr|PrDQBPDaeuw7bZNW79&!eaO!nq#N!A|@lCr~VkNJatR71v1C-q&Icj+dLfVhxj< z@9d>eFOV>FR|6@_Bv<}3aXHS1xi;5S2K*w6bEO{ykc4w}?EU{~#-X3RKv#$o2x~b1 zx-7zBApBH>w`Y4QzF~_5wTdcjC*ly>&tzYm94NSfsYLWoD%W?;K8#d%=%1Vzd$uz3 znGx|~e~oIsO5pGwo1;f)7_=$kdJyt^mJ8xjoeOwYuFr2(1%n+9KgsFDSPRf(oa_HM z7kt{q3l|_fl)+o{)okUVt1FXJEze_%Y&zu}1eth9seu$^CiI^JhcgLawuDORgm(o6GL9#=my0kruo|c_$gDdmRo90LlTB)tVuP{UtC$^S-ysC~ z-Me?s;Bthliu#RJu;UGhPHBnQ@L}IUS@dGK&BSJL4I7*R_OhU;hNxn$N?*U#{)Hd? zBh)UZu?W3fur-iFt*B1WKm~5mFxyUQ>k~2SWuoV7FQYv$tCgcd<<7}__8$?<6lzVm zq}Zm)sG4NN%7ccBojWte1|nvGJr4H_9v7+CCvVKfzR?J!`?WQ6SX+}=#B3~9PAbjc zPHm;n9*~al)?Z56V6IPoK~1h+v53`x%h#-p1+JskwwUeYT#e;C|K%Q&@MF-5rdLxs z@oC_6z_V!ftc@09q&hsYtgAX`HQ~I&ZH>;PtQflA5JKyKgoc_#xv+qgZp*5BTgoMc zfTan4aCDiM@aQF4is4Ykd=j6SqtZS+I9KIiwRgL^GWL~c0pPJNiu-?ioN&?ASVMOioPF2MoWBv+2A!l`X@w6jLm z%*e?(W$pj{NrI}M#}*m0rG+TKyGY-;)BXu^VXAWa4WvC@Ed_qv$^C= zTxy56+XN1oSO++u&(Z^=t97w!by7?2lwYw0fe~HQ_ytB2BPK4ba_W#etE10%(A|Cr zcFU{+LqZj?xC*zr`mObOz zMaV!*7C8lN%w)v=nfQmgp7epEFdt{MoME|b;GATuDvC0$`}j(rE+uF~5xaf|9ZbLI zq8(V+g>sSGlS#j{_Ki=oVk_X*DV?O!&-OtZ^jxOBc2p*7jS{p{Im-6fT8zTHj6%)X zgQF8oCI(Ac+h?k&U7IEA-YKv<$(R*`+<~Uvm6G_hL6&IlCiY)gA8az_NY^aBMl(pV zRWlR-KN&~m6dm?PS+`{3zt&mHbb=0J)KK@5q~ND1BLn6lTbSl>^64HhQ}> zj)Eq3Vb;eV}v*^+tCnQbJS1&oe7#i2iS{ zeV3y_QiK*W9t3{px;47Pw%MAL$23ZX!lkC+)dBb z*H!0B4H3;k*)eSTW^S!C$_mzK5lvJ!`1%^=uCcusMU(uwYxTC34+hv0CJHWv=+d4C zhxeV@dAYOA*l8=*Yd(=<`vbeP<%CwFBiKN@+ddiX`>iY5iH@gd1;Q4Cn`O)2|EPu# z591oqeP56P^#<%%%D;*P_h#vSn7Xe6Ptx?GKk$N^oBJxJy6H+~fzR-TC+6UySZo8Q z=BAZlztOcVnkLp}l$AhXZi6{pBGpL$2TmEc>~XdHxU~6)I#ou~Po^M0|Ak-GxY+vg zEveZqswjHx`+6OQ_?{ksULerC&b_kEExl(m<3?ESwP4P7+3T(b~|j#^|R3Uv0;(5=VX8$=%o6UjaQacQh&X`I>fTn7HVzUljA znC_Ljrnh$kB`#pxGPR=QaL>1@Mt!<;7B@$CG&ERm`6t`!!H}g`}ZztmhLkjyQ=k%VU=?q;cobT$MubwnVDcaWaT`* zxw$A?U@luXF*bXhg``h-%^U^V7#i;S`)Iq*l5*-xouKF0F|CY%_8>2<>jOp7d-TFY zy>2ZWg-bCEonzz7;KPd2ziFqQcaeDOlgz3x;JZoG%j=S!S7h1jEZZQ!F`HArNUAHh zl1!L9r_^(-s{#@`8%GA4g}=REKk? 
zzFWX4+H-#Zsb4K;RpqAJ85JQKfyrXkn649C!Pc@=44h+XXu!Fp(JYv1xpLWp`0U7Z z!I7Mp?L1dmB+v{!jQPkSl`^jV)3uEn>8x8t*Y>R?VTugmb)rY?>8QlC!BWiqeJ3eF zta#Ga$2CzmQb7&*ewSa`(>n2dzGI8+fyi7lS{ie zEcb+-XQ+s}Y49WEbDQ8)_F&PLs=74WK1{JqjD$%kyk;Q)L0ZmpD)THk&$GO>z{FPZ~De_NbhoI{TNGgh!JOQ1^nGGQVb zzOoIC@|gvmNA@xVgv=4RdtK87W_!6q&8XgG@sF%FOR;id`wx@d3+7l zZ?j?T;!-swrsOiDc2t)Up}sn#$af%{c2=EA&%z8;7M2;DE?yFUl1*hi74a|?Kc0z#I+Jw$Omn*KhB<$|&4Kaek@_O2`Xet?=F+=A}0vReqB}!Wiit)G6Q~Sc--;9oX|!aT{1bGEv%qd_$V& zR;sj%9{AzyfaWwHyUf1_WlJ820oGy9FL{}BAQo(Ud>xjUrfZuUGbC+$`-fpUhqcg=eh_Gr3j6T5|C(T*G9 zGMZ_ney01RyOxmj*)GaoE`-mN)!V}w^nlxxcI}^T%;4d}W^@%~ZTJT;376gV7@3Pks z)03Vy8{U*rkA$|}>pi30dgj~hE6xJ~bF(%(feUAK;Pr7D;nHUbG>w`r#)17BN>p=ZZWbrksw0*S~yRQ|_s zSgGa1eYfE!ER!tO{jSVhccZJv{5dz(?#XO7fRp&UsO6I-Z?+Y#%tTwPmvBCaQYtS5 zHw?eRhx|^mj@ZAwzR@@wz5k}iF!(8@>S>f6zG4f=R{vkl1I_ui-n#j z*D`o!;aOyGmRy`Cnwd-p%9K6Ao^No79|fV~tQYHfC+#nOL9JPnnP0V2t@B(ZpZre$ z%ip4GnP7n(*zl*2HTFnJ3p&qkGPq%6<=BW>S{X?y;nZvf~;JEN+A?)ZZ3$^7+s~^^qDO6(}v0e_EG9_m8Fzl!(2!oOUup7Jrss> zS%Ep{6!wI={7m+djxc=o=3n_`)mgP4eYD?tKP#s^V;1Qt5#K0X?dxtL(_A(x`i|OKG~db8)XqFANU+rAsf)3i$?tCnt^d zp(6$R26kiVp@{S7tu;=hrjQnA2ahjP*G>!G*&L0s+1a2H_Dcd0jG^YmoPM%L`oOJt zAFI5$6{L*1RJQqyxi$prwpbh4sAkHt%%~FbT+Tv@U`AHV57#as{Hbhm%!1R5KU~ZR z0DRx?UNf-U@aa^{_Bz4-p?l#O=gl#xl}LDv((h4SH2L}YSPhJ{;GHYl%%l;m)~GIK z5|KcOj;Ala@rXg|WYdt7kFqa#&?|hjtz7bQXc%Uza{F625de5O>1X|>y=14N2UeP) zt^%dmG)?4R*^{>oYyAe@%O}}Po3T3YiBa{dFPx`wzPHp@0dwzInJi~D2W_SJjRb%s-y;ar zXR~3UJ$u~wS4K`Tf?}`YV^M2gB-*4(x}v?Mzux}oHZkkRc_jFnE=(b9rS|qGQ*(+O zl0W;^NhI3+48SoK)-?(}rT0Me?`~q*r2y8X^4tC|V<{!85sYA(Dq;V-YWQYY_PGm4 zwQR{>yU`qS%i!2{lBp=t5=er$Xoa4r_X0IMvrT4RU>SsZ z%3&TZdDW4Q$SZ>r`lI2a>;=F2JLGtnw&9Ox*qJU$XkO@9WCAW~AzLTAKHZ(NHqD%E zwp);q7Z#Y5*@L4?C}pYlLO!0yI2*378XM-ZgUUVFAMM?CN$O@qpnYxUv9k|Nf5j4j zL}#=ufbiZsc>0<|Q}gYd+IfD=+BJj@SqaUuQ8oGsn9r}8AS^&E)(dlZhoc(7{?_MN zy%wIb9-TkGln_`P4*$#TNxXHSw0v`ap0z#Ht!?#e%9TM9Ov|eby<*+z*QTR}W&mX;z(++9{e?b_QV7ot zhT~#EXhRe8Ki1gv?%in;^#_27ayP}gOZ0?!&CfeDy|LpM&JP*70XKtxbd(}JY{}NhT9^-E5Q2Yr` zjWcb4w;6lKB#ga~v7=h~$wtDko@oK*3TJGBKn7Z2#@}~%{KFb6SJO%~HjKSGN&+JZ zniDH#AFBVUwR&8P@$CDT<|4QN)Tndni|l5>@k6xFnCj!gTg=$8X{7MWq4RAL#WyTL zEV&5y%=Co$HQ&DbVHI0T&Gr*tUjv!Mb2FtmDuGt5#kqAQm7>4UQvIxlKu}cg7C_jT zDHsox*s?`*YgnyS1FpkQO9+sQkdke+^M@DUh-cbV#$VdunKPp#jM~!Ilg0H%ECdzB zzC4M^sox`zE2Pz|_R(#tmH8sH7vwPXQXYN&xp;LXNNW^)sm_lv(k%b~w05phO(tg? 
zPtP8!!h+fpTs1)st4M)O(3J!TB*?n1l?YTWaux_Ghy;S%!YvBM!m-pKV7-7CAS?ut zJBlK=5KBVBrAU^m4FnWCfN}{*1PlW9d1Jfne&6?7zRa7Mcjo`h{Ql3(gf_M>D4LJc zXbQa?;C<6_nQS|)DfW6-SC>(khN@|_aodBzXo5R$nQ0$4MX&CLuaF6#?d!d;Uc6@$ z>T!}sA+vcymjHzY)(Dm@Gm++iMlMUOKmT*lG1W|QmIR3~Sq??z<>fDdumtAN;w>(a z(m;6#(t}R0ZU6us&^GNl5T4u77$^ejNtv+0vGZ_$PzTOdI=Q0oJlTT37Pc81bp*@&=OJy~?WPZ;Koo(!v9=~Jjk38$ zf2(03>b>H^G+C9UO{D1K|7M&p`Uw09coK`>)BBjO(^n^Xgy?(G0^)rkwE zjat4o&z>1G?V{)9u}fCrQ-i%v5Wb%swm#Fh#BHnVrC>fm+R`L41I+RM7dR!FCDC6A zo4n_LWTezDFT?A4IDYk!`KEM6 z2D*iB*nn=kL(+Jph04ug_feczg}(g7c*qKvTku>ReA^Im{ijqHg>xzI%B4}LUgJLH zU7vvKQWLf#;@ZXEyIGaT&#cZ*7_(2P%?A^X<^C9uhu~lnkhmw+RMvJ47Hu$ z-*)2wh(4T+r}>VSpID_hX*=B~U=la09w}f=47Vea!jy3kruTz*QyDQsb{| z#}b1I+%NRpH_~+;Pu{8*GR)F0c0yC^R>D$d64nG=(n_wu(kkKr2v){0rTOu`8)IPX z_(gwqpP!fCb(Jl5?8_35j4jtALT1dIeu0C`T3Mm9uFrv}nIM2!z4Bq`S*sCjl1d+k z-USCHD$eY7EvF?=9C?PA!A3QL_=YNw!}^uI^wc6l#bco>C9oMEtwWSUCFa5(Ck~_PKY)F&(ClVCJ&(_zvV; zCC~?A{1|H*JH&T;$d4RvRf$YC4jMWxK3`+r8X(^#@o%A7-B|g;P*)rn!8# zN=owTn?qcQy!DFi#VE$tbiY)j`&(LCg2q`Il4c5DhE-I>T#W~ujeeF`Ttdx}xY{bv zIsP#x=3oHY50q6_ep4MB6xKbf+p>d_?MbO8%nVlew9H?OOb5vAsBDh!Aq>kc;;+?0->!;fppQ}@O zQda+aU58P!KV2S;8>Xq$-PTfTwzfMFv4i`08?n%}1P$J|K{6zW7nG!MbC^fljz%5D z#tocmEatcdT?}v{R(d3y(@^#J3@#A^ zg^Pz17hin<^dV?7!@bVU_MV62{g!F6k|kO~1W7d*9v0w)yzHv&AD&cgE-MD#5YGXw z{|qjf36TOk+zF8+8A^A}ClSDSG7)R1dAt?F;&sjar;6Hhq6zAm)jqu(w2P0A&$JoE z^UnhH67&G(uZuR?3n5KdFLjsCDZ9|I~?F yVH_#;zo|7`4*3^d|4sbd|AmR)w79cX$JW^Futyd4!LzzQeBzkf_toE>P5Cz_KD6He literal 0 HcmV?d00001 diff --git a/examples/files/two-actions-without-persist.png b/examples/files/two-actions-without-persist.png new file mode 100644 index 0000000000000000000000000000000000000000..e8d5facbbc7080bc968fcb0df8e6f924bbc02774 GIT binary patch literal 63259 zcmcF~cT`i`yDc^>hI0TBUF={@uoYCs@>p+!MN zIwaJDB2q#Np$7=%ZO-`}&%5uBJMMVny*&m4Hp$w1m2Z7z&bh+1HI?Zvuw0;_p`lk* zQP82Gp~cbAoTQ$k1^yF-^_>TPop94penL~$b7KLxIc@hC{FsKOB8qO`@(gf){-uhM z8x75+mgAojZ7zk@G&I|vR23fUd7CefpY^^mIJ&*2Jl;r|cq#Om^**iBxh?w8nULE; zcP{&?XPWT1p>O!zUPEkG#xs zQ)EO4wq8nAZoLvb`F9@yZ~xDAgKIGI#6LGQZ*OXx`hDYn8S(EYvgd%o{c~}=h5Yj; z&1;5c(cd>;&RVkkzWEt^pZ@pF>f>YsftepUc7eRg7n%8$_nqH3-Tl=1`*W2-78Zm+ zta8}J#O!R#Jn0>e$aJP|v6Pq9lmNrdx80rw3C4nPna14cprWx}p3Y_HS765fbKN!& zC`w7>W0bY9Z!Rnx0w;!B;1|3*Mmam>0(99Mwl#bLf&}>D%w0RGZ{=lMt8K(-#!Lw~ zJ@}_R&6VQ0FbD~+PW^J15>ad2QT9A0bCv^h@58dYH1ufZ#O`1>!w(Z(!*+HP12H4) z(osetWW%q$n^Y+jtw+0NgukJkGh#UVs@J$~Y9mSAqDfu2qXB(FGR~m`}><>ZJKnQ32~^t=>e9bkqG}|Auc@L)#O1$xRRhZAM__ z#W5li`WoSIt7N@^uP5H>cyWTJMnr@XbYnVpPoUFn4Ri{wmq!5HFF~fov1?Aj- z3<_ITB|62OH>|HzwvL(kQS8MLP4|6~^k9AemrAFC_9yW5M(R7$K+QEr7RhZ5^zx9n zjZdVRl1+K6kVTV`pp@H3m^6%piC-d97+8LrU~KDD7>6Jux5nze-y>hb za|(#x?}=NM@u>_f;7Pw%pw!aVsYr5n2-etlP2uIU91mNcvy*&atjUopQ>ys%(__Tg zE5!cHd*zwwazoV`fgW=ib=iP@9SKqzk9Ew@OTGdd7gH`d%rw}@de5sfQKq8dfI}{J zN$cTb!OzqBDV2FUp!PC-VQTO~<-qo}r22h*^v@ZQS$LaV>}ZL+`WbVoaLW4jlTl}v z-`nZ72B~I6Pq&ZrAwjX16HqpnT3EP$~ZV_OCCpHAu zNqY}<3%8!OO%Yx_a!z96!4s2rw9u6GGMzU5&yTssC@S~oOT>+x{Yh1QZvLP(8}wRN zqAl+~ruKS~Q(9YHAsTUo1u?rU5Zh=%M?zw&$vEvr(az{o=v^#AD>QAt9^t9kTFv3{ zKKUW^C{=iBWBH+W zPu=X;&%NIUSIFHEWX$zOG~v^I=&qf_dOvAYcp<29y>)WCGsvu8zPh{Qg3V;c)depN z5%li3pzC2KK_!*F^`>5khqPjk?9GR}Yk3RVh|6wvTG7OSiy8w`CYtOS*Z4OpVEyg{ zi_vouu#ZFXd_}`kR-}PI>!}M;M%*zR!;@So`<0{xr!O2c-!qjQb1F+VE65g2{tAbi z%kD-tse%9Qa_jTwah|2T(W6IrN4UcPH>;tLEawN=htG*hHOrmPiO*$13sjXhHA>Q0 zIz~i#t~2(+wnP0~VBs3MqpQr%)S#c}C&9J4RWmC%+Rnb4mM!_^_+50Y zZduucuc|n%*5squ5of3Aky9{LWpiSJr9&0dsP$c}f59o`U|?{4qat{AWP>BR_PUdZ z`agC-qsKFWU~2@+e%F1`jEeBd=Iq`IstFib{2T#@+Or)JJX=HIaO8OVIU9)v6u1?OX>2v5`8L8Hv z8xPiMRaY~9_aSGM4x8qE*5Q>_xq;BVX!=gNe3IqXOfi$x4>R5R4hc$a{3d@4edh)B 
z(++slH2lkYG3DvP`dSXXYbSy3NsASS5NP99@ieb%&G{_XJBC!&60e#x4^w%8yJnMX zu2WCHwktBE3nj@&nHfNP5=~)r5~Hz=&ME%;>)q==+wM>~J4;RWcXZ|8{H&ffo!e(P z+LKbDmS|9XoM3}a9ehE59UlTXdW%amCr5WDsHA($3f1t1tmjxZ#|myH4oAIbOBqa_&#N5 z?Hx|!wv5Q1h26V`WnBRaS^jG~CwMyqqqiG%x`mf&$Vq-Yf*0sN46TS#qRK;E2iAB9 zkeIH_OsU%Q6(!#%{X;`&bTw0sagzAdMxQN!driq(-aII(y8Bw#%S=&goS=pbgR)xQfNn25k4ts;CP+u>k1=V~q zCPCNLrl;hr&Gr$Um`LH&GwQ@04*VGh-jI+=*G1XgfbA2b6%Uj>%O&on6qOb0&DuY- zZAi0eMsBV?UyKP{*&JpwOJ0sK29>tYm9S7RiQ>Dxk30hD~Iwbe> z%yyDdEqLniKDBKkM(aq}9MAK)Q}Q3He7(q>fA@=KQs+nFcKcq4PYy#|Y%JFn z!RAMi_+YIYL-o&tQ^G`2WA?1an2mj6+A87u`tksICYFe)RDW-J9}(8@vOQ+|`ajR3 z(MI3X6M>SqpBPEPFxGa&;~dAf0TSY=z+LIC9=X5W2+p4JBD)CB(i^LU&w8X840oOU z-8xn+q!~4Uk`JyJd*d%6RQ^fjP}0iJJ{jd&&P#q!T!!LEwoK|}Z0C;;DwF>=W#kOk zyQ@($d|RJ3O;4+`$_<*8$S41uD!w+K=Ao><+bSb4`!Utz$82wP-1T(f$LRw! z)SVV|=8~(g&c6XOzqQ-ih_s$biC9jX_l+#&Ifz0Ed9dj1-1*&v=KRyC+e;f{R~w%e zz0pxjq<>rt7u^tXRN`)B?K55V37Ggd?RgxxZ3 zT=DA80arW2OO5RtCb*i+0hSgw!WM%zbx?kNe_7yPk7du4z1jc9((gE z%-%pqA3n%n>u!M@clo~f$UNpF$$U~n%cM@V5{v6XN&fnjXxre?``8FALC`4KMCmb4 zAkmHbV#WlAC)9fDnbHq7bMNeC*S~ry<@duOB8s8ijOYCheY7UISqZfXDZ)(;FP7Y; z$Js{QzWRIK-sN5hgS4xrK276 zbX$ijp7*aVx$h~Z+<$7lKW>cJaEUvg+I8zkG#^)p$5(1Z!;=|ammk#&r_iMEFVi2> zco}EbwUZ5gd87=x7Z=qdvVZCdn=AGB-`nxe@bTqfyh@w+c}LZd4+;G?e78kVsN#}s z4TlgbkLG2G13|Z=f@R0^vU9UsTC&$l`-y0aRhdy&X0GW|OV$XY%JBx>&#HGMvVi&@(I zdwC`&$B4G0)z4~_Ox27VoU>axVD$l+MMBqkLv+;siO`0B&@g`xMQpO?CGFW2KgFNp z4y)IEBWl~bV!T4wbx(=7964;*zBeuFy>|0lDrH848M5N#^L!jV%KzFE-QV_7rBI;q>W;g4%M@CCQ`mJxI)(ZABdGrouaO^uR;z{+_^0euNoag^s#wQc z6QRTpLPL*(Q4y8>gEUeKCd|S*)^j)LGv?aghC^+v!;PJSsAU{nNq%rUCqMkol@rei zIHB5dU5*l@)0npLyKCMN!sddM(>C&&92Pl8H8(ATGKQo11Q}>!Y{$>AcaEt})_Td< zcZ*__fA_JF+2MJzn3io%KVDyO`GQzx;R%x;`vo&K^wWHOQTWTcC7pwN7lvr7s<`@I zmXf}m7j`++81>%sX4TUd0ebyiEd51c>+h|s?KaSGA9|>fVh5Mn3^mJ`yHMR*=_T22 zmH~fmJY50}i>TRt_66Z1ETRUk(uB^npVo>C&!-!T^jmC?2zupxBfpm|nFt+PJ2)Ri zd7fItpY+8s{``7;t&uYHp6g8XdYC2jBsp7wG0wQNjDP9(FtYZFURfHqbF-&EcMB0A z^2ah{By+NggQE#f$m-`J5kYrQ%-_Om&va%y*@pe01BlVjJGGB3zXKt+^8Ltqc?!&(-s#<)40BsW10nNBf#${);rJ}AE-#3~Ou zv{mDQ`}}rCAXBxcuhVHetS4|xon3$Q8f2k0{^{cD8G}<)4f*_$%^;0r5e20ydHi1A3$hgb_7Y?X2RYhm_HjsiYr{n$ zToJ6|V!>Fr=p#{rS;f*xTaPW4-_UQBgL_Z?KC~ z5QWKGD#w^-z_yJ{PSUhbe`C~&kCHnanYd!qGZ!VAO?rkmEsjI71Tm55+9i&iJXHJz< z(VJxr>uN@yUuHMnLG-p{?$7Wb67d}gd{Bt$D*zDlUGdV{S+v}~-l0C6Yq%qMaNuY% zTgwBA5fl)1I^U4!^dq5(BtjDS=CD;Go$}Dj;`s=lvrE=NUWP2{@$QN2eWVLQ_R)AYuetE^)uHJNY({^Eu%M39#_84)<`*0qWcyKs~bR`CQj^zKn176FM z#?ZQgjhi~h@0%>h3~+GB@u{}&f8tA75%|C)RCA~G94_`-*+Gc#Ok%Zd!(x9~4JuE$ z7Xq0>;$wO2WR*M?RC*4n+$XF5G+XVmh3E)%zqOkV6Z@3kidvS#xbQ?QA>cOud7FxD zSMtqcv#6!X@_kn0l#+gL2oo5%yVk~WM@ov2c=`{-o%7J<3bze+pS9Q_>}|Df(R90$ zkzbyXgJT0^b4~;qUqmV4vowv?>*L@fR|4xk3YJj*vzePAz+M{c&QbiZtXHwQ>Ao;f z6jy3iKa_Y{ACm03`tvb*H*{&XJZir+@iYmK#>Fq}E0}DxP+TX?ybL_!@H6aUVI zw8y(rMA|e@JldM_y7`E@O=2j5n#_W5ETZ_OYf)b9BsURhtH}mr*(1A)EKtI~qLhh)KgwR;VV5+f19!T!NfDzU z)+e?3tFk`Su-Q>jkGHCsFP_a8i4Qj-VG!d&V!-N)x01J}^RnVgMY5!?i2VW>VaB=EXg>XaKWduOFQoftqz zh8Z4=&Yc-2(aRwMuHP&@X2$v>oEEZI_Ioe+c&(W4_LA{xliMjZsAm**8S9=>qo!^N z_7RNSrrE0{WuM6c%E6K!mFQRw_n3yp_6aOeq}3dmH4k*dl25TR4a~zjbW;0!_x+^W z>5@>1gkC3S^=}%EDLo~HDwCjDiW#t$`xO)&dG%>^kY8dYff3Cn;6MavXn6 z?rZB~Z!L9(X0Yw`iBrpZs(2UUr%z)VuU*|pF(}tlqn~i( zPXhT64;VByUSS%yC4FXFnevP_Y%9IU=5AbO!8zmgo}F)YH@aZg1)p(to1pIF%zLGG z-+ufP)FB;tNj{80d=@8nWV$$1V%H&v{&fL2kc|bF0=n0iz}MIt&XdVi-ng4Mw(%u+ z9fR2kfvBy)QB%cx^nAiWKnl|JKj0S#Y^ zeYf(HW`1=F#+A=?i`Qbz8b&_0W$sq>`ATh+R$1c*coC)o=BOW~tY%8VDY=pCR*AT_ zXY1YKPbd9KgTP#G;6Tut8CDj*x8^O0w2>4tSQ;)%n2waJR7eJvZi7(f;TJG}NbRZ{ zbUj$v_flJvG+2?!-yonMj2EU3WLcRq&^~o((-~Tsa`+mPgiMCN7W7f&5|A~yQ%+=g zE{xaLrr+b0le5s#(OE`jhTym%c>ZS} 
zl!8U+d%qtoYmC~pfROFN^Q$f0{>oa*O9zoM&ZPppV!mB}N-V_dcB3YGL);}l<5gFD zsx=m1d%JmwHXa(}y1CQ-Mhh@nkM;8YxjhXWC!b$$hBgW1u`~o{vEOTQ|GbfCVoh2c zv^9T7rfyRCPfp$wv5-Gs8*qc3ce85najO16Tj9IjB$7C+HGTC9tx{e&5VwqHR z5&!4fILJLWJoeQ3*Eg{B2@bq!!Q8^Oc?{%}_oRHF*b;FKBW=p1jnmdo4H&QyrNMdt zYk*K$BH11JB^sFz2bD3I$_^s8ej;tEM>pRF<>$uu-%l`UC^k# zKUqHdpvm!jnQPNv?V=@VtX=M`q(u^40pZ&ghMK-RX8MU8>~ZA(w}ZNpZ&K^*d4Z-l z?A7%5-B8HcOE>Nl_ZnjtKbD7z`91+)&6JwkDvs)C<2#b}G&yWvARLA)d2Azrk3ysV zi8Zh^v$vG#ODg#Yt-V0 zAt9@m+4Uv@dS&hmK}tc3`?7Rwj^HxDbq&^{8@&5I{i*<>(yp?2JVUt)Pi=a?Y2^G< zWtM^S0SO=#>gM-LScK{(XSe!r6s@Rn@I4lKN9V?cC|4qkl0qp1?d*T%#MPH|u#8+p z+aQ-KVM(nbg%U9P;64hN1GA7nnQHsMY!aeYP+e;DJj0LPm!r|eXcsoj1xwl{Puevv zS7I*DU{o*lT~iC%k~zcI|l@Lsd`RF1S?r655V`6*KRnBr7!5V;Qyt7AW+l1x<$yCMC&DS|PU9P!^~`lMUIM^5gTEYLXW zeEUm!R#2>*+;%c#QN%dYbLz^!!7yFYBX+5*zGvd(>zVCZ%mMfkgX4JLQX5BLJWwo* znmktZnln6=V&3Q%@`kVRp#Dqb$ksuAu+$n!vw^Za*g3W`ksFqWK-|E7%f-H)4o!6y z0Vr+9*juL?L-LJx13DI$U7}ic24O$IfWE#BOx<6(lq% zA#Dn}nHPPrhz-*tgM1`i8@a0QOADyHAgZW$8b0-*HqdlL4KA)yRjal)^L<;G8ioxP z&7b;(nG`$}w9j&Fzh;O>$1G5X5%*RZIW@O>jSDus-=w}LGx*5NF$FRTO0wW?`zV8g z7#K48%j}9E}{r1r{FQJj=hqZP7?SM5< zGsXgFF_VRVg+G;4TXU5rHcX7B(O)hk&CXp6V z5OJUt^@n9W4=Fkc9)o$L~y~3_ndg01=NSB*;!d% zE{24JWH0RR@AsiXjnHWHw)wS!uOqopviEK&ef)GM20C7+&#jfgUy$KjEb27Umq^=s zM?>BpIg!W{71*GT5RiH9aJuMmdZk8I9xN}Rx~Or{8IL{iyyy8AQh_5A-bzrde~rb$ zP68VmmN!^KX8pTo9d@%F36lVN{w#u8I<0ywg9~(G{Nf4>hcZ`sGSvU?0msz84{ad)Q-ZZxyv^KfjS@# zsDPi*(zD$DtL_D=w*O2Z&8uu4G={n&#VqceW^HZ#qdA1mraktKaNwbTiWn5`e{?|c zY+rn=HR4EO<}?y@7*H{;b!ID`%?~n&;|6Q^cz>QfDJZ^u2?Bv^|4|}@?4F~aA9Htb z7&YD^H2T$QqzEheZZGo!wR%Ne#iV2>ZUs{7GNY_o)OJJr)6Gg$x!a6m8`PhrTf0P{$auL_*HZy?G!$ zWc=b?rcGyJ3QM)|2LYW3(Xf934nuqxJR@s}rdU4g68HU=mXCaW>&%Mh`tO6a&`S^T zS)HF{bH`AW0OKm9vq;D^qQ#{i!saM&3*#e6%ZcEq0T7hbgE-phrQvdMD&oW`+T;UW z(cik8*Khmch8%~>KnLu=w(As4{lN>N(SO`BE9tSA%<`Hg$76AzXy9`9f6?=3V;KKg z02)A#^AE~HL-XH2TRL-HDZ1AGf>Si!c~{Sj{44*VVHe%_O;i1M5BhiUX=wiM^h5un z{{H`a&GEPw-j1N%3ex{2kM0@b5MYwtuC~a3kvjc9?5grH{;BycUCIm3tCw$9@7Zpb z3F8JFjO*S)fjSV~mT$zj0Z?Ao%VWRyABa;=yt{nVwqU>UrR6P&gH;VbI#BD92t$Ve zc{y(UY=@_B!>jSDXkypidPz*01(@5>>*|leBbV|9*Qd8Om`_Z>#h?xa9X!0vD7iD9 z#s~O9LhnRW^H@3ppH=&#$I$^YL|dIb!p|nRA^Ig)9UCrx1O#n|W06AKt39 zY%kDnwvc^cUA@Qa4u~FCb5tGD=y8{+=8wWbX)~8GL9+SyGg^0*+B+3S4YC#id$r@0 zi(ltzPPhO38nxXiXU87Ty`^LG;-}jkZTZjdLm{gDUAZf-he&r(wU=Fv4k;LLbd4Xy zSwtgMtb0I*P-PcocCrGCw`a+LPBx8?4MEs|1k1(wZ;;}yOcCpcE$aGg%B-Jt>x!w# z8|&Q1_zRVN%vi^N6q_JpAyq znr^-&d`JZ+v@OoMJ{yrDVQXA6%+&+{9nT%|&}#4S1G$&(O3~v`25>r@yKð)al> zhb0P{Ik^0d37v*SZ*Nx02xmbM{-2#O(uX9$C)Ky}K-hBwuV?a8nl}@C5Mubrqw*6$ zELU>WT127vwIA%v*t@HD$RqrN8eR^Gw!}mWP0h$LKO)F*IWw}s6D>)5BaGIEeO?~yu>lA^Wb)KtfeHke?3KNXxJoIgPRn+ybX z2kmnieU@p{sOKbMCn9Fv8F;@eM4%8i{Pfjj=dhk(0jm&Mxj`>%Ir>}axZn>$K{@yevfZ} z22rDOft;}lD}g&g*djM36P*Wu502^T(!hPQQd3ha#E8;A5>0zA)|Dgzft{nI&BF(oHmy!a-fCr|Co851jF+_QcZ=qn1$m ze5uiJrIn9J(r(VMf~NWv`y@zw?}I0X9g2S z*T2@mvN@6R2fu9Re8qY$hB(+{6QegxV>5KsY%9`C&FXW%XeO8`t}H22bvG_mI1Fb8 z?-PR=Y&w!ey0SK#4^(*2gRMyxH+eFB;f?xr&yvc!v~zP}Nmb?VXz5woO^*D>CyHf@ zJ1I7MUOwxy9S!du!?Di!n#|ER>nTj_=w2m;FM&Qi5_f8JnI0y1r0UJmf7O<`t(XbH z{F$P^k|!7Ig4tDyWc6w|FJw?|Y@_}PtIE^COO!de*(f`S^1wa9~K_C?qOh6+qhopO9UcO?vM^ScO znQ}hmSb9rC0}7_YMnX-7-`ZHXv`gGp*}!TeYw8xO8(p`K2#zkTE5%;`FVL>0)wP7~ zuu)GOx$?F|HS^%dEv1wV1gOYBQ@C%nln_Cf%?4amFvkjo9t)tratJojYu@IKpJUQ> z>;A}%jNVEZyTVc&m3Iz@3O#llf{jhj{*d_S$BxC-$7~PF-T=}> z{8yIyu02XDVY_`OM{%{|hReAbxWE86i^np_kRLCt9E>h++Ut1hZV`CkQ8?PXilD3J zP8!O=-B(LB9)N56T0pPsa9ErjcF&pc>PeHBli)LsgSJJoRa&;t8dcdwxRWQ$fTRuT zUt?m`ps}|gsvJyMu-cy{2TIMPJ-hAu%ddZLUpV9*3u6BKW@Vaa`GpC$guTNp z5$sm}lB!EmzEZT8Px^L7iTjd4+cDC&d!TO^Dl0^`E0H(K&{eF*Q2c)Oc=%%IoHP7j 
zQ?UzGffV?6+q>m;TZ<_j?Bz|HI`&d)Sm9cMg{&pb;RHftLK#nO#G+VXZ3{@l^^v7Ns2Xq5WZ>NxwHT|YqM#YlHF@J7~Fk}7D7og zn(P{$W3T3@UVlke9&T(382_Hrcvb-CQLxkpyFNuW;}E$*cJ-Yk8wS`BcLKlf)s2a9 z9F7LAu6%npI|2LbHCYBQ1jK%uF(Mhp&bHqKU_g{=*{xa(eAXcrFy%B0nJVS)N3f`| zjl%#F9jE4o;L2BC+5Sz^y@?8QWh;FSB$h(R-KnOldzMXdmQM=bdsNMJ&$ewXsi!lw zo0MY&DF?fjKD_q>erD8zQzpm_bVBi3vip5wLqb`HaRcj72#thU^&~j=-E!(QcpWFw zbDuI;qBGD{-XttF!jl2hWD&bJvAg6LxablXcb3EFn{;GPlyU+Ly1h1^8zq)KdiX`@ zs%8svF397)@1{LKFwo10-smrJ*j#J48Qp_h&4p375wyckmDy~X3$SxO3sw$bNth{g zu3sn_`$a1?a9}w5;48>^?3ryh{0{JCP*gdn1&E#3E~IRtHzjU?n3(xUx1h%qX5TPz z@ROzd2ANF;e+4G7|s{#HbV%mO?LV&%|^&eA=l4Sn%6 zSgsA0SeLQ&3Z)8$2{`m-$i&9PSax_E0u&y@`Q;n1uBn!=R1eo*;XEAx+^jYhd9|l* zxO^jFX!*mI)L$O{QKd<(kkHk<5Zq<3)}^JNCqZlsO;!adf#-s{_y!wN`~DaR4>U3S&m-u zOS30rFhc7o8F5XEUKpV@*!e&~{ULJcq@M?IsZ&jE2}Gdd3XlVQfx!eX9}KyNcQ*^c zr9E%0w3(Bc-|L?>9hsT8%ieHjJKUHgt1O8@yU(1K52dGrDRqQAH7{>i^n;J^_tq`zCDwghCmiwBG7S6pS`1$;qT3zYn0Fi+&T~I1pJWgi?(HR1 zCsL?YeiXq;*duTZpiu{?%AD~Nf|F?+{GZ(A=ldni$|Rb~fP!^XEO%@15+!7q!5H(_ z*=}PxighNIuk0cl%?+Ocdt5-pdoiIawNBID!Ffktf^2;T%gp>lh9XqGyx~fYSNzsT zrn?fvrMwhm|77v`_6N4!#lo*7xoO*ZyuJ;2Bq~J8R1fAx7R@1<=jgFqBvCe|9((Wn zhMU^v<{3U?EtjOax^SyX!GBzXnoG}5J3cA|wV({*hMKW~3WA~o#A5qEv!bG+xr4>5 zo5|QY_ALoik&>AgfMML9HD*+o7pITl>IXa_m_r@CwSb%*M!ONUcLwv-nIybdpWp3! zFQyf8Hl0=6Ilcx;fJ2m%`oB0G?(c@3wqdzY*Q=JPJe*GJy7tQ)(?p9*NdlA$vhw+4 zhxC;NFZkRyBs}J@$ZSiuA7ZRo@i!@bL9c&>E$?8i)d>>N zUDKCU%U2vsd@_v-@bw*VbFukD(vbPj&5aN4mvlN0i67UpdkvD<+<@}}ooUY$>M=d6 z((Wque;abKmC2Vq>U0#_l}B&Y6MM}ed=A$;-AHNfS{?3puS?Ap@QaSGBvG9%Cp!e=|&71{ z;O~eDJFE@z_tDjrKnEfuvPM#Mtv%nnbalko^}5`xJwi3PJFiTV;{!g^&2`g}Zn*bF zeAUm49GZY|;hzkqr>nmhzYMYZsN@wt5m>o~cpN?S1PfQkcV*E&U~!1Qp2s_$Wx4@l zF@|{OpS5K<2`QX88S}D#uJuEcC;f$LgSbn^R^4Yb*K#aVYH5d25S%qOPxY84tIBnU z5ZXwVzRxU=B11*W5E+-s zY-4C`eFgo7%hH9NzMZSmOcF3-SsePoHv|=8Cl>NGlN%G00_G?gMw83RjgwI)NVslo z1LL@^#v1Q|D*}A<NI8- z(q}=i{Sb&{LU6@q4dhT-2<~FLf+4c4B6^de>dU>X;dfXi68Y5 zp@VhRXcE2uxHJsPl84SRO?_`x;e<}P#MCL9#s42~kPr$HKXfcNJsL9ifqwsbh3!mn zBE62HR~UGyE#UGL^jCjBgK3G_Qj+SC(L&Or5+?^_H5;Ke4DR|-th}M8epTEy_*M0c zsj`1v#k>+DvPxh1sNK|eHX*zC(-hmO@9I^y-5PSw6%`e;ZRv7fO}%%F?d$g_WJL{0 z;{raRjs%g={Oa^giFjIxfQ=6I7B(1|gVs|;l3mOx`MkyEnum%zxP**3GS`$MiArpA ziGV-nFFkU6BVgVz@-}WSpk+A06)5$@h0F|22@I#@XgVUrVM2R5!7^vffr4tmZJyE( zN}ta!mmiLi4ITQ3v%P5smZc|d!sZ)BtL#z(`kGrLg_zzhOxw#jP1Rb(yd(dOaO!94 zZVdE=2eNIRZpv6KQMsH2_Cs%g@%x6QMCtt=eV>Fa)A$TO9O7_n9sf3E(kHkwjnU1t zFb8x=^b3}e7=8&mFqMv-B}sgZ>tVv6lm-7ZQ=WjQ-?H+h75de*I$!77LI@jzyKZv9 zpjW`d;f}IB<>j9gux|PI_elX-qG@XZHG6vjYk|a_p)0Q4lW1~~r`NO>&>_E{6B`5V zaZfUTSoFO zP|T@Ot~c{0nigv@aTFenb4ZH=f6Y*xL}o1#x)p#=U<^va@Loy^dV``N5}$Ee}7 zubo=zch$jalZZUaK0uoUMwU&VwD2X>U+9O}6CKpI0973p7cAvSA35PeWOL*61F+P# ztC~t1q}oaND*#XF8sWn?ykxP41VZp>R;4Iz{Q7JsI4&`udK6Gh$yJxvYTDR4{FQ>g zF3tfxL$!amNUS(wS3rCXrM+=4<1suylO_5s&e)D65!_Js%AOgSzFm9dc()OMTIi5c zFZAXFd^-kV`Go(gk!bkda_%c|ML^Zf%-)BppMki4##dfMU>N_V4kP37+<%W#p*!!I z%DrPWFu_9m_5vh)@&uP8RMQ0jV-cQkF_CA(246!k5^i(JuhkrV>6wMUCDOK_cy$rU zcpm_o6ES?Qry$)dO26H-x5x)j$&M+_h1TbBn;!P0hx8{8%KpYkIaBQWuw|+aXsvxM zOwkLT0|zcc-Mblo#rEF7LpPK6_Nc_+V*QdlKgtRgUn8(&pM^l3 z|Ng$%J{b`-32;Ei!hi>Z&o_!Y?t>k3AX_i;mtO6xPGuu_{&Mjuo^3}qNunj0_h9%$ z^4OR30=EYco4)%3EKGU(Kt7jpAM+Vfw-#JA1B7Y3Ngv!EKQM{CquxaVSlVz?N?YnZ z{}>;A+#_&mu)?4uW%M}*M1TC_tzz+ig|#$aloeY8crjCaC>wTkRO}O^=1kkL(9MxR7Pl|Z~=r>wHspr02#-or0^c= zf&rdUwLmjH52&!QWoA+kLJbn-JdVG(ck8%|gM^?{XY+vX0Ed0_B9FO`H)rU&$m4*Q z?wo0lJJ#ZQI=6mg0jaHTY~*{9C$NovUJD}?xw~LOu|D51#ffqnjXhwe$x_pUBqstT zhNhs)Xl3jEEm`l?toW_j`2?X7!*a1Blt+Vg`;}uEZn}xoeE1pwi^I2uv*pjay?y}T zu|*b`CbN@;Mxy76bqgW^>EAd1Nzi?Vkx$v_ta7vC%0TFOmf^P|F6D7dT*P%J06?f?29 
z)HMx);s;I#n$@{Kpq3w(XcMf36R|M4YkZAwK8xL3I=ocaQ`~u|mSh2dY!Zn(01s`w zfX%lx8JrCEmCi1=h_Z9!)N-16wbpqDc#ek$gBom{>2)5n!I*8wDt=<&laO z)2wQjN!ihQuX3X>Uz4-xrb>lsx5)P)Oh=1&ySK~ zm+^pFSe!%kr5|h##MK>OsNfPSakeE{bH`!*?_LF!k>Y%gtfyb&v(%bJWq$MrHu<^^ zhRCgiX&M!~Yo&^Tk9EC96)!>rf|3ri76DO9nkOFO3Q$*Y!Xt2&VG)|~LaMlnm}T9H zGVR4bFDC^zudn0+z~3#(WFSU*^~~};Eo;&wW^&(tkWm&8 zhS`7s_8M?1vrpcYnC^wY^6iOZz_AR&wfQ_x@)pCuZQqkG z`g0V8ff{_FKQ~N3pdOgQ%`bXnU1lC1hhRXpVOoZsWAURaWG?oVyVlK<>CeSv<{q>A zDnfptJ(MTI`$?qip1sM*0jKs#40f7N2I5CaN8gAgC7uesi2IMWiIWkwlF}?tBH6l>g{ggv}VOIyI`G71M$d3@SH4>x@3X%?an_A!J1_+-T>pdD#^bv^_~hr zoWGF8U9@}<&w&J+m{1i;&LOAPfNmbIg@HbmPv(_6{kt9XY@*4`2cf1-WiLd zy`;u712+)7|2e@rLF;trJ&LR(miuFDGSf(=-nEepp=Dt}{4T zZA^3Bf4H$jJox1_S`8=x7%#fIG)?)6XeIUAJe5E}GW}N2ZKAx{!J2LOF)iS@Ot)+< zs-)yzU6gTycZJ!7@EkC8vF?(%*S1rYS=~*40`kIgfkWovr_2i+O@JU~cgf<&fgHYG zHwSmj`}C=A;(b^Bc->=)*M zCO-V8Y;g}*eB8P~&5)VENi*Pio&ks2c=j&TB*P*tr%*(zf|OaQ+Dx6G>F!Pap1*Lt!1!eC86W6cK! z^e>R3dK7mTFd4;$c^wj=XDGNP)_DxG`=qI0#PPA^Wx1H z9(aqb`TiEcaBq`3_oDQBh_*+*{F}erdPie-R1>X`Q4@g{89wXh{Yq@lVG8;G7klp+ z)MOX_4F(ZV5l}%?6!DSXMXDf0vCu;AMNo<)5Rl%xV53NfP!$L@fI#R)P(gYLH8hpp zf)r^XuqQm~^X`Xt=D#z$v$HdMJ~0Wo@9Ug%ovZw=a?W0Gj14ZozS%@Ac%>ww_hyP7 z(j9M}yhcoFEqYNA1=HK)b?KRL_K?LN7G8cbUbHkfO+1K@-FWIqN~87BsX_?G9nRo4 zXiar1gnUk~-sf8rlL{Gn@ws>Qs$S6}g#B)Q0ZH1_JbwZ6oOukC>!7^i zUj6W1sLJG4=yDgY%x*d--&_6G3Bg`bze?R%K?69g9DUZIe?VWkiPY};n{LFbuT2!x z=Nzx|CC<|YjM`Njq`J>uDh+p;#}=8!66Xhkq@{muWNbXSb*9y;t7C0eBPCz;J}fDsTMo}H>#YR75jf@ZbkgI{C)cQ=kh|z5MkrZ3_Y}tEkfDo$ zh<6$y4h9gZR19=->tN70-vN*ImYyQIOfT(vG$^b z?RVxNry9j0W%W{=Mtjq*qhIi8!gfweqt1w10_D1)QAb4A8}_n@A(>{6g?MQTT-*41 z-}1bui3{i#Q|v8*cY8t0qKf1*sd1@r;(2ip;RDc>b4Sy0B-s4l6QQD{2rSzp1LcQ~|&TGMfOFrFgb@o1j zAbvO?A9vEU1)q1XVQvGPZ94feV*UpU2WPT-RMitue>#)~!hlN%e!acDRKMAUb7=u? za5X;k4&8N8X*9y4eWfcH|$_tEGD61Fk;^G2$^6Q`BvZ7GnA z4o&pc6Rcmp>2zJeM$Q^~qQca1@=gVM-gRvt9^jXu^CvS|Lr&AKTvi&LZQ6FTOVYyE z@9EoJ?r-TGjVyn4Z#6r{3o7ZpP5zy5bz7#keaQwpG(V22n+f0kFm~{Tv*u`_xje_V1?hId_6(3a%BP`C}|oIIp#fX zH%2t3Ifs6iU*VF5YM(C?n5v(&OInvf;fKbyF4ZHvDk$7o`2D8eQ6XT?t(AU`+r8`e zu09NwTAcr}y(Ht76|tHszpzn5j8`@F8OgE&>SAHyjlgQ13FCl+pAK$GZr2BdyIgq` z8nCOizv2?>2?tb4&&lzAwmVz%)*rnr9KaBb+Qr zEq$YH{Mqsb<-smXffzmz&eK{XXiUuYHrA;MZZrL=zjf z_(H-io*kYO6UFY;Z*FMrUG#0aw^ubU)4W3M(lhkKM>=xCUIeFIK)sZ)V(y&Shl~va zY@S~x$3J+jGonz>Bzw=t1w?M+T?!7f>l#GMR2pRW42;*8Hac&w930eESn<&t8G&`OAOh&NcNx)$qfj4Xl-Q?0{Bks@uCU36J#U*6)ii z?_m#4zTX9PGl@acS>|=GH5MAjQ!2)upz1b1uD2-*a65>WOsp1;+VMO1&-;u;$nL8P z$jG{@=Q{5rxP3IGw|;(_eIDBvt?cNk5m<`bQYZxG%Hr zZL+14Dqk(mved$fU4Z#zS72@<^M=*9iDdYRh(5w?=R=$EDaWj za>_BIBWpZ$z96RuTf9L{Oo#JUs&swJO;O9k>AHwG&P55HWJ=igw5J8mb)t0pf>Lzgws+cVQV6~NcY|o^(gwNu2FbNy4Ry8M^sn$N&JFLEmPyI%qrrNa#Md$}2s8y7 z8O-8enoP>liz*60Mj5+Ng{=^Q0gcICQnaD388w{Z$9d9Jm^B`g(%F+~uCZl%g=^WN zfS;%7yE1MsNZc|irNy|;HB5T9dw1b9gA29liXYg}(K5@7O~?z%z<5a2K#AGy*4PN2 zy)Dthq+fp7ecg9#w6A)N={p%M1V1;_jhh-=?^}(xldo}$S#>-AgA;c+*ET_NCzRb2 zbhWh^w7do%vG7yxyPIrfE)O@{!HLeZk0WWLzh_Y4suVyL!R}|(`-Ayzf=>Q9vJYZX zx%$X&yNPC8J-+D8yeYRJSlh_`SIasD8vMLGU( zWaKGxNo~#bF;Vn%suUx$l#7@-QRBPnS2{`W;!9HBBZ{ax4Ot%))w}9(dL}8YiS5qf zH`*@SBZI6{vMcH$GN`^Lkp~qTtxi?CtUfO?C!UU}@W}c(_vW73<^ox5AGwuuqYmTK zRF^mpTjouk)}N!jGM}D-Jl_hf#s%P=ja@mun2!Cvk32fI3q}P$r10cIh~aHoO+V3>K8CS^Y(}_QgMre7M0#LF0Zff zjhV;ay&wC_$zjtgd(w?Kv6MdQZ^t=7O;&C{$p|0p?=&rU&K;p0tN)zh|K``{_g*UF zshA?;2J^&j8RF*-Cqm$2rJrGql}i@$!(i-`nMt?j-MRt{bki?V%0Uv;NkPM8RuLdK z;jwK&-qb6%)~%EqNguq9_3GFVsQmdB zVNjtw{t))^EH%y7F$a0T(=mAJv6~Z+d+|=tor5jh(sew&5`o4T2c++&hv>^*BBp{v zn~zBxcKuM;w>ELGTVY#hVif3{)h9a4TS7dy)p^z4q9$2QCMDf%-Y$4zS9AN`G)TJQ zwF{{Un@-XNPrG}3ei6{3Jav=8(pLuuC3rHb9#){ooDPZQ$933oZuKPX}@d75x+7^XX$Z%$<#1HWY 
zay>y`6~@**&*X6OeNtnrYFV*Jll8t5cJQ-@o|>m`y|6>tjnDInYl4UDp7}y_gp7!& z2~yPFZZ0lUkA1PR%%yEieE~+OR#@T#l@Y-FPuAXC5U_HU&Ptf(9#t**>J|_ z!A6ta>haqktR$+kuWG0J%U4NFjM=R}66JBpZNU9SI5R6gd-FM3rcfZe-75+12*l)+ zdzDXJ>OBeXuV;i;zWDN#v<(CKnEQA|HJ+t3UYT&mvgZBeg9i~;5!p)XqiVO93f+?+ zSj4qtyN87aSOr4(`cD}Y{eph8gXq#?Kbn_WlG$_rf?~-^8qbDATYIp#0PuDV#&re{ z7Be%ti=EEc2T{eJE;m6RST_PWrxQbvp1Ue|wHBJivnumXpNw!a^=IRREV6n$=wu{Y zT`;+7INnf6O|wUH(M*|@cSZaetOc6oaN!b6A@|Ux%`;Ntrc%F5bMLq1tdT})G3WiI zv*-@z0%J&CP@xy`bQ3I*(rhMc(#!46i||hsukpb) z2%|_^h#mn<-?89v-Yi7@~QE5&=>7qlhPJQ!m#>wgR8(fOO@yKd{54=P$ksh zOx4`ybzXZud0%G8y7mKU;MmPfy&d$>j}KP*06<)kmHDxs=<>!}AGuL%HcSrYbB`uP z^WdpcWDYXbWQAmx#|Du2*Vcl11o>T;AFI;ELwC|9Q$DZBn8Kh|lDGzC07f3`)-et$ zF0pnx9&Uq_E6Q=y(+}>BnEe<)e%5c=UB{UPQ~#Z_uhzVE*5r&|o#(F-tuLJNzob@> zNdxrBxXoBQ7SD6K4TpHP;MAUhUC(2zeOvT}Wr!{*(`~QnLu_=1XGzuVb9HHbE2i7z zc8A~KPWOOJ^6J4|D0F>MFOGh9ZtMj`(Fr#~%riUFRiq1b%U=emh#R|}A(g@^D=VX) zfAaj0M8U#c&OTqIg;!(jliu`siRp*i<6Defm-}jn&sn4k`{tasx!b=`M21@e)TiV2 zc5NgwYctxGkew$HzTVf;Tc1;l8RyY*88hWq^K0q4Q~L`)KB`JlwW1R&zMAbL2P@6b zBI;`3FQ%9IMEJ@Y_k6=^;SM?-MyuS6eho5hO!+`TjF$Ho@gBj1Kq|7Wvfrtfj_UIz zk9Qqo;5Es{jb&b4^^|RB(*&|);1xAYkN7iaSA`g8GBh@|Z14rT^CihKqQ9ou%)-a@HG`+E*bQIvLiPT9`iND}s$^-r(!xk-w#l(qH_A?T+DYt@ zt0c8)7Mgf6Z!Q|S-7l)a=@s1`WZ}f$bGu#@t&fL4*&h3_{)mim0bcyGWxL*fbzpXH zBKE+#zkH{z=>955BIR`Yg*v!~&CjjVyJtgxd2dQrZLc!_#`x4NJm?5e5M-nJ)x>4N zN2cM7mTATIYZy_ZVsq4tv)nT{x1Q3i$vOjgpn7o`x8w-mEX2b5MOezkUE-27nKubXFsE&_NMW3gR(346nPS0v}_8|`JYMk_$i4>xD( z0{fJ?3Adf4)01?;rZb(sd5haxczw8Yi&LhZr~ECe9d1`{ZS8T%NI_H&G3M$j*Q%Ro z1)a3UcQ|rE=5tM*hew6UxZ3>II|7YhUBh4kKq7o|TwhIadP{|};XbH9hhgGodDIE%-^m&+vQ>>Oqoq?0|G{|@J?roK}q`}JOKI3d3@qCpeq%4HOGHb&K^ zkCsW~>jclNZ)vc7gche7?SKgHihdhH-foiq>iZhPlIzr#A6}+9?Nu{{?*zi&MIZdl zqRp>*4j&d!$q~Lc+P>NcWq4ik~9y1D(_j&&8f6Oqe|cN2OokUjb3;o<(=xeGP(+s&ecT=Q+| zzEuYU8~qxqs}}@}1X%pe)hVB0i`#DMdlp+q0>|Qi-azZ*>y|v0-^_2G1HhyG$X4qo z8Z7EnwTZwd$+YWgvcxj}``2SC_Uf-%RPny38;h-c-DC zQo=jc^y*COJavIIrBH4(-Ly0ht(E7Xg9<)`o?emNCZ)S9BU}G=4n9fKqpd&_0_3WMAPplNhNFBIfXq^ud zvr~9l!Y)5m?cr1ck}iy8lgeXZR}XRW$9=WDMK{sgCk9uDzbX!0zRJi~L=Oi1gR?*1 zQq#R=SGqU+y~S{Xm?p3^-o4wq0z~ai-92wVpGB6wDx0C4)fT{#@uRYEGZ(LK7#rD; zN0O6X-r2Pc0m@Q#>wSm3O#YQth?MWF50G=Ngo|jMoK>}m7d(W7`Ld52l^Z8|w}e1P zl*`cCEumLG>VDp(YIws_TT9sgIrHl<_|e-rfl!B*vz~X_LE$oOl|P5^&f(%c$*p1Y znXOU#iVF2a->uR##ksBnpI5qDM;MNhMxa-GM^{%Iu=Vfx?qSpRT*GA$I1y4*$=I=# zz5H`pYH|HWmlSAh)BcJxm5ar1|3jV5#(Y5j)y!%ZOObCJs=&hU1_0MJg6ukeWsTEr#S}`;$UjrZ8A1mRZ@{j%+^|& z?tGC?jIRtXZuf_y2pNUn_Johyw+*~j;i}jDFXlMi@vvv9X}|ny`ZCwTC8lS zUO**H2T&=6oa0IZF(MUG2o0;*>~X>PVukr>nz=G71D9dSD4nt?j_pG^^47M>YnbbW zbz6p{aAuuORLC#CUFA)g@gG$YuTbsM8`FVj(JE;$!GrfNu8c}MI(6zO%5MABZz1=Zv!4o1UMXJ@wM{Y@s7{ zJgD?JP;@UUm;1u6UwrLJDa$~t)TJflo&(R+bNAa-EP9X5-|EHtzFjzPJMdV#_V9o( zTeDE^hL|$iX3a9u$lA<%D(&EURKw=ChI=?OGm~gZsy#3Yd@TW#gGGnN!?xk62ywkf zx*_iNPQCpx#F)_{_arXO8R-wrz@hL9 z1YL($6WKOz(|!KYiKo7nH+rC?HnzAbvvT3Yj5vrHI*|qAjZi8`wuY){O{wf3C$MdW zd@WG?-TcK8Urvg8QxY++d_u)oF1LS?0`Z1>!YhOy$mcIA@;RZ4s8OR<`3hbHt;i)= zCt~Qq{+w((qGH@i`K^Lr{m%RSy`hHk3MD-Q625K~dk(cQAkX5v^DgwVc$bLj8Py^_ zLlDgcvFg6Kd)WcV)O`<7+l)BL142@%Z+UrgJ^b6wEwH(`NZmmRQ8D)MwJi{|tL^LZh;t!@stPolL@1wv6-YH4OH!gA0UJ_5JPw~u5V^Y32F zZwo!}6y0(eGI?rGCTd!;T(k)%fKZmcE%AFYgN?mNv z%k6tpaEbGR=7yJEcQ+9P#8A7Pph{X;RZUHNDl{~-rxHYCjIR}So~r&J6Q^3^fY|QW zQfsx0H?gv3*YzMde`p?l6)n$!*QqLlyf1&aV1iO524|HvKY+L(uU59IDB@>^Gp-hU zNSr(zF_QaL0ZJDWL5nY#R;_7+Z@Gx;tKo+_<)F?J$=hCTJMsjcWKB=!`K#jKTR*6z zUSZP){oO40ch*pej46Xb?&%!LgG7c4T=%n~)7CU}E4;^Z_@fYhSs0DY&*(=vnGJ+8 zMlX<|kz(W}YCkb{MV{HEUxO4AcDi$}MJtDPXC*{IWUP1c11a1NJ+=fQEFnGihQyBT zWp2B|QjXq(w$1Ampze4$>T}*aN%CrBg}{^!cNZekLBaP?<|cQWauL@MYI^h3H6rH@ 
z$ng>Gmnlhe>bmt1U%R17D$(L6%afHRE70RyRDAy0SVIT9t+BhiJDct?RA}<<1MO&q zz4#R9O_v~N2LcaPJuE_d<`3p#bwJ-cP+B)x&?02soIs4dnz=NG1@%kgxAZ+Ruc(8j41e7lq$sWath0R~LY|miAltkNDdoWM zhT{}L`r&FNUU?H@KyEP}F@N58t$IDtM_bX;Q87qjJZ=6PYFq-u_si7i2bmIj6e3NTDP-PZ;oE@?2OIWF}SihHyZU z3Lpd{ACOQ;%^X6@48Lj=RN5vHNv%z}C8R)Ey*lXQI9~12`Q`n@BpP3lZ-XpB`PiR3 zvJo}eA0soQ9P5d)0`Nq*TAB!`}fc^M?Pr2%PsLuBw@ z3?!cSzxRZNTpQg+N(>Oo>HAwH4}mCH<+`3GrCA+~Yp4IV)PwxL(~9wbSQh{PT$%a* zt&JuB|JnTQm;m{|a8t?uC_V0f%K{Nh%qV{2&9MC+$OImMu8ZJI_=)UDqwftl9D;0~+ zIpkr7lVF?L;hgruSZ;>Fe^g?$(AKj;oSYB0SR}4NnZ)CuNfiop)h<~MQxB{zN~AY< z*H|Pb7Tqz_)r||iocCmMVB$+0!%md<*8D$jk7s>Y^%ZAHYOrY2G(zUzF10MGnUNK+EbwdD$t_ZTcAK+jZQ6F_{U0)cI;~gA;(EL<7 z_EEjqBZ&XJ;u22lwd-QJk>`pC4K}dR@P;*YupJah)lMFinF*SMrf>aeW|Cw1dTUj* zQQuvC76nx=cZ)EQbWLOp7kX#tnf3{B(*Is-60vI9TlD^VJbxMc^@VXy#pbV5Y)<_S zQBjeX`1tr@no^9ip0$W^*3xWmc!E40-ZYZPdrqdYCF6y#CTCMVTpL%V)qE5gU58MZxezVL5}`{qx=pDvtH3 zw(xg~cLKPTX>2ntWdxEL8ZK?23JSn%g#(fXErcH@Q6j3li?K|V6mh2nmRZ?_^Cc{> z9Bk47Mz5*Ij6YIA2L32eIni`X>valC)X10da@%f!8fRzjFr)&KlS`Ph8p@&+u+Z#k z+#Ed-oHUqZ#_u+9ntR^?4R*n1(%@zYi+K|!rWuOh_Sc8;-=yhw<{GQ+jZ-qKVPZ9S z7#h1d5oD|Qr0N;k>;-zDoL4`;lfte#f(Hu!)GYRlIs_rfY^!-w0^N8G!p3@(So%)yH(uKEAn>~#PPq>Ac4Ii`w%v_JNF-Xn6eFM=>jEn#1^guwc%3G;!FG^^W ztM#H|Oy>5yjVWkX{wnK`^B0>-FFxf9Sz@>}F&3t-8_=07+Wc( zRmI6R_KfxU0^=ulMQMg!-zGElEDC5m7cnU2z& z>2>UhmfKjNK@MS2NhWdXkSCr`MnbF_(>ubK*d`TcYbi{XQ%??BC0KE#bbL2g5q^7# z7&3egvdh~LJ641`p5#!Ljd1ZA`l*uSsA4lR7^9eg@bn`nX>3B4Z5W^SwGvs~DJYH#nmK6pjQwm6QTGAP_R zV2`KvyUjl|M;%AiGBHCfX8*Rf%2WwIwxD@2!L{S$X_~>1tL(58uVGDy9GY>5W%*di zPmT-9$1QJBtuXH=9tNWZ6DXfB#Zd$a*+;+eG}6r5ZvFnAoN(;Wydh?PSD4Ly;jR&r zyAcgjA`)(B&W|=1H#*gSm5HMYf__G@GUBmH`g+kL*<$mmwK6>5qdha?L5cO881{t` zmo@b`txAi!vfg$EJ2IkQW^A(@uSPx#FXlF)?Z`t|Iz(MrEO+=jO+5V zL^EY6toT2AX1-keed}fr##;MlyD>dJZ{ppyMYlh+dljXtN)E7P=7o1koG&ot_rR^T zCogDLBBJ?eYGY^7(44s3MkdaENcsaF$x2y<(8Ld%SEvVhuxAi2Oe7_xA1~d#2`&c+ zwt^ZBTU{i#{KYh!WfRfFe;G>u*3cYnM&vPK+L0YC&#(wTAx56NM48j7;*aW-DBNj| zJ5HP7wHe#N+Dwt;P%um8`JMqk&U#;poG_(6T#lJu`6yE@!foIZctIfHV`qr1*fV8^ zfTV+ZGRPZ}sU4GcGBxnA_Kp5iA?h5NC1g6U{M#uW34p)iJWj8cdED-v!V$9O_w!Da zACrBeb+hzXgRO8N^V(A@%4UNv*i`jg35)BQd5Wh7eeDY~hn|7eXEb8cC!%pkt81kp zVR8MgDF|-JwZtlaKJDlZN{fk`t+&kL!|xJ)ouvEPQF;H>N0q+5W#(_$W&`UV4@WLF z9+n1le^gw(^>M36jKh75ydN+*V-k}!>e55yAaE%{GZF$79J~7^!Iw)~bR_U`(gdyQ zl77-RekeSHgTu_}JMz+PGGiq(8Gjt%P0uq4GlT zvcABLL7XXjpg=b-9rIR-+jLAI~nStY$Mg z@*Iwe!04{nazoNuqFIlCpdNf?^AAiCaZH?_T9rKKDoF$9+@qTC zRdq!9n%T4aQ>qKNm;9r9BW69sBvCW7efS>+_26U0e3RGOrF9*Cp661|TDUH^pm=V@ z6#5DCzRt@wH^Fz{CZaeF7ii@LNq;_eMaziq;#(Mtr5KkJSt4_bc2a@rxEYIu7?+gD z4V!;*!;)}W8Y%ONHZ+5d7(WaKbXWg&qs+ytmC2(Z_e1kZ7~7j#OKPZpoYYc7IonSY znk(d#{!6*JQKIky3v1zU={?GJ^(6-U$iir7CZ*l|kuPYY9mUo>`cp%i$cm^kMl^d? z;$^C~%VOoq#}wE|5|>92ucu{knC~YnTE#TJo0&;T8%Wmgy{mjd0Y4^4{>eEqIjCHl z4Q->=dPP$5#uZCAP#n4E&fAW(@QoCd30%HNl`0&$dHfx9+v}hYQ2~jxB5R=v>-C8? 
zdwRl{Ch0?S;gBmZWI)Ho8)I>hlW)`Pn@W>EuGR*!#EsmI1V&3Brsu&Gb7g<7Q|t1L z$nZC=i0ETOnVD5}b)OIT&uuhl%a_lhJ3T`B83W!WhLcwSh2i-NC=8y!qeB+uvn{Gw z?2xkiSZB4?#+KV3_c?j9)TbaZdO+86TLyJWfo4dvvV4zuq#Z;K z!O+ZUXA@{+MsS-b6`P(K zsVaC>oGJvZS&S%USQ);4pNejnomH4vE^2@2qVX`}>5;&xIF3+qb;+acxYJn8!bUi- z>2xOMuf7eH3F;65-Y5`OmkT=IURN_cwIEGMvbrYxB1}xyo9uIX+;Jn6YaD0t&Bi#I z?6Q;A_Gzfbn>1y%*q+p$_UC4JA>%~mG3R-qzWK}N2b90#Q#hu-((Fg@>tmYY;KRLz zm42dzFXb+sw>7kfzk``t?=cJa^?`Ha@)^u;P)eUtn~WYbir@>DDpzPM=hrNgzA;fh z5-RE~+#;x9EgV^BWmJ5rRX5d&jU2nM;HsGJ$_g%0NI$pi_x)@u&3?`hj+3Uat~>6S zoH{Ki0=?SGG#FUCZV*r75=_>vWMYnBc47L1(k zOnSVhhm|p_y}3sta4b4}$^)dA>UKxxmYt zSV>wNq z2|{vY(Y+ZFnJ#pvS0qQT@m_>i`K#EtCc+>uBg-$oduKQiScFhC^V7;MTrhlA$A zSt@H7G1wV%-ZE7a!t}f@aZ9>MG>v{oOs5FhEcE77_p0JxgMOVwjl0je*2F~gmzkAD zG8I-?v2|8j-mtY9{w(lv@ovQT4Lv{3;3d68NayjVGVg5QK@a-o-^H(>7fBbJ_#Yqj z`2^fvw+M?cLT*2CgaS0DeWDkWb^%U#(|+xW5mTmKpUok#WTBc`2=2Gurv{6}Fr6Y2 z?T5a(4EQmtIR@pWp8I_@F!#Yxk+6c4ALMfH8CFJlt4dLcxNp0>@xy%|8?bQ*cBjum zsAfa>o56AhQK&}OomLE)VIwjY$j-XSGgvz0t@2}MVz-C|P=%0VVhG8b{mIje+-(z< z@tcv6u)CrN0nP1-2_(WZNhUL2wfCUz#d9l)%Y%!_-;O<6xHQ{TQ3ft6FyT~03=D4S zAQ>;hHHFiUwoQE^%A;g;)h%fu#Lrv5Ptrvk3sHaM`J?4IoT-pM(Ox;IESE=m%^Y4p zrjuKIt>40YqK%#7cJxQLisxk_imMaiH#WsYyZdT>iHXWX(s>x{CazVL%EXU-Jk(Ed z?K?)`SRw6KZ---N`UMV{f1J2q2>)3{#HdEegD_D{PucymkhOO{bk)AN*RIFO#q8;$ z?h{H@q9U{Ub1y6=@n0;DEg0c2xFV&i|M;p zI0i%0SvIoCbYwam#}Rf`P<|nyWS=q*X8cOBAZJ+i$REm!O0UH?*uLYZQBXV?48Qau zq9j=E2ZPlr?UsvGl5K}6W~_@F)wu)wQ~J~0)fCMpw53jvYgN|#nA%&W#}691Zt$x} z?8y#n7KvGB^51cJ#~HFwp9%_S#dS&1zpLxH&;_BF^7_uCTTU54w2%MVH4KrRl zUB@@}=~J2g_7`mhN)oqv8EsW>GSpHui%>k$Y;4A87Td)xbazlZjWm3t4onk8c#Ud# zxxrJdQ5E^p7|O8aa~L`jgnm-9(kpwpYT>T1$!e1gRM-~smJ4_K#eyj`2y=cCFcO)c zVYS6+>AvSSI5xI3biqbqan>atK5D)Bq-ZlLG;ExKC>KIo8i5<-^5|CdIw@>am8??C za>Sm5!lD>kbLYSLwi4UKrYiANMm)23TS6mS6ldSE0`SEmt8?uE{I2FF@szl+D>W7t zxnand<)Y5x_T>#?G^4-|h!TB6X;D2X{5X-)+Hll%N>WN(MK?vAK5W5oO2sA#1Lp*w z(lEt0lAHbZ8r7tSBCRTtD;GVgoH2T6r%ZNjt!!$4tO^T7i(+e2qa*)Gln)~2TW+$z zsRR08SV7cmBBDg2!TkH?^AcJCIpth6mJ1O0+p#OHiMK?5Hn@aL7wOwlf9dx<@vEr$ zKDpvQ_TTwh2fE>nL*@56Ui}EwXi2Q^DdV8_N8X80G@*}bEJi55vSNR;z5!SbX2x)O z)G&At##=ZNx9_tJ@06xT5-U;n5W*`=*UqhUcl6(M674dqpgUO2yF)~>pF7%)lBL(e z9hF_Mh8W50WJ?rTddYeG#AA{j2LQO@EHVQZHsv79yN8M-1nZwOI~rGa)|gLs#ZlU% zQJ_Al-OGeWOLE-Ca$NS(5+;{JzlTQ7bnV+0`(apyaGH^2h0<>bdih%2aqn+36qs7V z(-+b)zhbvOC3<|8z{Z#xw%DOg%dKcu+ISULePuR6I7Km4SIx3j9FKd+7?$~R0c;PV z@c;+RgT`XCgK&*8I~UJQ!Y!>&9e}a?mhQj5lTO{yCwkjvw6|YOIhW$^yxvMVA(z2r zIUmoew<%{`~h58f7*wrah8u zH${SHGGQzyaovd_PYnqLP%6M}@QT2JNkx2tS+Q=}(|G>#trtb^0zgV$EAXTRvT_iz zlFzMK{20SeQ%LBDZ0WV<^VeKh%`hvrBEW>F%zo-lW8?J|3_Xjh6pTt&-p;bMk5uS? 
z9}H{k>G%aJcSvCX#>ZS-jAl^);=RztKva$ibBp^#$ai4z$`-11+h`IEV;OrdeL_e&A4 zNEjfB(hwy!rC(9cPvL)xi|_PO5!!-23a93)ooJ9++&N%d8uaK{N*c?lj=PNp-z4g| z1czk&AHofb4IbFjhZ&BB`gZc1Cvlns>K|kXEU$5(0_)A>pQ%^8+hP_bN;7y&_D3=> zHQCMQBRPySE;YcFlAiBAPMogdxZGgMai3~yB=HW8Au-<}t`I7=b;5fvfMprt@5u92 z95b*!>-~?1@&#OzS*a0I9;^~Y?7!x^Yts;adnk0OkqAs$iv$P)$g`!MFEM0tS*_ir zjktGE1xCaqPp}u=in#sYnKrzw=TUF`<#wGgAIt>){7FrH;`NJ z^?g}KmSl6c5320x$#sKTw0F3W9&oh~i4A_(4xN9R;4oe)7+GMhDh8AsGt7y#$_bUk zWCH{xuFvdZG%N^HraF$`&I>#^(`{OHZyoUr<-_w_d1;s*2v4!mVRXdF2V{^~wNGVc zVHf}t+0>Aj`Nr*G28(ZpJonaTyWb`kp%f0poe+xO{0zRiv+7f?NN8m+LHyeufZp74 zS7ua@TvQVtt(&*{=eszo+8E@*-VyU)IdM{R%1$m>(YW_v7POZUaCGly%^wSMW=*B5 zpk?A^YPZ#5r@%7%t61X9xG-yWIu6o5*&&ggU#phfR%;a zsu@W8JKBj3SjD0S2XyUFz}`c(n9M$$t4ODu6)iL3u2@Xrv+k}R z^<3iytFKkiAH;xCRfD{Yq}G&lnT<{XmLXmLcTIJRNEB!_x@p=+poyi_n|PuO;u1ZxRPv36 zTXse~sNIL?4_15n0G*9{Z-({c?m<2+Z*2}leHfykF zTk^#V&F_6iTvgQpUe6v_S|%X9as>h^qYuX`qNHq)(*VG^f2;A}Gn;6q2svy|Z)rqU zKh6T2v6$qd?1VGuXB(78JW)c-unI@nv?n zqw}MuX{2F82RUOhQF{phW{jOB?2QM)!Vj;~FPowfoJNK&Wnw9m)&M;ij{105TPO`Y zNl|#R!h;YJm&ngWavmxiw`wOTjLj|2&p1 zVXFwh9byFSaR&S_9BC^e+70^o(B!WqKLHj=5<+DN0!^|lnC$5yI|B2s78CP{dh34r zR>~Mkck$LMzM;7v;`-7f`JuNFCBvIRVorGsGjmBLAcn$j)2~nezSoZjl3tzP`xu8G z_nPkVVtEV7e7J(>ShyN$_D;37wM9k9?5aHS{pC^`$~#%+@dgx@$Va8r>4menB^x_8 zdRWOoAX%*`8^OTYjr{00zyNcei;e;k09@nyk^^`b*wdLIHkxN{uc7)#Nb%zgXc(sL z_E~*KiY2`yoE1~@!pYA~DWl8D=`o%(Dq+I^N(R>@#DN+Fa>hP*R7@G)0prl;-P~+Q zoN#*vOpD{9cT9oUnuf_*n?kwQYH`zbl-3JAv=v~-wf#gTRgBigmFQDl@cq5Vtygg6 zM`2n&*)##ryF_fJYstQr{~_}&C0v;ud-v___xqcEs{BIW9xd6CZBt%~CEvFlApTWn zde{|r4AOSp{<}(R5+Msl+c$dUiDwZRhc@;ZaN2#03|mi8--U|JqK%(42|M@<>gV#0 z4kMrXu(96AET5@34oP1JK&_okfsX)CV-(a{nkrCH1m(?)?Q}B(iiB^fNe;&KU*nD+ zm@XYQfb%?f*8F~7AK*=h=tDGYw)Zls>RU&!rD4#@iW|q*ss_o?=l0YWd{|}e#<)1s z+yfKoApU8AVM}ZTyiJF5P`;h8p5HMDPxB^~@T#)#Rv+(}FgRhzpqK*5a*=~mrEXLN z$x^7;;;YtYAo!6$|H4ioHw3D63<58&{Zbm#h#M0GIv+lTI+`Kc{y}} z^d)HQk5VDP+k1R_!Y}gX@-J)yfmJ&2`UavC=dwN%S~R2Auw&{(sJdVpsTfUuxKG|C zyRGLqof8R!wZG%Y;o>CQc0PC1U{pH@yFWK zbhKjhNucyC%a06Uyuk&GQ7C-J}dJZp1}KMd^BejcwSgG3yi_ zwJ_J%vXkQE!WbxHTL4*c6Lm$IF7r?3NCBMl__3>}22Y6#IT^8xuVEYEM;R4A_nl_KDh|#sa`M++Qh-3Nx2Eg`Ec+2| zDWWIUnI_gUfdgg+-f=FMOHjPO!SH~DW~(=+N22_IvMGy5c%K|3CSk->;KP_8(AHg{ zOqJy_&jTCxQVyYf8sGrOGP=?N{)*os6f_!kb-+ld06#zKLcFzmoDjSq6Jw1~CWGXp zfW(SfkTt<9iR}ivZT8n6+>XzHQ zz-6`lEFg*5-t?0b*!X4D)|tfqy&1wEY4u+s27m*ZhdFDCz*PW44d1q-guFxlq-Usa zWa7Ye>n}CBp3-+nU@VOSfsP@K^pGRmFVvBv!KOKp^+eylANwPZ9QvDIQ^e(8uRC@+ z&;B&;xcKjPa-H`{M#QCQ)l}=|#7`{>@ceQsU@eoz@9#oSmG5)y=IbWF_P|OX4fxT8 zyH8@Ixb=DYfyL1}FWdj;>eif*BO6wuYg4UL!s)or@5@Ue7z&aVF7%bS&nm2%JLJ!C z^=O?S{)W#<7}3Qm`?i6Xo`zp!%>`l2KbbJ1ETkW0eI)@wc9Ty~!Y{C&S^uR$J&EK7 ztG`ZGr0$?cbM{ zU}#A1(wJ899SVh-!nnEruEPo`eZ`*+G4Wyk8<}v*t-!yu<6Z*VAuP9|Igk#`Br~tN z?(Z(ukmx&e=lfP?)FDw*7=d*EeRs1uFDW17%$=^MjfSv=6fcEl|HuEmOP;hJ$8V+i zSu|hz^dI4!B*ITbOA)oe>KU2deAx5fLL5DA_FJYIeFSL>K~lzlkGn@A{o%pldCkh1 zhyQ2VNn8PmNbcrHaPUmK6E%pg_)EJN>5*pVXJ(Wp_KP~4XP^DudN~q#3ixTaP6>>M z&EGkhktS=#oP4`CR5*l%d51313Vd)h`t3;O^6_c@RMz{1E#yMSS&_AW+eSjJk(STj zA8cWVgwwpWq7An3&lW(d~4ZS5jOn! 
z&)fY;yO4ufAMzChc_4`kr~gj$4A~K3Z5T{npBGKcCf?`o^V~^`2G1tNDmH=>GmHMb z$CtEq7|pKClUB*N+E|Oz*{>e{CL!|ZCs{$!lcH0=Qe|V89{)pvoCGi(<}V6tzxV7!lC}aK+cYl4?zekgI7KM1{~k_@c{k{$kA=0KE=UUlA)1A1d> z^AR~4vDo(y!~Xs5sjBc)$Jzh(5nS*E@n;fz+ILS_6@~-I2Q%UigI#0%{au@vRkG-l z&`b-9|HvRoo%9Forse9V+z{f}#80pvXaA*8Zscg8M20?_H-BSg!~8Ex_~D4h^n*ug z*zH@r!Kv{Mz4Tl3$r&8|s$i^j73p9|5QuWv;rhGumLx%u^MMw3gFndd&q}ntBCQ1L zOy}<1%xk8aC5sBPCrDnw(U>_}ibzLeblKw!c8sFaf6oGWdu1S9@GFa5rGc+Q7sdLw zPZ3JMa^Nh5qU64>*TBqrvyEWeM>3-2$oi^Ex|kRKw#VM9md;}9HE|ASf@AZLV>?v*YkT*h zJnizNeUm5z^okRP1zyT#88ih^cz|vHJJhcNQ+LTjpN^Dt3GTgyZ7}BQRTPTYjeR}M zRN@0NE0<-T$&svF0|SGbv-7Xx6zjG?e7qS9VxNBWoA)$ME(*qjT5>NLT%u*{Zd|A34>+%z44&S?b9VQX(d586;wg zcFMxF-foW_zO{I|8oSX=j7|3baT53@>3hS3{<^3J_i&4=GD?dkj-(Wcqv5{}ydJ=t zTU?UGYObbU;faNQYcx?pJ173Kz-bw=}rEDwZNO0mH^PmMpV@Q{X5(`R^#yl8!PL052mKZ7Y)2v=Cn>QjE!=g{ zvnrtWVe#s4@d`B7V$ic?KwD)bHzh4Om;5kbO> za$H|K(kg+_SEqCHGx#5KMBxfaykeq{7#p+7mna`D^5<=DPaM6C6ZD}w(hS1C7sLPF z(YrtcT=U)opX-+r|KpPHN6Vzt6|)UA*|0Up2krYS$lo6=Q&T;5 z-XlVq8cE^_WYk1yW*7cZkpA~b<{?{M-~ewXfGU7y*#D1^Hj-jj@TB5|9xWqVotF4t zLSh4e1bf>LY)5wciPOTsIQmTq&t<^D*K%iuf$T>}DK)>%_^S-H8Az_sc_biU1XYqif)O9^RpbAMQ z|ItvtPnxdc!Z(s`1qiam<|T_iOOakeVznsEu70`>3G;dDziXa%G*!teVU8pRr72Vt zX+M6mCq(CHs?Lt6`hUrYss8mJ85BokaO)HP57yp19_sIlAJ?jqB^6~!3)w=+60($i zNh(X$Np@q%o@G#pQ1*S_34@p!J0(l@Y=ap~c7q|tlKpqbJNo<{-^b(o*Z2O@V;Zly z_jS*?XL+7;?x_N5%_#G({KrE9o6SX{NqXqr4Sm48_a6Qi1N^&4jr~QYWhYD7nR!|R zBlaIH)gt!4yWzYH^cslg@%wxNuho3E=j>6U+00a6qD@voJ3_3}QK4t{F_=5&>t zP1B56{~-tSlV(&ei|1U7159H5@dSl{l$%WfmC+EqLqk-+)1ExViLe}PWH zGA%Ro4vDDyLu&G8t8y`4+t*Y-P8ItHI@5HQyV!06fQ3)w+3?-*k3jhWNaK(Uewi^* zrjU#imY}kVSAnsIQ`w+09Yw;g#z1g5@~vYo$tXQM%-MReaEp_4JZb?9ndj1vkkN9Q zHCLF%-+2*Da>x!DfnM zqn%Gpj8)}^0@w(j_|J=RcTfkym^DD*jgH^#Pk=ybmqBJVNTSBcbak;B-2fwf-s7+LTsa&{7*e-DgAM#9<-i~-j3ZLR8lw-Jg}>M@&HRQ#Tg0`>m1 znvy`)?F)pV8zA{Wu$yEO?bTFmdD-L`?A_aSNDp|@JbabwD3$kHe*NU^QrQP58ZXjY zSAokZWWTxaSr@py88hG*qf_%4BRg^8g3Oxro5*~JUE+Nai|)|yr#jg5r1iN(H;-v& z)hrz7xj3Stt zkixSHZwr4Zum}2fW~#-g+&(hh!>?&L6rLUVE+9+#1Ygz7nHg_+gk!K_x;h6zWi1|!#X_e0rg zV^KQDA4iDng<_gPiy$bRPu0CwgqBS0qPu;DEXs=e&e=mh$J^U6Ix;enS9r*-qLP1u zGOLmMnuNiw+uqPJ26}!H<2lUn<;xfT^$HieMp(%YZa#7D0c~x%e6SHdd@A;A>-2sf z3J3PZ*!=VP@Ao4!z6~HsMl7U7XM=;tyk^G}H@+Z@Tq%Qp zOd;|P#9~USs*gn)U)fKe*FD#mQA#}G@T_8TGPMZ*B7Rnz(IAY|>J@Y7UoxFybL3Yc zHZ|EQgKEXP`g-hri#Q>hFT3^1kxHQIAN*+c{q>d4=)eVs;8H48^x=DjiqdD64Ox>m z7E2SPr3bHye_gxOm8bnogwT^>w|MM)o+Rhz5C`*-_A(#}itJ2Pic#DyUWfx3)+2Ml zi`=|biv)#{VxX)8*10*ZNLyo)knT9g!wL7j zSM~P=)C5Dr*4ujc)q&*5(Ho7V43$^_+_N|BY1MvkR!_+~T7wT#tM-rg`Z4*^p@+7a zw*W~^MA&!i4r1;!9s4)6Kgkcsurq_3&VjwHcdYBg92gdA`@ZW_k-M1#qM3bUAs)P}7l^`qTp(QC>W+(5%E&uPkXtyZ=-8q^A zzlElqq<7s|7fX`T&COzESbL(Phvh{M*o`LfzptIA*h$OaI7i8#eYgx#>^iyjObINa zQj$(bEOTAzP^#YBC)D#LJqxgTE4%uL112B+Na6{@h082BZ14Xpe2}zoF4pxe{B>Db z+TOf~x)sr8{DD96d*4{58KDX4Y}yguqldxBPLbn9-WOTACEn@}zCnR86Z^YlgLAT1 zR2czsa|V+aoUpf%g<|{&*Gttw0a4v^(?aA#5l@90+60d+evSF8y^(?5>H8d$2v2nM zNU$4NZcLv#gY|@;RGk_-tyX9)x30@hI?Rfh&S)8TAuAg zV>~R2pf4}l_`0M88>@YFq@XF(1zH5#*_pj%8>Kk=GPSZ73Ky$$J0|ovZdP!AmfG_q zD(Nj0NQBRG7AQFQ9?RH8oR2pkb5uu(4~nX+7LAeLm>R&b=t6aaWYGPsyEr!`tsjI+vq>feA`-qxkk^;9S?qXrNzgJ z{RJ_v&$MOvLP>F!410oG+i8qL!k|wX0L}%uM~FcIt%KK_e(?OxA4+sg%%>2ad#D>V z&yD8>fsB^T&LYqAQA|Hs8;a|icR%T$^#qnz}AMjPy)$Cg3}IX<9wOKaSSpB?VR{jv5x17%jeGk32HErAIsgE4LmB9&sci zyNV>4x<|Q`aJ&{JJKwRZHsxBW)c5+aWJ)c3;a(9h*xp3*-M066>adnEN#oxuTVvhI zNre$G*G(*%8-#ALl_?!XJ1uqP^4J#~k>Y6T{mHHHYLdk=LPwWlYG6^DJol)**0Y(6 z@3ehEpTb@970eL0Cr>3Cy4P;)!iP~=d=1@!OUQ&(f0)a%R*vrO^0q;@i>#JaOGH9q 
zBeP27w1@k*SYdObk9GM_N=S>y8G!7sku31z;o_4QZ9P|0O3SbCuI5*@;7`p~uVKiVTc)U0`;U?rkB))jggnA=*rv_r7pXTgSj~%5ZdAc+e$(AJ*Gqi$CKzd9 zR-HExGOcZA0*fsm=-RQ>6SV`@-508qz3W^g7OT+(lI_F}C8(Q(d-d+Uj`c^Mjpm)$ za83}XkM@iei1q51tI}u53`sySjT+W*TIq>LVqH+B+{-OWJJ&bZwgkuw<@chUhXq64 zhUJI=&1em}luJ2X1)}R%EYl)Q2(-vV+pDZ^4a)S4>&0CCAe5{&no7D9>NV*NQT3XH zENQqVl@#<~Pq#Ys*UcuUXxVCL*_vp@pq}+=xI*y1S|l%ZJ378M5PYzQm_od?7t?^L z{+!Yl@a7IWX<5xB1l@Wo)RC&;Bvtxz@eXA@CUb+_c(}e#OMi4{_`{0(S>^ZE$6u4x z9w%uN`Z7uP8`=f;c|$;L=({hE6}-6{29C`$iuu3eHPg4OaJW5HGyfJw^n^iY zqNCzfQ@AQ_t=?r#8&}NNUIA{vDR6!CPH;q&yI{plzkuaWM@1$@FZ7fnXUs;Com*nw z;FLCcS`$~PntfT+*SJjMh3bn;!fd%_Ud@*B(SlGV9th;?*A_f?RRQboF2O#B^}t=X zcE)^>2XT-YG%rtw2v_0SC#5OV;?WyB4~4}Gq+mr7`rJ)+J4cH`x$f%E)f{n_-_uRk zPi-0ADnD*Hw~h#OPp*+KCIX@-wa3itNT6lc$!Gh<#Y>8DTj-sj;;mcw0>61Q^!#6a_lx`UFB$W zah=$;0bbY;3(x7qy?aL=);|O?i0_3i-HQcOpy+z+`8I$J3|1ayYFKW(e}3d$!Sj)yJ{jK&pMHyiK#&G-S%uDk6*^?bA1!1Vtg4w< z^(0Y~GPUY~?lX3CBE=Ac%dYq-^$drO#oG`LP1is5p}+H932Panx{B^G5w|ON)zmUa z98n>BUfhgD7s+i^v*|gKyVYzSzZF~DncwofOY`f9BPruFIUIPa#?w2(Iu)bLLCVH+ zYZ$qq^gShx)88u}MDd*^@-jqkfC_Lo`*Z5P^2;Y*?gkX-wAP5WiW#Twp{HwPy+q|D zqeK^DPgnt-o|Wo_p3C6sLn7$u-po!QZV`Qq%Xf8g?Q07@cHI=-qUm@KMPA*x#iQQT z6hGX-yE6!gcRh$(!%KWks*Vu2 zfHMg!LWWi|Cwj&aR0B3URc~j`MeD!im%E&OPn^Blt6kE#_!lhdSNme=(VFq?t<`@1 z#;7xY?yr9)S}#D^jwA|OQC#??eMzh71>f-LrR}ZD67&+T3G&>gRm#;?EnF-4;^59y z*M3+3Y>U+_8y4J};UQxP(KsKKs8#kD76fIe@AiA#8$ZEELq8bkVG%GmI zNm=KvVY2%m#zy~2mC!g#Bw-p{R^Qj(?SyHYwLN!Es<(47dn}o!p0Rub?KnL2f_D6R z)wftmdOV|OS4A63hBy0;P4d%gF=k6iB`UazG*1pSAr5X_W-|xULaL_aRdk|b#!=B4 z51S+#Trv8#+E$gl_agL3$nXmubE#?dc0yMHVd%Is+r`&xjrg$*mK?w88UeCmsaxG! zja_seBF`*dxcnJ!-$IfNTu1NaiS;8JgzPG|+l82If7~x{9_m0Gsy!}M+ z-Z5@>mqSBiN|H6rOdox;C=cy7oSbKrHWT`*wU&ReUWP22Qh@2ptG1l}%4>1L<}IkQ zt+$37+~PJdyq8UO;TIIdDC(S;ojUuhAIjdyc|%1x zOEGloWAU%{(?jvQV_qE(OP9Bl$(_q>`gR9rV@r`tnqbG{}=MSlB$q#G#j9rFdFoH-! z=C9vOTDrbz2oiWGb!YwK=g^&D89&RK8i7B3(yDwpu?EpA)a6Qiy;^LT+d^vZqj@v7 zsxH19mTK)gNs~wiYvxRJesl~id91YxlM<54`U0ACRsJ>R6wBsFw}~(0mVr%2#Dxye z^eA~gx|2Cv#1p2xyO~F&KH!jdM^5)`PgxMRt9uB{BH#9G^G7^ICFA(?a3jP7b3*2@ zP);MJb$cv@u1Xp2(jt_;Se>K7ca)sLsk4UpoKD}+8zpXzm6Onmlneg31ouXft)r{t zdGkajt@B3`sr(>Z#@rWwo;WprE1A3OQ^5%1Pm#uOLLb_OpuKzsc{DFsoh`bAaNN)X zW@A1zG#paAWoZ1aGe>)bMKX%2QK@9rP3_(7k@%Hz(|#D(8?U65FIozQvvhuvbJzWt z-F<*J=YXS+o0J?$zo+Y_x~J1j?|LGKh8tJ#o!xaW62ohK6R(d6 zbrdPD#(xyCQ^e!HFz|IiPmm@XP(pitmz4+?PW;D=?BhWM1V;zD-I zIWwxIIjCQ;Ii)3PT#SZz${rKd7x3i5EqW%o2-5u0h&0`(_dHI}UKCYQ!=rgW#m~ur z6C(y3NV2fK#)nO>U&hPS^@W`@Vu|`8_b6UGo6AT7%hAz_GllI?3yerwD)D{WLU>E? zcJUe?`&zvmxBpT)^=tCLI1pvU4?=D0kKjTGwDIEo^ho}(MRjLeLG6LBd6lP^SA~_5 zgIl!>F{u&{ILXxI$}4^{=4brTKmx6~8zSw3mw-Ai_~^V$n#a)-ov3wmGcY7cfViXIy{-JcUP`$X1zGA!v% zrGDA8&ugqICEDQpvJEuiQtuZ*lI#6~KdB6|?6IZzlAJ2!4>8}t^~uE*zIa{_o2tqs zXe>C-K`WA1K_TZ1Ms?BNi zRL96odGXP8YDQ#*1^D+GKsWF^q&vH<1}<~^tge6N)8nSX#*eZ>?NNoAFfvY4eV#U4kc&CC{t1ShEc+&j5S+~lQAl0hBBve3g^-zc-{*{Od~CT=ydKU~ z?#qp;IFG517`w?3MX5RHw-+G@0UEkz(wA)?L*IyTLtLo(a49=S{{6axa!xi15Ojj-3# z%v#0kC{8iEb}|M>sK?-SeCq}i(0<>zn~_7vE1uEE$g)L9i*1V8ET3@SO_XuQ^R5;! zPOrSQ!F$eI1`C;KMApg4|EM#BO;j%UwHuI~f{1dit>+elv}nyjMEe=FD!M03c>_#V z%wrjkq@#+3x6_TM#3GE@uEl6Z4Ct9AW>iWx^Z{wj#9bBmh2)6~=e>F>2r1PZT)Q=? z;e=m2LxG94{7QFY%gw{{^>_3oi?X;B>h2*O4Z7EX{E!M@X3(D(m#lG6m@0zp38f7j zBeNu>wtCH%8hR__>(a^@@Z8xsDOT0bNFg-wJUFwLLDYIL9V|Hg-K?0~k>rI9crosw zAml6Bp(WqG0ADX)Q#eK0zlo$yC<8U%XO3w`Y)tXv@Vv}7BYU6ZHpjopn}7AOb@3=! 
z_9;qKR0-CXq0-sOP}a*^+!t@VG#(fZbIATjrJ#RP}v2<$P=eq z@55EDQvm>)(RnKBIP5c9N?d-oUGv_%Ak)>#ULzA<{S{OU{Y5f0df?qjF_#d(>1Mo` znvnyV)*+V!(2i(vbqC5ZzYZx zf=C9KRbR8v_S1ajv@2Bj&$I3d)FtIgE}5XFlNGv)(WK=bYx@O#X1h-__DuHI-&xeH>`aHeI^< z5h)UsL3R{4u3n;C>$t~ka|C+U9DR0+SGe!0d}}>zNmilqQ|Yd_V{t#E*-MZ@WXV&v zFlBuhQ)QouLUl)NIG4{pup2>w5FOPw^^MYB&B^&m60iG-@1D(~cYC?A#W<0XRHD0; zI3Cx++$dQ57fSZ}{VmD#s1$QJNY6WAac=4K*sS1JysWIr6kW53O&$+bSK>Vs+k?MB z1DecPndr3vdPq~PY>k-iOTxJud|zbOKmY%L{Y|o~dS#@z2EdzTZVIA=xJ;#8tK&DY zIxdSP4t^hLTvi9scCeb8Kw|tiMsMbQMby(`W zil0@N-b+_%ptKT8=rfv{&TAd4jj36F7vl06$Wjo&pIY{InXAn%P~J$Bi| zwaY^c&EX-0bm<#W&g_3TN;IYYy@lvwtRFiQA1Sn;=YWss5-ZCRfMV2D#T>H-x4s^~ zd}o;!l&7;YV@vFXxMV1${x)>skMlvmdE?eQb;^vrj6EULqGg z-4s)Ri1@7Rtn{v5!Nt>9Y(u_)^^g_INeP9udJDn;f{(Z_b)8YTSg6utD3~X!x=5#b zN9-9^(5?xPWg_n#qo~{8m<`F_snvO+ik@gsg2HmTl)A}aGu+V0^)_pfblE(W%L4`{ znbk%H4&ogKKzY+N47Tf8bdob$QdBx9G+sNe)#9^q|MJf}4>CVm@-@N64w+Dmq?T%< z`Bb{ET^SYJh+?+e%!Sq^Kv<-HPVO&(5gf4ksgG6ga<#yRE2N>7I%}s2u zl;GCv&Crp$f!3A>i_YdCt>JFj2pl$-jb^UzO-n+}#R5HRhs}jf5M(WA7yZLa+M(Q0 zEFlvI=ibX71RA_EeU5qTzWahYB0@Wjmzli;n%p zgW#;*8KJz$5B;U0B6Z_I&|;GxEx9J&e3iNf?IzlpkAE`Uwm0GkF>-bGs(k9A&_&JndPYvUE zCWnp!7!d3r__9Vv+E>3p8;LI{?y?2-3~_#T4Y`_{Z&Z?hdWUvO%rErFDLIhv^)gUa}kp(ppMW37nxdD_YQsSt$*y>|%aY<8-FZR#Uf5 z6QNOnRGhXy)Jgf-r))*;R%uw|FUTvYSEypB(pY98oxhsNZ^)t;h@W1 z*)tTZIgjN16c-j(e4<8#D@=X|OdVxt&*6JMx$`bM)&&5@w$sXhOt!md%w#x zLSr?qHWDBmZ;{fv4I&3@@nVEzaL`QVD%r2j^MyRpc`l|$@{i(|n;D=%E_7 zU=c~MNL+Ql6~XphTnmuVhY?3hQbDwL^may#Qs>$QKNZXC`3A@0hkpuf?yT|~YWkyC z#4;iV1+!xg6i9L=X(b<;Y<*rktTJ6ax3rqaoU?q!eD_} z(3ZzYd~xl)1&+;g$sOwJs-dH#`R1&LMT82ljs0w&kd92*y|R!)YVBwGpSo5ya!2P3P%6bc;2|-QG~Htdih!)=3TaCp4z|+--MOY0fqo$! zc-Gx1R2H&7ZN@ z?b*_huwmEZv6^=sAwPHT-y{;GNv_+Oib~&Ko41)$OS4a-bhV9`pu5oYS)cKFFy^!E z$A9m^PI~hrCm1!AzvWC`&%b25$}qFy6R1dZ{-IrfK&@q8`DA6@*zixOlc7ikjJH>4 zS=UJ=_2Nhj)y1H;M-ls$JakhiQmX@Y<4MTBoo_Cj#UME%i~dG{O>MH5OoZ%CY-;eFgGaBaA+W;ko34^ z^cHcQ)opeT6KL6|2o`+f32eQXfL$P(T(;|~>%~ZXv4>N1>GTAl4^-nRa==e&?}`jO zWb8Z>VFVzX*gPhBfeQ=8p@wuRx+f}2FxqZ1>oPF{d7l78(0@oy3et-nk;RL#N>9t{ zE+S_A#qfH5VypL|zHhUpVPUzFSaSk*kkze@_|9gKX0$(OtijH{U}@PiN->sjDYJYUw`UprB$DG`zHc=^ zkjFbpixib+>KBl^NeSiN5v>E*$A5d9Y)w)Emccvi0b{#U=b;r;s*&Cl1tA|!ezFp~ zuw8yl_hc1P2W@gup+LxXpXXl%JU?XI9%d3JL;$TuBIjBhtl^4tP961@FG{+;g!}Gf z^t6_?+wUW*F!M$s%CtjkVK!FuawaKBY`J+tIB#hD_xA}OXycE}Hr_I(|bV=DN0@|vucfr0|m+JLVB@+!hZB^`EP zF6gVXe@EJwwS|DrXbQe-g!n5V!o#jwo9lBoK*m?pG$fuQD?t?5<3Yypcp+#+36tKK z$2?FP!}_sOC$++%grK%G_82!=L@o4utII%fd29f2m@wj~op4L|d5vLO_p*|*aujeE zDY{-78BP96B2%l5r)H-oCmG#0LA;!A;w^H|Bkn0v`j9~K=?4edclTzEqciu@$vAJw z8N8I1I4WggA^}gDaNl@?a#U#C>UI_dm(jgEQ(_ei&}^mQMM<2~cA;lKlSPW@3OVD%H|@_DkL~bC z{rEKxV<#+a}^pg&)z{<}aje_4=XMiBi*aB4By zbJOwIa14QsIJLBO?G#EIug0QCS!%}9?Ptr(dp7L4g2w@^|FmqZFgL}lWunrkXKpUt zCKj7(&iDerjwi>l`v+kpp`@|qRed9Kqf49gxe`3NoOQR|q9KoxD5KoQ) zmdcj|kY&bZ2*hb`c@@kz)WUN<{24^sh|paj7_2yrjF7VpIW(j>NCCm)!K<_9L&a<| zfy%k3GbV3v<6LJanpw#}KlmnnRYg0NeoZA@@jF&Y#vAq*IUwUAIg{HLOiUFT!p1W| zh#9?umr|5;TbljT$;`(>+FRuwk0zF${jVeBjxSr2qL85g*!Rw_XQm_{*=hk3vSWjLonzwfLINQyjW|C2SPN|^;e!Z+`P z?6bkzHOd44`t1*|X5eC|v0u#FRR+O>$7i~oLB@gHSVzUvmzzv;;MGVT!Dd;+@>~Ox zMcho$RoKtn|6!sk;149XH-W0rB7>bI)D7K|C~sn;fL}eooiI{bWj|{X4lO>4sxRr8V-fx}8;djFY3}Qxb4}4&3hQlpRwe(fAhGI`|Mj za_t*e7nM2f_<_g=^YVs#lKR)wJSXo9rye_Ep;mb{y#d%6`8(x(#;)sqJfk^#095CEP zt@J~-$+=|6;8h6j)XF!mUI|8}=^#n*m2eQj=y&39o!x@jweye#QJB|AcY#L({`|t2L4pH)b77}hpeBTH2H~j zdKQH>`7i`w7OM|9Na=bUx{#oD$4w*h{ylD434`)({5g@iUnlu<%tGTk5pVuIA4Ph; zvQ;ric{pg~xtP;08b8TJC8)1|pT~fqv&|*K@#WTbo{a{#Q3bA2$gMed*##VhTX93aj76);It7e%{15NCqE$ z!3chE1Y&_5d6ji?ZzIllcH3NJfHtN_W=vl1TE4WKm>aa9=BfQ;$^hr`ATyjcH*RW=urU2ai 
zPE9;%p5d*p%C}v~J~q7>iC?DMuMc zS;)uPPK8ihoi?}#Vt*vBfzwMv>pv($qB^P49M+g5@${u{JdA+7$z7m!B{sq`!dz_V zJ;wPJ#iF};n(Ix)IG;`skNaX*SLU)raqXv7L?k0VH=64@DySikHL#LTWR^% zk)xP=3y@*D{BvI2<+YQ)@w7{g1U2+BJN>L$pg3)YEPP7(0P=YSa)OgJIiJ@k@h9yB zUC6YK{NTMAdV8DSc2%NzK-b;_7jAS7n@gcIB`pU0fy)DFA(Z&PrO9NcNF`^Mp9Ygm z6)|=4z|0;c1F4E-GS2sYqM^;~p7Tf=LR^jVlyn8~mnkRj&5-@i!~n`l*8H*Yhq7s7 zULRT!O0GcS8K75Secoms{npYV`3h|)0&q9&HqI#ug^z#Z`S;$lB==B|m-zm4=z7aK zCQxjN3FJw+Ll>c+wLukV-USy2)tm{s*EI|kCH`T`p^Q!0iJxw%9URym9@4^9z};S< z8}rA&y{rfYmAtkfI$%8%JCAIbNC&<*2jbA)RmR8ZGapc+p#b}uS^3K}WV0lq34&Wk zs6$t4k8^U}3I{+To8g~}v?xz{v&HJA(J=>?*Qzs)7K#jaKVu5fH&&&rhJW+--z7c&Xf3Y8&uwiQ6DYo*?#Hr54b?(9T7YM-B*o3Hd{{AO zX4kqh@LX=yoOLPxRMr71w{MdqH(xX{!)}}Jn+n^%TUgg-!VvlqQ~B*hN=RxCz8Go( zGM{p?%fQc0NBEFy$>zlYJ87s**Ej z0wz)hJ}=M2s}ID7*udLnWt%{N0_$#0pw&N=TKl-D8e7pw62}7m8G_lqM=|vFm>qNx z5rMOx90kw!tmA<5lZY6fi1m16JTdAw=Ku0g=t`{uprp`wnaDO?M$YQFbQJedY>U>9 z(IFPK-cZ6_Y{k(C;uoUg*MX_cp~V!!%Fe9*Phusz0!S?z`4;7=JViX8e%rBFM#%c+ z(zL$`D zA-2t*Ag}{#iFVU-?&h3AEIO8&Z&p#+EIh(g=GRG9tS06EFwb2Q>wbw38VQc?7)CkG zdyTVs)LpzD@&sMQ9+^=(83IJ+f68h?$kqH!gV}W(?BFOl_x>z#%7}|fn zy=X1*&t}0{7l(GCjg^kjji2o^7`28K#n5qT<9|Cxy5=JJ;Td5>R4e^1_X);6CF7T& zbxC)1^u)JB+_+80wwZyXvWWxYvLe&QsBG*;(jnPmOn- zeo=W%zCc?nv*Qb`%W@H{*Q z_%E5CU7XiN(!+>Q`m8;yG^_*V|Ct$-0C+uJ zj@%wgVBulc1!xS%>1hMgIVq=!3s|+lp!P>0XG@$Vkeco8=25Gvp>j+APW&ktIPv}< zx*P^fj3OtM#Wd2Xqtym?F>dLW-f7JFX`o&B9e)l#BxGRC(627Xg!y@>WUsCOmc0!E zf$R$dNxHhza@`_b&9Q5ZsuZ23c|ROnW!gncQ#4X3XQF@odsm55-Cm;k95;rhPo1Evw?A{*cc=?FW3 zv(M0?Aukl^dMD)H=?6tE_LWPRimn<63n^xOe{W2NRQ=VJ0v9vmqwyf+J7^b3!eClK z9wb%nXWp+cRnUPZBdkKYt&&7*W2j;O!w|C|=oSQm@Gk4`s7-n{zOxAlY0~j->I+$c zJFF;0hDDB^;u~0ed;!#>VFDTLB0;K%l7wEU^)+l>gf^z2U7`*cXnR!uXq6pzDT^{& z+TU&{bg4&t_-fl^s5eE!4~jM)rL5B}ldVnAfOkm7P2muY5~s*@UUT__N_w*FZqlZw zz95Vg5fkSC0DqLzazNE!5&o%X3;9WJeMwHcOk4{q;LrrJA%f%G%A3-%kl|FH^xAn2 z*gL!8RQLU{v6pOO+{!|*;g`4o&^gLlzv4OLDe^MO_Rs|C!CUV?$C(&*OHUTIj@q@2 z-3*01X9Z!iAGqm~Fvk+d$T)JL2vC4Ct$84_y+pd1a{KZk+*qQoiaDG9kWFhhs}}J7 zbRdG(o_*Pw3@i=H6K{>7MzDOpci?T)TrgyyY4CvmGQgKQRbNo>9%S(Jue@NNe!{#Q zscd{n{$K_t_P;fEh}mvvlV#T5Xg0_*S>7H*yz9=9XliOw&2@bebMk7sMRavR_3eWf zc6^*(e4v*>Z87i$4t|u3h7|NX82^_KGBP2)-T!{$mv8fdeGV!?{$0fJ_X7%CD^ot` z&_dvUIl)%nVg_-aoaaet91_`;&<6Yto7*hP%}0$x;{W6v+A43ih#iuXo$@S>Xqlqo z;>kp}g^EUxxJoUQdAE>1Ud7u<3b+tK!A@p!#NaEK9e z&bJTvTJps8D);*hs8XP;hNQddAh@3*%SWM31< z?mXxeFmzLl8}vImFaYOc8a^?$OecBra&mIotq(aXYiepPt%1&`d_j`CrTR&|lW!?_ zUs-Fs{Py+)|Eph8poeh}M+M;lIWud`?q=@l#zKi%M`AnA(Gj_0=*xFa`jB|+$7HPqI*^VQXg~DWKP&BJYS!x2!?ptvh4mDZPlswKk%W|fnAnt zSqa*w+D#`o>)h5%1l@<9xUSBcz!3afTZ=V3mxwWU%>qfS(doJpoQGXBQFJpxEs;E7 zo2zrmVK*OHbruqWB-h$d5RzAs2r6%rruc#bnX$MvMd`q(&L}Y)QOP)uzn`GL@;^WPAOf;_`Zh@xR2=4v7$0W5n@ z*FZErO*2FM_NXB#l_W-#@T_e8&FLbOIn6lsN5}5{m%QS*xGQ>ah-&q_GavD6FSkjf zP1_g-z=e>ktgQV`+VV`c-g=%ps}-%fnZ!)Io|!0ntY!@8SE}c+yJ3dQ8r9BHP4NF- z%%&W9JkOfKArABhpp%qfzWLPGV-VL|>Q6#`a`9?`En3?=pJ>k~T=^o>J-qX4KTn8M za|{z9gojNA;;ciCQTIbg2{0$13`J~gf}vCdoCvr)@V{|9_4}m3qyeQoe~(*M%) zMA%Y9iR!X7I4*KIGh=!O{!-Da{L}wMs30walalzr-8SJgEDwrRTiV;vhZ3*{=E$Y8 zFFwG@20^t5DFsX35OID+qjCLm z&tg101syLbZnH^r6ag$^ys(?>)ShLlo@nQ)|jpm8wf6+ zhmKdz9NZ+x^T=aw$9aQLGLpxz7^!P*8<>56@Ah{yz>stf@h(#_PES17<_Gvacimmq z2F%`R9#M_qm)+eO*%Q^h=!}Dv49KoGi_Rp!n*g#on1Cz6q4R6@cHQO*cvb|re!Zif zC>;+BE2!R78XFrk2K{tH6BBvCKlXt+@ltP6s!tjyt7VY2C_1t|&EnzNMa^1b8 zp#3(#x`dlVb;qw1-z&2#>Pc9qT?03QASY0z!5fJoapD#M86lcYIV7JE_?< zlT*K;lN+LlOZQ@m#?{*Rl>IV8O+qK?ld|tTH$l}>Q-ZO36 z%$fD0(uHU|SZZt6^F+QW`$?K{z5m5E&t(&&w?w>TK*T3G_ftONta6p0#zE-XW)msZr4!&=B*|= z^lE4g+!19mE40rmSr;EJS`3paNT}HbjhaE9a1)>b(TF)Z9vy`$*HtSw=K^BMhzB=l 
zKVK3+1WaF`afK4J!wzH-u}ilat{hmn+fVGYM=Vmq+c%517nQ%$XB31P{C=#eitc-w zySq}h^+#)dhKNyx4MJhV}NF4@0mU3Zg0kRcO+J$ zHyP%}`_$xFiucT(bm(e$uDNeZz5^FXsM@T4#uMqYu}`F(tE#}??K0na>0Pzv%6k-^ zwe|4n&J|3se$?ByN9VdnT~_aGdD736VTaaH%X_!jZr;SA_tp)QJ3=MscJyYq6d`Gy zY4W;Pvm}+1-c_%=t}nonz7!?>&DkK^(oD!#=_Y%?*KhFZ+X<(0#81M*h1XD#y!w_J zGOo63;0?ZTovjK+(EABas3|HcIvuIhEwNq(R!9rr94mTvIQ=O6>C>kZ4U9HO9A4JB zsx+TIbi@O+wt6G#I6J|l9c%EjZ>vvD(I$)D4vst{{q8Ixx@>#71@=d(iVxuvCj^TX z7phXrrO#)7oMg%FB+g`NcK);^*ooA4~2a zd*CRWk>M-vuH?Q6)O$iKsQqsn=Crqklm@ZzNTzQ=< zyt0dYn31Or&if0vL zaeZK5a~IfG6{Q6&3kK7UbC`j8Pw~g1d5N0Y?m+rDRZ5t_Kk9_g&uq+WX>zwYL@UA{ zn_ba|y(ktO38gu*F3 zR^Y$(20b?M>}EmzbGz+gi$8!t#YeC25CZ2lcs=@lNb?VVNvCssk72GOd$uDnk(*K% zIi|#ttGR-A_FXK_?kLHTbd%Uk`UX$Q>_|$a@MIXY(1`bNLh(QD2=LK#t)$w?E`SUy zI>6mG^;4#wNV0EMO>RkacBl_59)&Yqbm;)1Yi(jnE!TB*CqpC8Rbq8Zjpg0RJZxW% z1~RYr|exUAk| z4AC<>zK%UObJ=$W+iNVTh4$0!U8S~0IF|G0jp!c(Ed-U$z{@&7>Xo&z1tr}SKXf*1 zCqm>!-RiDn8T{&{$QLb%GEkEk|9CgA=)v^ku2-KyH+jvC4Eh$TM=%G>F%o zF7TC0a04J&gA}Wn=Wq=>dKP0c`KJZQ? zr(D`zY|-LLTM^G(REEPGB_2>Rt%z>|8zD-xJxPKOG1&Qvz2CmC@3R#d&CE6@CGU1p z+@g~&lii1^1qQDfQ8m=ds|@`!MtoZg(CpMvZ;6GR&3SxBXCvZTZvJZ7%yjFzc2ANv zbCNjAh}kPhHlC;@^rEj%BdGhU9yjmm-PF;dld%l__$8B??ubQd2Hvs>S}}5oH`R-) z#Pn%1=aIhVb!}|eqV!Ha#!GdCoZ+@5NB)S(roF=zY534e#eSnj6793KTVtl=`K;Pw z?q1+J_%M?SW1h0IG1u_bWSMynp=8p`*Tc_qJ__n?zR9fmSXy*1DVM$j zrKFVm0LdliOWl2K8aXD*S8b^gYQTZ<7oj{h3}4r$I0;&}8%saB39M)MjD7EP#;BhU zkLx)ta52mIoBcbpsTZ&8u4mAr;45R*dnc^ntqhke*nBiS&1EA`2DpCFmwqok>KTHQ z_m${wOg0Ruc4APvzcCSpDflK`n3P4ow_1ibT5U6$a2r~69O|~uvmcEhQ8ZwVp-vCJ z+UpcDNbjmX;fR?|{yrwYRCj*4^=`+H@6?i3()|_@9r&yy>uHgn#Lh+`^Pg99XFIog z)gJK8Y%M7JejyfwccMN2!_Co)|<0H__0+d02Us(^A(mUTG`;(jPBXKs>+DvEh z9T@wWy+QZLTiP!>9{JXHd!@P%r`D>wB;Z!x+DjI?lOSt?7&SQvyLdI1q}yg|IBZM@0``lik0x(UDuEFXh6BD9QzFPT!&AnI*GnM#=!B)N}z@( zZ5`jN?z1fMcZcKxp})5%tr61^T5Yh|NE?v=+IZ{_sotw9#55w&{GykERB!WD#0z|9 zn(upxa&z5W_cb5QhMD@;RnH5KJa?h5bz7Ua_)biCReRdWa9G|~xH|yr`sBIqWOo2g zv@h6)L)0P0Kn=rngOYmXEplz{J#;5V=Rq2M&gusKgbu?0XY=V1Sr5eRPLW}Un)d0t zP)y#_^^EV7TvcCluf5asc@)E6|B$@uMLzxK)rV*w1%{jWojICl(d~i6-zOgLB5P%( zQ%KGUe9pcvM=IgFV(#G4o%?X6|F6CCd}}gW*En0nhEZpfE+8VJREG%y5dwlpFDfBI zq>UmZRB4eWiWCI}6+$%$;7|lZ4JAN=qKJahLJx=mArwJM2!?uAW@OGj*LD7Y{b9eK z$V+lv@4MD|*1DhHy^5HJ8?HSk#Q}p;9>^1(!x7rFwVPIglfz~rI-GKl#SVdzVeHD< z5PXHn1tO7_Y&ZX0|1_rh`I{zmK;niOHKDA{khRcx^ioVZoiTkn0!GgKP9tLs9p!R# z4E1-D(&_RFLp^&%A&zQqJDQ4^cWp$_Y6&}&1#|8i_4Dbr5%){Ka4x1{*&FQj?1b)3 z3U?yJw5`!xedu_vJr*A(xkSN?J&o2Jqt>&7lENOTz327cHh-Y|UikSBg)I-+NEf`R zS%Jri-A8o0THceU>Ok_6HeIa>tNn>^A*z&iOid5b9)Vd|L+>85Y>T?ha^ugHEd0Kn zN4z}{p*>s*s=eZp`Q)|sEOWJeOZ=V#NKUP;!rtvvSLjPlc-Xp*>0R1^B@T|OI4QNy ztpe5G_S50N6Q+=D)Urdb@5dy{Ww2LP*eC2A#7h|q!Hj!!TLk}t1yWI{aw2LjJ^S28 zQq^bqWgs>wZ&GnNIB~m2zQbY5wt^R2zJ@MTx(2oM%l{IzX4^UU@nWk@cPHimsjqYD z5CqQ7#=0Ol6@@0aF!)MGug`gvH6&+UeS;aKI-Aw$XZwO4ANM4|L248vGY~s=(8ROL zG|HET?$5Mv4(E4`+Z~rgt9)L7gzxHdk#Dc_XJ_MO>uN&%0xev^^Bd`DJ0+1F;juhBG_$SRruv(^9fet#-q|ciT5A`09imr=B{%5Bh`q$vJ-XN} z8T9p+!k2W-`rGSE$l5-~w4)?>huVUYsy4%fC4?@*A6^*SIc2oN2KSvfwYD#NWVp9` z-nO$DzJAr6U*hj5x|43;B8IUpX(m@^$!N9sxVD(GWD4RRleAz^Cj>ofqrm|D@8;$= z8SKztQ~yH2gT<;Ue-RysNwaeEm~THU31R56g~B4m<>{DH5;c~>_!z_O__{hR6!dYv zDY;o+K_BKn;e?Qmkqpj1C{)9c)|>0&xa`m2n_(e}u*hLS z#{B&J8N)RAu9;qgw$W~+qvs+cYk^%}sINa2A}cG~=wONW_5x|eLa$?ZKNrkWV?1Bn z+J+GEpqack6+pC)slVyJif-^yMWp06v`}*;V)k>t7DSp;agR#T`87M;a}> zx!Kv-f1bOyQ|QhQy@M4%r__oRGhiMWJD=7RO~x&*TuD)JR93Ph`vmJ*f?rlSrYQ1~ zSAGE@4XWVtEF3`xQsUpCYuiTTOa{@v$hLDAhc0Y4g}4oXbZ?Q|7~LkU!wf9x1h1g1QUUys=Fd&ebmS ziCP1Bb6{{oqD{W`QrG2Hn~75@W~L{fDOC;n#?cRbE~0Ymf<_+5JtMVWbVv=n)^x^q zbA7o5h@9)V(7Wd^T(HX@ujeeeS2%l;H#b&Wc)Qis${yZs^><+KtvcL_M%yyJzOP7h 
zp+~5=fON^iwQ6vG)j)`b`uBJBAcsmk;(nkb24|osjT-QI@!@o-^v;?9lhKYzmWBn(OUse{iWfrts z{n_m)_A4f8YtDR<+}gNVi?MK;hS}}Z1-+aIXb$%e9u@ph9WZSp9%~kJg3gpwGrA1> zc-4Ft+9bwA04e4aeU=ot4;jE*?H_LRr9s7Pb%Xy2 zzIOkrNRd43xXa0@;re>|UPqXjTlEy9s+hhvoEa$DtGw2Yj#9_BY)y>_lv`^c2h?jk zs$qh6#o94hxfxzh0TIwZF%4PnHR~J(o;3G-@jgTh`o4d!BS^9L)h_1iGkW-$S4F{w zLcA+;D$+4kNR)=Na|!s?cX4B>I9wCOoMd>{<9JBP7l5I-mNDPpWeL2vW#J|ux^Xc| zT}@1#$SjA)%ZS~W-C0dj*`71YkbmVjO|A!g|2!1Y0Dxp)ojLQ{ba8z$C}692&2f>C z*{l4!U2YdGH6nSpNQZNq^K&%m2HNn`(vBkKNQYA5vf z_g5Yk5`r0~vmI6fMH!690-1e)I1^>K{tnJTV`{n7E?HP zZ0x=n%03Z-(w z0Bi5gH(7XOkrZZXAM~h@PKZVeJ|PmlS(d_{)5xUaW|clJ{aR=hy@C)jTuH!^4tw;K zI6tyh@44~1fqZkQwpnRcAi1C&1lI@e`t=*drJZeQm000m%+XxwvD>RNax()J#g?yI)zXvxfZO2-q?3=2eX8 zATTP#4F<9BWuOma?N({s!a{sjVD&De%q4E^d-!G|mx?W?HV zc^Y&|kFmpHNDJ*H>ma}tNm8q=-|IJ(|Kh)l%1_%`dq}9i3@n}m(OQ{!e#y$}%MpqK zgdrL7?Kjag9;pBWsqi?Y5!etv>RO6@U>Xn4f!$YsUatx*qR_mt{~5?Qe1`&hje%+b zF$p?GOORlm0)a#g1&}>Z3dB_XH*jdC{4?^U<|ZUF5Ey1CBUb&7ZJOWkx#k(oYurUo z?ns=<^$$;FRf86;3ux-B&FR}#dEx!O7Ln8E0C-H4){pL+Q9HTLjFI}vrdy^Yz*mgG zZ^w^(p#0-^H~`ci_wZp7K|7=wyw{0hDkmL05g46~o8l$#N>BRwu86|?^Lc0!>{EO| zDW8$=uuH=KF*A!QHZYxDc3Q>EcA1px`uh2eC0c@2EKp2W{)pc|wt)a;WvY-w z67-_^B>C;xDhF%)UOJR7ggcfp_V$n^+3IB()UBV4PUKhJvuBS*u@M-^NM{44uDgE| z4~AEI4=aOCuuKyqw%5q9=EK{0Vv#gSH_k*Iop|- z#PVl)4v^SS6R&y{Hp}W4aq+~6s;gbnU2et_HALNP1R|-{X{PjA)?6 z$`^_;68+NfcqODmP)=0n)(aj58|fqN*vO#svjvSa)V-5|ck6I_ZQhVFTQhwf$eY%S zwHQVvi6^&`0fS`tM8ZBE!CyV&-RF~SHZz&)~y<&`%u@A08G}ev2-?1RZRH2fL+hqU;m=m zzJL8whC0^kQTjXXPZQfp7*zW2BqzJ_8;_#k!LKPVfv|Z0JOM5b^j*rQ#^|Z6a|3@F z*6W-M>i7nTX|$jxXs&J);SDTSSsiqHqfH4Z;@))^YXWDc*bT&1ee5);D%sAznx2{} zB5r{weAYbM8+)#6|1+JR#Srj|b2<@N;HEqGi5z7SLW&TlC)@ABo@MTL9v=mrAIOT5 zK*x!S*IF;U&gw`_s|*vlj6>8@CF*#6aLx zUsa|sqs9LO9d96`tO*RVNDjkWN8mH`Z3nl`Ujzz3uh!hF?W(S=M=}y1V+!)}Pl4Uo z?lAg19%d4~PxKfV+sd6Kt2`Mu1f8`*IOJk?Wpe&_9#-H9MAkAWPsrERRZ zKZuJ<%%YC$p_#+j!sy)+L`C* zYhiA^(L#;Uv%Z4<;)ERGR<@!;-4zgKrUJ##-E@<(zvQKpH7MW$WHAA;4GzLFP%_Vw z=;fD5oqs2OrYbQyqo9m+X_#ka<6d0O`!#;+p%0!o7OPhmd1H-DQk|VvYPEU5Tdiw< zFI^LrPx6HaG0N-4JaG5LVGZT`u$5M*|4%A{yN+b*cS?h5Z>4&vg@XYiiyJ>#!=1fx zrDF})%n;f7oA19L g`2N$%;9n2cbjC*rSk+eRI9@#&8(5q#(sPRVA3;uV1^@s6 literal 0 HcmV?d00001 diff --git a/examples/files/two-writes-using-same-input-stream.png b/examples/files/two-writes-using-same-input-stream.png new file mode 100644 index 0000000000000000000000000000000000000000..bb1b60f6f59a90fa6dca756a71f4c3d5ffc9877b GIT binary patch literal 61233 zcmd43cT`hb`!0&Q0TqF*2nYx$NRcYNgNpPjRZ2hwq<09C0I`Bf@4YF#B-9{XMTF3$ zg%A>?mk=T~kU+Tc^V{y<`R|T%&%NWWF&JyithMHvbFQ~O?=yL8YOKR}p7T5n4Gp87 z?maUa8hSw*n$v4%>8QW#3*B*{o=ydt>D;A(4qaWNHvV$GV|0gxrZ%48=n*Zoea=tU zCXj}Psq5F@sUF`lXBwK#Z+iFcSUh#uz|iMh-a9_o8ny({m-A_9-M@cF>+k!gV!uUx zyB8~-kJR5W90|Y+J5L9mRi-Yq!^c7yfC`&^*7M@z-h;^(%g zbZ{y9^6;phWbbK}Tz$0q(+3eCWY>0ILuwrwZfVHB7Kl>JR1blN9Yoa^c}9Bw`sBZl z^*6TchHOi3Hy!}^4qFYp4OkmJh42GkFNgYSzfd>b=}dS#3=0u#>C1Fw@<;#k6wNb` z^I~tJG$i5>5Vo$|;v+Iq@i1Ua;q~^WTB{3b0ld^s-tS#Uy$0&BHmKS}=Byb9T;Qnb1M*V-#vPHA5N|+p-!)Y;}g$dFm8yVC-&y z_b>OwtN;z}H?v2InSeQf*zTn2WCq6sy=Wt+6gBC+>XXd!2_g<*k?G;emj}47raAl!SW^8=MCgGp=dF#qhuwq|A%h*fvd6Ig;qGDU zB0D=%Be4`hE)F1{MDtFc{%2AJ5sJ$hPR{dR-}-myNv4bk2VpEKEI%){5O%zTZGOZs zxD0RJMRj(i?^>GmFX;501b;8z+g>i6&4QoxoJ~FnFJjSLJSqff_5Jk@**{K}NRjb5 zs5c~q6>roIf~~@@C-Za~4>cd%v?)kUPu<1$Bm1<7$0lqUxJR3b7=-FEBcUH(nlJHT z6MUycg6YJlF0eT4aMRG{F|69AW|x*Dc=Gjjf6r{Xr1k5`0Ls3v%X+0_zq|_bKNEOH z5_M4Ia2782u)KwWhm8kHzupjioVd1c6M-l4|D8(#rOvqa@H)_J|xcb3AbR``uxn z#=%R&d}U1vxaS znmN(PROGi87w(?z8&GIq8S^?s&~_EQMsuR4-)^V7VsIw=tqUgGd>Cpe71Nh;2p;mI^kCGXk_ z^8~3^H$0BxNvU8jau$U+|2DjHOFZG`Fj`cTXV0^Fr|lkQWIv?~zmslqBK9Fa_ok>f 
zJM15+qPabC`kY`BOd?Iv5$)OHG~r`oEk2M+OIV&Rt_>%bgQdD(#t$b{eLZ|A(3tRu z-Cj&^{bmIM`u9S6V>8eI!LZW03R*aod#mcyS{ZXh(M4Zi; zKjUMJ>Lgo1nxJ>i31-YZxN=^g-BTM%mLHZ7_e7<4C$*MZWHsm}^DGs=v`h#;(mm8* zITBBJn-9H=Qq{tebyvuUC0CvjmOZ=l`o-FK^`tE#;JQ$7Q00B1*ipV}z}{NG_i%IP z@Zv8Pe_lB*_Z^uM)3NOQH+5<3fCCMMMeN3G`nEaIf^5stRSv6ykE3!g+Y7rpUlr6G z3c;OE?@-hd9L%_uzdoSH-uv^6QQ%#COa8;s$X>#1zEI%uH(nn`C>c{TiSM#Z;zCBX zlo|zC>kdJx0${%879eKCS2CPwQ#e}-Zjr4^NwSI~vH{Q@y4s-?W= zEum%RaTl?iTUYGfT2kj{n+V|dLRohU=6In6jFk5;Y$T;$4AFPe!H8-z zsenH=$e6zkR@->W_192}LaCBTU5T8sZQMST>+pO1cKqQGgUu-vecm+%)m;@O?2rkm zgiTfG&nW&9bP!%UC!J#$dg-Ibo!Tl#TfSUocM*kHf*`XY?q>Q)CsPti*ZH_f;Nn~n zaJFUI#*rInR$^#dFNclJz8vFn>Ov};9pu# zo1(sCJwCSARy}3w8RP*di)f=7_h&O!K_QVO8{PWlZ40%^I+pGg0ggB~W zytqWGU+0AG#g2{{!cy2_9Tq+i>~Z$KVtlPD{&eO6QltOIjv~WicdJ8!iS^P)2&`{7 zv6vbFAc6Y>{4?PHVSa*U!-?ID0%{J5<+u_v{g()gQt>vv)Mq{TKreH&E<7u9a_iiR ztKJhhmvO|QZAaK4O!aVa%l;Rvt3o~U$+s0>EV}55a!=fd zju7rqsFPXpkmQFDbvRI(rNrKJC*QQ_Ax@E(aT->17dS;q^C0Ch5l7)Rjt{Mv-2IRI zl+9?z0#kNP9OEHzR$<;pQ6T62Rp+|{{g?<6+3!Ae+6+2g`&!ws!5K`v>m_S^S zM!%$_D&`nW#?6Mc9PNJ3d=K&29|12#KGLIW`12{tlKU8b%`;Qql1b3uomQHWcmm9! zH(Byz0Vq&$#|hcRN0??18q?QzK5RIyJLos8M zWat)mt01i3@9?;i7cmP7sl9+3GoBHZRkkIjs0-x@Xmk=nn~59y@b4aK_M6AZ_U&Y+ zw!Qjdj8v4KHJ6o@C31|Aio`-3A|OkCYh&NZ(K$Vb0h^K{$}Ap zt-H#O!Ym1?u-K&$n*u%MIM4EDK8)73IKq=$OT#6RlpUGYlHRgH`u;#96Py}L|8uNYh*%nKVyyTK zo@}P8fzP`TuzWJu-CT!{_ni@GVTTjw?7Lm!1TSg)All$7N!i%(~d%%k9_RaG@4;A?YB+on}0y}1CBLXbjfGxH!l5ol4kFI>I*%& zqpZp853?Z&$uO^*=`l*T=^Um;CG<8%o*OOonrG`REL&3kT{N2U083+9*D%Lx26jdL zar|8U@2g}pJE0>IZwbu45-Ie8VceV@=mxaJzcVxCwN%QmumA8saA25$F`Jk&C9u^l z7g9Y#CLs&2yPD(y{^%1;f%?2|2)b(MlM(`f#LnytX{$J5miwkW=hY%S zDDdV5vz;UPj=*?(86RlwzwW^d#`zKabQ( zv6ix>RjkFGk^UL&>AZh#xNom#ns!24RM0j0s)F^6^{th-RvmdDUaa25S@A)Y2%})< z72SU&;hAlD-)Coh5un&OWr*29^7zAP6+9(q!}`hSgnN%rbkKqE(_k%*Qd*uB?g8*_aSm)Mo5^=5 z-CsPSAHeracXqcAw8XJX(N5ODz8d|X4}L9=6dH>NWpR1j^!)LB zgH!I7b-j1SG7v)As8?3K#;+2bmZ;Ej-SKgt5lv~6{}~NN@pkgo>!-C>Rm`-Z*}{bB zXMP`cnu0oRZA%#myKm0657W~PrGdjRby+muRr7WfqIYJNQM7qZxoGSURXfIunvexG zY4DFTh2-&{-yv#!&GlMPWY4>cB17RI_yyUfR%|G~trHWD)~_R*{$#1Q zyG$itBAOisQYk`DEhCOye)tEv04*|_wsLFhmhUodd1>A~c5xQ|gckOc_fC1+*{Ka} z)~06^Q0UW!N!P{MU2Pm8Ae<#PNkcA7U}=$sLMp>!vL&Tre}-n9(`1Ygx(%!yl+rCeHiOI+qYBA zXl=WR)hSu)CqiYP7kT>qek(s;bsP6&oHbZeaPkV zK4vcjQ-S%+wBdO8ur_(;Dql}}2Nqr`$3`yU{a4f6u?72;B23-!lQqq$J~KdPQk z)L<5sODE#I`SCwGYE@`p|7W3PFYo>2OfF-i>dafmz4Yg=lKx!MJ&kKX)e0mc>7F~Qx6lW!9Cdv+h#KfbCh&kvYXVqW%5y$l~xGKF`1tZP|}5M2M-p^;=g>ky94-1 zPsaYxxOuyY9@7$>+{L6~$fUIRcYRgeS;2pVpv*;|8sluvG)sFZomdB#T6SsB+R?dlqPqy{gC&Wqk1Ujn{8D=ht!N3wU%>KinIALrAD6N4S8X1qhh1 zv_^C>Yl!742iJ)&{R`d|M7p2d{qd5yZ_Gr)+m5DSf9%%rTcNy!{G&x7xPa8Zke|m$ zwr|JHbO4Y8Q+~QE8ARL^_ifvX+{Gb>;a{x}E6xAWkY^&__4V%R(FCKmm-?4#dw7yv z`qNNp2WDncnGxMYv+Xa}flR5zun&&Ao*-Pb-IBE{JdFqMdGgxN^UM4m?P!xHJXf>M zYUy%ccHq8Jm%|pEZkqP+@JyD9ri{ApZ?U}n%%^QjyTzB*RB{G#avXrGZp!lX{HEFM zEdKt^A}nIW)>ba|1b$To6njL{u)|+H@&QFepA$-KBV(roKYa5}dpYgeH{RNUX!yQa zFg6XuG1x6p&Y|(s{}p>PcPhL@wHNELzMaQbY!dcvE06gzssw*A`gRqu3Xl2uX^&o*$KtN!7E{bjkd61pkypMS$&on10qJ~k$U-P zYs5BY?Fg~6P9)H-eXz$MK(nyOYV8~8s(ZGC zdCMkpDD0>wu%o^ZX%>Z}Mm()$?~VvM>+0qtq9Bn2wn_YNG{V?5T5N#7Of$~g+}vE} z($A)1Z6#ioaaBaKqf_=Ljpzf;(^l$lLc>Oln~5)g=R^fQthIpD`g$~`-0dE7h!Bkd zd^>C^UE@*COG``iQVb4lcA$>3re;kN3Z9ill}w<>Mn7w)^ae9-vHg&|nDwaIdCf`9 z`6RG?X}l|28RmlC?$Tp?D9Ik`;K;Q@+D&7X+vOPdRCNHng_<>jE!(}djE+3=kNT`{ zhE8V$mvGH>D(7p)Tj_XYcYMpMjD*WJ#rj$t?AfatYexp|D_6J7^8S_v*$W_Xd73g` z+eQxom5^`4P!BC@Xcr8uYZX_Cs_4!tN_8*g!G0YQFoB&?3`az$SjQQc_Wn#x?|*mM zvUcJ|_|UK+M*M@UaBE|y4@zzksfQ%+NiKxu1<=@y^Z=6~JJFCpL7*wSaFf zJ<@^H{2gsIPE{~%d-KuCbvu0JCfY?*z}kngY0Y~V&S(_esquSoZdXP}M>l@0yUtbr 
zSBpzasOGvPCc#DzacsiC_N$g7U8vHcPqT&tF|%+}D;(hCp^#H3@et!JNkOwK-I(Xa5C~OWPG2zguof6ku@@}6PW{-oz#6E^Fp#kd#;Z6 zQYY(MuUy(YlsLW#toeLnV^5vC=W1QOAneaX`L6D2E z=dkEu{WkgU@C}JHN7W`RI_}X2Q0n;MX~8G2@Y|C)?b35;L%%GH_ud*L(tc~E_*{z@ zfSpox&_arRn%A-7+j1rMM~6JcN2fiQC77{t0yd@{VxWAqeOI#GNUOqYAyc8GRW-z0 zrCGupq|CAQD%*8O4d4S5upK`)2W+oyK5@e?h=8*- z2K`<6^L4r{DSHM%cE{OFqH5m`RR;gY$u%6o(mS*ZS6Gi#k73Q{G4UNC@hK5_c;sq0 zH;V_cvy*iJ5(f!|=&aR#u+CmvGuThZZjDZkGMuy&HV|RfS$ieoSZfdgr&Qc?91*;> zo^!X}A#4sQeB>}ae0$Y57`Y`$yw~YNK`PD%%M$NX0%vPsA9piat?aR=gFrr5BFVoapJ$PUA3r{bpOLwB>(+So+@zhaxVV^@*f9Ze;@K4BGVwkj&uI}U zxt~UMjH$34l~B-VwlgM*6iV<)ZXX9AzC-nvG&7s^ZmiTZ7q0{(4NSa4eEGWQ8$*0~ zr5yz|6!f9Z*IF?C+nNC|Z(oho3mlILLhrwfg6+pP{d@vl-nCFvmudt{zEr&5mDdR| zv2b4lIBg3w&$AF4&6Fy0*;ktlG+CWX@g-(^dgf7nfirrWc5%S&V2yxDE2X-5 zuwAIfWU#15KX`M=a(RNHuX-XUh#{(Nx{7bQy609U{3PDjP_T~U@F)e$4X5aK_VZrK z+^s#H=?3`*I4|Ew1WRgstGejktO~{;>gHPnwsAK)`h!o7fBT=?-bvIjQ{&(>^Tt)0 z@o;LSkh!?LtiSLr*4x0Q5w{w8Qn`dBNXDLh6I1C}_b7_{n>$U>^Tghi;Q;5ZZLIYB z6DjHWzZqwj`xgn}fHaW2czUW3F>MicP*PaLWt-8HLrS<%YhY&7>YV%FttWBKJE>LP z1vL`Vn_qt046?RMPNMNN4Xc=Y`^e{>i7Z2Zy+#+<*y$_+mS{wO5V3A^8ry zLfUdHCcZ8*i5H79Qf`!xOxYbnx~8<{!X|`P( zIPIxcBE72&xxF*8Vnpa14DL5VHenkDQ;j|C>|8U63aeDB$;wKsXT#MO&6 zClT*VKnAc?{BskrJ~yN7n|;}F6-_C?N(b%+UPS&d(0@Vv zvbxKEm~G{%PJGl&P|vHXLF+@F{ixs)Wy#b+WbZ;2Vn+s;?cT1@9=BLg{W@S{ zRjv^CWxWAD7@QUwU*1IZJ@;mYZQF*Ej`JugyV!wrNPOO-QUHY}^mBAb^eR;-!5>mv z7M3b99)hi`MEyrf5Z|w#DDlc7m9+ zdo?^@HKNbriS~%Z4T0T{L`gz!!Q;_ttSrvzy={#SS?myn5O)GHa}1aHXD^DZ$b-;} zio+)@_qV?+#rM6Sg!Jk-rS)6x2MeDx*35UX&q>I}PZtF4!P-2um z1ehvmedD5j1;yF`=0CUMPb!jG{IFf2y;om;J-vO<`K_ZAiU2n;{Cfj)QX8IG!K zY8;KOw^>mAcBT@kK)YmnJewEdt%H(+Hl?)rz2OM5MYj#7Q)59r%FdH+a#i=S7%G|N zF6KvsaQpk#fK+D9re=MIq)zuH*g0(WDv?k6)j0kImoX352hsVbFTxy{>d#MS zMyqVP&!46#fGci(Tv#-LkdiZphKJMTJSTODo0!2cF?Ud*o(jO-_EGU8IuR4mYbIRv z#vovC(h3xRMPYGdPxh02b7RftSJzn&!`x}nRJBVrXZH25TbGBulYc|BY2}^pd9|07 z`_@hPa4##2#e&Z);U6c-8P~%DHsk@VUPrZ~@oN`o)?Y;NMm+Fo#mggl{TTX|hjK+j zLqnyX{(LCI%kY^R?_cjZ_Vr4JgxA^mDAmX2%ywp;i%S8r9xl&>ZyywKmzZs18PIYW zx9({;A^-+smiaFb1(=mEYzI)?PJ?T@ZeWjpFDq#}ywXY^Oa1xxO3?h}G@Pflz1=U_ zZ|xW^Fs!;0<6X^FS6^SO8O1bOV#MV>QIT3s886gJ7JM?(;BM|d^U>dJgO^jz!sDJTy{67BS2|n| zT5e&k6XNUqMX9~S`E~19)k29R%C|I0=T@fbaoewev`xts=I!rpT8>Qda?4lvJ--4$ zF*~^7ue|0o9CD0v+@_ET*#*_g?mzq%6Ls|S)x(Qihw}n{Jg3)3!@Z5l%I zoJsJZPGs=h8F4$ftme-0pytaw#R4JPgEN$SPVJ5u`5@fnpt@~F1hb$~)(sur@P?~l zKR?6e+H55_uZ~=OEHUcAHfXCLm_*dnFf*M@*r1?nh1p^Q_MI!7k)bamAKBRKLRU&Lbe>o6mg69& zlOL9I>*TVDZis7P2bTI#>LLp}W!`KK=$n%h7A1#766 z;{KOQqaCF6N;4J4f1iD>D;hlI{%`y2U;lr^->&|<(Eo41fW@iG8$s8A8&i);|GN^` zysS@AbCNri8<(h|gWH?w26v~@inx=be^=`CzoJJ&bItFsVKK2_@!m3!A{ zx;d~kZ1=m4nVA{iN}+;OP_v`$`lNzV)y|NXkMG63zk(Ku~&oxz4h5wLTh z)_hAXB-lXdB*RN3iZtI@zq7chu#bSwyz6GD&MHD^vdbsuGc6tjdH<@|%pB#nZxc>D z-Ld@FY_sLPH!LP5-aQOxs6VCh-jj~&!!tMGX4he!Or^k*RkT{zfuH+$S)$EkOR%Sm zcDERy#iE0}siH)j3H1Ii*dik0a4VGD-@5emW&M&)kAGW@yzXT~AMpG3T+x08iE~1@ z{JapKEEKD}B?P%z8XZO@1x+JOvmDy3Q?xNzG zgQ~yRsA6*uw!LBTn2X4ci0%*Rd#6SVr)`XlOtRK!vcu*KgDvtmhQwpJ^gXU|!?LV= zKQw9K>)Zf|s-ZRvN_CzkIVVj3R_H5Qk@wm!^{P|GnGfIZX>D(BgvnG3{Lrml9?Z_x z*MliqwFXX{VlDk#WMu&QTx5(($jZwqU2sx(L{j z(;Dz}pRKN_z&WAk(*`n`nf>RDQWulc<%DwX{qT^@7~iy`=gkoi&t*P(?madbM1*qy z-7US+i4d6w5yVZP3hyu<2Y7VWxi&XC#m#19w-&-r@U5-4EJCJSm3sWD!5cl9f~Ne8 z(vbD0zv}l4@ZsHLm>?Bq%dBdw-`cR1wGot@c3I&QuwBNUpttuIU)K%U5b(no9b57t zdK<+lW8=O1%VFaRqj)@)gKg;()svGIZ%@&Qz-s`>(M zRQ0myw|b{h*ixb1=DC&YcC%?w#^AeY)2M4-!)EC2imu3}SYQ>U)Z~>PL>@+!vY}jR z#Un(*#2%FPNZd-kI2J+KJ5&3^cZo8%hti{W%`B9LcaphQ)Fi?75nJZvDh>`j$6Tk6 z!&3OMEAnSK^*v5ycG_W!y7Z27dJgmGDBK1`b+et~gkD0#PTW^gW#x%XA&2!|_;RIP zEL}3l2%$3U;qj$6nF%31rkUfP>YXVd0|JO(PJZTdo@eVf$40H|HdDql5H>ga7`YUD 
z#84yU(lu`mTL4@ltz>wsPA?z4~b#Tak=g%)7wL<^}It6L9o z`={m8qBA)_vU-o8g7 z_jzH_qORruVuPbTuZkVmI~fr|u@&@fte2`n-}fpNfr>Q6>cBMCc#}fPRS(z1GCiV% zmTWoLgl?IJ`{oFo5H{dIXcz+On0xBJWi`s-kO&iLV*K)n#TsE0d~2#&G(SCamc6gE z|GZOVlD=d+vFk&fGm>J~JdX-67Ccy!xj1Vs*;ct4GV1o@d4#5!C{!vG@ku*X{Uu)- z-qARDQ)pJK^k`S$@MGN3LK%~5UTDb$PT5{V&u8$5pI?|-7j4l2glZe>e0+7*zz=j* zjFIEbx2x+&yK7D&0+#phCthY}@2`2(wfJ!%P6k#D%&sh4Yzf90lAoPAbAzAXU#^C( zjhh)K%I#8Lg>vWmomq1#N~T<$sm-(>E3FXAbN1i@daiml6(aC3O~0mx3=%3Su%M(~P!=IwVmrT#n19+FLro4 z3lu@QI!x5;ZdVEnC-$hH81l4!a!1P0c)k5eegY6Tk6+ieN=K95SbocknXI-m_?2lC zu%-Z7f}~#3xfx~2zEu(iXk4$}#*G7P8#j6QO#@p6x`O|#rGUDL&KR5h)Z zmisD9u=1FsZyR|Ql>(hZ7g5Ty%`imW$PpvRjzHrUZI$pwR%n5+K!9BYZfkgIxb}v>NkiYRc6z;JJyPm zBgENrfbV7QSqV!Vx5Aq>XP`Dl$Qmq&BJo(EcTvT=mH5lOCPn9>SG_%DPHG(j!}bBhjB=j9YPaGn z$nmOEyF+&xe0J9{(Ha&q-R3d)W}$%r|e~fvozGr+d9!y!7+!E=Ny=_|q{oNbq zJ{K*mgnyjz6JFZLp)vZBMD*MQt`u>z zW%E_5b8LNWsG)W?x*eF_>jaBD2@Mac14GO;!gV5zxfj9`(0w)#kdBXrf5MFVXZOx6 z-Fj#kh2F+xvK@7d*34nvq#biEgL-ZXfkKzukWiugu)1^Chy+P8XoU)>QBz3g-h}IL z0k)>>i8#z|zF|;7$>$$oC}dz+JClS-&K&M|CYW^-nVIz|d=@ zfTndEy3S?$WF$n~cAc(mD5%oMrP3tOTDi4m|L_97*<_h~cj8`u%w~8o)u%hu8wV)w z`a1}z_C-g75(6TO)27Agv`^qqPVEI^7+QAomco_eQuez z@PVP5#)+j4A)2=t1vFIF0>{)JO}7TjH%LjfSOSDkNC;V_Emq%lKla){9uCQXY%rS3 zC8ll7C0gfr6pFiGvU^r7FKee)pQ8xhsi>!|IzO}d>T|#cEKxG9&hwrJ>V}a+ONY_u zToJqeIu+!*Q~4kS`nFwp;*hLSeJ>Q&rc1S#g|pwp)~uYB~Vyf?l_84pp`nf6Hd z+dpTYBN6xEW%OT{Fb?T}&OSAVs(xdKoAKeBSi^Ycli`pVIJHVH{R+I%oo$Wlk8t2{Ei_K#lJj=mH*<$JiQ9&nR#UI(HZEx_x zuH29gPUHGpJVOw+zkc0C{pOImEM>~gsBxClx5ZIeCbeP4=5DS7YD==(>E^Zr*!t)=X6DA6gtsG?yphED*JU7-I{9FT%$apj-lDzbOBJRIK>XspKf&9?KyHW#sMY6|J!aIf##09SSB-TMk8Dzq-+=22MGe zG8VFBGjIc7ejthiUSl#E)M}zQaP0nj=fRP$^qMgu2+PFuYZp|uB$_|w=kAZiE}Q5S zZ-9~`jI!nBFfksq#dL{05aBgIymU#fr*pkqYQ72=M;Sh?eN;1!`Sn|R{njBKT>h=DGt1S;A%^Ok(C4^5O?lgWm9h=AFlt{A68nHSX@*!}l)G-jS#H$gA$N z0wh*9f$D6WCmVl|*E|?z%PSDByx7|^B>+vmL?Q1Hy!gmK+-y1F5qOPd%Jnx1YMgdj zyfQM0xuaP}-H}J)!Oy#TjN?Hg?u<8FB}EfXuz&l@1G{r?+4`u~=Vp4z{E+3S2}s>Y z@U8?kl2iq6(o5C_?4$Z^G}7u!SQ(KPwp>WFd)g>6P`NLYgl-umm)`y@ZruDxCkQ|c=zRy^~5J0z(z`MJnS7e#X|ewV-1fIl-2#AIuym})3HP{+N5D@R11`IkTi3-a$ZuuZq7NO zut*22Q&rKSpiQgvjm~CiPunf7)ln4ICt}myolYy3A?O(yn^ZVYm%2O!8Td3{?Or+o z61CE5|F(2>AwI-VBkmpK$%2}7>&F+D!qtZ=y@cIGu40hdnqP5F9X#oM$qoh3Zs$T8 zrHd=g{eXa0$**{_(+b1F>%r?Jp2N-A(AUE)>t-s|k2`y^76vF?N-mWOxx{hs0K6w( z@Jx9A+vgnw7$V;AzWwe3IlcJ{>+pR-{>ctYd-$6vIt%-@y6T@!8-GHk};`wj^O14$_ zlW(;*Y^_4!BGM^uhTrXU$CxN&Q6L}7OA4oNmgYQ$LduYO1O5Fn6A??1NpS$2d{oP- z?5Tpax-mKrm5@I5kjA=8;s%-f0WswbJ9O1W4bf)zUh0J7clXT3N=?!c$wK{9OvmWq z8o=DF6-@Q9>wAJuqt6uhbc0)Adm$^fmYfgdWeRJ79B56_w=#RxpZA)i{c|ad7XcNM zG{-Tr?h@trd%xrRaV^dQcj5B6#RcBY=(7THraLbb8E^2m78M71Ca)emvYhhJg0+0- zh+wn)_|fTDgu}GZDU?$`PaG%FL_F+s<9L{Ep%pskJW{yW4Ve{>0`}cvO~U>hzAwHw zUz@5nzi8zYtZ~cozB4t4yHd0NNx`55dviO3XD1GwTU^=ZCv90+6ydWQ@%ZpyX!6M$ zE#KrLSK}_UpNs8O@!WYcnUAc#4k4Xrba`TG(2Xu8$VBqH-hL@S_3U zv_N7-L`wX&StB z%)yIQ>MGhEOJ~z3qMHzmk{eQvDMcCUUw*QG<_=pTwmjNSJ?GgBMFKqEyQGd;F|F^} z3~UqLjaWEPeuy$NKXZVX(N`ag`E4g?GTl^2*{wLb3xG^Nax+P^oK?1qFyC7V?k}J0 zdkhmDy4P>^zF5{biQk~|5cCSXzaceILgEDN%1t1*Yr2?{)x#g>&z78my=oC_>_VvB zvyx!4lw2mY04IDL1C@gqEW-tD*?o*g!mVs*&B8mnEng>VtxmKi)QLOg;l1dbj#Sj;*>TMdxww7UwXX z4YB*-+9VbSbM$ZpSyix;`HkGty9}2jokK3*sYmu|CJ%4Hj7L=;+$$^^B_bp3( zioFix0!$wJrJnG3rVmcLcuJ=Z*N0DJ32KnaBcQu7+h+Hk@;EHBeSK}LJSA=LAVcxJAN}ofhzm>yREi22Y<+b5z@ z?2HaG6uCrNt-iRVjuXFykwf{oBL`OtZWc;4M7qL);kdBFOR?t|+wZmz;@|gB9F6gF6BplU?SF z+vS&}M!l9UfKP`0_M`prixrj=IF!gB*=8CL5a2x95nd7yF!$xP;3}2225azO6=jO~ z)%$1rWWV%sqbx$IBt9^+w*13|JpixFcj?YEt+L6MqTNWfihPH$?;TdY!wG}AUoc(z z$xrHtqOBV&D*%m9)0iJ)J(2Hh^~jl&{6=a 
[GIT binary patch data omitted: base85-encoded PNG images (the Incoming vs Outgoing message graphs referenced by the documents below).]

diff --git a/examples/multiple-readers-example.md b/examples/multiple-readers-example.md
new file mode 100644
index 000000000..6039d8a45
--- /dev/null
+++ b/examples/multiple-readers-example.md
@@ -0,0 +1,241 @@
+# How to Avoid the `ReceiverDisconnectedException`
+
+In version 2.3.2 and above, the connector uses epoch receivers from the Event Hubs Java client.
+This only allows one receiver to be open per consumer group-partition combo. To be crystal clear,
+let's say we have `receiverA` with an epoch of `0` which is open within the consumer group `foo` on partition `0`.
+Now, if we open a new receiver, `receiverB`, for the same consumer group and partition with an epoch of
+`0` (or higher), then `receiverA` will be disconnected and get the `ReceiverDisconnectedException`.
+
+In order to avoid this issue, there should be only one reader per consumer group-partition combo.
+In general, you should have a unique consumer group for each consuming application being run.
+Moreover, you should make sure that your code has been written in a way that does not result in
+recomputing the input stream from the Event Hubs instance.
+
+In this document, we elaborate on cases that result in recomputing the input stream unintentionally,
+and how you can avoid such cases with simple adjustments in your code.
+
+
+## Table of Contents
+* [Stream Recomputation](#stream-recomputation)
+  * [RDD Actions](#rdd-actions)
+  * [Write to Multiple Data Sinks](#write-to-multiple-data-sinks)
+* [Quick Check for Multiple Readers](#quick-check-for-multiple-readers)
+* [Examples of Having Multiple Readers Unintentionally](#examples-of-having-multiple-readers-unintentionally)
+  * [Multiple Actions](#multiple-actions)
+  * [Multiple Sinks](#multiple-sinks)
+* [Persist Data to Prevent Recomputation](#persist-data-to-prevent-recomputation)
+
+
+
+## Stream Recomputation
+
+Sometimes a Spark application recomputes a single input stream multiple times. If the input stream is an Event Hubs instance,
+this recomputation can eventually open multiple receivers per consumer group-partition combo and result in
+the `ReceiverDisconnectedException`. Therefore, it is important to make sure that the application that reads
+events from an Event Hubs instance does not recompute the input stream multiple times.
+
+
+### RDD Actions
+
+In Spark, RDD operations are either `Transformations` or `Actions`. In short, transformations create a new dataset
+and actions return a value. Spark has a `lazy` execution model for transformations, which means those operations are
+executed only when an action is run on the resulting dataset. Therefore, a transformed RDD may be **recomputed**
+each time an action is run on it. However, you can avoid this recomputation by persisting an RDD in memory using
+the `persist` or `cache` method. Please refer
+to [RDD Operations](https://spark.apache.org/docs/latest/rdd-programming-guide.html#rdd-operations) for more details.
+
+
+### Write to Multiple Data Sinks
+
+You can write the output of a streaming query to multiple sinks by simply using the DataFrame/Dataset multiple times.
+However, each write may cause the recomputation of the DataFrame/Dataset. In order to avoid this recomputation, similar
+to the RDD case, you can `persist` or `cache` the DataFrame/Dataset before writing it to multiple locations. Please refer to
+[Using Foreach and ForeachBatch](https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html#using-foreach-and-foreachbatch)
+for more information.
+
+
+
+## Quick Check for Multiple Readers
+
+A quick way to check if your application uses multiple readers is to compare the rate of `Incoming` and `Outgoing` messages to/from
+the underlying Event Hubs instance. You have access to both `Messages` and `Throughput` metrics in the Overview page of
+the Event Hubs instance on the Azure Portal.
+
+Assume you have only one Spark application (with a single stream reader) that reads events from an Event Hubs instance.
+In this case, you should see the number (or total bytes) of Outgoing messages matching the number (or total bytes) of
+Incoming messages. If you find that the rate of Outgoing messages is `N` times the rate of Incoming messages,
+it indicates that your application is recomputing the input stream `N` times. This is a strong signal to
+update the application code to eliminate input stream recomputations (usually by using the `persist` or `cache` method).
+
+
+## Examples of Having Multiple Readers Unintentionally
+
+### Multiple Actions
+
+The code below is an example where a single read stream is being used by multiple RDD actions without caching:
+
+```scala
+import org.apache.spark.eventhubs._
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.streaming.Trigger
+
+// EventHub connection string
+val endpoint = "Endpoint=sb://SAMPLE;SharedAccessKeyName=KEY_NAME;SharedAccessKey=KEY;"
+val eventHub = "EVENTHUBS_NAME"
+val consumerGroup = "CONSUMER_GROUP"
+val connectionString = ConnectionStringBuilder(endpoint)
+  .setEventHubName(eventHub)
+  .build
+
+// Eventhub configuration
+val ehConf = EventHubsConf(connectionString)
+  .setStartingPosition(EventPosition.fromEndOfStream)
+  .setConsumerGroup(consumerGroup)
+  .setMaxEventsPerTrigger(500)
+
+// read stream
+val ehStream = spark.readStream
+  .format("eventhubs")
+  .options(ehConf.toMap)
+  .load
+
+ehStream.writeStream
+  .trigger(Trigger.ProcessingTime("5 seconds"))
+  .foreachBatch { (ehStreamDF, _) =>
+    handleEhDataFrame(ehStreamDF)
+  }
+  .start
+  .awaitTermination
+
+
+def handleEhDataFrame(ehStreamDF: DataFrame): Unit = {
+  val totalSize = ehStreamDF.map(s => s.length).reduce((a, b) => a + b)
+  val eventCount = ehStreamDF.count
+  println("Batch contained " + eventCount + " events with total size of " + totalSize)
+}
+```
+
+As you can see in the graph below, which shows the rate of Incoming vs Outgoing messages in the
+Event Hubs entity, the number of Outgoing messages is almost twice the number of Incoming messages.
+This pattern indicates that the above code reads events from the Event Hubs entity twice: once when
+it computes the `reduce` action, and once when it computes the `count` action.
+
+Incoming vs Outgoing Messages without persist
+
+
+### Multiple Sinks
+
+The code below shows how writing the output of a streaming query to multiple sinks creates multiple readers:
+
+```scala
+import org.apache.spark.eventhubs._
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.streaming.Trigger
+
+// EventHub connection string
+val endpoint = "Endpoint=sb://SAMPLE;SharedAccessKeyName=KEY_NAME;SharedAccessKey=KEY;"
+val src_eventHub = "SRC_EVENTHUBS_NAME"
+val dst_eventHub = "DST_EVENTHUBS_NAME"
+val consumerGroup = "CONSUMER_GROUP"
+val src_connectionString = ConnectionStringBuilder(endpoint)
+  .setEventHubName(src_eventHub)
+  .build
+val dst_connectionString = ConnectionStringBuilder(endpoint)
+  .setEventHubName(dst_eventHub)
+  .build
+
+// Eventhub configuration
+val src_ehConf = EventHubsConf(src_connectionString)
+  .setStartingPosition(EventPosition.fromEndOfStream)
+  .setConsumerGroup(consumerGroup)
+  .setMaxEventsPerTrigger(500)
+val dst_ehConf = EventHubsConf(dst_connectionString)
+
+// read stream
+val ehStream = spark.readStream
+  .format("eventhubs")
+  .options(src_ehConf.toMap)
+  .load
+  .select($"body" cast "string")
+
+// eventhub write stream
+val wst1 = ehStream.writeStream
+  .format("org.apache.spark.sql.eventhubs.EventHubsSourceProvider")
+  .options(dst_ehConf.toMap)
+  .option("checkpointLocation", "/checkpointDir")
+  .trigger(Trigger.ProcessingTime("10 seconds"))
+  .start()
+
+// console write stream
+val wst2 = ehStream.writeStream
+  .outputMode("append")
+  .format("console")
+  .option("truncate", false)
+  .option("numRows", 10)
+  .trigger(Trigger.ProcessingTime("10 seconds"))
+  .start()
+
+wst1.awaitTermination()
+wst2.awaitTermination()
+```
+
+You can see in the graph below from the source Event Hubs entity that the number of Outgoing messages is almost
+twice the number of Incoming messages, which indicates the existence of two separate readers in our application.
+
+Incoming vs Outgoing Messages for two write streams using a single input stream
+
+
+### Persist Data to Prevent Recomputation
+
+As we have mentioned before, one way to avoid recomputations is to persist (or cache) the generated
+DataFrame/Dataset from the input stream before performing your desired tasks and unpersist it afterward.
+The code below shows how you can run multiple actions on a DataFrame without recomputing the input stream:
+
+```scala
+import org.apache.spark.eventhubs._
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.streaming.Trigger
+
+// EventHub connection string
+val endpoint = "Endpoint=sb://SAMPLE;SharedAccessKeyName=KEY_NAME;SharedAccessKey=KEY;"
+val eventHub = "EVENTHUBS_NAME"
+val consumerGroup = "CONSUMER_GROUP"
+val connectionString = ConnectionStringBuilder(endpoint)
+  .setEventHubName(eventHub)
+  .build
+
+// Eventhub configuration
+val ehConf = EventHubsConf(connectionString)
+  .setStartingPosition(EventPosition.fromEndOfStream)
+  .setConsumerGroup(consumerGroup)
+  .setMaxEventsPerTrigger(500)
+
+// read stream
+val ehStream = spark.readStream
+  .format("eventhubs")
+  .options(ehConf.toMap)
+  .load
+
+ehStream.writeStream
+  .trigger(Trigger.ProcessingTime("5 seconds"))
+  .foreachBatch { (ehStreamDF, _) =>
+    handleEhDataFrame(ehStreamDF)
+  }
+  .start
+  .awaitTermination
+
+
+def handleEhDataFrame(ehStreamDF: DataFrame): Unit = {
+  // Cache the batch so both actions below read it from Event Hubs only once.
+  ehStreamDF.persist
+  val totalSize = ehStreamDF.map(s => s.length).reduce((a, b) => a + b)
+  val eventCount = ehStreamDF.count
+  println("Batch contained " + eventCount + " events with total size of " + totalSize)
+  // Release the cached batch once we are done with it.
+  ehStreamDF.unpersist
+}
+```
+
+The graph below from the Event Hubs entity shows that the number of Incoming and Outgoing messages is
+almost the same, which means the application reads events only once despite executing two actions
+on the generated DataFrame.
+
+Incoming vs Outgoing Messages using persist
\ No newline at end of file

From b6a2b385718f7a78511aab19ec0ba2e567dfdf13 Mon Sep 17 00:00:00 2001
From: nyaghma
Date: Wed, 30 Sep 2020 19:16:56 -0700
Subject: [PATCH 12/29] Slow partitions adjustment doc (#533)

---
 docs/README.md                            |   5 +
 docs/slow-partition-adjustment-feature.md | 137 ++++++++++++++++++++++
 2 files changed, 142 insertions(+)
 create mode 100644 docs/slow-partition-adjustment-feature.md

diff --git a/docs/README.md b/docs/README.md
index 94fb15abb..7c7575f19 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -5,6 +5,11 @@ Hello! This connector supports Structured Streaming and Spark Streaming. For doc
 - [Structured Streaming + Event Hubs Integration Guide](structured-streaming-eventhubs-integration.md)
 - [Spark Streaming + Event Hubs Integration Guide](spark-streaming-eventhubs-integration.md)
 
+Also, you can read more about other features of this connector in the documents below:
+
+- [Receive Key-Value Pairs from Events Sent using Event Hubs Kafka Endpoint](receive-events-sent-using-kafka-protocol.md)
+- [Spark Structured Streaming Adjustment for Slow Partitions](slow-partition-adjustment-feature.md)
+
 Additionally, here are some links to documentation on Event Hubs, Spark, and Databricks:
 
 - [Azure Event Hubs on Databricks](https://docs.azuredatabricks.net/spark/latest/data-sources/azure/eventhubs-connector.html)
diff --git a/docs/slow-partition-adjustment-feature.md b/docs/slow-partition-adjustment-feature.md
new file mode 100644
index 000000000..36fad4887
--- /dev/null
+++ b/docs/slow-partition-adjustment-feature.md
@@ -0,0 +1,137 @@
+# Spark Structured Streaming Adjustment for Slow Partitions
+
+This document introduces a new feature in the Spark EventHubs connector which allows Spark jobs to automatically adjust
+the number of events to be read from each partition in each batch in the presence of slow partitions in the underlying Event Hub.
+The goal of such adjustment is to prevent delaying an entire batch because of temporary performance issues in one or a few partitions.
+
+
+## Table of Contents
+* [Introduction](#introduction)
+  * [An Example of the Slow Partition Problem](#an-example-of-the-slow-partition-problem)
+* [A Solution to the Slow Partition Problem](#a-solution-to-the-slow-partition-problem)
+* [User Configuration](#user-configuration)
+  * [Enable Slow Partition Adjustment](#enable-slow-partition-adjustment)
+  * [Set Max Acceptable Batch Receive Time](#set-max-acceptable-batch-receive-time)
+  * [Monitor Partitions Performances and Slow Partition Adjustment](#monitor-partitions-performances-and-slow-partition-adjustment)
+
+
+## Introduction
+
+As discussed in the [Spark Structured Streaming Programming Guide](http://spark.apache.org/docs/latest/structured-streaming-programming-guide.html),
+Structured Streaming queries process data streams as a series of small batch jobs. The Spark EventHubs connector uses the same methodology to run
+Structured Streaming queries. In each batch, it specifies and reads a range of events from each underlying partition.
+Assuming all partitions have sufficient events, each batch tries to read an almost equal number of events from each partition.
+Each batch execution is complete when all the specified events from all partitions have been read and are available on executor nodes.
+
+In this model, the batch execution performance is determined by the performance of the slowest underlying partition.
+This means that in a rare situation where one or a few partitions are experiencing performance issues, the batch execution
+takes more time despite the fact that the majority of partitions have completed their tasks without any delay.
+The following example illustrates this issue:
+
+
+### An Example of the Slow Partition Problem
+
+Consider a scenario in which we are running a Spark Structured Streaming job to read events from an EventHubs instance with 10 partitions.
+Let's assume the `MaxEventsPerTrigger` is set to 10,000 in our job, which means in each batch we are reading 1,000 events from each partition.
+Let's also assume in this set-up reading each event (from any partition) takes about 0.2 ms, which results in reading all events from a single partition in about 200 ms.
+If we have enough executor nodes to run all tasks in parallel, a batch is expected to be completed in about 200 ms.
+
+Now consider a situation in which one of the partitions (e.g. partition 7) becomes slower than the other partitions and reading
+an event from that partition takes about 2 ms instead of 0.2 ms. In this case, reading 1,000 events from partition 7 would take 2 seconds.
+Since a batch's performance is bounded by the slowest partition, the batch also takes 2 seconds to complete despite the fact
+that 90% of partitions have completed their tasks in 200 ms.
+
+
+## A Solution to the Slow Partition Problem
+
+The above mentioned problem occurs because the number of events to be read from each partition is assigned without considering partition performance.
+In order to resolve this issue, we are adding a feature which monitors the performance of all partitions and automatically adjusts the number of events
+assigned to each partition based on their current performance. This new feature is called `SlowPartitionAdjustment` and is disabled by default.
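+
+For intuition, here is a minimal sketch of the proportional adjustment arithmetic from the example above. The
+helper below and its names (`adjustBatchSize`, `expectedMsPerEvent`, `measuredMsPerEvent`) are hypothetical and
+are not part of the connector's API; they only illustrate the idea of scaling a partition's batch size by its
+measured slowdown.
+
+```scala
+// A rough sketch: scale a partition's next batch size by how far its measured
+// per-event read time deviates from the expected per-event read time.
+// Hypothetical helper, not the connector's internal implementation.
+def adjustBatchSize(baseBatchSize: Int,
+                    expectedMsPerEvent: Double,
+                    measuredMsPerEvent: Double): Int = {
+  // A factor in (0, 1]: 1.0 means the partition performs as expected.
+  val performanceFactor = math.min(1.0, expectedMsPerEvent / measuredMsPerEvent)
+  math.max(1, math.round(baseBatchSize * performanceFactor).toInt)
+}
+
+adjustBatchSize(1000, 0.2, 0.2) // healthy partition        -> 1000 events
+adjustBatchSize(1000, 0.2, 2.0) // slow partition 7 above   -> 100 events
+```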
+
+When `SlowPartitionAdjustment` is enabled, we monitor the performance of each partition by measuring the time it takes to read all events in a batch
+from that partition. We compare these times and if any of the underlying partitions is experiencing a performance glitch, we assign fewer
+events (relative to its current performance) to be read from that partition in the next batches until that partition reaches its expected performance.
+
+Coming back to our example, by using the slow partition adjustment feature, the next batch would assign ~100 events to partition 7 and therefore
+all partitions would complete their assigned tasks in 200 ms. As a result, the next batch would take the expected 200 ms.
+
+
+## User Configuration
+
+### Enable Slow Partition Adjustment
+
+In order to use the slow partition adjustment feature, the `SlowPartitionAdjustment` configuration should be set to true. You can set the `SlowPartitionAdjustment`
+in the `EventHubsConf` object. Note that the default value for this configuration is false.
+
+```scala
+val ehConf = EventHubsConf(connectionString)
+  .setSlowPartitionAdjustment(true)
+```
+
+### Set Max Acceptable Batch Receive Time
+
+An important factor in the slow partition adjustment feature is identifying the partitions which are experiencing performance issues during batch executions.
+In order to do that, we measure the time that each partition takes to complete its task and mark partitions which take more than the average plus the standard deviation
+as slow partitions. This method is more sensitive for smaller/faster batches, so there is a higher chance that it marks partitions as slow in such scenarios.
+
+In order to avoid this, we have introduced another configuration option, `MaxAcceptableBatchReceiveTime`, which indicates the maximum acceptable time for
+a batch execution in the user-defined job. We simply consider all partitions to be performing as expected if the batch execution time does not exceed the
+`MaxAcceptableBatchReceiveTime` and do not mark any partition as slow in such scenarios. This configuration allows users to set the sensitivity of the
+slow partition adjustment feature and trigger its logic only when the batch execution time is higher than an expected value. The `MaxAcceptableBatchReceiveTime`
+value can be set in the `EventHubsConf` object and its default value is 30 seconds.
+
+```scala
+val ehConf = EventHubsConf(connectionString)
+  .setSlowPartitionAdjustment(true)
+  .setMaxAcceptableBatchReceiveTime(Duration.ofSeconds(20))
+```
+
+Note that although you can set the `MaxAcceptableBatchReceiveTime` value without enabling `SlowPartitionAdjustment`, it is only used when
+`SlowPartitionAdjustment` is enabled.
+
+### Monitor Partitions Performances and Slow Partition Adjustment
+
+If you want to monitor when the slow partition adjustment feature marks a partition as slow and how it adjusts the next batch, you can define a class which extends
+the [`ThrottlingStatusPlugin`](https://github.com/Azure/azure-event-hubs-spark/blob/master/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala) trait and set an object of that class in the `EventHubsConf` using the `ThrottlingStatusPlugin` configuration option.
+
+The `ThrottlingStatusPlugin` trait has two methods:
+
+- [`onPartitionsPerformanceStatusUpdate`](https://github.com/Azure/azure-event-hubs-spark/blob/90d70928d9c738923afe5d08557e0a61c9c7188d/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala#L31), which provides the number of events and the execution time for each partition during the last batch execution, in addition to
+the performance percentage metric for each partition, which indicates whether a partition is running slow. The performance percentage metric is a value in the range
+of [0, 1] where a lower value indicates a slower partition and 1 indicates a partition that is performing as expected.
+
+- [`onBatchCreation`](https://github.com/Azure/azure-event-hubs-spark/blob/90d70928d9c738923afe5d08557e0a61c9c7188d/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala#L27), which provides the range of offsets assigned to be read from each partition in the next batch based on their throttling factor (aka performance percentage metric).
+The throttling factor (aka performance percentage metric) is also provided so that users can see how the performance of each partition affects the number of events in the next batch.
+
+
+```scala
+import scala.collection.mutable
+
+import org.apache.spark.eventhubs.NameAndPartition
+import org.apache.spark.eventhubs.rdd.OffsetRange
+import org.apache.spark.eventhubs.utils.ThrottlingStatusPlugin
+import org.apache.spark.internal.Logging
+
+class SimpleThrottlingStatusPlugin extends ThrottlingStatusPlugin with Logging {
+  override def onBatchCreation(
+      nextBatchLocalId: Long,
+      nextBatchOffsetRanges: Array[OffsetRange],
+      partitionsThrottleFactor: mutable.Map[NameAndPartition, Double]): Unit = {
+    log.info(
+      s"New Batch with localId = $nextBatchLocalId has been created with start and end offsets: " +
+        s"${nextBatchOffsetRanges} and partitions performances: ${partitionsThrottleFactor}")
+  }
+
+  override def onPartitionsPerformanceStatusUpdate(
+      latestUpdatedBatchLocalId: Long,
+      partitionsBatchSizes: Map[NameAndPartition, Int],
+      partitionsBatchReceiveTimeMS: Map[NameAndPartition, Long],
+      partitionsPerformancePercentages: Option[Map[NameAndPartition, Double]]): Unit = {
+    log.info(
+      s"Latest updated batch with localId = $latestUpdatedBatchLocalId received the following information: " +
+        s"Batch size: ${partitionsBatchSizes}, batch receive times in ms: ${partitionsBatchReceiveTimeMS}, " +
+        s"performance percentages: ${partitionsPerformancePercentages}")
+  }
+}
+
+
+val ehConf = EventHubsConf(connectionString)
+  .setSlowPartitionAdjustment(true)
+  .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin)
+```
+
+Note that although you can set the `ThrottlingStatusPlugin` value without enabling `SlowPartitionAdjustment`, it is only used when
+`SlowPartitionAdjustment` is enabled.
\ No newline at end of file From a1e4c679f5963144393e6aca85758c84bb2390c9 Mon Sep 17 00:00:00 2001 From: Lucas Yang Date: Tue, 6 Oct 2020 17:19:34 +0000 Subject: [PATCH 13/29] fix the error when num of partitions increased (#500) --- .../eventhubs/utils/EventHubsTestUtils.scala | 11 +-- .../eventhubs/utils/SimulatedClient.scala | 7 +- .../spark/sql/eventhubs/EventHubsSource.scala | 51 ++++++++----- .../sql/eventhubs/EventHubsSourceSuite.scala | 74 +++++++++++++++++++ 4 files changed, 116 insertions(+), 27 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtils.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtils.scala index fe4e35b30..f618bc484 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtils.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtils.scala @@ -133,7 +133,13 @@ private[spark] class EventHubsTestUtils { */ def getEventHubsConf(ehName: String = "name"): EventHubsConf = { val partitionCount = getEventHubs(ehName).partitionCount + val positions: Map[NameAndPartition, EventPosition] = (for { + partition <- 0 until partitionCount + } yield NameAndPartition(ehName, partition) -> EventPosition.fromSequenceNumber(0L)).toMap + getEventHubsConfWithoutStartingPositions(ehName).setStartingPositions(positions) + } + def getEventHubsConfWithoutStartingPositions(ehName: String = "name"): EventHubsConf = { val connectionString = ConnectionStringBuilder() .setNamespaceName("namespace") .setEventHubName(ehName) @@ -141,13 +147,8 @@ private[spark] class EventHubsTestUtils { .setSasKey("key") .build - val positions: Map[NameAndPartition, EventPosition] = (for { - partition <- 0 until partitionCount - } yield NameAndPartition(ehName, partition) -> EventPosition.fromSequenceNumber(0L)).toMap - EventHubsConf(connectionString) .setConsumerGroup("consumerGroup") - .setStartingPositions(positions) .setMaxRatePerPartition(DefaultMaxRate) .setUseSimulatedClient(true) } diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedClient.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedClient.scala index a6872b0a7..35e04464a 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedClient.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedClient.scala @@ -97,8 +97,11 @@ private[spark] class SimulatedClient(private val ehConf: EventHubsConf) extends if (positions.isEmpty && position.isEmpty) { (for { id <- 0 until eventHub.partitionCount } yield id -> apiCall(id)).toMap } else if (positions.isEmpty) { - require(position.get.seqNo >= 0L) - (for { id <- 0 until partitionCount } yield id -> position.get.seqNo).toMap + if (position.get.seqNo < 0L) { + (for { id <- 0 until partitionCount } yield id -> apiCall(id)).toMap + } else { + (for { id <- 0 until partitionCount } yield id -> position.get.seqNo).toMap + } } else { require(positions.get.forall(x => x._2.seqNo >= 0L)) require(positions.get.size == partitionCount) diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index 07df27d63..23e36f30d 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -165,21 +165,24 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, s"version from $text.") } } - - metadataLog - 
.get(0) - .getOrElse { - // translate starting points within ehConf to sequence numbers - val seqNos = ehClient.translate(ehConf, partitionCount).map { - case (pId, seqNo) => - (NameAndPartition(ehName, pId), seqNo) - } - val offset = EventHubsSourceOffset(seqNos) - metadataLog.add(0, offset) - logInfo(s"Initial sequence numbers: $seqNos") - offset + val defaultSeqNos = ehClient + .translate(ehConf, partitionCount) + .map { + case (pId, seqNo) => + (NameAndPartition(ehName, pId), seqNo) } - .partitionToSeqNos + + val seqNos = metadataLog.get(0) match { + case Some(checkpoint) => + defaultSeqNos ++ checkpoint.partitionToSeqNos + case None => + defaultSeqNos + } + val offset = EventHubsSourceOffset(seqNos) + metadataLog.add(0, offset) + logInfo(s"Initial sequence numbers: $seqNos") + offset.partitionToSeqNos + } private var currentSeqNos: Option[Map[NameAndPartition, SequenceNumber]] = None @@ -204,11 +207,11 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, // If not, we'll report possible data loss. earliestSeqNos = Some(earliestAndLatest.map { case (p, (e, _)) => NameAndPartition(ehName, p) -> e - }.toMap) + }) val latest = earliestAndLatest.map { case (p, (_, l)) => NameAndPartition(ehName, p) -> l - }.toMap + } val seqNos: Map[NameAndPartition, SequenceNumber] = maxOffsetsPerTrigger match { case None => @@ -225,7 +228,6 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, currentSeqNos = Some(seqNos) logInfo(s"GetOffset: ${seqNos.toSeq.map(_.toString).sorted}") - Some(EventHubsSourceOffset(seqNos)) } @@ -326,7 +328,6 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, schema, isStreaming = true) } - if (earliestSeqNos.isEmpty) { val earliestAndLatest = ehClient.allBoundedSeqNos earliestSeqNos = Some(earliestAndLatest.map { @@ -337,7 +338,18 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, val fromSeqNos = start match { // recovery mode .. 
      case Some(prevBatchEndOffset) =>
-        val startingSeqNos = EventHubsSourceOffset.getPartitionSeqNos(prevBatchEndOffset)
+        val prevOffsets = EventHubsSourceOffset.getPartitionSeqNos(prevBatchEndOffset)
+        val startingSeqNos = if (prevOffsets.size < untilSeqNos.size) {
+          val defaultSeqNos = ehClient
+            .translate(ehConf, partitionCount)
+            .map {
+              case (pId, seqNo) =>
+                (NameAndPartition(ehName, pId), seqNo)
+            }
+          defaultSeqNos ++ prevOffsets
+        } else {
+          prevOffsets
+        }
         adjustStartingOffset(startingSeqNos)
       case None =>
         adjustStartingOffset(initialPartitionSeqNos)
@@ -379,7 +391,6 @@
         true
       }
     }.toArray
-
     // if slowPartitionAdjustment is on, add the current batch to the perforamnce tracker
     if (slowPartitionAdjustment) {
       addCurrentBatchToStatusTracker(offsetRanges)
diff --git a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala
index f12672db6..a8a399b0e 100644
--- a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala
+++ b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala
@@ -254,6 +254,80 @@ class EventHubsSourceSuite extends EventHubsSourceTest {
     )
   }
 
+  test("Partitions number increased") {
+    val name = newEventHubs()
+    var eventHub = testUtils.createEventHubs(name, DefaultPartitionCount)
+    testUtils.send(name, partition = Some(0), data = 0 to 9)
+    testUtils.send(name, partition = Some(1), data = 10 to 19)
+    testUtils.send(name, partition = Some(2), data = 20 to 29)
+    testUtils.send(name, partition = Some(3), data = 30 to 39)
+
+    val parameters = testUtils
+      .getEventHubsConfWithoutStartingPositions(eventHub.name)
+      .setMaxEventsPerTrigger(8)
+      .setStartingPosition(EventPosition.fromStartOfStream)
+      .toMap
+
+    val reader = spark.readStream
+      .format("eventhubs")
+      .options(parameters)
+
+    val eventhubs = reader
+      .load()
+      .select("body")
+      .as[String]
+
+    val mapped: org.apache.spark.sql.Dataset[_] = eventhubs.map(_.toInt)
+
+    val clock = new StreamManualClock
+
+    val waitUntilBatchProcessed = AssertOnQuery { q =>
+      eventually(Timeout(streamingTimeout)) {
+        if (q.exception.isEmpty) {
+          assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
+        }
+      }
+      if (q.exception.isDefined) {
+        throw q.exception.get
+      }
+      true
+    }
+    val defaultCheckpointLocation =
+      Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
+
+    testStream(mapped)(
+      StartStream(ProcessingTime(100), clock, checkpointLocation = defaultCheckpointLocation),
+      waitUntilBatchProcessed,
+      // we'll get two events per partition per trigger (maxEventsPerTrigger = 8, 4 partitions)
+      CheckAnswer(0, 1, 10, 11, 20, 21, 30, 31),
+      AdvanceManualClock(100),
+      waitUntilBatchProcessed,
+      // eight additional events, two per partition
+      CheckAnswer(0, 1, 10, 11, 20, 21, 30, 31, 2, 3, 12, 13, 22, 23, 32, 33),
+      StopStream
+    )
+    // Add partitions to eventhub
+    eventHub = testUtils.createEventHubs(name, DefaultPartitionCount * 2)
+    testUtils.send(name, partition = Some(0), data = 0 to 9)
+    testUtils.send(name, partition = Some(1), data = 10 to 19)
+    testUtils.send(name, partition = Some(2), data = 20 to 29)
+    testUtils.send(name, partition = Some(3), data = 30 to 39)
+    testUtils.send(name, partition = Some(4), data = 40 to 49)
+    testUtils.send(name, partition = Some(5), data = 50 to 59)
+    testUtils.send(name, partition = Some(6), data = 60 to 69)
+    testUtils.send(name, partition = Some(7), data = 70 to 79)
+    testStream(mapped)(
+      StartStream(ProcessingTime(100),
clock, checkpointLocation = defaultCheckpointLocation),
+      waitUntilBatchProcessed,
+      // eight additional events, one per partition now that there are eight partitions
+      CheckAnswer(4, 14, 24, 34, 40, 50, 60, 70),
+      AdvanceManualClock(100),
+      waitUntilBatchProcessed,
+      // eight additional events, one more per partition
+      CheckAnswer(4, 14, 24, 34, 40, 50, 60, 70, 5, 15, 25, 35, 41, 51, 61, 71)
+    )
+  }
+
   test("maxOffsetsPerTrigger with non-uniform partitions") {
     val name = newEventHubs()
     val eventHub = testUtils.createEventHubs(name, DefaultPartitionCount)

From 5feb961cacb458e603b3e0bec72128dce35a2072 Mon Sep 17 00:00:00 2001
From: nyaghma
Date: Tue, 6 Oct 2020 10:30:02 -0700
Subject: [PATCH 14/29] Fix non streaming rpc endpoint (#542)

---
 .../client/CachedEventHubsReceiver.scala      |  2 +-
 .../spark/sql/eventhubs/EventHubsSource.scala | 35 +++++-----
 .../eventhubs/EventHubsSourceProvider.scala   | 64 +++++++++++--------
 3 files changed, 54 insertions(+), 47 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
index 4a8008920..cf784387a 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
@@ -329,7 +329,7 @@ private[spark] object CachedEventHubsReceiver extends CachedReceiver with Loggin
 
   private[this] val receivers = new MutableMap[String, CachedEventHubsReceiver]()
 
-  // RPC endpoint for partition performacne communciation in the executor
+  // RPC endpoint for partition performance communication in the executor
   val partitionPerformanceReceiverRef =
     RpcUtils.makeDriverRef(PartitionPerformanceReceiver.ENDPOINT_NAME,
                            SparkEnv.get.conf,
diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala
index 23e36f30d..f871605e0 100644
--- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala
+++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala
@@ -30,9 +30,7 @@ import org.apache.spark.eventhubs.rdd.{ EventHubsRDD, OffsetRange }
 import org.apache.spark.eventhubs.utils.ThrottlingStatusPlugin
 import org.apache.spark.eventhubs.{ EventHubsConf, NameAndPartition, _ }
 import org.apache.spark.internal.Logging
-import org.apache.spark.rpc.RpcEndpointRef
 import org.apache.spark.scheduler.ExecutorCacheTaskLocation
-import org.apache.spark.SparkEnv
 import org.apache.spark.sql.execution.streaming.{
   HDFSMetadataLog,
   Offset,
@@ -77,6 +75,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext,
 
   import EventHubsConf._
   import EventHubsSource._
+  import EventHubsSourceProvider._
 
   private lazy val ehClient = EventHubsSourceProvider.clientFactory(parameters)(ehConf)
   private lazy val partitionCount: Int = ehClient.partitionCount
@@ -87,9 +86,11 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext,
   private val sc = sqlContext.sparkContext
 
   private val maxOffsetsPerTrigger: Option[Long] =
-    Option(parameters.get(MaxEventsPerTriggerKey).map(_.toLong).getOrElse(
-      parameters.get(MaxEventsPerTriggerKeyAlias).map(_.toLong).getOrElse(
-        partitionCount * 1000)))
+    Option(parameters
+      .get(MaxEventsPerTriggerKey)
+      .map(_.toLong)
+      .getOrElse(
+        parameters.get(MaxEventsPerTriggerKeyAlias).map(_.toLong).getOrElse(partitionCount * 1000)))
 
   // set slow partition adjustment flag and static values in the tracker
   private val slowPartitionAdjustment:
Boolean = @@ -147,22 +148,25 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, text.substring(1, text.length).toInt } catch { case _: NumberFormatException => - throw new IllegalStateException(s"Log file was malformed: failed to read correct log " + - s"version from $text.") + throw new IllegalStateException( + s"Log file was malformed: failed to read correct log " + + s"version from $text.") } if (version > 0) { if (version > maxSupportedVersion) { - throw new IllegalStateException(s"UnsupportedLogVersion: maximum supported log version " + - s"is v${maxSupportedVersion}, but encountered v$version. The log file was produced " + - s"by a newer version of Spark and cannot be read by this version. Please upgrade.") + throw new IllegalStateException( + s"UnsupportedLogVersion: maximum supported log version " + + s"is v${maxSupportedVersion}, but encountered v$version. The log file was produced " + + s"by a newer version of Spark and cannot be read by this version. Please upgrade.") } else { return version } } } // reaching here means we failed to read the correct log version - throw new IllegalStateException(s"Log file was malformed: failed to read correct log " + - s"version from $text.") + throw new IllegalStateException( + s"Log file was malformed: failed to read correct log " + + s"version from $text.") } } val defaultSeqNos = ehClient @@ -449,14 +453,7 @@ private[eventhubs] object EventHubsSource { """.stripMargin private[eventhubs] val VERSION = 1 - - // RPC endpoint for partition performacne communciation in the driver private var localBatchId = -1 - val partitionsStatusTracker = PartitionsStatusTracker.getPartitionStatusTracker - val partitionPerformanceReceiver: PartitionPerformanceReceiver = - new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv, partitionsStatusTracker) - val partitionPerformanceReceiverRef: RpcEndpointRef = SparkEnv.get.rpcEnv - .setupEndpoint(PartitionPerformanceReceiver.ENDPOINT_NAME, partitionPerformanceReceiver) def getSortedExecutorList(sc: SparkContext): Array[String] = { val bm = sc.env.blockManager diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala index 230b2ee0b..5097b8f85 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala @@ -45,6 +45,8 @@ import org.apache.spark.sql.types._ import org.apache.spark.sql.{ AnalysisException, DataFrame, SQLContext, SaveMode } import org.apache.spark.unsafe.types.UTF8String import org.json4s.jackson.Serialization +import org.apache.spark.rpc.RpcEndpointRef +import org.apache.spark.SparkEnv import collection.JavaConverters._ @@ -140,6 +142,12 @@ private[sql] class EventHubsSourceProvider } private[sql] object EventHubsSourceProvider extends Serializable { + // RPC endpoint for partition performance communication in the driver + val partitionsStatusTracker = PartitionsStatusTracker.getPartitionStatusTracker + val partitionPerformanceReceiver: PartitionPerformanceReceiver = + new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv, partitionsStatusTracker) + val partitionPerformanceReceiverRef: RpcEndpointRef = SparkEnv.get.rpcEnv + .setupEndpoint(PartitionPerformanceReceiver.ENDPOINT_NAME, partitionPerformanceReceiver) def eventHubsSchema: StructType = { StructType( @@ -169,32 +177,32 @@ private[sql] object EventHubsSourceProvider extends 
Serializable { new java.sql.Timestamp(ed.getSystemProperties.getEnqueuedTime.toEpochMilli)), UTF8String.fromString(ed.getSystemProperties.getPublisher), UTF8String.fromString(ed.getSystemProperties.getPartitionKey), - ArrayBasedMapData( - ed.getProperties.asScala - .mapValues { - case b: Binary => - val buf = b.asByteBuffer() - val arr = new Array[Byte](buf.remaining) - buf.get(arr) - arr.asInstanceOf[AnyRef] - case d128: Decimal128 => d128.asBytes.asInstanceOf[AnyRef] - case d32: Decimal32 => d32.getBits.asInstanceOf[AnyRef] - case d64: Decimal64 => d64.getBits.asInstanceOf[AnyRef] - case s: Symbol => s.toString.asInstanceOf[AnyRef] - case ub: UnsignedByte => ub.toString.asInstanceOf[AnyRef] - case ui: UnsignedInteger => ui.toString.asInstanceOf[AnyRef] - case ul: UnsignedLong => ul.toString.asInstanceOf[AnyRef] - case us: UnsignedShort => us.toString.asInstanceOf[AnyRef] - case c: Character => c.toString.asInstanceOf[AnyRef] - case d: DescribedType => d.getDescribed - case default => default + ArrayBasedMapData(ed.getProperties.asScala + .mapValues { + case b: Binary => + val buf = b.asByteBuffer() + val arr = new Array[Byte](buf.remaining) + buf.get(arr) + arr.asInstanceOf[AnyRef] + case d128: Decimal128 => d128.asBytes.asInstanceOf[AnyRef] + case d32: Decimal32 => d32.getBits.asInstanceOf[AnyRef] + case d64: Decimal64 => d64.getBits.asInstanceOf[AnyRef] + case s: Symbol => s.toString.asInstanceOf[AnyRef] + case ub: UnsignedByte => ub.toString.asInstanceOf[AnyRef] + case ui: UnsignedInteger => ui.toString.asInstanceOf[AnyRef] + case ul: UnsignedLong => ul.toString.asInstanceOf[AnyRef] + case us: UnsignedShort => us.toString.asInstanceOf[AnyRef] + case c: Character => c.toString.asInstanceOf[AnyRef] + case d: DescribedType => d.getDescribed + case default => default + } + .map { p => + p._2 match { + case s: String => UTF8String.fromString(p._1) -> UTF8String.fromString(s) + case default => + UTF8String.fromString(p._1) -> UTF8String.fromString(Serialization.write(p._2)) } - .map { p => - p._2 match { - case s: String => UTF8String.fromString(p._1) -> UTF8String.fromString(s) - case default => UTF8String.fromString(p._1) -> UTF8String.fromString(Serialization.write(p._2)) - } - }), + }), ArrayBasedMapData( // Don't duplicate offset, enqueued time, and seqNo (ed.getSystemProperties.asScala -- Seq(OffsetAnnotation, @@ -210,8 +218,10 @@ private[sql] object EventHubsSourceProvider extends Serializable { } .map { p => p._2 match { - case s: String => UTF8String.fromString(p._1) -> UTF8String.fromString(s) - case default => UTF8String.fromString(p._1) -> UTF8String.fromString(Serialization.write(p._2)) + case s: String => UTF8String.fromString(p._1) -> UTF8String.fromString(s) + case default => + UTF8String.fromString(p._1) -> UTF8String.fromString( + Serialization.write(p._2)) } }) ) From 865dae86c3aa841ca2ea9c68dff2d884f010b46a Mon Sep 17 00:00:00 2001 From: nyaghma Date: Mon, 26 Oct 2020 18:01:36 -0700 Subject: [PATCH 15/29] Dynamic added partitions (#544) --- .../spark/eventhubs/EventHubsConf.scala | 49 ++++++++++++++----- .../eventhubs/client/EventHubsClient.scala | 35 ++++++++++++- .../org/apache/spark/eventhubs/package.scala | 2 + .../spark/sql/eventhubs/EventHubsSource.scala | 8 ++- .../eventhubs/EventHubsDirectDStream.scala | 2 +- 5 files changed, 82 insertions(+), 14 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala index 03398e2f9..63cde7751 100644 --- 
a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala
@@ -23,7 +23,11 @@ import java.util.concurrent.ConcurrentHashMap
 import com.microsoft.azure.eventhubs.AzureActiveDirectoryTokenProvider.AuthenticationCallback
 import org.apache.spark.eventhubs.PartitionPreferredLocationStrategy.PartitionPreferredLocationStrategy
 import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
-import org.apache.spark.eventhubs.utils.{AadAuthenticationCallback, MetricPlugin, ThrottlingStatusPlugin}
+import org.apache.spark.eventhubs.utils.{
+  AadAuthenticationCallback,
+  MetricPlugin,
+  ThrottlingStatusPlugin
+}
 import org.apache.spark.internal.Logging
 import org.json4s.NoTypeHints
 import org.json4s.jackson.Serialization
@@ -172,7 +176,8 @@ final class EventHubsConf private (private val connectionStr: String)
     ThrottlingStatusPluginKey,
     MaxAcceptableBatchReceiveTimeKey,
     UseAadAuthKey,
-    AadAuthCallbackKey
+    AadAuthCallbackKey,
+    DynamicPartitionDiscoveryKey
   ).map(_.toLowerCase).toSet
 
   val trimmedConfig = EventHubsConf(connectionString)
@@ -496,10 +501,31 @@ final class EventHubsConf private (private val connectionStr: String)
     self.get(SlowPartitionAdjustmentKey).getOrElse(DefaultSlowPartitionAdjustment).toBoolean
   }
 
+  /**
+   * Set the flag for dynamic partition discovery. This option is useful only if partitions are being dynamically
+   * added to an existing event hub. For more information on how to dynamically add partitions to an event hub
+   * please refer to [[https://docs.microsoft.com/en-us/azure/event-hubs/dynamically-add-partitions]].
+   * If dynamic partition discovery is disabled, the number of partitions is read once at the
+   * beginning of the execution. Otherwise, it is re-read and updated every
+   * [[UpdatePartitionCountIntervalMS]] milliseconds. The default value is false.
+   * Default: [[DefaultDynamicPartitionDiscovery]]
+   *
+   * @param b the flag which specifies whether the connector uses dynamic partition discovery
+   * @return the updated [[EventHubsConf]] instance
+   */
+  def setDynamicPartitionDiscovery(b: Boolean): EventHubsConf = {
+    set(DynamicPartitionDiscoveryKey, b)
+  }
+
+  /** The dynamic partition discovery flag */
+  def dynamicPartitionDiscovery: Boolean = {
+    self.get(DynamicPartitionDiscoveryKey).getOrElse(DefaultDynamicPartitionDiscovery).toBoolean
+  }
+
   /** Set the max time that is acceptable for a partition to receive events in a single batch.
    * This value is being used to identify slow partitions when the slowPartitionAdjustment is on.
-   * Only partitions that tale more than this time to receive thier portion of events in batch are considered
-   * as potential slow partitrions.
+   * Only partitions that take more than this time to receive their portion of events in a batch are considered
+   * as potential slow partitions.
    * Default: [[DefaultMaxAcceptableBatchReceiveTime]]
    *
    * @param d the new maximum acceptable time for a partition to receive events in a single batch
@@ -580,13 +606,13 @@ final class EventHubsConf private (private val connectionStr: String)
   }
 
   /**
-   * set a callback class for aad auth. The class should be Serializable and derived from
-   * org.apache.spark.eventhubs.utils.AadAuthenticationCallback.
- * More info about this: https://docs.microsoft.com/en-us/azure/event-hubs/authorize-access-azure-active-directory - * - * @param callback The callback class which implements org.apache.spark.eventhubs.utils.AadAuthenticationCallback - * @return the updated [[EventHubsConf]] instance - */ + * set a callback class for aad auth. The class should be Serializable and derived from + * org.apache.spark.eventhubs.utils.AadAuthenticationCallback. + * More info about this: https://docs.microsoft.com/en-us/azure/event-hubs/authorize-access-azure-active-directory + * + * @param callback The callback class which implements org.apache.spark.eventhubs.utils.AadAuthenticationCallback + * @return the updated [[EventHubsConf]] instance + */ def setAadAuthCallback(callback: AadAuthenticationCallback): EventHubsConf = { setUseAadAuth(true) set(AadAuthCallbackKey, callback.getClass.getName) @@ -657,6 +683,7 @@ object EventHubsConf extends Logging { val MaxAcceptableBatchReceiveTimeKey = "eventhubs.maxAcceptableBatchReceiveTime" val UseAadAuthKey = "eventhubs.useAadAuth" val AadAuthCallbackKey = "eventhubs.aadAuthCallback" + val DynamicPartitionDiscoveryKey = "eventhubs.DynamicPartitionDiscovery" /** Creates an EventHubsConf */ def apply(connectionString: String) = new EventHubsConf(connectionString) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala index 48fcb99b2..f63d0af37 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala @@ -44,6 +44,7 @@ private[spark] class EventHubsClient(private val ehConf: EventHubsConf) with Logging { import org.apache.spark.eventhubs._ + import EventHubsClient._ ehConf.validate @@ -164,8 +165,22 @@ private[spark] class EventHubsClient(private val ehConf: EventHubsConf) * * @return partition count */ - override lazy val partitionCount: Int = { + override def partitionCount: Int = { try { + if (ehConf.dynamicPartitionDiscovery) { + partitionCountDynamic + } else { + partitionCountLazyVal + } + } catch { + case e: Exception => throw e + } + } + + lazy val partitionCountLazyVal: Int = { + try { + logDebug( + s"partitionCountLazyVal makes a call to runTimeInfo to read the number of partitions.") val runtimeInfo = client.getRuntimeInformation.get runtimeInfo.getPartitionCount } catch { @@ -173,6 +188,22 @@ private[spark] class EventHubsClient(private val ehConf: EventHubsConf) } } + def partitionCountDynamic: Int = { + try { + val currentTimeStamp = System.currentTimeMillis() + if ((currentTimeStamp - partitionCountCacheUpdateTimestamp > UpdatePartitionCountIntervalMS) || (partitionCountCache == 0)) { + val runtimeInfo = client.getRuntimeInformation.get + partitionCountCache = runtimeInfo.getPartitionCount + partitionCountCacheUpdateTimestamp = currentTimeStamp + logDebug( + s"partitionCountDynamic made a call to runTimeInfo to read the number of partitions = ${partitionCountCache} at timestamp = ${partitionCountCacheUpdateTimestamp}") + } + partitionCountCache + } catch { + case e: Exception => throw e + } + } + /** * Cleans up all open connections and links. 
* @@ -318,6 +349,8 @@ private[spark] class EventHubsClient(private val ehConf: EventHubsConf) } private[spark] object EventHubsClient { + private var partitionCountCache: Int = 0 + private var partitionCountCacheUpdateTimestamp: Long = 0 private[spark] def apply(ehConf: EventHubsConf): EventHubsClient = new EventHubsClient(ehConf) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/package.scala b/core/src/main/scala/org/apache/spark/eventhubs/package.scala index daefcfabf..521feb882 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/package.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/package.scala @@ -50,11 +50,13 @@ package object eventhubs { val DefaultPartitionPreferredLocationStrategy = "Hash" val DefaultUseExclusiveReceiver = "true" val DefaultSlowPartitionAdjustment = "false" + val DefaultDynamicPartitionDiscovery = "false" val StartingSequenceNumber = 0L val DefaultThreadPoolSize = 16 val DefaultEpoch = 0L val RetryCount = 10 val WaitInterval = 5000 + val UpdatePartitionCountIntervalMS = 300000 val OffsetAnnotation = "x-opt-offset" val EnqueuedTimeAnnotation = "x-opt-enqueued-time" diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index f871605e0..be03e120f 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -78,7 +78,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, import EventHubsSourceProvider._ private lazy val ehClient = EventHubsSourceProvider.clientFactory(parameters)(ehConf) - private lazy val partitionCount: Int = ehClient.partitionCount + private def partitionCount: Int = ehClient.partitionCount private val ehConf = EventHubsConf.toConf(parameters) private val ehName = ehConf.name @@ -178,6 +178,10 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, val seqNos = metadataLog.get(0) match { case Some(checkpoint) => + if (defaultSeqNos.size > checkpoint.partitionToSeqNos.size) { + logInfo( + s"Number of partitions has increased from ${checkpoint.partitionToSeqNos.size} in the latest checkpoint to ${defaultSeqNos.size}.") + } defaultSeqNos ++ checkpoint.partitionToSeqNos case None => defaultSeqNos @@ -344,6 +348,8 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, case Some(prevBatchEndOffset) => val prevOffsets = EventHubsSourceOffset.getPartitionSeqNos(prevBatchEndOffset) val startingSeqNos = if (prevOffsets.size < untilSeqNos.size) { + logInfo( + s"Number of partitions has increased from ${prevOffsets.size} to ${untilSeqNos.size}") val defaultSeqNos = ehClient .translate(ehConf, partitionCount) .map { diff --git a/core/src/main/scala/org/apache/spark/streaming/eventhubs/EventHubsDirectDStream.scala b/core/src/main/scala/org/apache/spark/streaming/eventhubs/EventHubsDirectDStream.scala index ccfa91f86..8b0208258 100644 --- a/core/src/main/scala/org/apache/spark/streaming/eventhubs/EventHubsDirectDStream.scala +++ b/core/src/main/scala/org/apache/spark/streaming/eventhubs/EventHubsDirectDStream.scala @@ -55,7 +55,7 @@ private[spark] class EventHubsDirectDStream private[spark] (_ssc: StreamingConte import EventHubsDirectDStream._ - private lazy val partitionCount: Int = ehClient.partitionCount + private def partitionCount: Int = ehClient.partitionCount private lazy val ehName = ehConf.name @transient private var _client: 
Client = _ From 728493a955c5027d3e33f43b44260658dcb5d629 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 29 Oct 2020 17:53:32 +0100 Subject: [PATCH 16/29] fixes for pyspark documentation (#551) --- docs/PySpark/structured-streaming-pyspark.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docs/PySpark/structured-streaming-pyspark.md b/docs/PySpark/structured-streaming-pyspark.md index 070d13c28..3d98e210e 100644 --- a/docs/PySpark/structured-streaming-pyspark.md +++ b/docs/PySpark/structured-streaming-pyspark.md @@ -56,7 +56,7 @@ connectionString = "YOUR.CONNECTION.STRING" ehConf = {} ehConf['eventhubs.connectionString'] = connectionString -For 2.3.15 version and above, the configuration dictionary requires that connection string be encrypted. +# For 2.3.15 version and above, the configuration dictionary requires that connection string be encrypted. ehConf['eventhubs.connectionString'] = sc._jvm.org.apache.spark.eventhubs.EventHubsUtils.encrypt(connectionString) ``` @@ -239,7 +239,7 @@ ehConf = { } # Simple batch query -val df = spark \ +df = spark \ .read \ .format("eventhubs") \ .options(**ehConf) \ @@ -329,16 +329,14 @@ ds = df \ .write \ .format("eventhubs") \ .options(**ehWriteConf) \ - .option("checkpointLocation", YOUR.OUTPUT.PATH.STRING) \ .save() # Write body data from a DataFrame to EventHubs with a partitionKey ds = df \ .selectExpr("partitionKey", "body") \ - .writeStream \ + .write \ .format("eventhubs") \ .options(**ehWriteConf) \ - .option("checkpointLocation", "///output.txt") \ .save() ``` From 6db45d1a2a7f013466f784140db767f870c57101 Mon Sep 17 00:00:00 2001 From: nyaghma Date: Tue, 3 Nov 2020 20:00:56 -0800 Subject: [PATCH 17/29] Partition sender fix (#550) --- .../apache/spark/eventhubs/client/EventHubsClient.scala | 2 +- .../apache/spark/sql/eventhubs/EventHubsWriteTask.scala | 7 ++++--- .../org/apache/spark/sql/eventhubs/EventHubsWriter.scala | 1 + .../apache/spark/sql/eventhubs/EventHubsSinkSuite.scala | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala index f63d0af37..ffc0bd4b2 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala @@ -88,7 +88,7 @@ private[spark] class EventHubsClient(private val ehConf: EventHubsConf) } val sendTask = if (partition.isDefined) { - if (partitionSender.getPartitionId.toInt != partition.get) { + if ((partitionSender == null) || (partitionSender.getPartitionId.toInt != partition.get)) { logInfo("Recreating partition sender.") createPartitionSender(partition.get) } diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsWriteTask.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsWriteTask.scala index e56642eae..0d6a251b6 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsWriteTask.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsWriteTask.scala @@ -181,14 +181,15 @@ private[eventhubs] abstract class EventHubsRowWriter(inputSchema: Seq[Attribute] val partitionIdExpression = inputSchema - .find(_.name == EventHubsWriter.PartitionIdAttributeName) + .find(attr => + (attr.name == EventHubsWriter.PartitionIdAttributeNameAlias) || (attr.name == EventHubsWriter.PartitionIdAttributeName)) .getOrElse(Literal(null, StringType)) 
partitionIdExpression.dataType match { case StringType => // good case t => throw new IllegalStateException( - s"${EventHubsWriter.PartitionIdAttributeName} attribute unsupported type $t" + s"${EventHubsWriter.PartitionIdAttributeNameAlias} attribute unsupported type $t" ) } @@ -198,7 +199,7 @@ private[eventhubs] abstract class EventHubsRowWriter(inputSchema: Seq[Attribute] .getOrElse(Literal(null, MapType(StringType, StringType))) propertiesExpression.dataType match { - case MapType(StringType, StringType, true) => // good + case MapType(StringType, StringType, true) => // good case MapType(StringType, StringType, false) => // good case t => throw new IllegalStateException( diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsWriter.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsWriter.scala index dda560d76..d59a318a0 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsWriter.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsWriter.scala @@ -41,6 +41,7 @@ private[eventhubs] object EventHubsWriter extends Logging { val BodyAttributeName = "body" val PartitionKeyAttributeName = "partitionKey" val PartitionIdAttributeName = "partition" + val PartitionIdAttributeNameAlias = "partitionId" val PropertiesAttributeName = "properties" override def toString: String = "EventHubsWriter" diff --git a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSinkSuite.scala b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSinkSuite.scala index 542186337..56a7cba0e 100644 --- a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSinkSuite.scala +++ b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSinkSuite.scala @@ -411,7 +411,7 @@ class EventHubsSinkSuite extends StreamTest with SharedSQLContext { assert( ex.getMessage .toLowerCase(Locale.ROOT) - .contains(s"partition attribute unsupported type")) + .contains(s"partitionid attribute unsupported type")) } test("streaming - write data with valid schema but wrong type - bad partitionKey type") { From ac93a730d32d0c5de9b71e9278e2a6a67bb28fbb Mon Sep 17 00:00:00 2001 From: nyaghma Date: Fri, 6 Nov 2020 14:59:34 -0800 Subject: [PATCH 18/29] Retry client create (#552) --- .../client/ClientConnectionPool.scala | 45 ++++++++++++------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/ClientConnectionPool.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/ClientConnectionPool.scala index b4bb76a50..a5a301bc5 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/client/ClientConnectionPool.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/client/ClientConnectionPool.scala @@ -23,8 +23,12 @@ import java.util.concurrent.{ ConcurrentLinkedQueue, Executors, ScheduledExecuto import com.microsoft.azure.eventhubs.{ EventHubClient, EventHubClientOptions, RetryPolicy } import org.apache.spark.eventhubs._ +import org.apache.spark.eventhubs.utils.RetryUtils.retryJava import org.apache.spark.internal.Logging +import scala.concurrent.{ Await, Future } +import scala.concurrent.ExecutionContext.Implicits.global + /** * A connection pool for EventHubClients. A connection pool is created per connection string. * If a connection isn't available in the pool, then a new one is created. 
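// Editor's note (illustrative sketch, not part of the patch): the hunk below swaps the
// blocking EventHubClient create*Sync calls for an asynchronous create wrapped in
// RetryUtils.retryJava and awaited with a bounded timeout. The helpers here show only the
// assumed shape of that pattern; the real retryJava in
// org.apache.spark.eventhubs.utils.RetryUtils may differ in signature and backoff policy.
import java.util.concurrent.CompletableFuture
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ Future, Promise }

object RetryPatternSketch {
  // Bridge a Java CompletableFuture into a Scala Future.
  def toScala[T](cf: CompletableFuture[T]): Future[T] = {
    val p = Promise[T]()
    cf.whenComplete(new java.util.function.BiConsumer[T, Throwable] {
      override def accept(value: T, err: Throwable): Unit =
        if (err == null) p.success(value) else p.failure(err)
    })
    p.future
  }

  // Re-invoke an asynchronous Java call a fixed number of times before giving up.
  // `task` is by-name so each retry starts a fresh CompletableFuture.
  def retryJava[T](task: => CompletableFuture[T], opName: String, retriesLeft: Int = 3): Future[T] =
    toScala(task).recoverWith {
      case e if retriesLeft > 0 => retryJava(task, opName, retriesLeft - 1)
    }

  // Usage mirroring the hunk below, with a hypothetical createClientAsync() standing in
  // for the EventHubClient factory methods:
  //   val client = Await.result(retryJava(createClientAsync(), "createClient"), timeout)
}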
@@ -61,23 +65,32 @@ private class ClientConnectionPool(val ehConf: EventHubsConf) extends Logging { .setMaximumSilentTime(ehConf.maxSilentTime.getOrElse(DefaultMaxSilentTime)) .setOperationTimeout(ehConf.receiverTimeout.getOrElse(DefaultReceiverTimeout)) .setRetryPolicy(RetryPolicy.getDefault) - client = EventHubClient - .createWithAzureActiveDirectory( - connStr.getEndpoint, - ehConf.name, - ehConf.aadAuthCallback().get, - ehConf.aadAuthCallback().get.authority, - ClientThreadPool.get(ehConf), - ehClientOption - ) - .get() + client = Await.result( + retryJava( + EventHubClient + .createWithAzureActiveDirectory(connStr.getEndpoint, + ehConf.name, + ehConf.aadAuthCallback().get, + ehConf.aadAuthCallback().get.authority, + ClientThreadPool.get(ehConf), + ehClientOption), + "createWithAzureActiveDirectory" + ), + ehConf.internalOperationTimeout + ) } else { - client = EventHubClient.createFromConnectionStringSync( - connStr.toString, - RetryPolicy.getDefault, - ClientThreadPool.get(ehConf), - null, - ehConf.maxSilentTime.getOrElse(DefaultMaxSilentTime)) + client = Await.result( + retryJava( + EventHubClient.createFromConnectionString( + connStr.toString, + RetryPolicy.getDefault, + ClientThreadPool.get(ehConf), + null, + ehConf.maxSilentTime.getOrElse(DefaultMaxSilentTime)), + "createFromConnectionString" + ), + ehConf.internalOperationTimeout + ) } } } else { From 753d225309789cf6ba92d29c652d9470d382be48 Mon Sep 17 00:00:00 2001 From: nyaghma Date: Fri, 6 Nov 2020 15:53:00 -0800 Subject: [PATCH 19/29] Throttling status plugin update (#555) --- .../spark/eventhubs/PartitionContext.scala | 33 +++++++++++++++++++ .../eventhubs/PartitionsStatusTracker.scala | 9 +++-- .../utils/SimpleThrottlingStatusPlugin.scala | 20 ++++++----- .../utils/ThrottlingStatusPlugin.scala | 9 +++-- .../spark/sql/eventhubs/EventHubsSource.scala | 9 +++-- .../utils/ThrottlingStatusPluginMock.scala | 6 +++- docs/slow-partition-adjustment-feature.md | 16 +++++---- 7 files changed, 78 insertions(+), 24 deletions(-) create mode 100644 core/src/main/scala/org/apache/spark/eventhubs/PartitionContext.scala diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionContext.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionContext.scala new file mode 100644 index 000000000..7cf586f93 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionContext.scala @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.eventhubs + +import java.net.URI + +/** + * Partition context which provides EventHub's information for partitions + * to be used in Throttling Status Plugin + * + * @param namespaceEndpoint Namespace endpoint. + * @param eventHubName EventHub name. 
+ */ +class PartitionContext(val namespaceEndpoint: URI, val eventHubName: String) { + override def toString: String = { + s"NamespaceEndpoint: $namespaceEndpoint, eventHubName: $eventHubName" + } +} diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala index dd8fe2ee1..bff298078 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala @@ -17,6 +17,7 @@ package org.apache.spark.eventhubs +import java.net.URI import java.util.logging.Logger import scala.collection.mutable @@ -203,6 +204,7 @@ class PartitionsStatusTracker extends Logging { val performancePercentages = batch.getPerformancePercentages PartitionsStatusTracker.throttlingStatusPlugin.foreach( _.onPartitionsPerformanceStatusUpdate( + partitionContext, batch.batchId, batch.paritionsStatusList.map(par => (par._1, par._2.batchSize))(breakOut), batch.paritionsStatusList @@ -242,17 +244,20 @@ object PartitionsStatusTracker { var enoughUpdatesCount: Int = 1 var throttlingStatusPlugin: Option[ThrottlingStatusPlugin] = None var defaultPartitionsPerformancePercentage: Option[Map[NameAndPartition, Double]] = None + var partitionContext: PartitionContext = null def setDefaultValuesInTracker(numOfPartitions: Int, - ehName: String, + pContext: PartitionContext, maxBatchReceiveTime: Long, throttlingSP: Option[ThrottlingStatusPlugin]) = { + partitionContext = pContext partitionsCount = numOfPartitions acceptableBatchReceiveTimeInMs = maxBatchReceiveTime enoughUpdatesCount = (partitionsCount / 2) + 1 throttlingStatusPlugin = throttlingSP defaultPartitionsPerformancePercentage = Some( - (for (pid <- 0 until partitionsCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut)) + (for (pid <- 0 until partitionsCount) + yield (NameAndPartition(pContext.eventHubName, pid), 1.0))(breakOut)) } private def partitionSeqNoKey(nAndP: NameAndPartition, seqNo: SequenceNumber): String = diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimpleThrottlingStatusPlugin.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimpleThrottlingStatusPlugin.scala index b7d751489..5be40fbdd 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimpleThrottlingStatusPlugin.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimpleThrottlingStatusPlugin.scala @@ -17,30 +17,34 @@ package org.apache.spark.eventhubs.utils -import org.apache.spark.eventhubs.NameAndPartition +import java.net.URI + +import org.apache.spark.eventhubs.{ NameAndPartition, PartitionContext } import org.apache.spark.eventhubs.rdd.OffsetRange -import scala.collection.mutable import org.apache.spark.internal.Logging +import scala.collection.mutable class SimpleThrottlingStatusPlugin extends ThrottlingStatusPlugin with Logging { override def onBatchCreation( + partitionContext: PartitionContext, nextBatchLocalId: Long, nextBatchOffsetRanges: Array[OffsetRange], partitionsThrottleFactor: mutable.Map[NameAndPartition, Double]): Unit = { - log.info( - s"New Batch with localId = $nextBatchLocalId has been created with start and end offsets:" + - s"${nextBatchOffsetRanges} and partitions throttle factors: ${partitionsThrottleFactor}") + log.info(s"New Batch with localId = ${nextBatchLocalId} has been created for " + + s"partitionContext: ${partitionContext} with start and end offsets: ${nextBatchOffsetRanges} " + + s"and partitions 
throttle factors: ${partitionsThrottleFactor}") } override def onPartitionsPerformanceStatusUpdate( + partitionContext: PartitionContext, latestUpdatedBatchLocalId: Long, partitionsBatchSizes: Map[NameAndPartition, Int], partitionsBatchReceiveTimeMS: Map[NameAndPartition, Long], partitionsPerformancePercentages: Option[Map[NameAndPartition, Double]]): Unit = { log.info( - s"Latest updated batch with localId = $latestUpdatedBatchLocalId received these information:" + - s"Batch size: ${partitionsBatchSizes}, batch receive times in ms: ${partitionsBatchReceiveTimeMS}, " + - s"performance percentages: ${partitionsPerformancePercentages}") + s"Latest updated batch with localId = ${latestUpdatedBatchLocalId} for partitionContext: ${partitionContext} " + + s"received these information: Batch size: ${partitionsBatchSizes}, batch receive times in ms: " + + s"${partitionsBatchReceiveTimeMS}, performance percentages: ${partitionsPerformancePercentages}.") } } diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala index 03c4330fa..40c65cc7b 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPlugin.scala @@ -17,18 +17,21 @@ package org.apache.spark.eventhubs.utils -import org.apache.spark.eventhubs.NameAndPartition -import org.apache.spark.eventhubs.rdd.OffsetRange +import java.net.URI +import org.apache.spark.eventhubs.{ NameAndPartition, PartitionContext } +import org.apache.spark.eventhubs.rdd.OffsetRange import scala.collection.mutable trait ThrottlingStatusPlugin extends Serializable { - def onBatchCreation(nextBatchLocalId: Long, + def onBatchCreation(partitionContext: PartitionContext, + nextBatchLocalId: Long, nextBatchOffsetRanges: Array[OffsetRange], partitionsThrottleFactor: mutable.Map[NameAndPartition, Double]): Unit def onPartitionsPerformanceStatusUpdate( + partitionContext: PartitionContext, latestUpdatedBatchLocalId: Long, partitionsBatchSizes: Map[NameAndPartition, Int], partitionsBatchReceiveTimeMS: Map[NameAndPartition, Long], diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index be03e120f..3d8205790 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -82,6 +82,8 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, private val ehConf = EventHubsConf.toConf(parameters) private val ehName = ehConf.name + private val partitionContext = + new PartitionContext(ConnectionStringBuilder(ehConf.connectionString).getEndpoint, ehName) private val sc = sqlContext.sparkContext @@ -101,9 +103,10 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, PartitionsStatusTracker.setDefaultValuesInTracker( partitionCount, - ehName, + partitionContext, ehConf.maxAcceptableBatchReceiveTime.getOrElse(DefaultMaxAcceptableBatchReceiveTime).toMillis, - throttlingStatusPlugin) + throttlingStatusPlugin + ) var partitionsThrottleFactor: mutable.Map[NameAndPartition, Double] = (for (pid <- 0 until partitionCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) @@ -405,7 +408,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, if 
(slowPartitionAdjustment) { addCurrentBatchToStatusTracker(offsetRanges) throttlingStatusPlugin.foreach( - _.onBatchCreation(localBatchId, offsetRanges, partitionsThrottleFactor)) + _.onBatchCreation(partitionContext, localBatchId, offsetRanges, partitionsThrottleFactor)) } val rdd = EventHubsSourceProvider.toInternalRow(new EventHubsRDD(sc, ehConf.trimmed, offsetRanges)) diff --git a/core/src/test/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPluginMock.scala b/core/src/test/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPluginMock.scala index f08d0d6ef..413066603 100644 --- a/core/src/test/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPluginMock.scala +++ b/core/src/test/scala/org/apache/spark/eventhubs/utils/ThrottlingStatusPluginMock.scala @@ -17,7 +17,9 @@ package org.apache.spark.eventhubs.utils -import org.apache.spark.eventhubs.NameAndPartition +import java.net.URI + +import org.apache.spark.eventhubs.{ NameAndPartition, PartitionContext } import org.apache.spark.eventhubs.rdd.OffsetRange import scala.collection.mutable @@ -26,11 +28,13 @@ class ThrottlingStatusPluginMock extends ThrottlingStatusPlugin { val id = 1 override def onBatchCreation( + partitionContext: PartitionContext, nextBatchLocalId: Long, nextBatchOffsetRanges: Array[OffsetRange], partitionsThrottleFactor: mutable.Map[NameAndPartition, Double]): Unit = {} override def onPartitionsPerformanceStatusUpdate( + partitionContext: PartitionContext, latestUpdatedBatchLocalId: Long, partitionsBatchSizes: Map[NameAndPartition, Int], partitionsBatchReceiveTimeMS: Map[NameAndPartition, Long], diff --git a/docs/slow-partition-adjustment-feature.md b/docs/slow-partition-adjustment-feature.md index 36fad4887..56f305252 100644 --- a/docs/slow-partition-adjustment-feature.md +++ b/docs/slow-partition-adjustment-feature.md @@ -106,28 +106,30 @@ The throttling factor (aka performance percentage metric) is also provided so th ```scala class SimpleThrottlingStatusPlugin extends ThrottlingStatusPlugin with Logging { + override def onBatchCreation( + partitionContext: PartitionContext, nextBatchLocalId: Long, nextBatchOffsetRanges: Array[OffsetRange], partitionsThrottleFactor: mutable.Map[NameAndPartition, Double]): Unit = { - log.info( - s"New Batch with localId = $nextBatchLocalId has been created with start and end offsets:" + - s"${nextBatchOffsetRanges} and partitions performances: ${partitionsThrottleFactor}") + log.info(s"New Batch with localId = ${nextBatchLocalId} has been created for " + + s"partitionContext: ${partitionContext} with start and end offsets: ${nextBatchOffsetRanges} " + + s"and partitions throttle factors: ${partitionsThrottleFactor}") } override def onPartitionsPerformanceStatusUpdate( + partitionContext: PartitionContext, latestUpdatedBatchLocalId: Long, partitionsBatchSizes: Map[NameAndPartition, Int], partitionsBatchReceiveTimeMS: Map[NameAndPartition, Long], partitionsPerformancePercentages: Option[Map[NameAndPartition, Double]]): Unit = { log.info( - s"Latest updated batch with localId = $latestUpdatedBatchLocalId received these information:" + - s"Batch size: ${partitionsBatchSizes}, batch receive times in ms: ${partitionsBatchReceiveTimeMS}, " + - s"performance percentages: ${partitionsPerformancePercentages}") + s"Latest updated batch with localId = ${latestUpdatedBatchLocalId} for partitionContext: ${partitionContext} " + + s"received these information: Batch size: ${partitionsBatchSizes}, batch receive times in ms: " + + s"${partitionsBatchReceiveTimeMS}, performance 
percentages: ${partitionsPerformancePercentages}.") } } - val ehConf = EventHubsConf(connectionString) .setSlowPartitionAdjustment(true) .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin) From 129fb2bf7c785bc8cb0e83319d85926621e4b0aa Mon Sep 17 00:00:00 2001 From: nyaghma Date: Mon, 9 Nov 2020 19:17:04 -0800 Subject: [PATCH 20/29] Serializable contexts (#558) --- .../scala/org/apache/spark/eventhubs/PartitionContext.scala | 3 ++- .../spark/eventhubs/PartitionPerformanceReceiver.scala | 4 +++- .../scala/org/apache/spark/eventhubs/TaskContextSlim.scala | 5 ++++- .../spark/eventhubs/client/CachedEventHubsReceiver.scala | 5 ++--- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionContext.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionContext.scala index 7cf586f93..531e71a9b 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/PartitionContext.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionContext.scala @@ -18,6 +18,7 @@ package org.apache.spark.eventhubs import java.net.URI +import org.json4s.jackson.Serialization /** * Partition context which provides EventHub's information for partitions @@ -26,7 +27,7 @@ import java.net.URI * @param namespaceEndpoint Namespace endpoint. * @param eventHubName EventHub name. */ -class PartitionContext(val namespaceEndpoint: URI, val eventHubName: String) { +class PartitionContext(val namespaceEndpoint: URI, val eventHubName: String) extends Serializable { override def toString: String = { s"NamespaceEndpoint: $namespaceEndpoint, eventHubName: $eventHubName" } diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala index ff03fe5f3..0531fb0de 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala @@ -22,6 +22,7 @@ import java.time.Duration import org.apache.spark.internal.Logging import org.apache.spark.rpc.{ RpcEndpoint, RpcEnv } import org.apache.spark.SparkContext +import org.json4s.jackson.Serialization private[spark] class PartitionPerformanceReceiver(override val rpcEnv: RpcEnv, val statusTracker: PartitionsStatusTracker) @@ -56,7 +57,8 @@ case class PartitionPerformanceMetric(val nAndP: NameAndPartition, //val taskId: Long, val requestSeqNo: SequenceNumber, val batchSize: Int, - val receiveTimeInMillis: Long) { + val receiveTimeInMillis: Long) + extends Serializable { override def toString: String = { s"PartitionPerformanceMetric received from task: $taskContextSlim for partition: $nAndP, requestSeqNo: $requestSeqNo. " + diff --git a/core/src/main/scala/org/apache/spark/eventhubs/TaskContextSlim.scala b/core/src/main/scala/org/apache/spark/eventhubs/TaskContextSlim.scala index d5f42ead0..c87d7306c 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/TaskContextSlim.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/TaskContextSlim.scala @@ -17,6 +17,8 @@ package org.apache.spark.eventhubs +import org.json4s.jackson.Serialization + /** * Task context which provides Spark's task information * @@ -24,7 +26,8 @@ package org.apache.spark.eventhubs * @param taskId Spark's current task Id. * @param sparkPartitionId Spark's current partition Id. 
 */
-class TaskContextSlim(val stageId: Int, val taskId: Long, val sparkPartitionId: Int) {
+class TaskContextSlim(val stageId: Int, val taskId: Long, val sparkPartitionId: Int)
+    extends Serializable {
   override def toString: String = {
     s"Spark stage $stageId (TID $taskId) spark partitionId $sparkPartitionId"
   }
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
index cf784387a..000e0e48c 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
@@ -309,9 +309,8 @@ private[client] class CachedEventHubsReceiver private (ehConf: EventHubsConf,
       CachedEventHubsReceiver.partitionPerformanceReceiverRef.send(partitionPerformance)
     } catch {
       case e: Exception =>
-        logError(
-          s"(Task: ${EventHubsUtils.getTaskContextSlim}) failed to send the RPC message containing " +
-            s"PartitionPerformanceMetric: $PartitionPerformanceMetric to the driver.")
+        logError(s"(Task: ${EventHubsUtils.getTaskContextSlim}) failed to send the RPC message containing " +
+          s"PartitionPerformanceMetric: ${PartitionPerformanceMetric} to the driver with error: ${e}.")
     }
   }
 }

From b841785338d596ff9d7382786b3876d297331db0 Mon Sep 17 00:00:00 2001
From: nyaghma
Date: Tue, 10 Nov 2020 10:36:53 -0800
Subject: [PATCH 21/29] Fix logging issue (#559)

---
 .../spark/eventhubs/client/CachedEventHubsReceiver.scala | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
index 000e0e48c..c164461de 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/client/CachedEventHubsReceiver.scala
@@ -304,13 +304,14 @@ private[client] class CachedEventHubsReceiver private (ehConf: EventHubsConf,
   private def sendPartitionPerformanceToDriver(partitionPerformance: PartitionPerformanceMetric) = {
     logDebug(
       s"(Task: ${EventHubsUtils.getTaskContextSlim}) sends PartitionPerformanceMetric: " +
-        s"$PartitionPerformanceMetric to the driver.")
+        s"${partitionPerformance} to the driver.")
     try {
       CachedEventHubsReceiver.partitionPerformanceReceiverRef.send(partitionPerformance)
     } catch {
       case e: Exception =>
-        logError(s"(Task: ${EventHubsUtils.getTaskContextSlim}) failed to send the RPC message containing " +
-          s"PartitionPerformanceMetric: ${PartitionPerformanceMetric} to the driver with error: ${e}.")
+        logError(
+          s"(Task: ${EventHubsUtils.getTaskContextSlim}) failed to send the RPC message containing " +
+            s"PartitionPerformanceMetric: ${partitionPerformance} to the driver with error: ${e}.")
     }
   }
 }

From 3327b4c4223a2b9fb0a1c40ee3f52c01c3443ec7 Mon Sep 17 00:00:00 2001
From: SJ
Date: Thu, 12 Nov 2020 07:56:04 -0800
Subject: [PATCH 22/29] Update version number for new release (2.3.18) and
 client SDK dependency (#560)

---
 core/pom.xml                                                | 2 +-
 .../scala/org/apache/spark/eventhubs/EventHubsConf.scala    | 6 +++---
 .../src/main/scala/org/apache/spark/eventhubs/package.scala | 2 +-
 pom.xml                                                     | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/core/pom.xml b/core/pom.xml
index 382dd1f0f..8d17f6da4 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <groupId>com.microsoft.azure</groupId>
    <artifactId>azure-eventhubs-spark-parent_${scala.binary.version}</artifactId>
-    <version>2.3.17</version>
+    <version>2.3.18</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
   <artifactId>azure-eventhubs-spark_${scala.binary.version}</artifactId>
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala
index 63cde7751..91b56ad1c 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsConf.scala
@@ -210,7 +210,7 @@ final class EventHubsConf private (private val connectionStr: String)
    * Sets the default starting position for all partitions.
    *
    * If you would like to start from a different position for a specific partition,
-   * please see [[setStartingPositions()]]. If a position is set for particiular partition,
+   * please see [[setStartingPositions()]]. If a position is set for a particular partition,
    * we will use that position instead of the one set by this method.
    *
    * If no starting position is set, then [[DefaultEventPosition]] is used
@@ -239,7 +239,7 @@ final class EventHubsConf private (private val connectionStr: String)
    * in [[setStartingPosition()]]. If nothing is set in [[setStartingPosition()]], then
    * we will start consuming from the start of the EventHub partition.
    *
-   * @param eventPositions a map of parition ids (ints) to [[EventPosition]]s
+   * @param eventPositions a map of partition ids (ints) to [[EventPosition]]s
    * @return the updated [[EventHubsConf]] instance
    * @see [[EventPosition]]
    */
@@ -486,7 +486,7 @@ final class EventHubsConf private (private val connectionStr: String)
   }
 
   /**
-   * Set the flag for slow parition adjustment. The default value is false.
+   * Set the flag for slow partition adjustment. The default value is false.
    * Default: [[DefaultSlowPartitionAdjustment]]
    *
    * @param b the flag which specifies whether the connector uses slow partition adjustment logic
diff --git a/core/src/main/scala/org/apache/spark/eventhubs/package.scala b/core/src/main/scala/org/apache/spark/eventhubs/package.scala
index 521feb882..e08164131 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/package.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/package.scala
@@ -62,7 +62,7 @@ package object eventhubs {
   val EnqueuedTimeAnnotation = "x-opt-enqueued-time"
   val SequenceNumberAnnotation = "x-opt-sequence-number"
 
-  val SparkConnectorVersion = "2.3.17"
+  val SparkConnectorVersion = "2.3.18"
 
   val DefaultUseAadAuth = "false"
 
diff --git a/pom.xml b/pom.xml
index 14d73b02f..1b8aebc32 100644
--- a/pom.xml
+++ b/pom.xml
@@ -25,7 +25,7 @@
 
   <groupId>com.microsoft.azure</groupId>
   <artifactId>azure-eventhubs-spark-parent_${scala.binary.version}</artifactId>
-  <version>2.3.17</version>
+  <version>2.3.18</version>
   <packaging>pom</packaging>
 
   <name>EventHubs+Spark Parent POM</name>
@@ -137,7 +137,7 @@
     <dependency>
       <groupId>com.microsoft.azure</groupId>
      <artifactId>azure-eventhubs</artifactId>
-      <version>3.2.0</version>
+      <version>3.2.2</version>
     </dependency>
     <dependency>
       <groupId>org.apache.spark</groupId>

From 598b51eeb62ac06c9863053413bf438745165765 Mon Sep 17 00:00:00 2001
From: nyaghma
Date: Thu, 12 Nov 2020 11:26:45 -0800
Subject: [PATCH 23/29] Rpc endpoint for direct stream (#561)

---
 .../spark/eventhubs/EventHubsUtils.scala      | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala
index 11d0c4eda..826ddca86 100644
--- a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala
+++ b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala
@@ -41,12 +41,27 @@ import org.apache.spark.streaming.StreamingContext
 import org.apache.spark.streaming.api.java.{ JavaInputDStream, JavaStreamingContext }
 import
org.apache.spark.streaming.eventhubs.EventHubsDirectDStream import org.apache.spark.{ SparkContext, TaskContext } +import org.apache.spark.rpc.RpcEndpointRef +import org.apache.spark.SparkEnv /** * Helper to create Direct DStreams which consume events from Event Hubs. */ object EventHubsUtils extends Logging { + var partitionPerformanceReceiverRef: RpcEndpointRef = null + + private def createRpcEndpoint() = { + if (partitionPerformanceReceiverRef == null) { + // RPC endpoint for partition performance communication in the driver + val partitionsStatusTracker = PartitionsStatusTracker.getPartitionStatusTracker + val partitionPerformanceReceiver: PartitionPerformanceReceiver = + new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv, partitionsStatusTracker) + partitionPerformanceReceiverRef = SparkEnv.get.rpcEnv + .setupEndpoint(PartitionPerformanceReceiver.ENDPOINT_NAME, partitionPerformanceReceiver) + } + } + /** * Creates a Direct DStream which consumes from the Event Hubs instance * specified in the [[EventHubsConf]]. @@ -56,6 +71,7 @@ object EventHubsUtils extends Logging { * @return An [[EventHubsDirectDStream]] */ def createDirectStream(ssc: StreamingContext, ehConf: EventHubsConf): EventHubsDirectDStream = { + createRpcEndpoint() new EventHubsDirectDStream(ssc, ehConf, EventHubsClient.apply) } @@ -69,6 +85,7 @@ object EventHubsUtils extends Logging { */ def createDirectStream(jssc: JavaStreamingContext, ehConf: EventHubsConf): JavaInputDStream[EventData] = { + createRpcEndpoint() new JavaInputDStream(createDirectStream(jssc.ssc, ehConf)) } @@ -85,6 +102,7 @@ object EventHubsUtils extends Logging { def createRDD(sc: SparkContext, ehConf: EventHubsConf, offsetRanges: Array[OffsetRange]): EventHubsRDD = { + createRpcEndpoint() new EventHubsRDD(sc, ehConf.trimmed, offsetRanges) } @@ -101,6 +119,7 @@ object EventHubsUtils extends Logging { def createRDD(jsc: JavaSparkContext, ehConf: EventHubsConf, offsetRanges: Array[OffsetRange]): JavaRDD[EventData] = { + createRpcEndpoint() new JavaRDD(createRDD(jsc.sc, ehConf, offsetRanges)) } From 6509ed467b5a4680a72b8678442cf53277454f36 Mon Sep 17 00:00:00 2001 From: nyaghma Date: Thu, 12 Nov 2020 16:53:45 -0800 Subject: [PATCH 24/29] Rpc endpoint recreation (#564) --- .../spark/eventhubs/EventHubsUtils.scala | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala index 826ddca86..d344fb05d 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala @@ -40,9 +40,9 @@ import org.apache.spark.internal.Logging import org.apache.spark.streaming.StreamingContext import org.apache.spark.streaming.api.java.{ JavaInputDStream, JavaStreamingContext } import org.apache.spark.streaming.eventhubs.EventHubsDirectDStream -import org.apache.spark.{ SparkContext, TaskContext } +import org.apache.spark.{ SparkContext, SparkEnv, TaskContext } import org.apache.spark.rpc.RpcEndpointRef -import org.apache.spark.SparkEnv +import org.apache.spark.util.RpcUtils /** * Helper to create Direct DStreams which consume events from Event Hubs. 
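// Editor's note (illustrative sketch, not part of the patch): the hunk below makes
// createRpcEndpoint tolerant of an endpoint that is already registered, by looking the
// endpoint up first and only registering a new one when the lookup fails. Condensed,
// the pattern is a "get or create" over the driver's RpcEnv; the names mirror the patch,
// but treat the exact signatures as assumptions.
import org.apache.spark.SparkEnv
import org.apache.spark.rpc.{ RpcEndpoint, RpcEndpointRef }
import org.apache.spark.util.RpcUtils

object EndpointSketch {
  // Resolve the named driver endpoint if an earlier stream registered it; otherwise
  // register a fresh one. Registering the same name twice throws, so the lookup comes first.
  def getOrCreate(name: String)(mkEndpoint: => RpcEndpoint): RpcEndpointRef =
    try {
      RpcUtils.makeDriverRef(name, SparkEnv.get.conf, SparkEnv.get.rpcEnv)
    } catch {
      case _: Exception => SparkEnv.get.rpcEnv.setupEndpoint(name, mkEndpoint)
    }
}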
@@ -53,12 +53,21 @@ object EventHubsUtils extends Logging {
 
   private def createRpcEndpoint() = {
     if (partitionPerformanceReceiverRef == null) {
-      // RPC endpoint for partition performance communication in the driver
-      val partitionsStatusTracker = PartitionsStatusTracker.getPartitionStatusTracker
-      val partitionPerformanceReceiver: PartitionPerformanceReceiver =
-        new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv, partitionsStatusTracker)
-      partitionPerformanceReceiverRef = SparkEnv.get.rpcEnv
-        .setupEndpoint(PartitionPerformanceReceiver.ENDPOINT_NAME, partitionPerformanceReceiver)
+      try {
+        partitionPerformanceReceiverRef = RpcUtils.makeDriverRef(
+          PartitionPerformanceReceiver.ENDPOINT_NAME,
+          SparkEnv.get.conf,
+          SparkEnv.get.rpcEnv)
+        logInfo(
+          s"There is an existing partitionPerformanceReceiverRef on the driver; reusing it rather than creating a new one.")
+      } catch {
+        case e: Exception =>
+          val partitionsStatusTracker = PartitionsStatusTracker.getPartitionStatusTracker
+          val partitionPerformanceReceiver: PartitionPerformanceReceiver =
+            new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv, partitionsStatusTracker)
+          partitionPerformanceReceiverRef = SparkEnv.get.rpcEnv
+            .setupEndpoint(PartitionPerformanceReceiver.ENDPOINT_NAME, partitionPerformanceReceiver)
+      }
     }
   }
 

From ddd9e32698ada47a569a9fdffc025e0c81e050d4 Mon Sep 17 00:00:00 2001
From: SJ
Date: Mon, 16 Nov 2020 22:22:14 -0800
Subject: [PATCH 25/29] Update documentation for the current release (2.3.18)
 (#565)

---
 .github/CONTRIBUTING.md                       |  2 +-
 README.md                                     | 26 +++++++++----------
 docs/PySpark/structured-streaming-pyspark.md  |  8 +++---
 docs/spark-streaming-eventhubs-integration.md |  4 +--
 ...uctured-streaming-eventhubs-integration.md |  8 +++---
 5 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 83a5c070e..04fa8b07d 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -18,7 +18,7 @@ run all unit/integration tests and build a JAR.
 
 ### SBT Dependency
### SBT Dependency - libraryDependencies += "com.microsoft.azure" %% "azure-eventhubs-spark" %% "2.3.17" + libraryDependencies += "com.microsoft.azure" %% "azure-eventhubs-spark" % "2.3.18" ## Filing Issues diff --git a/README.md b/README.md index e81fb5ae5..0b18f524d 100644 --- a/README.md +++ b/README.md @@ -30,23 +30,23 @@ By making Event Hubs and Spark easier to use together, we hope this connector ma #### Spark |Spark Version|Package Name|Package Version| |-------------|------------|----------------| -|Spark 3.0|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| -|Spark 2.4|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| -|Spark 2.4|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| -|Spark 2.3|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| +|Spark 3.0|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.18%7Cjar)| +|Spark 2.4|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.18%7Cjar)| +|Spark 2.4|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.18%7Cjar)| +|Spark 2.3|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.18%7Cjar)| |Spark 2.2|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.2.10-blue.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.2.10%7Cjar)| |Spark 2.1|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.2.10-blue.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.2.10%7Cjar)| #### Databricks |Databricks Runtime Version|Artifact Id|Package Version| |-------------|------------|----------------| -|Databricks Runtime 7.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| -|Databricks Runtime 6.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| -|Databricks Runtime 6.X|azure-eventhubs-spark_2.12|[![Maven
Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| -|Databricks Runtime 5.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| -|Databricks Runtime 5.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.17%7Cjar)| -|Databricks Runtime 4.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| -|Databricks Runtime 3.5|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.17-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.17%7Cjar)| +|Databricks Runtime 7.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.18%7Cjar)| +|Databricks Runtime 6.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.18%7Cjar)| +|Databricks Runtime 6.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.18%7Cjar)| +|Databricks Runtime 5.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.18%7Cjar)| +|Databricks Runtime 5.X|azure-eventhubs-spark_2.12|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.12%7C2.3.18%7Cjar)| +|Databricks Runtime 4.X|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.18%7Cjar)| +|Databricks Runtime 3.5|azure-eventhubs-spark_2.11|[![Maven Central](https://img.shields.io/badge/maven%20central-2.3.18-brightgreen.svg)](https://search.maven.org/#artifactdetails%7Ccom.microsoft.azure%7Cazure-eventhubs-spark_2.11%7C2.3.18%7Cjar)| #### Roadmap @@ -61,13 +61,13 @@ For Scala/Java applications using SBT/Maven project definitions, link your appli groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.11 - version = 2.3.17 + version = 2.3.18 or groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.12 - version = 2.3.17 + version = 2.3.18 ### Documentation diff --git a/docs/PySpark/structured-streaming-pyspark.md b/docs/PySpark/structured-streaming-pyspark.md index 3d98e210e..c23854d50 100644 --- a/docs/PySpark/structured-streaming-pyspark.md +++ b/docs/PySpark/structured-streaming-pyspark.md @@ -23,13 +23,13 @@ 
Structured streaming integration for Azure Event Hubs is ultimately run on the J ``` groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.11 - version = 2.3.17 + version = 2.3.18 or groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.12 - version = 2.3.17 + version = 2.3.18 ``` For Python applications, you need to add the above library and its dependencies when deploying your application. @@ -395,11 +395,11 @@ AMQP types need to be handled explicitly by the connector. Below we list the AMQ As with any Spark application, `spark-submit` is used to launch your application. `azure-eventhubs-spark_2.11` and its dependencies can be directly added to `spark-submit` using `--packages`, such as, - ./bin/spark-submit --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.17 ... + ./bin/spark-submit --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.18 ... For experimenting on `spark-shell`, you can also use `--packages` to add `azure-eventhubs-spark_2.11` and its dependencies directly, - ./bin/spark-shell --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.17 ... + ./bin/spark-shell --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.18 ... See [Application Submission Guide](https://spark.apache.org/docs/latest/submitting-applications.html) for more details about submitting applications with external dependencies. diff --git a/docs/spark-streaming-eventhubs-integration.md b/docs/spark-streaming-eventhubs-integration.md index 3988459c3..d1b5302b7 100644 --- a/docs/spark-streaming-eventhubs-integration.md +++ b/docs/spark-streaming-eventhubs-integration.md @@ -23,13 +23,13 @@ For Scala/Java applications using SBT/Maven project definitions, link your appli ``` groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.11 - version = 2.3.17 + version = 2.3.18 or groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.12 - version = 2.3.17 + version = 2.3.18 ``` For Python applications, you need to add the above library and its dependencies when deploying your application. diff --git a/docs/structured-streaming-eventhubs-integration.md b/docs/structured-streaming-eventhubs-integration.md index 8b8d56c9f..08905983b 100644 --- a/docs/structured-streaming-eventhubs-integration.md +++ b/docs/structured-streaming-eventhubs-integration.md @@ -23,13 +23,13 @@ For Scala/Java applications using SBT/Maven project definitions, link your appli ``` groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.11 - version = 2.3.17 + version = 2.3.18 or groupId = com.microsoft.azure artifactId = azure-eventhubs-spark_2.12 - version = 2.3.17 + version = 2.3.18 ``` For Python applications, you need to add the above library and its dependencies when deploying your application. @@ -397,11 +397,11 @@ AMQP types need to be handled explicitly by the connector. Below we list the AMQ As with any Spark application, `spark-submit` is used to launch your application. `azure-eventhubs-spark_2.11` and its dependencies can be directly added to `spark-submit` using `--packages`, such as, - ./bin/spark-submit --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.17 ... + ./bin/spark-submit --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.18 ... For experimenting on `spark-shell`, you can also use `--packages` to add `azure-eventhubs-spark_2.11` and its dependencies directly, - ./bin/spark-shell --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.17 ...
+ ./bin/spark-shell --packages com.microsoft.azure:azure-eventhubs-spark_2.11:2.3.18 ... See [Application Submission Guide](https://spark.apache.org/docs/latest/submitting-applications.html) for more details about submitting applications with external dependencies. From 7d307243361cb89ad31b112e5ed1566802b373e8 Mon Sep 17 00:00:00 2001 From: Arthur Erlendsson <30675661+arerlend@users.noreply.github.com> Date: Fri, 4 Dec 2020 22:40:09 -0800 Subject: [PATCH 26/29] update scala-maven-plugin --- .../eventhubs/client/EventHubsClient.scala | 2 +- .../eventhubs/utils/SimulatedClient.scala | 2 +- pom.xml | 38 +++++++++++++++++-- 3 files changed, 37 insertions(+), 5 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala b/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala index ffc0bd4b2..457b7ad2f 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/client/EventHubsClient.scala @@ -128,7 +128,7 @@ private[spark] class EventHubsClient(private val ehConf: EventHubsConf) if (r.getIsEmpty) r.getLastEnqueuedSequenceNumber + 1 else r.getBeginSequenceNumber } val latest = r.getLastEnqueuedSequenceNumber + 1 - i -> (earliest, latest) + i -> ((earliest, latest): (Long, Long)) } Await .result(Future.sequence(futures), ehConf.internalOperationTimeout) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedClient.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedClient.scala index 35e04464a..f12d0b04b 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedClient.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedClient.scala @@ -74,7 +74,7 @@ private[spark] class SimulatedClient(private val ehConf: EventHubsConf) extends */ override def allBoundedSeqNos: Map[PartitionId, (SequenceNumber, SequenceNumber)] = (0 until partitionCount) - .map(i => i -> (eventHub.earliestSeqNo(i), eventHub.latestSeqNo(i))) + .map(i => i -> ((eventHub.earliestSeqNo(i), eventHub.latestSeqNo(i)): (SequenceNumber, SequenceNumber))) .toMap /** diff --git a/pom.xml b/pom.xml index 1b8aebc32..960343d01 100644 --- a/pom.xml +++ b/pom.xml @@ -52,6 +52,8 @@ 2.11 2.3.3 github + 1.8 + 1g @@ -182,17 +184,47 @@ target/scala-${scala.binary.version}/test-classes - org.scala-tools - maven-scala-plugin - 2.15.2 + net.alchim31.maven + scala-maven-plugin + + 3.2.2 + scala-compile-first compile + + + + scala-test-compile-first + testCompile + + ${scala.binary.version} + incremental + + -unchecked + -deprecation + -feature + -explaintypes + -Yno-adapted-args + + + -Xms1024m + -Xmx1024m + -XX:ReservedCodeCacheSize=${CodeCacheSize} + + + -source + ${java.version} + -target + ${java.version} + -Xlint:all,-serial,-path,-try + + org.scalatest From f7369962c21a5ac954e0219e1ee87e61be867aec Mon Sep 17 00:00:00 2001 From: Arthur Erlendsson <30675661+arerlend@users.noreply.github.com> Date: Fri, 4 Dec 2020 22:57:37 -0800 Subject: [PATCH 27/29] fix test lint --- .../eventhubs/utils/EventHubsTestUtilsSuite.scala | 12 ++++++------ .../spark/sql/eventhubs/EventHubsSourceSuite.scala | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/core/src/test/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtilsSuite.scala b/core/src/test/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtilsSuite.scala index ea260c07d..0371d02c5 100644 --- 
a/core/src/test/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtilsSuite.scala +++ b/core/src/test/scala/org/apache/spark/eventhubs/utils/EventHubsTestUtilsSuite.scala @@ -20,9 +20,9 @@ package org.apache.spark.eventhubs.utils import java.util.concurrent.atomic.AtomicInteger import com.microsoft.azure.eventhubs.EventData -import org.apache.spark.eventhubs.{ EventHubsConf, NameAndPartition } +import org.apache.spark.eventhubs.{EventHubsConf, NameAndPartition, SequenceNumber} import org.apache.spark.internal.Logging -import org.scalatest.{ BeforeAndAfter, BeforeAndAfterAll, FunSuite } +import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuite} import collection.JavaConverters._ @@ -143,10 +143,10 @@ class EventHubsTestUtilsSuite val conf = testUtils.getEventHubsConf(eventHub.name) val client = SimulatedClient(conf) val results = client.allBoundedSeqNos.toMap - assert(results(0) == (0, 1)) - assert(results(1) == (0, 2)) - assert(results(2) == (0, 3)) - assert(results(3) == (0, 1)) + assert(results(0) == ((0, 1): (SequenceNumber, SequenceNumber))) + assert(results(1) == ((0, 2): (SequenceNumber, SequenceNumber))) + assert(results(2) == ((0, 3): (SequenceNumber, SequenceNumber))) + assert(results(3) == ((0, 1): (SequenceNumber, SequenceNumber))) } test("partitionSize") { diff --git a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala index a8a399b0e..a3ac32bb7 100644 --- a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala +++ b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala @@ -433,7 +433,7 @@ class EventHubsSourceSuite extends EventHubsSourceTest { private def testFromLatestSeqNos(eh: String): Unit = { val eventHub = testUtils.createEventHubs(eh, DefaultPartitionCount) - testUtils.send(eh, partition = Some(0), Seq(-1)) + testUtils.send(eh, partition = Some(0), data = Seq(-1)) require(testUtils.getEventHubs(eh).getPartitions.size === 4) @@ -530,15 +530,15 @@ class EventHubsSourceSuite extends EventHubsSourceTest { .setStartingPositions(positions) // partition 0 starts at the earliest sequence numbers, these should all be seen - testUtils.send(eh, partition = Some(0), Seq(-20, -21, -22)) + testUtils.send(eh, partition = Some(0), data = Seq(-20, -21, -22)) // partition 1 starts at the latest sequence numbers, these should all be skipped - testUtils.send(eh, partition = Some(1), Seq(-10, -11, -12)) + testUtils.send(eh, partition = Some(1), data = Seq(-10, -11, -12)) // partition 2 starts at 0, these should all be seen - testUtils.send(eh, partition = Some(2), Seq(0, 1, 2)) + testUtils.send(eh, partition = Some(2), data = Seq(0, 1, 2)) // partition 3 starts at 1, first should be skipped - testUtils.send(eh, partition = Some(3), Seq(10, 11, 12)) + testUtils.send(eh, partition = Some(3), data = Seq(10, 11, 12)) // partition 4 starts at 2, first and second should be skipped - testUtils.send(eh, partition = Some(4), Seq(20, 21, 22)) + testUtils.send(eh, partition = Some(4), data = Seq(20, 21, 22)) val reader = spark.readStream .format("eventhubs") From 3d8e11a97eec503d47f9689f82dba9c223298068 Mon Sep 17 00:00:00 2001 From: Navid Yaghmazadeh Date: Tue, 8 Dec 2020 14:53:06 -0800 Subject: [PATCH 28/29] fix slow partition adj when a new partition added --- .../eventhubs/PartitionsStatusTracker.scala | 72 ++++++++++--------- .../spark/sql/eventhubs/EventHubsSource.scala | 24 ++++--- 2 files changed, 56 insertions(+), 40 
deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala index bff298078..8d4b2134f 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala @@ -30,32 +30,32 @@ class PartitionsStatusTracker extends Logging { import PartitionsStatusTracker._ - // retrives the batchStatus object based on the local batchId + // retrieves the batchStatus object based on the local batchId private val batchesStatusList = mutable.Map[Long, BatchStatus]() /** - * retirives the local batchId for a pair of (NameAndPartition, SequenceNumber) - * it's useful to accss the right batch when a performance metric message is - * received for a parition-RequestSeqNo pair. + * retrieves the local batchId for a pair of (NameAndPartition, SequenceNumber) + * it's useful to access the right batch when a performance metric message is + * received for a partition-RequestSeqNo pair. * it's getting updated every time a batch is removed or added to the tracker */ private val partitionSeqNoPairToBatchIdMap = mutable.Map[String, Long]() /** * Add a batch to the tracker by creating a BatchStatus object and adding it to the map. - * Also, we add mappings from each partition-startSeqNo pair to the batchId in order to be able to retrive + * Also, we add mappings from each partition-startSeqNo pair to the batchId in order to be able to retrieve * the batchStatus object from the map when the performance metric message is received. - * Note that we ignore partitions with batchSize zero (startSeqNo == latestSeqNo) since we won't recieve + * Note that we ignore partitions with batchSize zero (startSeqNo == latestSeqNo) since we won't receive * any performance metric message for such partitions. */ - def addorUpdateBatch(batchId: Long, offsetRanges: Array[OffsetRange]): Unit = { + def addOrUpdateBatch(batchId: Long, offsetRanges: Array[OffsetRange]): Unit = { if (batchesStatusList.contains(batchId)) { // Batches are not supposed to be updated. Log an error if a batch is being updated logError( s"Batch with local batch id: $batchId already exists in the partition status tracker. Batches" + s"are not supposed to be updated in the partition status tracker.") } else { - // remove the oldest batch from the batchesStatusList to realse space for adding the new batch. + // remove the oldest batch from the batchesStatusList to release space for adding the new batch. val batchIdToRemove = batchId - PartitionsStatusTracker.TrackingBatchCount logDebug( s"Remove the batch ${if (batchIdToRemove >= 0) batchIdToRemove else None} from the tracker.") @@ -64,7 +64,7 @@ class PartitionsStatusTracker extends Logging { } } - // find partitions with a zero size batch.. No performance metric msg will be received for those partitions + // find partitions with a zero size batch. No performance metric msg will be received for those partitions. 
val isZeroSizeBatchPartition: Map[NameAndPartition, Boolean] = offsetRanges.map(range => (range.nameAndPartition, (range.fromSeqNo == range.untilSeqNo)))( breakOut) @@ -99,7 +99,7 @@ class PartitionsStatusTracker extends Logging { } // remove the mapping from partition-seqNo pair to the batchId (ignore partitions with empty batch size) val batchStatus = batchesStatusList(batchId) - batchStatus.paritionsStatusList + batchStatus.partitionsStatusList .filter(p => !p._2.emptyBatch) .values .foreach(ps => { @@ -128,7 +128,7 @@ class PartitionsStatusTracker extends Logging { } /** - * return the batch id for a given parition-RequestSeqNo pair. + * return the batch id for a given partition-RequestSeqNo pair. * if the batch doesn't exist in the tracker, return BATCH_NOT_FOUND */ private def getBatchIdForPartitionSeqNoPair(nAndP: NameAndPartition, @@ -138,7 +138,7 @@ class PartitionsStatusTracker extends Logging { } /** - * update the partition perforamcne in the underlying batch based on the information received + * update the partition performance in the underlying batch based on the information received * from the executor node. This is a best effort logic, so if the batch doesn't exist in the * tracker simply assumes this is an old performance metric and ignores it. * @@ -159,7 +159,7 @@ class PartitionsStatusTracker extends Logging { s"in the partition status tracker. Assume the message is for an old batch, so ignore it.") return } - // find the batch in batchesStatusList and update the partition performacne in the batch + // find the batch in batchesStatusList and update the partition performance in the batch // if it doesn't exist there should be an error adding/removing batches in the tracker if (!batchesStatusList.contains(batchId)) { throw new IllegalStateException( @@ -171,9 +171,9 @@ class PartitionsStatusTracker extends Logging { } /** - * Checks the latest batch with enough updates and retruns the perforamnce percentage for each partition as a - * value between [0-1] where 0 means the partition is not responding and 1 means it's working wihtout any - * perforamnce issue. This information can be used to adjust the batch size for each partition in the next batch. + * Checks the latest batch with enough updates and returns the performance percentage for each partition as a + * value between [0-1] where 0 means the partition is not responding and 1 means it's working without any + * performance issue. This information can be used to adjust the batch size for each partition in the next batch. */ def partitionsPerformancePercentage(): Option[Map[NameAndPartition, Double]] = { // if there is no batch in the tracker, return None @@ -200,14 +200,14 @@ class PartitionsStatusTracker extends Logging { } case Some(batch) => { logDebug( - s"Batch ${batch.batchId} is the latest batch with enough updates. Caculate and return its perforamnce.") + s"Batch ${batch.batchId} is the latest batch with enough updates. 
Calculate and return its performance.") val performancePercentages = batch.getPerformancePercentages PartitionsStatusTracker.throttlingStatusPlugin.foreach( _.onPartitionsPerformanceStatusUpdate( partitionContext, batch.batchId, - batch.paritionsStatusList.map(par => (par._1, par._2.batchSize))(breakOut), - batch.paritionsStatusList + batch.partitionsStatusList.map(par => (par._1, par._2.batchSize))(breakOut), + batch.partitionsStatusList .map(par => (par._1, par._2.batchReceiveTimeInMillis))(breakOut), performancePercentages ) @@ -222,7 +222,7 @@ class PartitionsStatusTracker extends Logging { * Clean up the tracker. This will be called when the source has been stopped */ def cleanUp() = { - batchesStatusList.map(b => b._2.paritionsStatusList.clear) + batchesStatusList.map(b => b._2.partitionsStatusList.clear) batchesStatusList.clear partitionSeqNoPairToBatchIdMap.clear } @@ -260,6 +260,14 @@ object PartitionsStatusTracker { yield (NameAndPartition(pContext.eventHubName, pid), 1.0))(breakOut)) } + def updateDefaultValuesInTracker(numOfPartitions: Int) = { + partitionsCount = numOfPartitions; + enoughUpdatesCount = (partitionsCount / 2) + 1 + defaultPartitionsPerformancePercentage = Some( + (for (pid <- 0 until partitionsCount) + yield (NameAndPartition(partitionContext.eventHubName, pid), 1.0))(breakOut)) + } + private def partitionSeqNoKey(nAndP: NameAndPartition, seqNo: SequenceNumber): String = s"(name=${nAndP.ehName},pid=${nAndP.partitionId},startSeqNo=$seqNo)".toLowerCase @@ -268,7 +276,7 @@ object PartitionsStatusTracker { private[eventhubs] class BatchStatus( val batchId: Long, - val paritionsStatusList: mutable.Map[NameAndPartition, PartitionStatus]) + val partitionsStatusList: mutable.Map[NameAndPartition, PartitionStatus]) extends Logging { private var hasEnoughUpdates: Boolean = false @@ -278,16 +286,16 @@ private[eventhubs] class BatchStatus( def updatePartitionPerformance(nAndP: NameAndPartition, batchSize: Int, receiveTimeInMillis: Long): Unit = { - if (!paritionsStatusList.contains(nAndP)) { + if (!partitionsStatusList.contains(nAndP)) { throw new IllegalStateException( s"Partition $nAndP doesn't exist in the batch status for batchId $batchId. 
This is an illegal state that shouldn't happen.") } - paritionsStatusList(nAndP).updatePerformanceMetrics(batchSize, receiveTimeInMillis) + partitionsStatusList(nAndP).updatePerformanceMetrics(batchSize, receiveTimeInMillis) } def receivedEnoughUpdates: Boolean = { if (!hasEnoughUpdates) { - hasEnoughUpdates = paritionsStatusList.values + hasEnoughUpdates = partitionsStatusList.values .filter(par => par.hasBeenUpdated) .size >= PartitionsStatusTracker.enoughUpdatesCount } @@ -300,8 +308,8 @@ private[eventhubs] class BatchStatus( case None => { // just use partitions which have batchSize > 0 and have been updated logInfo( - s"Calculate partition performacne percenatges for batch = $batchId with partitions status = $paritionsStatusList") - val partitionsTimePerEvent = paritionsStatusList + s"Calculate partition performance percentages for batch = $batchId with partitions status = $partitionsStatusList") + val partitionsTimePerEvent = partitionsStatusList .filter(p => (p._2.hasBeenUpdated && !p._2.emptyBatch)) .values .map(ps => ps.timePerEventInMillis) @@ -313,7 +321,7 @@ s"so return None ") None } else if (allPartitionsFinishedWithinAcceptableTime) { - logInfo(s"All partitions are within the range of normal perforamnce because " + + logInfo(s"All partitions are within the range of normal performance because " + s"their receive time was less than ${PartitionsStatusTracker.acceptableBatchReceiveTimeInMs}.") PartitionsStatusTracker.defaultPartitionsPerformancePercentage } else { // calculate the standard deviation val avgTimePerEvent @@ -331,12 +339,12 @@ s" for updated partitions in the batch $batchId.") // update performance metrics in each partition and return that mapping - paritionsStatusList.foreach(par => + partitionsStatusList.foreach(par => par._2.updatePerformancePercentage(avgTimePerEvent, stdDevTimePerEvent)) val ppp: Map[NameAndPartition, Double] = - paritionsStatusList.map(par => (par._1, par._2.performancePercentage))(breakOut) + partitionsStatusList.map(par => (par._1, par._2.performancePercentage))(breakOut) // if all partitions have been updated, save the result in performancePercentages - if (paritionsStatusList.values + if (partitionsStatusList.values .filter(ps => ps.hasBeenUpdated) .size == PartitionsStatusTracker.partitionsCount) { performancePercentages = Some(ppp) @@ -351,7 +359,7 @@ * its portion of events. If all partitions are within this time frame, it means none of them is slow.
*/ private def allPartitionsFinishedWithinAcceptableTime: Boolean = { - val updatedPartitionsTime = paritionsStatusList + val updatedPartitionsTime = partitionsStatusList .filter(p => (p._2.hasBeenUpdated && !p._2.emptyBatch)) .values .map(ps => ps.batchReceiveTimeInMillis) @@ -364,7 +372,7 @@ } override def toString: String = { - s"BatchStatus(localBatchId=$batchId, PartitionsStatus=${paritionsStatusList.values.toString()})" + s"BatchStatus(localBatchId=$batchId, PartitionsStatus=${partitionsStatusList.values.toString()})" } } diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index 3d8205790..3887a5bd1 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -111,9 +111,16 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, var partitionsThrottleFactor: mutable.Map[NameAndPartition, Double] = (for (pid <- 0 until partitionCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) - val defaultPartitionsPerformancePercentage: Map[NameAndPartition, Double] = + var defaultPartitionsPerformancePercentage: Map[NameAndPartition, Double] = (for (pid <- 0 until partitionCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) + private def updatePartitionCountInPartitionsStatusTracker(numberOfPartitions: Int) = { + logInfo(s"Update the partitionCount to ${numberOfPartitions} in the PartitionsStatusTracker.") + PartitionsStatusTracker.updateDefaultValuesInTracker(numberOfPartitions) + defaultPartitionsPerformancePercentage = + (for (pid <- 0 until numberOfPartitions) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) + } + private lazy val initialPartitionSeqNos = { val metadataLog = new HDFSMetadataLog[EventHubsSourceOffset](sqlContext.sparkSession, metadataPath) { @@ -293,18 +300,18 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, .get(nameAndPartition) .map { size => val begin = from.getOrElse(nameAndPartition, fromNew(nameAndPartition)) - // adjust performance performance pewrcentages to use as much as events possible in the batch - val perforamnceFactor: Double = if (slowPartitionAdjustment) { - partitionsPerformancePercentage(nameAndPartition) + // adjust performance percentages to use as many events as possible in the batch + val performanceFactor: Double = if (slowPartitionAdjustment) { + partitionsPerformancePercentage.getOrElse(nameAndPartition, 1.0) } else 1.0 if (slowPartitionAdjustment) { - partitionsThrottleFactor(nameAndPartition) = perforamnceFactor + partitionsThrottleFactor(nameAndPartition) = performanceFactor logInfo( s"Slow partition adjustment is on, so prorate amount for $nameAndPartition will be adjusted by" + - s" the perfromanceFactor = $perforamnceFactor") + s" the performanceFactor = $performanceFactor") } - val prorate = limit * (size / total) * perforamnceFactor + val prorate = limit * (size / total) * performanceFactor logDebug(s"rateLimit $nameAndPartition prorated amount is $prorate") // Don't completely starve small partitions val off = begin + (if (prorate < 1) Math.ceil(prorate) else Math.floor(prorate)).toLong @@ -353,6 +360,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, val startingSeqNos = if (prevOffsets.size < untilSeqNos.size) { logInfo( s"Number of partitions has increased from
${prevOffsets.size} to ${untilSeqNos.size}") + updatePartitionCountInPartitionsStatusTracker(partitionCount) val defaultSeqNos = ehClient .translate(ehConf, partitionCount) .map { @@ -425,7 +433,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, localBatchId += 1 logDebug( s"Slow partition adjustment is on, add the current batch $localBatchId to the tracker.") - partitionsStatusTracker.addorUpdateBatch(localBatchId, offsetRanges) + partitionsStatusTracker.addOrUpdateBatch(localBatchId, offsetRanges) } /** From b887f4d2bf9dd4b9d94d0ead4050f7f86ba0be85 Mon Sep 17 00:00:00 2001 From: Navid Yaghmazadeh Date: Fri, 11 Dec 2020 09:01:14 -0800 Subject: [PATCH 29/29] fix slow partition adj issue with multi streams --- .../spark/eventhubs/EventHubsUtils.scala | 3 +- .../PartitionPerformanceReceiver.scala | 32 ++- .../eventhubs/PartitionsStatusTracker.scala | 112 +++++----- .../SimulatedPartitionStatusTracker.scala | 19 +- .../spark/sql/eventhubs/EventHubsSource.scala | 10 +- .../eventhubs/EventHubsSourceProvider.scala | 3 +- .../sql/eventhubs/EventHubsSourceSuite.scala | 199 +++++++++++++----- 7 files changed, 252 insertions(+), 126 deletions(-) diff --git a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala index d344fb05d..932bf669b 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/EventHubsUtils.scala @@ -62,9 +62,8 @@ object EventHubsUtils extends Logging { s"There is an existing partitionPerformanceReceiverRef on the driver, use that one rather than creating a new one") } catch { case e: Exception => - val partitionsStatusTracker = PartitionsStatusTracker.getPartitionStatusTracker val partitionPerformanceReceiver: PartitionPerformanceReceiver = - new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv, partitionsStatusTracker) + new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv) partitionPerformanceReceiverRef = SparkEnv.get.rpcEnv .setupEndpoint(PartitionPerformanceReceiver.ENDPOINT_NAME, partitionPerformanceReceiver) } diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala index 0531fb0de..712ff0b25 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionPerformanceReceiver.scala @@ -23,12 +23,24 @@ import org.apache.spark.internal.Logging import org.apache.spark.rpc.{ RpcEndpoint, RpcEnv } import org.apache.spark.SparkContext import org.json4s.jackson.Serialization +import scala.collection.mutable -private[spark] class PartitionPerformanceReceiver(override val rpcEnv: RpcEnv, - val statusTracker: PartitionsStatusTracker) +private[spark] class PartitionPerformanceReceiver(override val rpcEnv: RpcEnv) extends RpcEndpoint with Logging { + // Keeps track of PartitionsStatusTracker per EventHub source + var statusTrackers: mutable.Map[String, PartitionsStatusTracker] = + mutable.Map[String, PartitionsStatusTracker]() + + def addStatusTracker(ehName: String, statusTracker: PartitionsStatusTracker): Unit = { + statusTrackers(ehName) = statusTracker + } + + def getStatusTracker(ehName: String): Option[PartitionsStatusTracker] = { + statusTrackers.get(ehName) + } + override def onStart(): Unit = { logInfo("Start PartitionPerformanceReceiver RPC endpoint") } @@ -36,10 +48,18 @@ 
private[spark] class PartitionPerformanceReceiver(override val rpcEnv: RpcEnv, override def receive: PartialFunction[Any, Unit] = { case ppm: PartitionPerformanceMetric => { logDebug(s"Received PartitionPerformanceMetric $ppm") - statusTracker.updatePartitionPerformance(ppm.nAndP, - ppm.requestSeqNo, - ppm.batchSize, - ppm.receiveTimeInMillis) + val ehStatusTracker = getStatusTracker(ppm.nAndP.ehName) + ehStatusTracker match { + case Some(statusTracker) => + statusTracker.updatePartitionPerformance(ppm.nAndP, + ppm.requestSeqNo, + ppm.batchSize, + ppm.receiveTimeInMillis) + case None => + logError( + s"PartitionPerformanceReceiver doesn't have a PartitionsStatusTracker for EventHub ${ppm.nAndP.ehName} " + + s"to send the received PartitionPerformanceMetric ${ppm}.") + } } case _ => { logError(s"Received an unknown message in PartitionPerformanceReceiver. It's not acceptable!") diff --git a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala index 8d4b2134f..00352a508 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/PartitionsStatusTracker.scala @@ -26,10 +26,22 @@ import org.apache.spark.eventhubs.rdd.OffsetRange import org.apache.spark.eventhubs.utils.ThrottlingStatusPlugin import org.apache.spark.internal.Logging -class PartitionsStatusTracker extends Logging { +class PartitionsStatusTracker(var partitionsCount: Int, + val partitionContext: PartitionContext, + val acceptableBatchReceiveTimeInMs: Long, + val throttlingStatusPlugin: Option[ThrottlingStatusPlugin]) + extends Logging { import PartitionsStatusTracker._ + // quorum size for calculating a batch performance + var enoughUpdatesCount: Int = (partitionsCount / 2) + 1 + + // default performance percentages of 1.0 for when a quorum is not available + var defaultPartitionsPerformancePercentage: Option[Map[NameAndPartition, Double]] = Some( + (for (pid <- 0 until partitionsCount) + yield (NameAndPartition(partitionContext.eventHubName, pid), 1.0))(breakOut)) + // retrieves the batchStatus object based on the local batchId private val batchesStatusList = mutable.Map[Long, BatchStatus]() @@ -41,6 +53,18 @@ class PartitionsStatusTracker extends Logging { */ private val partitionSeqNoPairToBatchIdMap = mutable.Map[String, Long]() + /** + * Update the number of partitions in the tracker + * @param numOfPartitions updated number of partitions + */ + def updateNumberofPartitionsInTracker(numOfPartitions: Int) = { + partitionsCount = numOfPartitions; + enoughUpdatesCount = (partitionsCount / 2) + 1 + defaultPartitionsPerformancePercentage = Some( + (for (pid <- 0 until partitionsCount) + yield (NameAndPartition(partitionContext.eventHubName, pid), 1.0))(breakOut)) + } + /** * Add a batch to the tracker by creating a BatchStatus object and adding it to the map. * Also, we add mappings from each partition-startSeqNo pair to the batchId in order to be able to retrieve @@ -58,7 +82,8 @@ class PartitionsStatusTracker extends Logging { // remove the oldest batch from the batchesStatusList to release space for adding the new batch. 
val batchIdToRemove = batchId - PartitionsStatusTracker.TrackingBatchCount logDebug( - s"Remove the batch ${if (batchIdToRemove >= 0) batchIdToRemove else None} from the tracker.") + s"Eventhub: ${partitionContext.eventHubName}, Remove batch ${if (batchIdToRemove >= 0) batchIdToRemove + else None} from the tracker.") if (batchIdToRemove >= 0) { removeBatch(batchIdToRemove) } @@ -70,10 +95,14 @@ class PartitionsStatusTracker extends Logging { breakOut) // create the batchStatus tracker and add it to the map - batchesStatusList(batchId) = new BatchStatus(batchId, offsetRanges.map(range => { - val np = range.nameAndPartition - (np, new PartitionStatus(np, range.fromSeqNo, isZeroSizeBatchPartition(np))) - })(breakOut)) + batchesStatusList(batchId) = new BatchStatus( + this, + batchId, + offsetRanges.map(range => { + val np = range.nameAndPartition + (np, new PartitionStatus(np, range.fromSeqNo, isZeroSizeBatchPartition(np))) + })(breakOut) + ) // add the mapping from partition-startSeqNo pair to the batchId ... ignore partitions with zero batch size offsetRanges @@ -94,7 +123,8 @@ class PartitionsStatusTracker extends Logging { private def removeBatch(batchId: Long): Unit = { if (!batchesStatusList.contains(batchId)) { logInfo( - s"Batch with local batchId = $batchId doesn't exist in the batch status tracker, so it can't be removed.") + s"Eventhub: ${partitionContext.eventHubName}, Batch with local batchId = $batchId doesn't exist in " + + s"the batch status tracker, so it can't be removed.") return } // remove the mapping from partition-seqNo pair to the batchId (ignore partitions with empty batch size) @@ -178,7 +208,8 @@ class PartitionsStatusTracker extends Logging { def partitionsPerformancePercentage(): Option[Map[NameAndPartition, Double]] = { // if there is no batch in the tracker, return None if (batchesStatusList.isEmpty) { - logDebug(s"There is no batch in the tracker, so return None") + logDebug( + s"Eventhub: ${partitionContext.eventHubName}, There is no batch in the tracker, so return None") None } else { // find the latest batch with enough updates @@ -194,15 +225,16 @@ class PartitionsStatusTracker extends Logging { latestUpdatedBatch match { case None => { logDebug( - s"No batch has ${PartitionsStatusTracker.enoughUpdatesCount} partitions with updates (enough updates), " + - s"so return None") + s"Eventhub: ${partitionContext.eventHubName}, No batch has ${enoughUpdatesCount} partitions with " + + s"updates (enough updates), so return None") None } case Some(batch) => { logDebug( - s"Batch ${batch.batchId} is the latest batch with enough updates. Calculate and return its performance.") + s"Eventhub: ${partitionContext.eventHubName}, Batch ${batch.batchId} is the latest batch with enough " + + s"updates. 
Calculate and return its performance.") val performancePercentages = batch.getPerformancePercentages - PartitionsStatusTracker.throttlingStatusPlugin.foreach( + throttlingStatusPlugin.foreach( _.onPartitionsPerformanceStatusUpdate( partitionContext, batch.batchId, @@ -236,45 +268,15 @@ class PartitionsStatusTracker extends Logging { } object PartitionsStatusTracker { - private val _partitionsStatusTrackerInstance = new PartitionsStatusTracker private val TrackingBatchCount = 3 val BatchNotFound: Long = -1 - var acceptableBatchReceiveTimeInMs: Long = DefaultMaxAcceptableBatchReceiveTime.toMillis - var partitionsCount: Int = 1 - var enoughUpdatesCount: Int = 1 - var throttlingStatusPlugin: Option[ThrottlingStatusPlugin] = None - var defaultPartitionsPerformancePercentage: Option[Map[NameAndPartition, Double]] = None - var partitionContext: PartitionContext = null - - def setDefaultValuesInTracker(numOfPartitions: Int, - pContext: PartitionContext, - maxBatchReceiveTime: Long, - throttlingSP: Option[ThrottlingStatusPlugin]) = { - partitionContext = pContext - partitionsCount = numOfPartitions - acceptableBatchReceiveTimeInMs = maxBatchReceiveTime - enoughUpdatesCount = (partitionsCount / 2) + 1 - throttlingStatusPlugin = throttlingSP - defaultPartitionsPerformancePercentage = Some( - (for (pid <- 0 until partitionsCount) - yield (NameAndPartition(pContext.eventHubName, pid), 1.0))(breakOut)) - } - - def updateDefaultValuesInTracker(numOfPartitions: Int) = { - partitionsCount = numOfPartitions; - enoughUpdatesCount = (partitionsCount / 2) + 1 - defaultPartitionsPerformancePercentage = Some( - (for (pid <- 0 until partitionsCount) - yield (NameAndPartition(partitionContext.eventHubName, pid), 1.0))(breakOut)) - } private def partitionSeqNoKey(nAndP: NameAndPartition, seqNo: SequenceNumber): String = s"(name=${nAndP.ehName},pid=${nAndP.partitionId},startSeqNo=$seqNo)".toLowerCase - - def getPartitionStatusTracker: PartitionsStatusTracker = _partitionsStatusTrackerInstance } private[eventhubs] class BatchStatus( + val partitionsStatusTracker: PartitionsStatusTracker, val batchId: Long, val partitionsStatusList: mutable.Map[NameAndPartition, PartitionStatus]) extends Logging { @@ -297,7 +299,7 @@ private[eventhubs] class BatchStatus( if (!hasEnoughUpdates) { hasEnoughUpdates = partitionsStatusList.values .filter(par => par.hasBeenUpdated) - .size >= PartitionsStatusTracker.enoughUpdatesCount + .size >= partitionsStatusTracker.enoughUpdatesCount } hasEnoughUpdates } @@ -308,7 +310,8 @@ private[eventhubs] class BatchStatus( case None => { // just use partitions which have batchSize > 0 and have been updated logInfo( - s"Calculate partition performance percentages for batch = $batchId with partitions status = $partitionsStatusList") + s"Eventhub: ${partitionsStatusTracker.partitionContext.eventHubName}, Calculate partition performance " + + s"percentages for batch = $batchId with partitions status = $partitionsStatusList") val partitionsTimePerEvent = partitionsStatusList .filter(p => (p._2.hasBeenUpdated && !p._2.emptyBatch)) .values @@ -317,13 +320,15 @@ private[eventhubs] class BatchStatus( // check if there is no updated partition with batchSize > 0 if (partitionsTimePerEvent.isEmpty) { logInfo( - s"There is no updated partition with batchSize greater than 0 in batch $batchId, " + - s"so return None ") + s"Eventhub: ${partitionsStatusTracker.partitionContext.eventHubName}, There is no updated partition " + + s"with batchSize greater than 0 in batch $batchId, so return None.") None } else if 
(allPartitionsFinishedWithinAcceptableTime) { - logInfo(s"All partitions are within the range of normal performance because " + - s"their receive time was less than ${PartitionsStatusTracker.acceptableBatchReceiveTimeInMs}.") - PartitionsStatusTracker.defaultPartitionsPerformancePercentage + logInfo( + s"Eventhub: ${partitionsStatusTracker.partitionContext.eventHubName}, All partitions " + + s"are within the range of normal performance because their receive time was less than " + + s"${partitionsStatusTracker.acceptableBatchReceiveTimeInMs}.") + partitionsStatusTracker.defaultPartitionsPerformancePercentage } else { // calculate the standard deviation val avgTimePerEvent @@ -346,7 +351,7 @@ private[eventhubs] class BatchStatus( // if all partitions have been updated, save the result in performancePercentages if (partitionsStatusList.values .filter(ps => ps.hasBeenUpdated) - .size == PartitionsStatusTracker.partitionsCount) { + .size == partitionsStatusTracker.partitionsCount) { performancePercentages = Some(ppp) } Some(ppp) @@ -367,12 +372,13 @@ private[eventhubs] class BatchStatus( true else { val maxReceiveTime = updatedPartitionsTime.max - (maxReceiveTime < PartitionsStatusTracker.acceptableBatchReceiveTimeInMs) + (maxReceiveTime < partitionsStatusTracker.acceptableBatchReceiveTimeInMs) } } override def toString: String = { - s"BatchStatus(localBatchId=$batchId, PartitionsStatus=${partitionsStatusList.values.toString()})" + s"BatchStatus(Eventhub: ${partitionsStatusTracker.partitionContext.eventHubName}, localBatchId=$batchId, " + + s"PartitionsStatus=${partitionsStatusList.values.toString()})" } } diff --git a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedPartitionStatusTracker.scala b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedPartitionStatusTracker.scala index ba4ef4e1c..86edd5176 100644 --- a/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedPartitionStatusTracker.scala +++ b/core/src/main/scala/org/apache/spark/eventhubs/utils/SimulatedPartitionStatusTracker.scala @@ -16,12 +16,27 @@ */ package org.apache.spark.eventhubs.utils +import java.net.URI +import org.apache.spark.eventhubs.{ + DefaultMaxAcceptableBatchReceiveTime, + NameAndPartition, + PartitionContext, + PartitionsStatusTracker, + SequenceNumber +} -import org.apache.spark.eventhubs.{ NameAndPartition, PartitionsStatusTracker, SequenceNumber } import scala.collection.breakOut private[spark] object SimulatedPartitionStatusTracker { - val sourceTracker = PartitionsStatusTracker.getPartitionStatusTracker + var sourceTracker = new PartitionsStatusTracker( + 1, + new PartitionContext(new URI("sb://namespace.servicebus.windows.net"), "mockEH"), + DefaultMaxAcceptableBatchReceiveTime.toMillis, + None) + + def updateSourceTrackerForNewEH(tracker: PartitionsStatusTracker) = { + sourceTracker = tracker + } def updatePartitionPerformance(nAndP: NameAndPartition, requestSeqNo: SequenceNumber, diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala index 3887a5bd1..8ef1c5a19 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSource.scala @@ -101,12 +101,17 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, private lazy val throttlingStatusPlugin: Option[ThrottlingStatusPlugin] = ehConf.throttlingStatusPlugin() - 
PartitionsStatusTracker.setDefaultValuesInTracker( + private var localBatchId = -1 + + // Create a partition status tracker for this source and add it to the partitionPerformanceReceiver + // this is being used only if slow partition adjustment is on + val partitionsStatusTracker = new PartitionsStatusTracker( partitionCount, partitionContext, ehConf.maxAcceptableBatchReceiveTime.getOrElse(DefaultMaxAcceptableBatchReceiveTime).toMillis, throttlingStatusPlugin ) + partitionPerformanceReceiver.addStatusTracker(ehName, partitionsStatusTracker) var partitionsThrottleFactor: mutable.Map[NameAndPartition, Double] = (for (pid <- 0 until partitionCount) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) @@ -116,7 +121,7 @@ private[spark] class EventHubsSource private[eventhubs] (sqlContext: SQLContext, private def updatePartitionCountInPartitionsStatusTracker(numberOfPartitions: Int) = { logInfo(s"Update the partitionCount to ${numberOfPartitions} in the PartitionsStatusTracker.") - PartitionsStatusTracker.updateDefaultValuesInTracker(numberOfPartitions) + partitionsStatusTracker.updateNumberofPartitionsInTracker(numberOfPartitions) defaultPartitionsPerformancePercentage = (for (pid <- 0 until numberOfPartitions) yield (NameAndPartition(ehName, pid), 1.0))(breakOut) } @@ -470,7 +475,6 @@ private[eventhubs] object EventHubsSource { """.stripMargin private[eventhubs] val VERSION = 1 - private var localBatchId = -1 def getSortedExecutorList(sc: SparkContext): Array[String] = { val bm = sc.env.blockManager diff --git a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala index 5097b8f85..e2c0429ef 100644 --- a/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala +++ b/core/src/main/scala/org/apache/spark/sql/eventhubs/EventHubsSourceProvider.scala @@ -143,9 +143,8 @@ private[sql] class EventHubsSourceProvider private[sql] object EventHubsSourceProvider extends Serializable { // RPC endpoint for partition performance communication in the driver - val partitionsStatusTracker = PartitionsStatusTracker.getPartitionStatusTracker val partitionPerformanceReceiver: PartitionPerformanceReceiver = - new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv, partitionsStatusTracker) + new PartitionPerformanceReceiver(SparkEnv.get.rpcEnv) val partitionPerformanceReceiverRef: RpcEndpointRef = SparkEnv.get.rpcEnv .setupEndpoint(PartitionPerformanceReceiver.ENDPOINT_NAME, partitionPerformanceReceiver) diff --git a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala index a3ac32bb7..29aa2d615 100644 --- a/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala +++ b/core/src/test/scala/org/apache/spark/sql/eventhubs/EventHubsSourceSuite.scala @@ -17,19 +17,41 @@ package org.apache.spark.sql.eventhubs -import java.io.{BufferedWriter, FileInputStream, OutputStream, OutputStreamWriter} +import java.io.{ BufferedWriter, FileInputStream, OutputStream, OutputStreamWriter } import java.nio.charset.StandardCharsets.UTF_8 import java.time.Duration import java.util.concurrent.atomic.AtomicInteger -import org.apache.qpid.proton.amqp.{Binary, Decimal128, Decimal32, Decimal64, DescribedType, Symbol, UnknownDescribedType, UnsignedByte, UnsignedInteger, UnsignedLong, UnsignedShort} -import org.apache.spark.eventhubs.utils.{EventHubsTestUtils, SimpleThrottlingStatusPlugin, 
SimulatedClient, SimulatedPartitionStatusTracker} -import org.apache.spark.eventhubs.{EventHubsConf, EventPosition, NameAndPartition} +import org.apache.qpid.proton.amqp.{ + Binary, + Decimal128, + Decimal32, + Decimal64, + DescribedType, + Symbol, + UnknownDescribedType, + UnsignedByte, + UnsignedInteger, + UnsignedLong, + UnsignedShort +} +import org.apache.spark.eventhubs.utils.{ + EventHubsTestUtils, + SimpleThrottlingStatusPlugin, + SimulatedClient, + SimulatedPartitionStatusTracker +} +import org.apache.spark.eventhubs.{ + EventHubsConf, + EventPosition, + NameAndPartition, + PartitionsStatusTracker +} import org.apache.spark.sql.Dataset import org.apache.spark.sql.execution.streaming._ -import org.apache.spark.sql.functions.{count, window} +import org.apache.spark.sql.functions.{ count, window } import org.apache.spark.sql.streaming.util.StreamManualClock -import org.apache.spark.sql.streaming.{ProcessingTime, StreamTest} +import org.apache.spark.sql.streaming.{ ProcessingTime, StreamTest } import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.util.Utils import org.json4s.NoTypeHints @@ -118,10 +140,22 @@ class EventHubsSourceSuite extends EventHubsSourceTest { private def getEventHubsConf(ehName: String): EventHubsConf = testUtils.getEventHubsConf(ehName) - case class PartitionsStatusTrackerUpdate(updates: List[(NameAndPartition, Long, Int, Long)]) extends ExternalAction { + case class PartitionsStatusTrackerUpdate(updates: List[(NameAndPartition, Long, Int, Long)]) + extends ExternalAction { + override def runAction(): Unit = { + updates.foreach { u => + SimulatedPartitionStatusTracker.updatePartitionPerformance(u._1, u._2, u._3, u._4) + } + } + } + + case class RenewPartitionsStatusTrackerInstance(ehName: String) extends ExternalAction { override def runAction(): Unit = { - updates.foreach{ u => - SimulatedPartitionStatusTracker.updatePartitionPerformance(u._1, u._2, u._3, u._4)} + val tracker: Option[PartitionsStatusTracker] = + EventHubsSourceProvider.partitionPerformanceReceiver.getStatusTracker(ehName) + if (tracker.isDefined) { + SimulatedPartitionStatusTracker.updateSourceTrackerForNewEH(tracker.get) + } } } @@ -610,8 +644,8 @@ class EventHubsSourceSuite extends EventHubsSourceTest { } .map { p => p._2 match { - case s: String => p._1 -> s - case default => p._1 -> Serialization.write(p._2) + case s: String => p._1 -> s + case default => p._1 -> Serialization.write(p._2) } } @@ -785,12 +819,13 @@ class EventHubsSourceSuite extends EventHubsSourceTest { NameAndPartition(eventHub.name, 2), NameAndPartition(eventHub.name, 3)) + val throttlingPlugin = new SimpleThrottlingStatusPlugin val parameters = getEventHubsConf(eventHub.name) .setMaxEventsPerTrigger(20) .setSlowPartitionAdjustment(true) .setMaxAcceptableBatchReceiveTime(Duration.ofMillis(4)) - .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin) + .setThrottlingStatusPlugin(throttlingPlugin) .setStartingPosition(EventPosition.fromSequenceNumber(0L)) .toMap @@ -824,12 +859,16 @@ class EventHubsSourceSuite extends EventHubsSourceTest { testStream(mapped)( StartStream(ProcessingTime(100), clock), + RenewPartitionsStatusTrackerInstance(eventHub.name), waitUntilBatchProcessed, // we'll get 5 events per partition per trigger Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), CheckLastBatch(0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4), - PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 5, 9L), (partitions(1), 0L, 5, 11L), - (partitions(2), 0L, 
5, 9L), (partitions(3), 0L, 5, 11L))), + PartitionsStatusTrackerUpdate( + List((partitions(0), 0L, 5, 9L), + (partitions(1), 0L, 5, 11L), + (partitions(2), 0L, 5, 9L), + (partitions(3), 0L, 5, 11L))), //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty), Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)), AdvanceManualClock(100), waitUntilBatchProcessed, // we should get 5 events per partition per trigger Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), CheckLastBatch(5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 8, 9), - PartitionsStatusTrackerUpdate(List( (partitions(0), 5L, 5, 16L), (partitions(1), 5L, 5, 13L), - (partitions(2), 5L, 5, 16L), (partitions(3), 5L, 5, 15L))), + PartitionsStatusTrackerUpdate( + List((partitions(0), 5L, 5, 16L), + (partitions(1), 5L, 5, 13L), + (partitions(2), 5L, 5, 16L), + (partitions(3), 5L, 5, 15L))), Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)), //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty), AdvanceManualClock(100), waitUntilBatchProcessed, // the difference between max and min time per event is less than the acceptable time difference (1 MS) // we should get 5 events per partition per trigger Assert(Set[Long](0, 1, 2).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), - CheckLastBatch(10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10, 11, 12, 13, 14), + CheckLastBatch(10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10, 11, 12, 13, 14, 10, 11, 12, 13, + 14), // miss the performance update for this batch. Next round every partition is considered normal speed Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)), //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty), AdvanceManualClock(100), waitUntilBatchProcessed, // we should get 5 events per partition per trigger Assert(Set[Long](1, 2, 3).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), - CheckLastBatch(15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15, 16, 17, 18, 19), + CheckLastBatch(15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15, 16, 17, 18, 19, 15, 16, 17, 18, + 19), // get update for three partitions (missing partition 1) - PartitionsStatusTrackerUpdate(List( (partitions(0), 15L, 5, 55L), - (partitions(2), 15L, 5, 52L), (partitions(3), 15L, 5, 43L))), + PartitionsStatusTrackerUpdate( + List((partitions(0), 15L, 5, 55L), + (partitions(2), 15L, 5, 52L), + (partitions(3), 15L, 5, 43L))), Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)), //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty), AdvanceManualClock(100), waitUntilBatchProcessed, // all partitions have receiveTimePerEvent <= avg + stdDev // we should get 5 events per partition per trigger Assert(Set[Long](2, 3, 4).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)), - CheckLastBatch(20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20, 21, 22, 23, 24), + CheckLastBatch(20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20, 21, 22, 23, 24, 20, 21, 22, 23, + 24), StopStream, StartStream(ProcessingTime(100), clock), +
+      RenewPartitionsStatusTrackerInstance(eventHub.name),
      // get update for the last batch before stopping the stream. It should be ignored because the tracker
      // state should be clean at the start of the stream
-      PartitionsStatusTrackerUpdate(List( (partitions(0), 20L, 5, 100L), (partitions(1), 20L, 5, 13L),
-        (partitions(2), 20L, 5, 16L), (partitions(3), 20L, 5, 15L))),
+      PartitionsStatusTrackerUpdate(
+        List((partitions(0), 20L, 5, 100L),
+             (partitions(1), 20L, 5, 13L),
+             (partitions(2), 20L, 5, 16L),
+             (partitions(3), 20L, 5, 15L))),
      Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
      waitUntilBatchProcessed,
      // last received status update should be ignored since it belongs to a batch before restarting the stream
      // we should get 5 events per partition per trigger
      Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
-      CheckLastBatch(25, 26, 27, 28, 29, 25, 26, 27, 28, 29, 25, 26, 27, 28, 29, 25, 26, 27, 28, 29),
-      PartitionsStatusTrackerUpdate(List( (partitions(0), 25L, 5, 73L), (partitions(1), 25L, 5, 72L),
-        (partitions(2), 25L, 5, 66L), (partitions(3), 25L, 5, 73L))),
+      CheckLastBatch(25, 26, 27, 28, 29, 25, 26, 27, 28, 29, 25, 26, 27, 28, 29, 25, 26, 27, 28,
+        29),
+      PartitionsStatusTrackerUpdate(
+        List((partitions(0), 25L, 5, 73L),
+             (partitions(1), 25L, 5, 72L),
+             (partitions(2), 25L, 5, 66L),
+             (partitions(3), 25L, 5, 73L))),
      Assert(noSlowPartition.equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
      //Assert(SimulatedPartitionStatusTracker.getPerformancePercentages.isEmpty),
      AdvanceManualClock(100),
@@ -890,22 +945,22 @@ class EventHubsSourceSuite extends EventHubsSourceTest {
      Assert(Set[Long](0, 1, 2).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
      CheckLastBatch(30, 31, 32, 33, 34, 30, 31, 32, 33, 34, 30, 31, 32, 33, 34, 30, 31, 32, 33, 34)
    )
-  }
+  }
 
  test("setSlowPartitionAdjustment with slow partitions") {
    val eventHub = testUtils.createEventHubs(newEventHubs(), DefaultPartitionCount)
    testUtils.populateUniformly(eventHub.name, 10000)
    val partitions: List[NameAndPartition] = List(NameAndPartition(eventHub.name, 0),
-                                                  NameAndPartition(eventHub.name, 1),
-                                                  NameAndPartition(eventHub.name, 2),
-                                                  NameAndPartition(eventHub.name, 3))
-
+                                                  NameAndPartition(eventHub.name, 1),
+                                                  NameAndPartition(eventHub.name, 2),
+                                                  NameAndPartition(eventHub.name, 3))
+    val throttlingPlugin = new SimpleThrottlingStatusPlugin
    val parameters =
      getEventHubsConf(eventHub.name)
        .setMaxEventsPerTrigger(20)
        .setSlowPartitionAdjustment(true)
        .setMaxAcceptableBatchReceiveTime(Duration.ofMillis(3))
-       .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin)
+       .setThrottlingStatusPlugin(throttlingPlugin)
        .setStartingPosition(EventPosition.fromSequenceNumber(0L))
        .toMap
@@ -936,25 +991,34 @@ class EventHubsSourceSuite extends EventHubsSourceTest {
 
    testStream(mapped)(
      StartStream(ProcessingTime(100), clock),
+      RenewPartitionsStatusTrackerInstance(eventHub.name),
      waitUntilBatchProcessed,
      // we'll get 5 events per partition per trigger
      Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
      CheckLastBatch(0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4),
      // for the next batch, let's make partition 2 slow
-      PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 5, 18L), (partitions(1), 0L, 5, 21L),
-        (partitions(2), 0L, 5, 42L), (partitions(3), 0L, 5, 25L))),
-      Assert(Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 0.63, partitions(3) -> 1.0)
-        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      PartitionsStatusTrackerUpdate(
+        List((partitions(0), 0L, 5, 18L),
+             (partitions(1), 0L, 5, 21L),
+             (partitions(2), 0L, 5, 42L),
+             (partitions(3), 0L, 5, 25L))),
+      Assert(
+        Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 0.63, partitions(3) -> 1.0)
+          .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
      AdvanceManualClock(100),
      waitUntilBatchProcessed,
      // we should get 3 events for partition 2, 5 events for other partitions
      Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
      CheckLastBatch(5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, 5, 6, 7, 8, 9),
      // for the next batch, let's make partition 1 slow and recover partition 2 from being slow
-      PartitionsStatusTrackerUpdate(List( (partitions(0), 5L, 5, 18L), (partitions(1), 5L, 5, 163L),
-        (partitions(2), 5L, 3, 10L), (partitions(3), 5L, 5, 15L))),
-      Assert(Map(partitions(0) -> 1.0, partitions(1) -> 0.33, partitions(2) -> 1.0, partitions(3) -> 1.0)
-        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      PartitionsStatusTrackerUpdate(
+        List((partitions(0), 5L, 5, 18L),
+             (partitions(1), 5L, 5, 163L),
+             (partitions(2), 5L, 3, 10L),
+             (partitions(3), 5L, 5, 15L))),
+      Assert(
+        Map(partitions(0) -> 1.0, partitions(1) -> 0.33, partitions(2) -> 1.0, partitions(3) -> 1.0)
+          .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
      AdvanceManualClock(100),
      waitUntilBatchProcessed,
      // we should get 4 events for partitions 0 and 3, 5 events for partition 2, and just 1 event for partition 1
@@ -963,19 +1027,25 @@ class EventHubsSourceSuite extends EventHubsSourceTest {
      CheckLastBatch(10, 11, 12, 13, 10, 8, 9, 10, 11, 12, 10, 11, 12, 13),
      // for the next batch, let's only have 2 updates (one slow, one fast partition)
      // since we don't have enough updated partitions, we should continue with the previous partition performance
-      PartitionsStatusTrackerUpdate(List( (partitions(0), 10L, 4, 13L), (partitions(3), 10L, 4, 168L))),
-      Assert(Map(partitions(0) -> 1.0, partitions(1) -> 0.33, partitions(2) -> 1.0, partitions(3) -> 1.0)
-        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      PartitionsStatusTrackerUpdate(
+        List((partitions(0), 10L, 4, 13L), (partitions(3), 10L, 4, 168L))),
+      Assert(
+        Map(partitions(0) -> 1.0, partitions(1) -> 0.33, partitions(2) -> 1.0, partitions(3) -> 1.0)
+          .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
      AdvanceManualClock(100),
      waitUntilBatchProcessed,
      // we should get 4 events for partitions 0 and 3, 5 events for partition 2, and just 1 event for partition 1
      Assert(Set[Long](1, 2, 3).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
      CheckLastBatch(14, 15, 16, 17, 11, 13, 14, 15, 16, 17, 14, 15, 16, 17),
      // let's get back to normal for all partitions
-      PartitionsStatusTrackerUpdate(List( (partitions(0), 14L, 4, 12L), (partitions(1), 11L, 1, 3L),
-        (partitions(2), 13L, 5, 14L), (partitions(3), 14L, 4, 11L))),
-      Assert( Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0)
-        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      PartitionsStatusTrackerUpdate(
+        List((partitions(0), 14L, 4, 12L),
+             (partitions(1), 11L, 1, 3L),
+             (partitions(2), 13L, 5, 14L),
+             (partitions(3), 14L, 4, 11L))),
+      Assert(
+        Map(partitions(0) -> 1.0, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0)
+          .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
      AdvanceManualClock(100),
      waitUntilBatchProcessed,
      // all partitions have receiveTimePerEvent <= avg + stdDev
@@ -989,18 +1059,21 @@ class EventHubsSourceSuite extends EventHubsSourceTest {
  test("setSlowPartitionAdjustment with more than one slow partitions") {
    val eventHub = testUtils.createEventHubs(newEventHubs(), 5)
    testUtils.populateUniformly(eventHub.name, 1000)
-    val partitions: List[NameAndPartition] = List(NameAndPartition(eventHub.name, 0),
+    val partitions: List[NameAndPartition] = List(
+      NameAndPartition(eventHub.name, 0),
      NameAndPartition(eventHub.name, 1),
      NameAndPartition(eventHub.name, 2),
      NameAndPartition(eventHub.name, 3),
-      NameAndPartition(eventHub.name, 4))
+      NameAndPartition(eventHub.name, 4)
+    )
 
+    val throttlingPlugin = new SimpleThrottlingStatusPlugin
    val parameters =
      getEventHubsConf(eventHub.name)
        .setMaxEventsPerTrigger(50)
        .setSlowPartitionAdjustment(true)
        .setMaxAcceptableBatchReceiveTime(Duration.ofMillis(4))
-       .setThrottlingStatusPlugin(new SimpleThrottlingStatusPlugin)
+       .setThrottlingStatusPlugin(throttlingPlugin)
        .setStartingPosition(EventPosition.fromSequenceNumber(0L))
        .toMap
@@ -1031,22 +1104,32 @@ class EventHubsSourceSuite extends EventHubsSourceTest {
 
    testStream(mapped)(
      StartStream(ProcessingTime(100), clock),
+      RenewPartitionsStatusTrackerInstance(eventHub.name),
      waitUntilBatchProcessed,
      // we'll get 10 events per partition per trigger
      Assert(Set[Long](0).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
-      CheckLastBatch(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
-        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
+      CheckLastBatch(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5,
+        6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
      // for the next batch, let's make partitions 0 and 4 slow
-      PartitionsStatusTrackerUpdate(List( (partitions(0), 0L, 10, 62L), (partitions(1), 0L, 10, 21L),
-        (partitions(2), 0L, 10, 20L), (partitions(3), 0L, 10, 40L), (partitions(4), 0L, 10, 65L))),
-      Assert(Map(partitions(0) -> 0.67, partitions(1) -> 1.0, partitions(2) -> 1.0, partitions(3) -> 1.0, partitions(4) -> 0.64)
-        .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
+      PartitionsStatusTrackerUpdate(
+        List((partitions(0), 0L, 10, 62L),
+             (partitions(1), 0L, 10, 21L),
+             (partitions(2), 0L, 10, 20L),
+             (partitions(3), 0L, 10, 40L),
+             (partitions(4), 0L, 10, 65L))),
+      Assert(
+        Map(partitions(0) -> 0.67,
+            partitions(1) -> 1.0,
+            partitions(2) -> 1.0,
+            partitions(3) -> 1.0,
+            partitions(4) -> 0.64)
+          .equals(SimulatedPartitionStatusTracker.getPerformancePercentages)),
      AdvanceManualClock(100),
      waitUntilBatchProcessed,
      // we should get 10 events for partitions 1, 2, 3 and 6 events for partitions 0, 4
      Assert(Set[Long](0, 1).equals(SimulatedPartitionStatusTracker.currentBatchIdsInTracker)),
-      CheckLastBatch(10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
-        10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15)
+      CheckLastBatch(10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13,
+        14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 10, 11, 12, 13, 14, 15)
    )
  }
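
Note: the percentages asserted in these tests (0.63, 0.33, 0.67, 0.64) follow from the per-event
receive times reported through PartitionsStatusTrackerUpdate. The sketch below reconstructs that
arithmetic as the tests above exercise it; it is not the library's implementation. The object name
SlowPartitionMathSketch and the parameter maxAcceptablePerEventMs are placeholders invented here
for illustration (the production logic lives in PartitionsStatusTracker), and the outlier rule is
inferred from the "receiveTimePerEvent <= avg + stdDev" comments, so treat the exact thresholds
and rounding as assumptions.

    import scala.math.BigDecimal.RoundingMode

    object SlowPartitionMathSketch {
      // partition -> (number of events in the batch, total receive time in ms)
      type Updates = Map[String, (Int, Long)]

      /** 1.0 = full share in the next batch; < 1.0 shrinks that partition's share. */
      def performancePercentages(updates: Updates,
                                 maxAcceptablePerEventMs: Double): Map[String, Double] = {
        // time spent per event on each partition
        val perEvent = updates.map { case (p, (size, ms)) => p -> ms.toDouble / size }
        val times    = perEvent.values.toSeq
        val avg      = times.sum / times.size
        // population standard deviation (divide by n) reproduces the tests' numbers
        val stdDev   = math.sqrt(times.map(t => (t - avg) * (t - avg)).sum / times.size)
        perEvent.map {
          // slow = outlier (> avg + stdDev) AND over the configured per-event budget
          case (p, t) if t > avg + stdDev && t > maxAcceptablePerEventMs =>
            p -> BigDecimal(avg / t).setScale(2, RoundingMode.HALF_UP).toDouble
          case (p, _) => p -> 1.0
        }
      }

      def main(args: Array[String]): Unit = {
        // second batch of "setSlowPartitionAdjustment with slow partitions":
        // partition 2 took 42 ms for its 5 events, against a 3 ms/event budget
        val updates = Map("p0" -> (5, 18L), "p1" -> (5, 21L), "p2" -> (5, 42L), "p3" -> (5, 25L))
        println(performancePercentages(updates, maxAcceptablePerEventMs = 3.0))
        // prints Map(p0 -> 1.0, p1 -> 1.0, p2 -> 0.63, p3 -> 1.0), matching the Assert:
        // 42 / 5 = 8.4 ms/event exceeds avg + stdDev (5.3 + 1.86) and the 3 ms budget,
        // and 5.3 / 8.4 rounds to 0.63
      }
    }

Under the same model, the next trigger hands partition 2 only 3 events (5 * 0.63 rounded down),
which is exactly what the following CheckLastBatch(5, 6, 7, 8, 9, 5, 6, 7, 8, 9, 5, 6, 7, ...)
observes; the 0.33, 0.67, and 0.64 assertions reproduce the same way from their updates.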