diff --git a/config/coordinator/coordinator-docker-traces-v2-override.config.toml b/config/coordinator/coordinator-docker-traces-v2-override.config.toml index 0f08975b5..7ca39cdb1 100644 --- a/config/coordinator/coordinator-docker-traces-v2-override.config.toml +++ b/config/coordinator/coordinator-docker-traces-v2-override.config.toml @@ -1,14 +1,13 @@ [prover] -fs-requests-directory="/data/prover-execution/v3/requests" -fs-responses-directory="/data/prover-execution/v3/responses" - -[blob-compression.prover] -fs-requests-directory="/data/prover-compression/v3/requests" -fs-responses-directory="/data/prover-compression/v3/responses" - -[proof-aggregation.prover] -fs-requests-directory="/data/prover-aggregation/v3/requests" -fs-responses-directory="/data/prover-aggregation/v3/responses" +[prover.execution] +fs-requests-directory = "/data/prover/v3/execution/requests" +fs-responses-directory = "/data/prover/v3/execution/responses" +[prover.blob-compression] +fs-requests-directory = "/data/prover/v3/compression/requests" +fs-responses-directory = "/data/prover/v3/compression/responses" +[prover.proof-aggregation] +fs-requests-directory = "/data/prover/v3/aggregation/requests" +fs-responses-directory = "/data/prover/v3/aggregation/responses" [zk-traces] eth-api="http://traces-node-v2:8545" diff --git a/config/coordinator/coordinator-docker.config.toml b/config/coordinator/coordinator-docker.config.toml index 4848cc406..167d9914a 100644 --- a/config/coordinator/coordinator-docker.config.toml +++ b/config/coordinator/coordinator-docker.config.toml @@ -5,12 +5,30 @@ duplicated-logs-debounce-time="PT15S" eip4844-switch-l2-block-number=0 [prover] -fs-requests-directory="/data/prover-execution/v2/requests" -fs-responses-directory="/data/prover-execution/v2/responses" -fs-inprogress-request-writing-suffix=".inprogress_coordinator_writing" -fs-inprogress-proving-suffix-pattern=".*\\.inprogress\\.prover.*" -fs-polling-interval="PT1S" -fs-polling-timeout="PT10M" 
+fs-inprogress-request-writing-suffix = ".inprogress_coordinator_writing" +fs-inprogress-proving-suffix-pattern = ".*\\.inprogress\\.prover.*" +fs-polling-interval = "PT1S" +fs-polling-timeout = "PT10M" +[prover.execution] +fs-requests-directory = "/data/prover/v2/execution/requests" +fs-responses-directory = "/data/prover/v2/execution/responses" +[prover.blob-compression] +fs-requests-directory = "/data/prover/v2/compression/requests" +fs-responses-directory = "/data/prover/v2/compression/responses" +[prover.proof-aggregation] +fs-requests-directory = "/data/prover/v2/aggregation/requests" +fs-responses-directory = "/data/prover/v2/aggregation/responses" +#[prover.new] +#switch-block-number-inclusive=1000 +#[prover.new.execution] +#fs-requests-directory = "/data/prover/v3/execution/requests" +#fs-responses-directory = "/data/prover/v3/execution/responses" +#[prover.new.blob-compression] +#fs-requests-directory = "/data/prover/v3/compression/requests" +#fs-responses-directory = "/data/prover/v3/compression/responses" +#[prover.new.proof-aggregation] +#fs-requests-directory = "/data/prover/v3/aggregation/requests" +#fs-responses-directory = "/data/prover/v3/aggregation/responses" [blob-compression] blob-size-limit=102400 # 100KB @@ -18,13 +36,6 @@ handler-polling-interval="PT1S" # default batches limit is aggregation-proofs-limit -1 # batches-limit must be less than or equal to aggregation-proofs-limit-1 batches-limit=1 -[blob-compression.prover] -fs-requests-directory="/data/prover-compression/v2/requests" -fs-responses-directory="/data/prover-compression/v2/responses" -fs-inprogress-request-writing-suffix=".inprogress_coordinator_writing" -fs-inprogress-proving-suffix-pattern=".*\\.inprogress\\.prover.*" -fs-polling-interval="PT1S" -fs-polling-timeout="PT10M" [zk-traces] eth-api="http://traces-node:8545" @@ -150,14 +161,6 @@ aggregation-coordinator-polling-interval="PT2S" deadline-check-interval="PT8S" target-end-blocks=[] -[proof-aggregation.prover] 
-fs-requests-directory="/data/prover-aggregation/v2/requests" -fs-responses-directory="/data/prover-aggregation/v2/responses" -fs-inprogress-request-writing-suffix=".inprogress_coordinator_writing" -fs-inprogress-proving-suffix-pattern=".*\\.inprogress\\.prover.*" -fs-polling-interval="PT20S" -fs-polling-timeout="PT20M" - [finalization-signer] # Web3j/Web3signer type="Web3j" diff --git a/config/coordinator/coordinator-local-dev.config-traces-v2.overrides.toml b/config/coordinator/coordinator-local-dev.config-traces-v2.overrides.toml index e57009c38..97bb70fab 100644 --- a/config/coordinator/coordinator-local-dev.config-traces-v2.overrides.toml +++ b/config/coordinator/coordinator-local-dev.config-traces-v2.overrides.toml @@ -1,3 +1,14 @@ +[prover] +[prover.execution] +fs-requests-directory = "/data/prover/v3/execution/requests" +fs-responses-directory = "/data/prover/v3/execution/responses" +[prover.blob-compression] +fs-requests-directory = "/data/prover/v3/compression/requests" +fs-responses-directory = "/data/prover/v3/compression/responses" +[prover.proof-aggregation] +fs-requests-directory = "/data/prover/v3/aggregation/requests" +fs-responses-directory = "/data/prover/v3/aggregation/responses" + [zk-traces] eth-api="http://127.0.0.1:8745" diff --git a/config/coordinator/coordinator-local-dev.config.overrides.toml b/config/coordinator/coordinator-local-dev.config.overrides.toml index 0f1765905..9a0a301fe 100644 --- a/config/coordinator/coordinator-local-dev.config.overrides.toml +++ b/config/coordinator/coordinator-local-dev.config.overrides.toml @@ -11,17 +11,15 @@ endpoint="http://127.0.0.1:9000" endpoint="http://127.0.0.1:9000" [prover] -fs-requests-directory="tmp/local/prover-execution/v2/requests" -fs-responses-directory="tmp/local/prover-execution/v2/responses" - -[blob-compression] -[blob-compression.prover] -fs-requests-directory="tmp/local/prover-compression/v2/requests" -fs-responses-directory="tmp/local/prover-compression/v2/responses" - 
-[proof-aggregation.prover] -fs-requests-directory="tmp/local/prover-aggregation/v2/requests" -fs-responses-directory="tmp/local/prover-aggregation/v2/responses" +[prover.execution] +fs-requests-directory = "tmp/local/prover/v2/execution/requests" +fs-responses-directory = "tmp/local/prover/v2/execution/responses" +[prover.blob-compression] +fs-requests-directory = "tmp/local/prover/v2/compression/requests" +fs-responses-directory = "tmp/local/prover/v2/compression/responses" +[prover.proof-aggregation] +fs-requests-directory = "tmp/local/prover/v2/aggregation/requests" +fs-responses-directory = "tmp/local/prover/v2/aggregation/responses" [zk-traces] eth-api="http://127.0.0.1:8645" diff --git a/config/coordinator/log4j2-dev.xml b/config/coordinator/log4j2-dev.xml index 4dc7d5e19..48aa11611 100644 --- a/config/coordinator/log4j2-dev.xml +++ b/config/coordinator/log4j2-dev.xml @@ -96,15 +96,15 @@ - + - + - + diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/BlockchainClientHelper.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/BlockchainClientHelper.kt index eed4f2078..bd41c137d 100644 --- a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/BlockchainClientHelper.kt +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/BlockchainClientHelper.kt @@ -14,6 +14,9 @@ import net.consensys.linea.ethereum.gaspricing.FeesFetcher import net.consensys.linea.ethereum.gaspricing.WMAGasProvider import net.consensys.linea.httprest.client.VertxHttpRestClient import net.consensys.linea.web3j.SmartContractErrors +import net.consensys.zkevm.coordinator.app.config.L1Config +import net.consensys.zkevm.coordinator.app.config.L2Config +import net.consensys.zkevm.coordinator.app.config.SignerConfig import net.consensys.zkevm.coordinator.clients.smartcontract.LineaRollupSmartContractClient import net.consensys.zkevm.ethereum.crypto.Web3SignerRestClient import 
net.consensys.zkevm.ethereum.crypto.Web3SignerTxSignService diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorApp.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorApp.kt index 75dd759db..0adf224f8 100644 --- a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorApp.kt +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorApp.kt @@ -7,15 +7,15 @@ import io.vertx.core.json.jackson.DatabindCodec import io.vertx.micrometer.backends.BackendRegistries import io.vertx.sqlclient.SqlClient import net.consensys.linea.async.toSafeFuture -import net.consensys.linea.contract.Web3JL2MessageServiceLogsClient -import net.consensys.linea.contract.Web3JLogsClient import net.consensys.linea.jsonrpc.client.LoadBalancingJsonRpcClient import net.consensys.linea.jsonrpc.client.VertxHttpJsonRpcClientFactory import net.consensys.linea.metrics.micrometer.MicrometerMetricsFacade import net.consensys.linea.vertx.loadVertxConfig import net.consensys.linea.web3j.okHttpClientBuilder import net.consensys.zkevm.coordinator.api.Api -import net.consensys.zkevm.coordinator.clients.prover.FileBasedExecutionProverClient +import net.consensys.zkevm.coordinator.app.config.CoordinatorConfig +import net.consensys.zkevm.coordinator.app.config.DatabaseConfig +import net.consensys.zkevm.fileio.DirectoryCleaner import net.consensys.zkevm.persistence.dao.aggregation.AggregationsRepositoryImpl import net.consensys.zkevm.persistence.dao.aggregation.PostgresAggregationsDao import net.consensys.zkevm.persistence.dao.aggregation.RetryingPostgresAggregationsDao @@ -79,28 +79,6 @@ class CoordinatorApp(private val configs: CoordinatorConfig) { Async.defaultExecutorService() ) - private fun createExecutionProverClient(config: ProverConfig): FileBasedExecutionProverClient { - return FileBasedExecutionProverClient( - config = FileBasedExecutionProverClient.Config( - requestDirectory = 
config.fsRequestsDirectory, - responseDirectory = config.fsResponsesDirectory, - inprogressProvingSuffixPattern = config.fsInprogressProvingSuffixPattern, - pollingInterval = config.fsPollingInterval.toKotlinDuration(), - timeout = config.fsPollingTimeout.toKotlinDuration(), - tracesVersion = configs.traces.rawExecutionTracesVersion, - stateManagerVersion = configs.stateManager.version - ), - l2MessageServiceLogsClient = Web3JL2MessageServiceLogsClient( - logsClient = Web3JLogsClient(vertx, l2Web3jClient), - l2MessageServiceAddress = configs.l2.messageServiceAddress - ), - vertx = vertx, - l2Web3jClient = l2Web3jClient - ) - } - - private val proverClient: FileBasedExecutionProverClient = createExecutionProverClient(configs.prover) - private val persistenceRetryer = PersistenceRetryer( vertx = vertx, config = PersistenceRetryer.Config( @@ -164,7 +142,6 @@ class CoordinatorApp(private val configs: CoordinatorConfig) { vertx = vertx, l2Web3jClient = l2Web3jClient, httpJsonRpcClientFactory = httpJsonRpcClientFactory, - proverClientV2 = proverClient, batchesRepository = batchesRepository, blobsRepository = blobsRepository, aggregationsRepository = aggregationsRepository, @@ -175,16 +152,22 @@ class CoordinatorApp(private val configs: CoordinatorConfig) { private val requestFileCleanup = DirectoryCleaner( vertx = vertx, - directories = listOf( - configs.prover.fsRequestsDirectory, // Execution proof request directory - configs.blobCompression.prover.fsRequestsDirectory, // Compression proof request directory - configs.proofAggregation.prover.fsRequestsDirectory // Aggregation proof request directory + directories = listOfNotNull( + configs.proversConfig.proverA.execution.requestsDirectory, + configs.proversConfig.proverA.blobCompression.requestsDirectory, + configs.proversConfig.proverA.proofAggregation.requestsDirectory, + configs.proversConfig.proverB?.execution?.requestsDirectory, + configs.proversConfig.proverB?.blobCompression?.requestsDirectory, + 
configs.proversConfig.proverB?.proofAggregation?.requestsDirectory ), fileFilters = DirectoryCleaner.getSuffixFileFilters( - listOf( - configs.prover.fsInprogressRequestWritingSuffix, - configs.blobCompression.prover.fsInprogressRequestWritingSuffix, - configs.proofAggregation.prover.fsInprogressRequestWritingSuffix + listOfNotNull( + configs.proversConfig.proverA.execution.inprogressRequestWritingSuffix, + configs.proversConfig.proverA.blobCompression.inprogressRequestWritingSuffix, + configs.proversConfig.proverA.proofAggregation.inprogressRequestWritingSuffix, + configs.proversConfig.proverB?.execution?.inprogressRequestWritingSuffix, + configs.proversConfig.proverB?.blobCompression?.inprogressRequestWritingSuffix, + configs.proversConfig.proverB?.proofAggregation?.inprogressRequestWritingSuffix ) ) + DirectoryCleaner.JSON_FILE_FILTER ) @@ -203,20 +186,25 @@ class CoordinatorApp(private val configs: CoordinatorConfig) { } fun stop(): Int { - SafeFuture.allOf( - l1App.stop(), - SafeFuture.fromRunnable { l2Web3jClient.shutdown() }, - api.stop().toSafeFuture() - ).thenApply { - LoadBalancingJsonRpcClient.stop() - }.thenCompose { - requestFileCleanup.cleanup() - }.thenCompose { - vertx.close().toSafeFuture().thenApply { log.info("vertx Stopped") } - }.thenApply { - log.info("CoordinatorApp Stopped") - }.get() - return 0 + return kotlin.runCatching { + SafeFuture.allOf( + l1App.stop(), + SafeFuture.fromRunnable { l2Web3jClient.shutdown() }, + api.stop().toSafeFuture() + ).thenApply { + LoadBalancingJsonRpcClient.stop() + }.thenCompose { + requestFileCleanup.cleanup() + }.thenCompose { + vertx.close().toSafeFuture().thenApply { log.info("vertx Stopped") } + }.thenApply { + log.info("CoordinatorApp Stopped") + }.get() + 0 + }.recover { e -> + log.error("CoordinatorApp Stopped with error: errorMessage={}", e.message, e) + 1 + }.getOrThrow() } private fun initDb(dbConfig: DatabaseConfig): SqlClient { diff --git 
a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorAppCli.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorAppCli.kt index b8a8d2d99..3494fb2c5 100644 --- a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorAppCli.kt +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorAppCli.kt @@ -10,6 +10,12 @@ import com.sksamuel.hoplite.ConfigLoaderBuilder import com.sksamuel.hoplite.addFileSource import net.consensys.linea.traces.TracesCountersV1 import net.consensys.linea.traces.TracesCountersV2 +import net.consensys.zkevm.coordinator.app.config.CoordinatorConfig +import net.consensys.zkevm.coordinator.app.config.CoordinatorConfigTomlDto +import net.consensys.zkevm.coordinator.app.config.GasPriceCapTimeOfDayMultipliersConfig +import net.consensys.zkevm.coordinator.app.config.SmartContractErrorCodesConfig +import net.consensys.zkevm.coordinator.app.config.TracesLimitsV1ConfigFile +import net.consensys.zkevm.coordinator.app.config.TracesLimitsV2ConfigFile import org.apache.logging.log4j.LogManager import org.apache.logging.log4j.Logger import picocli.CommandLine @@ -175,7 +181,7 @@ internal constructor(private val errorWriter: PrintWriter, private val startActi val gasPriceCapTimeOfDayMultipliers = loadConfigsOrError(listOf(gasPriceCapTimeOfDayMultipliersFile)) - val configs = loadConfigsOrError(coordinatorConfigFiles) + val configs = loadConfigsOrError(coordinatorConfigFiles) if (tracesLimitsV1Configs is Err) { hasConfigError = true @@ -223,7 +229,7 @@ internal constructor(private val errorWriter: PrintWriter, private val startActi return if (hasConfigError) { null } else { - configs.get()?.let { config: CoordinatorConfig -> + configs.get()?.let { config: CoordinatorConfigTomlDto -> config.copy( conflation = config.conflation.copy( _tracesLimitsV1 = tracesLimitsV1Configs?.get()?.tracesLimits?.let { TracesCountersV1(it) }, @@ -235,7 +241,7 @@ internal 
constructor(private val errorWriter: PrintWriter, private val startActi timeOfDayMultipliers = gasPriceCapTimeOfDayMultipliers.get()?.gasPriceCapTimeOfDayMultipliers ) ) - ) + ).reified() } } } diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorAppMain.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorAppMain.kt index fa08741d3..348e895aa 100644 --- a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorAppMain.kt +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorAppMain.kt @@ -1,5 +1,6 @@ package net.consensys.zkevm.coordinator.app +import net.consensys.zkevm.coordinator.app.config.CoordinatorConfig import org.apache.logging.log4j.LogManager import org.apache.logging.log4j.core.LoggerContext import org.apache.logging.log4j.core.config.Configurator diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/GasPriceUpdaterApp.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/GasPriceUpdaterApp.kt index 5a1998d77..658856349 100644 --- a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/GasPriceUpdaterApp.kt +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/GasPriceUpdaterApp.kt @@ -17,6 +17,7 @@ import net.consensys.linea.jsonrpc.client.VertxHttpJsonRpcClientFactory import net.consensys.linea.web3j.Web3jBlobExtended import net.consensys.toKWeiUInt import net.consensys.zkevm.LongRunningService +import net.consensys.zkevm.coordinator.app.config.DynamicGasPriceServiceConfig import org.apache.logging.log4j.LogManager import org.web3j.protocol.Web3j import tech.pegasys.teku.infrastructure.async.SafeFuture diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/L1DependentApp.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/L1DependentApp.kt index d7f226a8d..d6ec0a054 100644 --- 
a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/L1DependentApp.kt +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/L1DependentApp.kt @@ -37,6 +37,8 @@ import net.consensys.linea.web3j.SmartContractErrors import net.consensys.linea.web3j.Web3jBlobExtended import net.consensys.linea.web3j.okHttpClientBuilder import net.consensys.zkevm.LongRunningService +import net.consensys.zkevm.coordinator.app.config.CoordinatorConfig +import net.consensys.zkevm.coordinator.app.config.StateManagerClientConfig import net.consensys.zkevm.coordinator.blockcreation.BatchesRepoBasedLastProvenBlockNumberProvider import net.consensys.zkevm.coordinator.blockcreation.BlockCreationMonitor import net.consensys.zkevm.coordinator.blockcreation.GethCliqueSafeBlockProvider @@ -44,14 +46,13 @@ import net.consensys.zkevm.coordinator.blockcreation.TracesConflationClientV2Ada import net.consensys.zkevm.coordinator.blockcreation.TracesCountersClientV2Adapter import net.consensys.zkevm.coordinator.blockcreation.TracesCountersV1WatcherClient import net.consensys.zkevm.coordinator.blockcreation.TracesFilesManager +import net.consensys.zkevm.coordinator.clients.ExecutionProverClientV2 import net.consensys.zkevm.coordinator.clients.ShomeiClient import net.consensys.zkevm.coordinator.clients.TracesGeneratorJsonRpcClientV1 import net.consensys.zkevm.coordinator.clients.TracesGeneratorJsonRpcClientV2 import net.consensys.zkevm.coordinator.clients.Type2StateManagerClient import net.consensys.zkevm.coordinator.clients.Type2StateManagerJsonRpcClient -import net.consensys.zkevm.coordinator.clients.prover.FileBasedBlobCompressionProverClient -import net.consensys.zkevm.coordinator.clients.prover.FileBasedExecutionProverClient -import net.consensys.zkevm.coordinator.clients.prover.FileBasedProofAggregationClient +import net.consensys.zkevm.coordinator.clients.prover.ProverClientFactory import 
net.consensys.zkevm.coordinator.clients.smartcontract.LineaRollupSmartContractClient import net.consensys.zkevm.coordinator.clients.smartcontract.LineaRollupSmartContractClientReadOnly import net.consensys.zkevm.domain.BlocksConflation @@ -111,7 +112,6 @@ class L1DependentApp( private val vertx: Vertx, private val l2Web3jClient: Web3j, private val httpJsonRpcClientFactory: VertxHttpJsonRpcClientFactory, - proverClientV2: FileBasedExecutionProverClient, private val batchesRepository: BatchesRepository, private val blobsRepository: BlobsRepository, private val aggregationsRepository: AggregationsRepository, @@ -142,7 +142,6 @@ class L1DependentApp( l2Web3jClient, smartContractErrors ) - private val l1Web3jClient = Web3j.build( HttpService( configs.l1.rpcEndpoint.toString(), @@ -151,9 +150,7 @@ class L1DependentApp( 1000, Async.defaultExecutorService() ) - private val l1Web3jService = Web3jBlobExtended(HttpService(configs.l1.ethFeeHistoryEndpoint.toString())) - private val l2ZkTracesWeb3jClient: Web3j = Web3j.build( HttpService(configs.zkTraces.ethApi.toString()), @@ -161,13 +158,19 @@ class L1DependentApp( Async.defaultExecutorService() ) - private val lineaChainId = l1Web3jClient.ethChainId().send().chainId.toLong() + private val l1ChainId = l1Web3jClient.ethChainId().send().chainId.toLong() private val l2MessageServiceLogsClient = Web3JL2MessageServiceLogsClient( logsClient = Web3JLogsClient(vertx, l2Web3jClient), l2MessageServiceAddress = configs.l2.messageServiceAddress ) + private val proverClientFactory = ProverClientFactory( + vertx = vertx, + config = configs.proversConfig, + metricsFacade = metricsFacade + ) + private val l2ExtendedWeb3j = ExtendedWeb3JImpl(l2ZkTracesWeb3jClient) private val finalizationTransactionManager = createTransactionManager( @@ -431,7 +434,7 @@ class L1DependentApp( // dynamic gas pricing is disabled and will act as a fallback gas provider // if L1 dynamic gas pricing is enabled val primaryOrFallbackGasProvider = WMAGasProvider( - 
chainId = lineaChainId, + chainId = l1ChainId, feesFetcher = feesFetcher, priorityFeeCalculator = l1DataSubmissionPriorityFeeCalculator, config = WMAGasProvider.Config( @@ -460,18 +463,6 @@ class L1DependentApp( ) private val blobCompressionProofCoordinator = run { - val blobCompressionProverClient = FileBasedBlobCompressionProverClient( - config = FileBasedBlobCompressionProverClient.Config( - requestFileDirectory = configs.blobCompression.prover.fsRequestsDirectory, - responseFileDirectory = configs.blobCompression.prover.fsResponsesDirectory, - inprogressProvingSuffixPattern = configs.blobCompression.prover.fsInprogressProvingSuffixPattern, - inprogressRequestFileSuffix = configs.blobCompression.prover.fsInprogressRequestWritingSuffix, - pollingInterval = configs.blobCompression.prover.fsPollingInterval.toKotlinDuration(), - timeout = configs.blobCompression.prover.fsPollingTimeout.toKotlinDuration() - ), - vertx = vertx - ) - val maxProvenBlobCache = run { val highestProvenBlobTracker = HighestProvenBlobTracker(lastProcessedBlockNumber) metricsFacade.createGauge( @@ -496,7 +487,7 @@ class L1DependentApp( val blobCompressionProofCoordinator = BlobCompressionProofCoordinator( vertx = vertx, blobsRepository = blobsRepository, - blobCompressionProverClient = blobCompressionProverClient, + blobCompressionProverClient = proverClientFactory.blobCompressionProverClient(), rollingBlobShnarfCalculator = RollingBlobShnarfCalculator( blobShnarfCalculator = GoBackedBlobShnarfCalculator(blobShnarfCalculatorVersion), blobsRepository = blobsRepository, @@ -550,17 +541,6 @@ class L1DependentApp( } private val proofAggregationCoordinatorService: LongRunningService = run { - val proofAggregationClient = FileBasedProofAggregationClient( - vertx = vertx, - config = FileBasedProofAggregationClient.Config( - requestFileDirectory = configs.proofAggregation.prover.fsRequestsDirectory, - responseFileDirectory = configs.proofAggregation.prover.fsResponsesDirectory, - 
responseFilePollingInterval = configs.proofAggregation.prover.fsPollingInterval.toKotlinDuration(), - responseFileMonitorTimeout = configs.proofAggregation.prover.fsPollingTimeout.toKotlinDuration(), - inprogressRequestFileSuffix = configs.proofAggregation.prover.fsInprogressRequestWritingSuffix, - proverInProgressSuffixPattern = configs.proofAggregation.prover.fsInprogressProvingSuffixPattern - ) - ) // it needs it's own client because internally set the blockNumber when making queries. // it does not make any transaction val messageService = instantiateL2MessageServiceContractClient( @@ -610,7 +590,7 @@ class L1DependentApp( startBlockNumberInclusive = lastFinalizedBlock + 1u, aggregationsRepository = aggregationsRepository, consecutiveProvenBlobsProvider = maxBlobEndBlockNumberTracker, - proofAggregationClient = proofAggregationClient, + proofAggregationClient = proverClientFactory.proofAggregationProverClient(), l2web3jClient = l2Web3jClient, l2MessageServiceClient = l2MessageServiceClient, aggregationDeadlineDelay = configs.conflation.conflationDeadlineLastBlockConfirmationDelay.toKotlinDuration(), @@ -629,7 +609,7 @@ class L1DependentApp( // dynamic gas pricing is disabled and will act as a fallback gas provider // if L1 dynamic gas pricing is enabled val primaryOrFallbackGasProvider = WMAGasProvider( - chainId = lineaChainId, + chainId = l1ChainId, feesFetcher = feesFetcher, priorityFeeCalculator = l1FinalizationPriorityFeeCalculator, config = WMAGasProvider.Config( @@ -786,11 +766,17 @@ class L1DependentApp( BatchProofHandlerImpl(batchesRepository)::acceptNewBatch ) ) + val executionProverClient: ExecutionProverClientV2 = proverClientFactory.executionProverClient( + tracesVersion = configs.traces.rawExecutionTracesVersion, + stateManagerVersion = configs.stateManager.version, + l2MessageServiceLogsClient = l2MessageServiceLogsClient, + l2Web3jClient = l2Web3jClient + ) val proofGeneratingConflationHandlerImpl = ProofGeneratingConflationHandlerImpl( 
tracesProductionCoordinator = TracesConflationCoordinatorImpl(tracesConflationClient, zkStateClient), zkProofProductionCoordinator = ZkProofCreationCoordinatorImpl( - executionProverClient = proverClientV2 + executionProverClient = executionProverClient ), batchProofHandler = batchProofHandler, vertx = vertx, diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/L1toL2MessageAnchoringApp.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/L1toL2MessageAnchoringApp.kt index dcb3b2141..0bf004be2 100644 --- a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/L1toL2MessageAnchoringApp.kt +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/L1toL2MessageAnchoringApp.kt @@ -7,6 +7,10 @@ import net.consensys.linea.contract.L2MessageService import net.consensys.linea.contract.l1.Web3JLineaRollupSmartContractClient import net.consensys.linea.web3j.SmartContractErrors import net.consensys.zkevm.LongRunningService +import net.consensys.zkevm.coordinator.app.config.L1Config +import net.consensys.zkevm.coordinator.app.config.L2Config +import net.consensys.zkevm.coordinator.app.config.MessageAnchoringServiceConfig +import net.consensys.zkevm.coordinator.app.config.SignerConfig import net.consensys.zkevm.ethereum.coordination.messageanchoring.L1EventQuerier import net.consensys.zkevm.ethereum.coordination.messageanchoring.L1EventQuerierImpl import net.consensys.zkevm.ethereum.coordination.messageanchoring.L2MessageAnchorer diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorConfig.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/config/CoordinatorConfig.kt similarity index 89% rename from coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorConfig.kt rename to coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/config/CoordinatorConfig.kt index 9ceb08ece..4400e1fd8 100644 --- 
a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorConfig.kt +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/config/CoordinatorConfig.kt @@ -1,4 +1,4 @@ -package net.consensys.zkevm.coordinator.app +package net.consensys.zkevm.coordinator.app.config import com.sksamuel.hoplite.ConfigAlias import com.sksamuel.hoplite.Masked @@ -17,6 +17,7 @@ import net.consensys.linea.traces.TracesCountersV2 import net.consensys.linea.traces.TracingModuleV1 import net.consensys.linea.traces.TracingModuleV2 import net.consensys.linea.web3j.SmartContractErrors +import net.consensys.zkevm.coordinator.clients.prover.ProversConfig import net.consensys.zkevm.coordinator.clients.smartcontract.BlockParameter import java.math.BigInteger import java.net.URL @@ -75,15 +76,6 @@ data class ZkTraces( val newBlockPollingInterval: Duration ) -data class ProverConfig( - val fsRequestsDirectory: Path, - val fsResponsesDirectory: Path, - val fsPollingInterval: Duration, - val fsPollingTimeout: Duration, - val fsInprogressProvingSuffixPattern: String, - val fsInprogressRequestWritingSuffix: String -) - interface RetryConfig { val maxRetries: Int? val timeout: Duration? @@ -138,8 +130,7 @@ data class BlobCompressionConfig( val blobSizeLimit: Int, @ConfigAlias("batches-limit") private val _batchesLimit: Int? 
= null, - val handlerPollingInterval: Duration, - val prover: ProverConfig + val handlerPollingInterval: Duration ) { init { _batchesLimit?.also { @@ -156,12 +147,12 @@ data class AggregationConfig( val aggregationDeadline: Duration, val aggregationCoordinatorPollingInterval: Duration, val deadlineCheckInterval: Duration, - val prover: ProverConfig, val aggregationSizeMultipleOf: Int = 1, @ConfigAlias("target-end-blocks") private val _targetEndBlocks: List = emptyList() ) { val targetEndBlocks: List = _targetEndBlocks.map { it.toULong() } + init { require(aggregationSizeMultipleOf > 0) { "aggregationSizeMultipleOf should be greater than 0" } } @@ -557,11 +548,66 @@ data class Type2StateProofProviderConfig( data class TracesLimitsV1ConfigFile(val tracesLimits: Map) data class TracesLimitsV2ConfigFile(val tracesLimits: Map) +// +// CoordinatorConfigTomlDto class to parse from toml +// CoordinatorConfig class with reified configs +// separation between Toml representation and domain representation +// otherwise it's hard to test the configuration is loaded properly +data class CoordinatorConfigTomlDto( + val l2InclusiveBlockNumberToStopAndFlushAggregation: ULong? 
= null, + val zkTraces: ZkTraces, + val blobCompression: BlobCompressionConfig, + val proofAggregation: AggregationConfig, + val traces: TracesConfig, + val type2StateProofProvider: Type2StateProofProviderConfig, + val l1: L1Config, + val l2: L2Config, + val finalizationSigner: SignerConfig, + val dataSubmissionSigner: SignerConfig, + val blobSubmission: BlobSubmissionConfig, + val aggregationFinalization: AggregationFinalizationConfig, + val database: DatabaseConfig, + val persistenceRetry: PersistenceRetryConfig, + val stateManager: StateManagerClientConfig, + val conflation: ConflationConfig, + val api: ApiConfig, + val l2Signer: SignerConfig, + val messageAnchoringService: MessageAnchoringServiceConfig, + val dynamicGasPriceService: DynamicGasPriceServiceConfig, + val l1DynamicGasPriceCapService: L1DynamicGasPriceCapServiceConfig, + val testL1Disabled: Boolean = false, + val prover: ProverConfigTomlDto +) { + fun reified(): CoordinatorConfig = CoordinatorConfig( + l2InclusiveBlockNumberToStopAndFlushAggregation = l2InclusiveBlockNumberToStopAndFlushAggregation, + zkTraces = zkTraces, + blobCompression = blobCompression, + proofAggregation = proofAggregation, + traces = traces, + type2StateProofProvider = type2StateProofProvider, + l1 = l1, + l2 = l2, + finalizationSigner = finalizationSigner, + dataSubmissionSigner = dataSubmissionSigner, + blobSubmission = blobSubmission, + aggregationFinalization = aggregationFinalization, + database = database, + persistenceRetry = persistenceRetry, + stateManager = stateManager, + conflation = conflation, + api = api, + l2Signer = l2Signer, + messageAnchoringService = messageAnchoringService, + dynamicGasPriceService = dynamicGasPriceService, + l1DynamicGasPriceCapService = l1DynamicGasPriceCapService, + testL1Disabled = testL1Disabled, + proversConfig = prover.reified() + ) +} + data class CoordinatorConfig( val l2InclusiveBlockNumberToStopAndFlushAggregation: ULong? 
= null, - val duplicatedLogsDebounceTime: Duration = Duration.ofSeconds(10), val zkTraces: ZkTraces, - val prover: ProverConfig, val blobCompression: BlobCompressionConfig, val proofAggregation: AggregationConfig, val traces: TracesConfig, @@ -581,7 +627,8 @@ data class CoordinatorConfig( val messageAnchoringService: MessageAnchoringServiceConfig, val dynamicGasPriceService: DynamicGasPriceServiceConfig, val l1DynamicGasPriceCapService: L1DynamicGasPriceCapServiceConfig, - val testL1Disabled: Boolean = false + val testL1Disabled: Boolean = false, + val proversConfig: ProversConfig ) { init { if (l2InclusiveBlockNumberToStopAndFlushAggregation != null) { diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/config/ProverConfig.kt b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/config/ProverConfig.kt new file mode 100644 index 000000000..289b0b325 --- /dev/null +++ b/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/config/ProverConfig.kt @@ -0,0 +1,91 @@ +package net.consensys.zkevm.coordinator.app.config + +import net.consensys.zkevm.coordinator.clients.prover.FileBasedProverConfig +import net.consensys.zkevm.coordinator.clients.prover.ProverConfig +import net.consensys.zkevm.coordinator.clients.prover.ProversConfig +import java.nio.file.Path +import java.time.Duration +import kotlin.time.Duration.Companion.hours +import kotlin.time.Duration.Companion.seconds +import kotlin.time.toJavaDuration +import kotlin.time.toKotlinDuration + +data class ProverConfigTomlDto( + val switchBlockNumberInclusive: Long? = null, + var fsInprogressRequestWritingSuffix: String? = null, + var fsInprogressProvingSuffixPattern: String? = null, + var fsPollingInterval: Duration? = null, + var fsPollingTimeout: Duration? = null, + val execution: FileSystemTomlDto, + val blobCompression: FileSystemTomlDto, + val proofAggregation: FileSystemTomlDto, + val new: ProverConfigTomlDto? 
= null +) { + private fun asProverConfig(): ProverConfig { + return ProverConfig( + execution = execution.toDomain(), + blobCompression = blobCompression.toDomain(), + proofAggregation = proofAggregation.toDomain() + ) + } + + fun reified(): ProversConfig { + fsInprogressRequestWritingSuffix = fsInprogressRequestWritingSuffix ?: ".inprogress_coordinator_writing" + fsInprogressProvingSuffixPattern = fsInprogressProvingSuffixPattern ?: "\\.inprogress\\.prover.*" + fsPollingInterval = fsPollingInterval ?: 1.seconds.toJavaDuration() + fsPollingTimeout = fsPollingTimeout ?: 3.hours.toJavaDuration() + execution.reifyWithRootDefaults(this) + blobCompression.reifyWithRootDefaults(this) + proofAggregation.reifyWithRootDefaults(this) + + if (new != null) { + if (new.switchBlockNumberInclusive == null) { + throw IllegalArgumentException("switchBlockNumberInclusive must be set when new prover is configured") + } + new.fsInprogressProvingSuffixPattern = new.fsInprogressProvingSuffixPattern + ?: fsInprogressProvingSuffixPattern + new.fsInprogressRequestWritingSuffix = new.fsInprogressRequestWritingSuffix + ?: fsInprogressRequestWritingSuffix + new.fsPollingInterval = new.fsPollingInterval ?: fsPollingInterval + new.fsPollingTimeout = new.fsPollingTimeout ?: fsPollingTimeout + new.execution.reifyWithRootDefaults(new) + new.blobCompression.reifyWithRootDefaults(new) + new.proofAggregation.reifyWithRootDefaults(new) + } + + return ProversConfig( + proverA = this.asProverConfig(), + switchBlockNumberInclusive = new?.switchBlockNumberInclusive?.toULong(), + proverB = new?.asProverConfig() + ) + } +} + +data class FileSystemTomlDto( + internal val fsRequestsDirectory: Path, + internal val fsResponsesDirectory: Path, + internal var fsInprogressRequestWritingSuffix: String?, + internal var fsInprogressProvingSuffixPattern: String?, + internal var fsPollingInterval: Duration?, + internal var fsPollingTimeout: Duration? 
+) { + internal fun reifyWithRootDefaults(rootConfig: ProverConfigTomlDto) { + fsInprogressRequestWritingSuffix = fsInprogressRequestWritingSuffix + ?: rootConfig.fsInprogressRequestWritingSuffix + fsInprogressProvingSuffixPattern = fsInprogressProvingSuffixPattern + ?: rootConfig.fsInprogressProvingSuffixPattern + fsPollingInterval = fsPollingInterval ?: rootConfig.fsPollingInterval + fsPollingTimeout = fsPollingTimeout ?: rootConfig.fsPollingTimeout + } + + fun toDomain(): FileBasedProverConfig { + return FileBasedProverConfig( + requestsDirectory = fsRequestsDirectory, + responsesDirectory = fsResponsesDirectory, + inprogressRequestWritingSuffix = fsInprogressRequestWritingSuffix!!, + inprogressProvingSuffixPattern = fsInprogressProvingSuffixPattern!!, + pollingInterval = fsPollingInterval!!.toKotlinDuration(), + pollingTimeout = fsPollingTimeout!!.toKotlinDuration() + ) + } +} diff --git a/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorConfigTest.kt b/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/config/CoordinatorConfigTest.kt similarity index 93% rename from coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorConfigTest.kt rename to coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/config/CoordinatorConfigTest.kt index c173b6435..ca65a06d1 100644 --- a/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/CoordinatorConfigTest.kt +++ b/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/config/CoordinatorConfigTest.kt @@ -1,4 +1,4 @@ -package net.consensys.zkevm.coordinator.app +package net.consensys.zkevm.coordinator.app.config import com.github.michaelbull.result.get import com.github.michaelbull.result.getError @@ -10,6 +10,10 @@ import net.consensys.linea.traces.TracesCountersV2 import net.consensys.linea.traces.TracingModuleV1 import net.consensys.linea.traces.TracingModuleV2 import net.consensys.linea.web3j.SmartContractErrors 
+import net.consensys.zkevm.coordinator.app.CoordinatorAppCli +import net.consensys.zkevm.coordinator.clients.prover.FileBasedProverConfig +import net.consensys.zkevm.coordinator.clients.prover.ProverConfig +import net.consensys.zkevm.coordinator.clients.prover.ProversConfig import net.consensys.zkevm.coordinator.clients.smartcontract.BlockParameter import org.assertj.core.api.Assertions.assertThat import org.junit.jupiter.api.Assertions.assertEquals @@ -22,6 +26,7 @@ import java.math.BigInteger import java.net.URI import java.nio.file.Path import java.time.Duration +import kotlin.time.Duration.Companion.minutes import kotlin.time.Duration.Companion.seconds import kotlin.time.toJavaDuration @@ -178,42 +183,48 @@ class CoordinatorConfigTest { Duration.parse("PT1S") ) - private val proverConfig = ProverConfig( - fsRequestsDirectory = Path.of("/data/prover-execution/v2/requests"), - fsResponsesDirectory = Path.of("/data/prover-execution/v2/responses"), - fsPollingInterval = Duration.parse("PT1S"), - fsPollingTimeout = Duration.parse("PT10M"), - fsInprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", - fsInprogressRequestWritingSuffix = ".inprogress_coordinator_writing" + private val proversConfig = ProversConfig( + proverA = ProverConfig( + execution = FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover/v2/execution/requests"), + responsesDirectory = Path.of("/data/prover/v2/execution/responses"), + pollingInterval = 1.seconds, + pollingTimeout = 10.minutes, + inprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", + inprogressRequestWritingSuffix = ".inprogress_coordinator_writing" + ), + blobCompression = FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover/v2/compression/requests"), + responsesDirectory = Path.of("/data/prover/v2/compression/responses"), + pollingInterval = 1.seconds, + pollingTimeout = 10.minutes, + inprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", + inprogressRequestWritingSuffix = 
".inprogress_coordinator_writing" + ), + proofAggregation = FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover/v2/aggregation/requests"), + responsesDirectory = Path.of("/data/prover/v2/aggregation/responses"), + pollingInterval = 1.seconds, + pollingTimeout = 10.minutes, + inprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", + inprogressRequestWritingSuffix = ".inprogress_coordinator_writing" + ) + ), + switchBlockNumberInclusive = null, + proverB = null ) private val blobCompressionConfig = BlobCompressionConfig( blobSizeLimit = 100 * 1024, handlerPollingInterval = Duration.parse("PT1S"), - _batchesLimit = 1, - prover = ProverConfig( - fsRequestsDirectory = Path.of("/data/prover-compression/v2/requests"), - fsResponsesDirectory = Path.of("/data/prover-compression/v2/responses"), - fsPollingInterval = Duration.parse("PT1S"), - fsPollingTimeout = Duration.parse("PT10M"), - fsInprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", - fsInprogressRequestWritingSuffix = ".inprogress_coordinator_writing" - ) + _batchesLimit = 1 ) private val aggregationConfig = AggregationConfig( aggregationProofsLimit = 3, aggregationDeadline = Duration.parse("PT1M"), aggregationCoordinatorPollingInterval = Duration.parse("PT2S"), - deadlineCheckInterval = Duration.parse("PT8S"), - prover = ProverConfig( - fsRequestsDirectory = Path.of("/data/prover-aggregation/v2/requests"), - fsResponsesDirectory = Path.of("/data/prover-aggregation/v2/responses"), - fsPollingInterval = Duration.parse("PT20S"), - fsPollingTimeout = Duration.parse("PT20M"), - fsInprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", - fsInprogressRequestWritingSuffix = ".inprogress_coordinator_writing" - ) + deadlineCheckInterval = Duration.parse("PT8S") ) private val tracesConfig = TracesConfig( @@ -619,9 +630,7 @@ class CoordinatorConfigTest { ) private val coordinatorConfig = CoordinatorConfig( - duplicatedLogsDebounceTime = Duration.parse("PT15S"), zkTraces = zkTracesConfig, 
- prover = proverConfig, blobCompression = blobCompressionConfig, proofAggregation = aggregationConfig, traces = tracesConfig, @@ -640,7 +649,8 @@ class CoordinatorConfigTest { l2Signer = l2SignerConfig, messageAnchoringService = messageAnchoringServiceConfig, dynamicGasPriceService = dynamicGasPriceServiceConfig, - l1DynamicGasPriceCapService = l1DynamicGasPriceCapServiceConfig + l1DynamicGasPriceCapService = l1DynamicGasPriceCapServiceConfig, + proversConfig = proversConfig ) } @@ -664,11 +674,11 @@ class CoordinatorConfigTest { CoordinatorAppCli.loadConfigsOrError( listOf(File("../../config/common/traces-limits-v2.toml")) ) - CoordinatorAppCli.loadConfigsOrError( + CoordinatorAppCli.loadConfigsOrError( listOf(File("../../config/coordinator/coordinator-docker.config.toml")) ) .onFailure { error: String -> fail(error) } - .onSuccess { config: CoordinatorConfig -> + .onSuccess { config: CoordinatorConfigTomlDto -> val configs = config.copy( conflation = config.conflation.copy( _tracesLimitsV1 = tracesLimitsConfigs.get()?.tracesLimits?.let { TracesCountersV1(it) }, @@ -681,7 +691,7 @@ class CoordinatorConfigTest { ) ) ) - assertEquals(coordinatorConfig, configs) + assertEquals(coordinatorConfig, configs.reified()) assertEquals(coordinatorConfig.l1.rpcEndpoint, coordinatorConfig.l1.ethFeeHistoryEndpoint) } } @@ -705,7 +715,7 @@ class CoordinatorConfigTest { listOf(File("../../config/common/traces-limits-v2.toml")) ) - CoordinatorAppCli.loadConfigsOrError( + CoordinatorAppCli.loadConfigsOrError( listOf( File("../../config/coordinator/coordinator-docker.config.toml"), File("../../config/coordinator/coordinator-docker-web3signer-override.config.toml") @@ -733,7 +743,7 @@ class CoordinatorConfigTest { l2Signer = l2SignerConfig.copy(type = SignerConfig.Type.Web3Signer) ) - assertEquals(expectedConfig, configs) + assertEquals(expectedConfig, configs.reified()) } } diff --git a/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/config/ProverConfigTest.kt 
b/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/config/ProverConfigTest.kt new file mode 100644 index 000000000..e35117553 --- /dev/null +++ b/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/config/ProverConfigTest.kt @@ -0,0 +1,149 @@ +package net.consensys.zkevm.coordinator.app.config + +import com.sksamuel.hoplite.ConfigLoaderBuilder +import com.sksamuel.hoplite.toml.TomlPropertySource +import net.consensys.zkevm.coordinator.clients.prover.FileBasedProverConfig +import net.consensys.zkevm.coordinator.clients.prover.ProverConfig +import net.consensys.zkevm.coordinator.clients.prover.ProversConfig +import org.assertj.core.api.Assertions.assertThat +import org.junit.jupiter.api.Test +import java.nio.file.Path +import kotlin.time.Duration.Companion.minutes +import kotlin.time.Duration.Companion.seconds + +class ProverConfigTest { + data class Config( + val prover: ProverConfigTomlDto + ) + + private fun parseConfig(toml: String): ProversConfig { + return ConfigLoaderBuilder + .default() + .addSource(TomlPropertySource(toml)) + .build() + .loadConfigOrThrow() + .let { it.prover.reified() } + } + + val proverAConfigToml = """ + [prover] + fs-inprogress-request-writing-suffix = ".inprogress_coordinator_writing" + fs-inprogress-proving-suffix-pattern = "\\.inprogress\\.prover.*" + fs-polling-interval = "PT10S" + fs-polling-timeout = "PT10M" + [prover.execution] + fs-requests-directory = "/data/prover/execution/requests" + fs-responses-directory = "/data/prover/execution/responses" + fs-inprogress-request-writing-suffix = ".OVERRIDE_inprogress_coordinator_writing" + fs-inprogress-proving-suffix-pattern = "OVERRIDE_\\.inprogress\\.prover.*" + [prover.blob-compression] + fs-requests-directory = "/data/prover/compression/requests" + fs-responses-directory = "/data/prover/compression/responses" + fs-polling-interval = "PT20S" + fs-polling-timeout = "PT20M" + [prover.proof-aggregation] + fs-requests-directory = 
 "/data/prover/aggregation/requests" + fs-responses-directory = "/data/prover/aggregation/responses" + """.trimIndent() + + @Test + fun `should load configs with single prover and overrides`() { + val config = parseConfig(proverAConfigToml) + assertThat(config.switchBlockNumberInclusive).isNull() + assertProverAConfig(config.proverA) + assertThat(config.proverB).isNull() + } + + fun assertProverAConfig(config: ProverConfig) { + assertThat(config.execution).isEqualTo( + FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover/execution/requests"), + responsesDirectory = Path.of("/data/prover/execution/responses"), + inprogressRequestWritingSuffix = ".OVERRIDE_inprogress_coordinator_writing", + inprogressProvingSuffixPattern = "OVERRIDE_\\.inprogress\\.prover.*", + pollingInterval = 10.seconds, + pollingTimeout = 10.minutes + ) + ) + assertThat(config.blobCompression).isEqualTo( + FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover/compression/requests"), + responsesDirectory = Path.of("/data/prover/compression/responses"), + inprogressRequestWritingSuffix = ".inprogress_coordinator_writing", + inprogressProvingSuffixPattern = "\\.inprogress\\.prover.*", + pollingInterval = 20.seconds, + pollingTimeout = 20.minutes + ) + ) + assertThat(config.proofAggregation).isEqualTo( + FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover/aggregation/requests"), + responsesDirectory = Path.of("/data/prover/aggregation/responses"), + inprogressRequestWritingSuffix = ".inprogress_coordinator_writing", + inprogressProvingSuffixPattern = "\\.inprogress\\.prover.*", + pollingInterval = 10.seconds, + pollingTimeout = 10.minutes + ) + ) + } + + @Test + fun `should load configs with 2 provers and overrides`() { + val toml = """ + $proverAConfigToml + [prover.new] + switch-block-number-inclusive=200 + fs-inprogress-request-writing-suffix = ".NEW_OVERRIDE_inprogress_coordinator_writing" + fs-polling-timeout = "PT5M" + [prover.new.execution] + 
fs-requests-directory = "/data/prover-new/execution/requests" + fs-responses-directory = "/data/prover-new/execution/responses" + fs-inprogress-request-writing-suffix = ".NEW_OVERRIDE_2_inprogress_coordinator_writing" + fs-inprogress-proving-suffix-pattern = "NEW_OVERRIDE_2\\.inprogress\\.prover.*" + [prover.new.blob-compression] + fs-requests-directory = "/data/prover-new/compression/requests" + fs-responses-directory = "/data/prover-new/compression/responses" + fs-polling-interval = "PT12S" + fs-polling-timeout = "PT12M" + [prover.new.proof-aggregation] + fs-requests-directory = "/data/prover-new/aggregation/requests" + fs-responses-directory = "/data/prover-new/aggregation/responses" + """.trimIndent() + val config = parseConfig(toml) + + assertProverAConfig(config.proverA) + assertThat(config.switchBlockNumberInclusive).isEqualTo(200uL) + assertThat(config.proverB).isNotNull + assertThat(config.proverB!!.execution).isEqualTo( + FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover-new/execution/requests"), + responsesDirectory = Path.of("/data/prover-new/execution/responses"), + inprogressRequestWritingSuffix = ".NEW_OVERRIDE_2_inprogress_coordinator_writing", + inprogressProvingSuffixPattern = "NEW_OVERRIDE_2\\.inprogress\\.prover.*", + pollingInterval = 10.seconds, + pollingTimeout = 5.minutes + ) + ) + assertThat(config.proverB!!.blobCompression).isEqualTo( + FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover-new/compression/requests"), + responsesDirectory = Path.of("/data/prover-new/compression/responses"), + inprogressRequestWritingSuffix = ".NEW_OVERRIDE_inprogress_coordinator_writing", + inprogressProvingSuffixPattern = "\\.inprogress\\.prover.*", + pollingInterval = 12.seconds, + pollingTimeout = 12.minutes + ) + ) + assertThat(config.proverB!!.proofAggregation).isEqualTo( + FileBasedProverConfig( + requestsDirectory = Path.of("/data/prover-new/aggregation/requests"), + responsesDirectory = 
Path.of("/data/prover-new/aggregation/responses"), + inprogressRequestWritingSuffix = ".NEW_OVERRIDE_inprogress_coordinator_writing", + inprogressProvingSuffixPattern = "\\.inprogress\\.prover.*", + pollingInterval = 10.seconds, + pollingTimeout = 5.minutes + ) + ) + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ABProverClientRouter.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ABProverClientRouter.kt new file mode 100644 index 000000000..1dbfa0163 --- /dev/null +++ b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ABProverClientRouter.kt @@ -0,0 +1,26 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import net.consensys.zkevm.coordinator.clients.ProverClient +import net.consensys.zkevm.domain.BlockInterval +import tech.pegasys.teku.infrastructure.async.SafeFuture + +class StartBlockNumberBasedSwitchPredicate( + val switchStartBlockNumberInclusive: ULong +) where ProofRequest : BlockInterval { + fun invoke(proofRequest: ProofRequest): Boolean = proofRequest.startBlockNumber >= switchStartBlockNumberInclusive +} + +class ABProverClientRouter( + val proverA: ProverClient, + val proverB: ProverClient, + val switchToProverBPredicate: (ProofRequest) -> Boolean +) : ProverClient { + + override fun requestProof(proofRequest: ProofRequest): SafeFuture { + if (switchToProverBPredicate(proofRequest)) { + return proverB.requestProof(proofRequest) + } else { + return proverA.requestProof(proofRequest) + } + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/Config.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/Config.kt new file mode 100644 index 000000000..5588f5cb3 --- /dev/null +++ 
b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/Config.kt @@ -0,0 +1,25 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import java.nio.file.Path +import kotlin.time.Duration + +data class ProversConfig( + val proverA: ProverConfig, + val switchBlockNumberInclusive: ULong?, + val proverB: ProverConfig? +) + +data class ProverConfig( + val execution: FileBasedProverConfig, + val blobCompression: FileBasedProverConfig, + val proofAggregation: FileBasedProverConfig +) + +data class FileBasedProverConfig( + val requestsDirectory: Path, + val responsesDirectory: Path, + val inprogressProvingSuffixPattern: String, + val inprogressRequestWritingSuffix: String, + val pollingInterval: Duration, + val pollingTimeout: Duration +) diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedBlobCompressionProverClient.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedBlobCompressionProverClient.kt deleted file mode 100644 index c1004343f..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedBlobCompressionProverClient.kt +++ /dev/null @@ -1,256 +0,0 @@ -package net.consensys.zkevm.coordinator.clients.prover - -import com.github.michaelbull.result.Err -import com.github.michaelbull.result.Ok -import com.github.michaelbull.result.Result -import io.vertx.core.Vertx -import net.consensys.linea.errors.ErrorResponse -import net.consensys.zkevm.coordinator.clients.BlobCompressionProof -import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClient -import net.consensys.zkevm.coordinator.clients.ProverErrorType -import net.consensys.zkevm.coordinator.clients.prover.serialization.BlobCompressionProofJsonRequest -import 
net.consensys.zkevm.coordinator.clients.prover.serialization.BlobCompressionProofJsonResponse -import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization -import net.consensys.zkevm.domain.BlockIntervals -import net.consensys.zkevm.domain.ConflationCalculationResult -import net.consensys.zkevm.domain.ProofIndex -import net.consensys.zkevm.ethereum.coordination.blob.ShnarfResult -import net.consensys.zkevm.fileio.FileMonitor -import net.consensys.zkevm.fileio.FileReader -import net.consensys.zkevm.fileio.FileWriter -import net.consensys.zkevm.fileio.inProgressFilePattern -import org.apache.logging.log4j.LogManager -import org.apache.logging.log4j.Logger -import org.apache.logging.log4j.util.Strings -import tech.pegasys.teku.infrastructure.async.SafeFuture -import java.nio.file.Path -import kotlin.io.path.notExists -import kotlin.time.Duration - -/** - * Implementation of interface with the Blob Compression Prover trough Files. - * - * Blob Compression Prover will ingest file like - * path/to/prover-input-dir/--bcv-ccv-getZkBlobCompressionProof.json - * - * When done prover will output file - * path/to/prover-output-dir/--getZkBlobCompressionProof.json - * - * So, this class will need to watch the file system and wait for the output proof to be generated - */ -class FileBasedBlobCompressionProverClient( - private val config: Config, - private val vertx: Vertx, - private val fileWriter: FileWriter = FileWriter(vertx, JsonSerialization.proofResponseMapperV1), - private val fileReader: FileReader = FileReader( - vertx, - JsonSerialization.proofResponseMapperV1, - BlobCompressionProofJsonResponse::class.java - ), - private val fileMonitor: FileMonitor = FileMonitor( - vertx, - FileMonitor.Config(config.pollingInterval, config.timeout) - ), - private val compressionProofRequestFileNameProvider: ProverFileNameProvider = CompressionProofRequestFileNameProvider, - private val compressionProofResponseFileNameProvider: ProverFileNameProvider = - 
CompressionProofResponseFileNameProvider -) : BlobCompressionProverClient { - - init { - if (config.requestFileDirectory.notExists()) { - config.requestFileDirectory.toFile().mkdirs() - } - if (config.responseFileDirectory.notExists()) { - config.responseFileDirectory.toFile().mkdirs() - } - } - - private val log: Logger = LogManager.getLogger(this::class.java) - - data class Config( - val requestFileDirectory: Path, - val responseFileDirectory: Path, - val inprogressProvingSuffixPattern: String, - val inprogressRequestFileSuffix: String, - val pollingInterval: Duration, - val timeout: Duration - ) - - fun parseResponse(filePath: Path): - SafeFuture>> { - return fileReader - .read(filePath) - .thenApply { - when (it) { - is Ok -> Ok(it.value.toDomainObject()) - is Err -> Err(ErrorResponse(mapFileReaderError(it.error.type), it.error.message)) - } - } - } - - override fun requestBlobCompressionProof( - compressedData: ByteArray, - conflations: List, - parentStateRootHash: ByteArray, - finalStateRootHash: ByteArray, - parentDataHash: ByteArray, - prevShnarf: ByteArray, - expectedShnarfResult: ShnarfResult, - commitment: ByteArray, - kzgProofContract: ByteArray, - kzgProofSideCar: ByteArray - ): SafeFuture>> { - val compressionProofIndex = ProofIndex( - startBlockNumber = conflations.first().startBlockNumber, - endBlockNumber = conflations.last().endBlockNumber, - hash = expectedShnarfResult.expectedShnarf - ) - - val responseFilePath = config.responseFileDirectory.resolve( - compressionProofResponseFileNameProvider.getFileName(compressionProofIndex) - ) - return fileMonitor.fileExists(responseFilePath) - .thenCompose { responseFileExists -> - if (responseFileExists) { - log.info( - "compression proof already proven: blob={} reusedResponse={}", - compressionProofIndex.intervalString(), - responseFilePath - ) - parseResponse(responseFilePath) - } else { - writeRequest( - compressionProofIndex = compressionProofIndex, - compressedData = compressedData, - conflations = 
conflations, - prevShnarf = prevShnarf, - parentStateRootHash = parentStateRootHash, - finalStateRootHash = finalStateRootHash, - parentDataHash = parentDataHash, - expectedShnarfResult = expectedShnarfResult, - commitment = commitment, - kzgProofContract = kzgProofContract, - kzgProofSideCar = kzgProofSideCar - ) - .thenCompose { fileMonitor.monitor(responseFilePath) } - .thenCompose { - when (it) { - is Ok -> { - log.debug("blob compression proof created: ${it.value}") - parseResponse(it.value) - } - - is Err -> { - val proverErrorType = mapFileMonitorError(it.error) - val errorMessage = if (proverErrorType == ProverErrorType.ResponseNotFound) { - "Blob compression proof not found after ${config.timeout.inWholeSeconds}s, " + - "blob=${compressionProofIndex.intervalString()}" - } else { - Strings.EMPTY - } - SafeFuture.completedFuture(Err(ErrorResponse(proverErrorType, errorMessage))) - } - } - } - } - } - } - - private fun writeRequest( - compressionProofIndex: ProofIndex, - compressedData: ByteArray, - conflations: List, - prevShnarf: ByteArray, - parentStateRootHash: ByteArray, - finalStateRootHash: ByteArray, - parentDataHash: ByteArray, - expectedShnarfResult: ShnarfResult, - commitment: ByteArray, - kzgProofContract: ByteArray, - kzgProofSideCar: ByteArray - ): SafeFuture { - val request = buildRequest( - compressedData = compressedData, - conflations = conflations, - prevShnarf = prevShnarf, - parentStateRootHash = parentStateRootHash, - finalStateRootHash = finalStateRootHash, - parentDataHash = parentDataHash, - expectedShnarfResult = expectedShnarfResult, - commitment = commitment, - kzgProofContract = kzgProofContract, - kzgProofSideCar = kzgProofSideCar - ) - val requestFilePath = config.requestFileDirectory.resolve( - compressionProofRequestFileNameProvider.getFileName(compressionProofIndex) - ) - return fileMonitor.fileExists( - config.requestFileDirectory, - inProgressFilePattern(requestFilePath.fileName.toString(), 
config.inprogressProvingSuffixPattern) - ).thenCompose { alreadyExistingRequest: Boolean -> - if (alreadyExistingRequest) { - log.info( - "compression proof already requested or proving in progress: blob={} reusingFile={}", - compressionProofIndex.intervalString(), - requestFilePath.fileName.toString() - ) - SafeFuture.completedFuture(requestFilePath) - } else { - log.debug( - "requesting compression proof: blob={} fileName={}", - compressionProofIndex.intervalString(), - requestFilePath - ) - fileWriter.write(request, requestFilePath, config.inprogressRequestFileSuffix) - } - } - } - - private fun buildRequest( - compressedData: ByteArray, - conflations: List, - prevShnarf: ByteArray, - parentStateRootHash: ByteArray, - finalStateRootHash: ByteArray, - parentDataHash: ByteArray, - expectedShnarfResult: ShnarfResult, - commitment: ByteArray, - kzgProofContract: ByteArray, - kzgProofSideCar: ByteArray - ): BlobCompressionProofJsonRequest { - return BlobCompressionProofJsonRequest( - compressedData = compressedData, - conflationOrder = BlockIntervals( - startingBlockNumber = conflations.first().startBlockNumber, - upperBoundaries = conflations.map { it.endBlockNumber } - ), - prevShnarf = prevShnarf, - parentStateRootHash = parentStateRootHash, - finalStateRootHash = finalStateRootHash, - parentDataHash = parentDataHash, - dataHash = expectedShnarfResult.dataHash, - snarkHash = expectedShnarfResult.snarkHash, - expectedX = expectedShnarfResult.expectedX, - expectedY = expectedShnarfResult.expectedY, - expectedShnarf = expectedShnarfResult.expectedShnarf, - commitment = commitment, - kzgProofContract = kzgProofContract, - kzgProofSidecar = kzgProofSideCar - ) - } - - companion object { - - private fun mapFileMonitorError(error: FileMonitor.ErrorType): ProverErrorType { - return when (error) { - FileMonitor.ErrorType.TIMED_OUT -> ProverErrorType.ResponseNotFound - } - } - - private fun mapFileReaderError(error: FileReader.ErrorType): ProverErrorType { - return when 
(error) { - FileReader.ErrorType.PARSING_ERROR -> ProverErrorType.ParseError - } - } - } -} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedBlobCompressionProverClientV2.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedBlobCompressionProverClientV2.kt new file mode 100644 index 000000000..d89fdff6f --- /dev/null +++ b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedBlobCompressionProverClientV2.kt @@ -0,0 +1,70 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import com.fasterxml.jackson.databind.ObjectMapper +import io.vertx.core.Vertx +import net.consensys.zkevm.coordinator.clients.BlobCompressionProof +import net.consensys.zkevm.coordinator.clients.BlobCompressionProofRequest +import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClientV2 +import net.consensys.zkevm.coordinator.clients.prover.serialization.BlobCompressionProofJsonRequest +import net.consensys.zkevm.coordinator.clients.prover.serialization.BlobCompressionProofJsonResponse +import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization +import net.consensys.zkevm.domain.ProofIndex +import net.consensys.zkevm.fileio.FileReader +import net.consensys.zkevm.fileio.FileWriter +import org.apache.logging.log4j.LogManager +import tech.pegasys.teku.infrastructure.async.SafeFuture + +/** + * Implementation of interface with the Blob Compression Prover through Files. 
+ * + * Blob Compression Prover will ingest file like + * path/to/prover/requests/---getZkBlobCompressionProof.json + * + * When done prover will output file + * path/to/prover/responses/---getZkBlobCompressionProof.json + * + * So, this class will need to watch the file system and wait for the output proof to be generated + */ +class FileBasedBlobCompressionProverClientV2( + val config: FileBasedProverConfig, + val vertx: Vertx, + jsonObjectMapper: ObjectMapper = JsonSerialization.proofResponseMapperV1 +) : + GenericFileBasedProverClient< + BlobCompressionProofRequest, + BlobCompressionProof, + BlobCompressionProofJsonRequest, + BlobCompressionProofJsonResponse + >( + config = config, + vertx = vertx, + fileWriter = FileWriter(vertx, jsonObjectMapper), + fileReader = FileReader( + vertx, + jsonObjectMapper, + BlobCompressionProofJsonResponse::class.java + ), + requestFileNameProvider = CompressionProofRequestFileNameProvider, + responseFileNameProvider = CompressionProofResponseFileNameProvider, + proofIndexProvider = FileBasedBlobCompressionProverClientV2::blobFileIndex, + requestMapper = FileBasedBlobCompressionProverClientV2::requestDtoMapper, + responseMapper = BlobCompressionProofJsonResponse::toDomainObject, + proofTypeLabel = "blob", + log = LogManager.getLogger(this::class.java) + ), + BlobCompressionProverClientV2 { + + companion object { + fun blobFileIndex(request: BlobCompressionProofRequest): ProofIndex { + return ProofIndex( + startBlockNumber = request.startBlockNumber, + endBlockNumber = request.endBlockNumber, + hash = request.expectedShnarfResult.expectedShnarf + ) + } + + fun requestDtoMapper(domainRequest: BlobCompressionProofRequest): SafeFuture { + return SafeFuture.completedFuture(BlobCompressionProofJsonRequest.fromDomainObject(domainRequest)) + } + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedExecutionProverClient.kt 
b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedExecutionProverClient.kt deleted file mode 100644 index 50a8aab7d..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedExecutionProverClient.kt +++ /dev/null @@ -1,238 +0,0 @@ -package net.consensys.zkevm.coordinator.clients.prover - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.node.ArrayNode -import io.vertx.core.Vertx -import net.consensys.linea.CommonDomainFunctions.blockIntervalString -import net.consensys.zkevm.coordinator.clients.ExecutionProverClient -import net.consensys.zkevm.coordinator.clients.GenerateTracesResponse -import net.consensys.zkevm.coordinator.clients.GetProofResponse -import net.consensys.zkevm.coordinator.clients.GetZkEVMStateMerkleProofResponse -import net.consensys.zkevm.coordinator.clients.L2MessageServiceLogsClient -import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization -import net.consensys.zkevm.domain.ProofIndex -import net.consensys.zkevm.domain.RlpBridgeLogsData -import net.consensys.zkevm.fileio.FileMonitor -import net.consensys.zkevm.fileio.inProgressFilePattern -import net.consensys.zkevm.toULong -import org.apache.logging.log4j.LogManager -import org.apache.logging.log4j.Logger -import org.web3j.protocol.Web3j -import org.web3j.protocol.core.DefaultBlockParameter -import tech.pegasys.teku.ethereum.executionclient.schema.ExecutionPayloadV1 -import tech.pegasys.teku.infrastructure.async.SafeFuture -import java.math.BigInteger -import java.nio.file.Path -import kotlin.io.path.notExists -import kotlin.time.Duration - -/** - * Implementation of interface with the Prover trough Files. 
- * - * Prover will ingest file like - * path/to/prover-input-dir/--getZkProof.json - * - * When done prover will output file - * path/to/prover-output-dir/--proof.json - * - * So, this class will need to watch the file system and wait for the output proof to be generated - */ -class FileBasedExecutionProverClient( - private val config: Config, - private val l2MessageServiceLogsClient: L2MessageServiceLogsClient, - private val vertx: Vertx, - private val l2Web3jClient: Web3j, - private val mapper: ObjectMapper = JsonSerialization.proofResponseMapperV1, - private val executionProofRequestFileNameProvider: ProverFileNameProvider = - ExecutionProofRequestFileNameProvider( - tracesVersion = config.tracesVersion, - stateManagerVersion = config.stateManagerVersion - ), - private val executionProofResponseFileNameProvider: ProverFileNameProvider = ExecutionProofResponseFileNameProvider, - private val fileMonitor: FileMonitor = FileMonitor( - vertx, - FileMonitor.Config(config.pollingInterval, config.timeout) - ), - val proverResponsesRepository: FileBasedProverResponsesRepository = FileBasedProverResponsesRepository( - FileBasedProverResponsesRepository.Config(config.responseDirectory), - proofResponseFileNameProvider = executionProofResponseFileNameProvider, - fileMonitor - ) -) : ExecutionProverClient { - private val log: Logger = LogManager.getLogger(this::class.java) - private val requestFileWriter: RequestFileWriter = RequestFileWriter( - vertx = vertx, - config = RequestFileWriter.Config( - requestDirectory = config.requestDirectory, - writingInprogressSuffix = ".coordinator_writing_inprogress", - proverInprogressSuffixPattern = config.inprogressProvingSuffixPattern - ), - mapper = mapper, - proofRequestFileNameProvider = executionProofRequestFileNameProvider, - log = log - ) - - init { - if (config.requestDirectory.notExists()) { - val dirCreated = config.requestDirectory.toFile().mkdirs() - if (!dirCreated) { - log.error("Failed to create prover request directory 
{}!", config.requestDirectory) - } - } - if (config.responseDirectory.notExists()) { - val dirCreated = config.responseDirectory.toFile().mkdirs() - if (!dirCreated) { - log.error("Failed to create prover response directory {}!", config.responseDirectory) - } - } - } - - data class Config( - val requestDirectory: Path, - val responseDirectory: Path, - val inprogressProvingSuffixPattern: String, - val pollingInterval: Duration, - val timeout: Duration, - val tracesVersion: String, - val stateManagerVersion: String - ) - - internal data class GetProofRequest( - val zkParentStateRootHash: String?, - val keccakParentStateRootHash: String, - val conflatedExecutionTracesFile: String, - val tracesEngineVersion: String, - val type2StateManagerVersion: String?, - val zkStateMerkleProof: ArrayNode, - val blocksData: List - ) - - internal inner class ResponseFileMonitor( - startBlockNumber: ULong, - endBlockNumber: ULong - ) { - private val proverResponseIndex = ProofIndex( - startBlockNumber, - endBlockNumber - ) - - fun findResponse(): SafeFuture { - return proverResponsesRepository.find(proverResponseIndex) - } - - fun monitor(): SafeFuture { - return proverResponsesRepository.monitor(proverResponseIndex) - } - } - - private fun getPreviousBlockKeccakStateRootHash(blockNumber: Long): SafeFuture { - return SafeFuture.of( - l2Web3jClient - .ethGetBlockByNumber( - DefaultBlockParameter.valueOf(BigInteger.valueOf(blockNumber - 1)), - false - ) - .sendAsync() - ) - .thenApply { previousBlock -> previousBlock.block.stateRoot } - } - - private fun isRequestAlreadyExistingOrProvingInProgress( - requestFilePath: Path, - startBlockNumber: ULong, - endBlockNumber: ULong - ): SafeFuture { - return fileMonitor.fileExists( - config.requestDirectory, - inProgressFilePattern(requestFilePath.fileName.toString(), config.inprogressProvingSuffixPattern) - ).thenApply { - if (it == true) { - log.info( - "Request file exists or proving already in progress for batch={}: requestFile={}", - 
blockIntervalString(startBlockNumber, endBlockNumber), - requestFilePath.fileName - ) - } - it - } - } - - private fun buildRequestFilePath( - startBlockNumber: ULong, - endBlockNumber: ULong - ): Path = config.requestDirectory.resolve( - executionProofRequestFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = startBlockNumber, - endBlockNumber = endBlockNumber - ) - ) - ) - - override fun requestBatchExecutionProof( - blocks: List, - tracesResponse: GenerateTracesResponse, - type2StateData: GetZkEVMStateMerkleProofResponse - ): SafeFuture { - val startBlockNumber = blocks.first().blockNumber.toULong() - val endBlockNumber = blocks.last().blockNumber.toULong() - val responseMonitor = ResponseFileMonitor(startBlockNumber, endBlockNumber) - val requestFilePath = buildRequestFilePath(startBlockNumber, endBlockNumber) - - // Check if the request is already proven. If so, return it. - // This happens when coordinator is restarted and the request is already proven. - return responseMonitor.findResponse().handleComposed { _, throwable -> - if (throwable == null) { - log.debug( - "execution proof already proven: batch={} reusedResponse={}", - blockIntervalString(startBlockNumber, endBlockNumber), - executionProofResponseFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = startBlockNumber, - endBlockNumber = endBlockNumber - ) - ) - ) - SafeFuture.completedFuture(GetProofResponse(startBlockNumber, endBlockNumber)) - } else { - isRequestAlreadyExistingOrProvingInProgress( - requestFilePath = requestFilePath, - startBlockNumber = startBlockNumber, - endBlockNumber = endBlockNumber - ).thenCompose { requestAlreadyExistingOrProvingInProgress -> - when { - requestAlreadyExistingOrProvingInProgress -> SafeFuture.completedFuture(requestFilePath) - else -> { - val bridgeLogsSfList = - blocks.map { block -> - l2MessageServiceLogsClient.getBridgeLogs( - blockNumber = block.blockNumber.longValue() - ) - } - SafeFuture.collectAll(bridgeLogsSfList.stream()) - 
.thenApply { blocksLogs -> - blocks.zip(blocksLogs) - }.thenComposeCombined( - getPreviousBlockKeccakStateRootHash(blocks.first().blockNumber.longValue()) - ) { bundledBlocks, previousKeccakStateRootHash -> - requestFileWriter.write( - bundledBlocks, - tracesResponse, - type2StateData, - previousKeccakStateRootHash - ) - } - } - } - .thenCompose { - responseMonitor - .monitor() - .thenApply { - GetProofResponse(startBlockNumber, endBlockNumber) - } - } - } - } - } - } -} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedExecutionProverClientV2.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedExecutionProverClientV2.kt new file mode 100644 index 000000000..5d08b309c --- /dev/null +++ b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedExecutionProverClientV2.kt @@ -0,0 +1,137 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.node.ArrayNode +import io.vertx.core.Vertx +import net.consensys.encodeHex +import net.consensys.linea.async.toSafeFuture +import net.consensys.toBigInteger +import net.consensys.zkevm.coordinator.clients.BatchExecutionProofRequestV1 +import net.consensys.zkevm.coordinator.clients.BatchExecutionProofResponse +import net.consensys.zkevm.coordinator.clients.ExecutionProverClientV2 +import net.consensys.zkevm.coordinator.clients.L2MessageServiceLogsClient +import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization +import net.consensys.zkevm.domain.ProofIndex +import net.consensys.zkevm.domain.RlpBridgeLogsData +import net.consensys.zkevm.encoding.ExecutionPayloadV1Encoder +import net.consensys.zkevm.encoding.ExecutionPayloadV1RLPEncoderByBesuImplementation +import 
net.consensys.zkevm.fileio.FileReader +import net.consensys.zkevm.fileio.FileWriter +import net.consensys.zkevm.toULong +import org.apache.logging.log4j.LogManager +import org.web3j.protocol.Web3j +import org.web3j.protocol.core.DefaultBlockParameter +import tech.pegasys.teku.infrastructure.async.SafeFuture +import java.nio.file.Path + +data class BatchExecutionProofRequestDto( + val zkParentStateRootHash: String?, + val keccakParentStateRootHash: String, + val conflatedExecutionTracesFile: String, + val tracesEngineVersion: String, + val type2StateManagerVersion: String?, + val zkStateMerkleProof: ArrayNode, + val blocksData: List +) + +internal class ExecutionProofRequestDataDecorator( + private val l2MessageServiceLogsClient: L2MessageServiceLogsClient, + private val l2Web3jClient: Web3j, + private val encoder: ExecutionPayloadV1Encoder = ExecutionPayloadV1RLPEncoderByBesuImplementation +) : (BatchExecutionProofRequestV1) -> SafeFuture { + private fun getBlockStateRootHash(blockNumber: ULong): SafeFuture { + return l2Web3jClient + .ethGetBlockByNumber( + DefaultBlockParameter.valueOf(blockNumber.toBigInteger()), + false + ) + .sendAsync() + .thenApply { block -> block.block.stateRoot } + .toSafeFuture() + } + + override fun invoke(request: BatchExecutionProofRequestV1): SafeFuture { + val bridgeLogsSfList = request.blocks.map { block -> + l2MessageServiceLogsClient.getBridgeLogs(blockNumber = block.blockNumber.longValue()) + .thenApply { block to it } + } + + return SafeFuture.collectAll(bridgeLogsSfList.stream()) + .thenCombine( + getBlockStateRootHash(request.blocks.first().blockNumber.toULong() - 1UL) + ) { blocksAndBridgeLogs, previousKeccakStateRootHash -> + BatchExecutionProofRequestDto( + zkParentStateRootHash = request.type2StateData.zkParentStateRootHash.toHexString(), + keccakParentStateRootHash = previousKeccakStateRootHash, + conflatedExecutionTracesFile = request.tracesResponse.tracesFileName, + tracesEngineVersion = 
request.tracesResponse.tracesEngineVersion, + type2StateManagerVersion = request.type2StateData.zkStateManagerVersion, + zkStateMerkleProof = request.type2StateData.zkStateMerkleProof, + blocksData = blocksAndBridgeLogs.map { (block, bridgeLogs) -> + val rlp = encoder.encode(block).encodeHex() + RlpBridgeLogsData(rlp, bridgeLogs) + } + ) + } + } +} + +/** + * Implementation of interface with the Execution Prover through Files. + * + * Prover will ingest file like + * path/to/prover/requests/---etv-stv-getZkProof.json + * + * When done prover will output file + * path/to/prover/responses/---etv-stv-getZkProof.json + * + * So, this class will need to watch the file system and wait for the output proof to be generated + */ +class FileBasedExecutionProverClientV2( + config: FileBasedProverConfig, + private val tracesVersion: String, + private val stateManagerVersion: String, + l2MessageServiceLogsClient: L2MessageServiceLogsClient, + vertx: Vertx, + l2Web3jClient: Web3j, + jsonObjectMapper: ObjectMapper = JsonSerialization.proofResponseMapperV1, + executionProofRequestFileNameProvider: ProverFileNameProvider = + ExecutionProofRequestFileNameProvider( + tracesVersion = tracesVersion, + stateManagerVersion = stateManagerVersion + ), + executionProofResponseFileNameProvider: ProverFileNameProvider = ExecutionProofResponseFileNameProvider +) : + GenericFileBasedProverClient< + BatchExecutionProofRequestV1, + BatchExecutionProofResponse, + BatchExecutionProofRequestDto, + Any + >( + config = config, + vertx = vertx, + fileWriter = FileWriter(vertx, jsonObjectMapper), + // This won't be used in practice because we don't parse the response + fileReader = FileReader(vertx, jsonObjectMapper, Any::class.java), + requestFileNameProvider = executionProofRequestFileNameProvider, + responseFileNameProvider = executionProofResponseFileNameProvider, + requestMapper = ExecutionProofRequestDataDecorator(l2MessageServiceLogsClient, l2Web3jClient), + responseMapper = { throw 
UnsupportedOperationException("Batch execution proof response shall not be parsed!") }, + proofTypeLabel = "batch", + log = LogManager.getLogger(FileBasedExecutionProverClientV2::class.java) + ), + ExecutionProverClientV2 { + + override fun parseResponse( + responseFilePath: Path, + proofIndex: ProofIndex + + ): SafeFuture { + return SafeFuture.completedFuture( + BatchExecutionProofResponse( + startBlockNumber = proofIndex.startBlockNumber, + endBlockNumber = proofIndex.endBlockNumber + ) + ) + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProofAggregationClient.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProofAggregationClient.kt deleted file mode 100644 index cf8bbf2c2..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProofAggregationClient.kt +++ /dev/null @@ -1,207 +0,0 @@ -package net.consensys.zkevm.coordinator.clients.prover - -import com.fasterxml.jackson.databind.ObjectMapper -import com.github.michaelbull.result.Err -import com.github.michaelbull.result.Ok -import com.github.michaelbull.result.Result -import io.vertx.core.Vertx -import net.consensys.encodeHex -import net.consensys.linea.errors.ErrorResponse -import net.consensys.zkevm.coordinator.clients.ProofAggregationClient -import net.consensys.zkevm.coordinator.clients.ProverErrorType -import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization.proofResponseMapperV1 -import net.consensys.zkevm.coordinator.clients.prover.serialization.ProofToFinalizeJsonResponse -import net.consensys.zkevm.domain.ProofIndex -import net.consensys.zkevm.domain.ProofToFinalize -import net.consensys.zkevm.domain.ProofsToAggregate -import net.consensys.zkevm.ethereum.crypto.HashFunction -import 
net.consensys.zkevm.ethereum.crypto.Sha256HashFunction -import net.consensys.zkevm.fileio.FileMonitor -import net.consensys.zkevm.fileio.FileReader -import net.consensys.zkevm.fileio.FileWriter -import net.consensys.zkevm.fileio.inProgressFilePattern -import org.apache.logging.log4j.util.Strings -import tech.pegasys.teku.infrastructure.async.SafeFuture -import java.nio.file.Path -import kotlin.io.path.notExists -import kotlin.time.Duration - -class FileBasedProofAggregationClient( - private val vertx: Vertx, - private val config: Config, - private val mapper: ObjectMapper = proofResponseMapperV1, - private val proofAggregationResponseFileNameProvider: ProverFileNameProvider = - AggregationProofFileNameProvider, - private val proofAggregationRequestFileNameProvider: ProverFileNameProvider = - AggregationProofFileNameProvider, - private val executionProofResponseFileNameProvider: ProverFileNameProvider = - ExecutionProofResponseFileNameProvider, - private val compressionProofResponseFileNameProvider: ProverFileNameProvider = - CompressionProofResponseFileNameProvider, - private val fileWriter: FileWriter = FileWriter(vertx, mapper), - private val fileReader: FileReader = FileReader( - vertx, - mapper, - ProofToFinalizeJsonResponse::class.java - ), - private val fileMonitor: FileMonitor = FileMonitor( - vertx, - FileMonitor.Config(config.responseFilePollingInterval, config.responseFileMonitorTimeout) - ), - private val hashFunction: HashFunction = Sha256HashFunction() -) : ProofAggregationClient { - - init { - if (config.requestFileDirectory.notExists()) { - config.requestFileDirectory.toFile().mkdirs() - } - if (config.responseFileDirectory.notExists()) { - config.responseFileDirectory.toFile().mkdirs() - } - } - - data class Config( - val requestFileDirectory: Path, - val responseFileDirectory: Path, - val responseFilePollingInterval: Duration, - val responseFileMonitorTimeout: Duration, - val inprogressRequestFileSuffix: String, - val proverInProgressSuffixPattern: 
String - ) - - data class Request( - val executionProofs: List, - val compressionProofs: List, - val parentAggregationLastBlockTimestamp: Long, - val parentAggregationLastL1RollingHashMessageNumber: Long, - val parentAggregationLastL1RollingHash: String - ) - - override fun getAggregatedProof( - aggregation: ProofsToAggregate - ): SafeFuture>> { - val blockInterval = aggregation.getStartEndBlockInterval() - val request = buildRequest(aggregation) - val responseFilePath = config.responseFileDirectory.resolve( - proofAggregationResponseFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = blockInterval.startBlockNumber, - endBlockNumber = blockInterval.endBlockNumber, - hash = getRequestHash(request) - ) - ) - ) - - return fileMonitor.fileExists(responseFilePath) - .thenCompose { responseExists -> - if (responseExists) { - parseResponse(responseFilePath) - } else { - val requestFilePath = config.requestFileDirectory - .resolve(getZkAggregatedProofRequestFileName(request, aggregation)) - writeRequest(request = request, requestFilePath = requestFilePath) - .thenCompose { fileMonitor.monitor(responseFilePath) } - .thenCompose { - when (it) { - is Ok -> parseResponse(it.value) - is Err -> SafeFuture.completedFuture( - Err(ErrorResponse(mapFileMonitorError(it.error), Strings.EMPTY)) - ) - } - } - } - } - } - - private fun writeRequest(request: Request, requestFilePath: Path): SafeFuture { - return fileMonitor.fileExists( - config.requestFileDirectory, - inProgressFilePattern(requestFilePath.fileName.toString(), config.proverInProgressSuffixPattern) - ).thenCompose { - if (it) { - SafeFuture.completedFuture(requestFilePath) - } else { - fileWriter.write(request, requestFilePath, config.inprogressRequestFileSuffix) - } - } - } - - internal fun buildRequest(proofsToAggregate: ProofsToAggregate): Request { - val executionProofs = proofsToAggregate.executionProofs - .toIntervalList() - .map { blockInterval -> - executionProofResponseFileNameProvider.getFileName( - 
ProofIndex( - startBlockNumber = blockInterval.startBlockNumber, - endBlockNumber = blockInterval.endBlockNumber - ) - ) - } - - val compressionProofs = proofsToAggregate.compressionProofIndexes - .map { - compressionProofResponseFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = it.startBlockNumber, - endBlockNumber = it.endBlockNumber, - hash = it.hash - ) - ) - } - - return Request( - executionProofs = executionProofs, - compressionProofs = compressionProofs, - parentAggregationLastBlockTimestamp = proofsToAggregate.parentAggregationLastBlockTimestamp.epochSeconds, - parentAggregationLastL1RollingHashMessageNumber = - proofsToAggregate.parentAggregationLastL1RollingHashMessageNumber.toLong(), - parentAggregationLastL1RollingHash = proofsToAggregate.parentAggregationLastL1RollingHash.encodeHex() - ) - } - - internal fun getZkAggregatedProofRequestFileName( - request: Request, - proofsToAggregate: ProofsToAggregate - ): String { - val startEndBlockInterval = proofsToAggregate.getStartEndBlockInterval() - val contentHash = getRequestHash(request) - return proofAggregationRequestFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = startEndBlockInterval.startBlockNumber, - endBlockNumber = startEndBlockInterval.endBlockNumber, - hash = contentHash - ) - ) - } - - private fun parseResponse(filePath: Path): - SafeFuture>> { - return fileReader - .read(filePath) - .thenApply { - when (it) { - is Ok -> Ok(it.value.toDomainObject()) - is Err -> Err(ErrorResponse(mapFileReaderError(it.error.type), it.error.message)) - } - } - } - - private fun getRequestHash(request: Request): ByteArray { - val contentBytes = (request.compressionProofs + request.executionProofs).joinToString().toByteArray() - return hashFunction.hash(contentBytes) - } - - companion object { - private fun mapFileMonitorError(error: FileMonitor.ErrorType): ProverErrorType { - return when (error) { - FileMonitor.ErrorType.TIMED_OUT -> ProverErrorType.ResponseNotFound - } - } - - 
private fun mapFileReaderError(error: FileReader.ErrorType): ProverErrorType { - return when (error) { - FileReader.ErrorType.PARSING_ERROR -> ProverErrorType.ParseError - } - } - } -} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProofAggregationClientV2.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProofAggregationClientV2.kt new file mode 100644 index 000000000..af06b425d --- /dev/null +++ b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProofAggregationClientV2.kt @@ -0,0 +1,154 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import com.fasterxml.jackson.databind.ObjectMapper +import io.vertx.core.Vertx +import net.consensys.encodeHex +import net.consensys.zkevm.coordinator.clients.ProofAggregationProverClientV2 +import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization +import net.consensys.zkevm.coordinator.clients.prover.serialization.ProofToFinalizeJsonResponse +import net.consensys.zkevm.domain.ProofIndex +import net.consensys.zkevm.domain.ProofToFinalize +import net.consensys.zkevm.domain.ProofsToAggregate +import net.consensys.zkevm.ethereum.crypto.HashFunction +import net.consensys.zkevm.ethereum.crypto.Sha256HashFunction +import net.consensys.zkevm.fileio.FileReader +import net.consensys.zkevm.fileio.FileWriter +import org.apache.logging.log4j.LogManager +import tech.pegasys.teku.infrastructure.async.SafeFuture + +data class AggregationProofRequestDto( + val executionProofs: List, + val compressionProofs: List, + val parentAggregationLastBlockTimestamp: Long, + val parentAggregationLastL1RollingHashMessageNumber: Long, + val parentAggregationLastL1RollingHash: String +) { + companion object { + fun fromDomainObject( + proofsToAggregate: ProofsToAggregate, + 
executionProofResponseFileNameProvider: ProverFileNameProvider, + compressionProofResponseFileNameProvider: ProverFileNameProvider + ): AggregationProofRequestDto { + val executionProofsResponseFiles = proofsToAggregate.executionProofs + .toIntervalList() + .map { blockInterval -> + executionProofResponseFileNameProvider.getFileName( + ProofIndex( + startBlockNumber = blockInterval.startBlockNumber, + endBlockNumber = blockInterval.endBlockNumber + ) + ) + } + + val compressionProofsResponsesFiles = proofsToAggregate.compressionProofIndexes + .map { + compressionProofResponseFileNameProvider.getFileName( + ProofIndex( + startBlockNumber = it.startBlockNumber, + endBlockNumber = it.endBlockNumber, + hash = it.hash + ) + ) + } + + return AggregationProofRequestDto( + executionProofs = executionProofsResponseFiles, + compressionProofs = compressionProofsResponsesFiles, + parentAggregationLastBlockTimestamp = proofsToAggregate.parentAggregationLastBlockTimestamp.epochSeconds, + parentAggregationLastL1RollingHashMessageNumber = + proofsToAggregate.parentAggregationLastL1RollingHashMessageNumber.toLong(), + parentAggregationLastL1RollingHash = proofsToAggregate.parentAggregationLastL1RollingHash.encodeHex() + ) + } + } +} + +internal class AggregationRequestDtoMapper( + private val executionProofResponseFileNameProvider: ProverFileNameProvider, + private val compressionProofResponseFileNameProvider: ProverFileNameProvider +) : (ProofsToAggregate) -> SafeFuture { + override fun invoke(proofsToAggregate: ProofsToAggregate): SafeFuture { + return SafeFuture.completedFuture( + AggregationProofRequestDto.fromDomainObject( + proofsToAggregate, + executionProofResponseFileNameProvider, + compressionProofResponseFileNameProvider + ) + ) + } +} + +/** + * Implementation of interface with the Aggregation Prover through Files. 
+ * + * Aggregation Prover will ingest file like + * path/to/prover/requests/---getZkAggregatedProof.json + * + * When done prover will output file + * path/to/prover/responses/---getZkAggregatedProof.json + * + * So, this class will need to watch the file system and wait for the output proof to be generated + */ +class FileBasedProofAggregationClientV2( + vertx: Vertx, + config: FileBasedProverConfig, + hashFunction: HashFunction = Sha256HashFunction(), + executionProofResponseFileNameProvider: ProverFileNameProvider = ExecutionProofResponseFileNameProvider, + compressionProofResponseFileNameProvider: ProverFileNameProvider = CompressionProofResponseFileNameProvider, + jsonObjectMapper: ObjectMapper = JsonSerialization.proofResponseMapperV1 +) : + GenericFileBasedProverClient< + ProofsToAggregate, + ProofToFinalize, + AggregationProofRequestDto, + ProofToFinalizeJsonResponse + >( + config = config, + vertx = vertx, + fileWriter = FileWriter(vertx, jsonObjectMapper), + fileReader = FileReader( + vertx, + jsonObjectMapper, + ProofToFinalizeJsonResponse::class.java + ), + requestFileNameProvider = AggregationProofFileNameProvider, + responseFileNameProvider = AggregationProofFileNameProvider, + proofIndexProvider = createProofIndexProviderFn(hashFunction), + requestMapper = AggregationRequestDtoMapper( + executionProofResponseFileNameProvider = executionProofResponseFileNameProvider, + compressionProofResponseFileNameProvider = compressionProofResponseFileNameProvider + ), + responseMapper = ProofToFinalizeJsonResponse::toDomainObject, + proofTypeLabel = "aggregation", + log = LogManager.getLogger(this::class.java) + ), + ProofAggregationProverClientV2 { + + companion object { + fun createProofIndexProviderFn( + hashFunction: HashFunction, + executionProofResponseFileNameProvider: ProverFileNameProvider = ExecutionProofResponseFileNameProvider, + compressionProofResponseFileNameProvider: ProverFileNameProvider = CompressionProofResponseFileNameProvider + ): 
(ProofsToAggregate) -> ProofIndex { + return { request: ProofsToAggregate -> + + val requestDto = AggregationProofRequestDto.fromDomainObject( + proofsToAggregate = request, + executionProofResponseFileNameProvider = executionProofResponseFileNameProvider, + compressionProofResponseFileNameProvider = compressionProofResponseFileNameProvider + ) + val hash = hashRequest(hashFunction, requestDto) + ProofIndex( + startBlockNumber = request.startBlockNumber, + endBlockNumber = request.endBlockNumber, + hash = hash + ) + } + } + + private fun hashRequest(hashFunction: HashFunction, request: AggregationProofRequestDto): ByteArray { + val contentBytes = (request.compressionProofs + request.executionProofs).joinToString().toByteArray() + return hashFunction.hash(contentBytes) + } + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProverResponsesRepository.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProverResponsesRepository.kt deleted file mode 100644 index d909becaa..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/FileBasedProverResponsesRepository.kt +++ /dev/null @@ -1,78 +0,0 @@ -package net.consensys.zkevm.coordinator.clients.prover - -import com.github.michaelbull.result.Err -import com.github.michaelbull.result.Ok -import net.consensys.linea.errors.ErrorResponse -import net.consensys.zkevm.coordinator.clients.ProverErrorType -import net.consensys.zkevm.domain.ProofIndex -import net.consensys.zkevm.fileio.FileMonitor -import org.apache.logging.log4j.LogManager -import org.apache.logging.log4j.Logger -import tech.pegasys.teku.infrastructure.async.SafeFuture -import java.nio.file.Path -import kotlin.io.path.notExists - -class FileBasedProverResponsesRepository( - private val config: Config, - private val 
proofResponseFileNameProvider: ProverFileNameProvider, - private val fileMonitor: FileMonitor -) : ProverResponsesRepository { - private val log: Logger = LogManager.getLogger(this::class.java) - - init { - if (config.responseDirectory.notExists()) { - config.responseDirectory.toFile().mkdirs() - } - } - - data class Config(val responseDirectory: Path) - - private fun outputFileNameForBlockInterval( - proverResponseIndex: ProofIndex - ): Path { - val proofFileName = proofResponseFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = proverResponseIndex.startBlockNumber, - endBlockNumber = proverResponseIndex.endBlockNumber - ) - ) - return config.responseDirectory.resolve(proofFileName) - } - - override fun find( - index: ProofIndex - ): SafeFuture { - val outputFile = outputFileNameForBlockInterval(index) - log.trace("Polling for file {}", outputFile) - return fileMonitor.fileExists(outputFile).thenCompose { - if (it == true) { - SafeFuture.completedFuture(Unit) - } else { - SafeFuture.failedFuture( - ErrorResponse( - ProverErrorType.ResponseNotFound, - "Response file '$outputFile' wasn't found in the repo" - ).asException() - ) - } - } - } - - override fun monitor( - index: ProofIndex - ): SafeFuture { - val outputFile = outputFileNameForBlockInterval(index) - return fileMonitor.monitor(outputFile).thenCompose { - when (it) { - is Ok -> SafeFuture.completedFuture(Unit) - is Err -> { - when (it.error) { - FileMonitor.ErrorType.TIMED_OUT -> SafeFuture.failedFuture( - ErrorResponse(ProverErrorType.ResponseTimeout, "Monitoring timed out").asException() - ) - } - } - } - } - } -} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/GenericFileBasedProverClient.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/GenericFileBasedProverClient.kt new file mode 100644 index 000000000..bdf9655fa --- /dev/null +++ 
b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/GenericFileBasedProverClient.kt @@ -0,0 +1,190 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import com.github.michaelbull.result.Err +import com.github.michaelbull.result.getOrElse +import com.github.michaelbull.result.map +import io.vertx.core.Vertx +import net.consensys.linea.errors.ErrorResponse +import net.consensys.zkevm.domain.BlockInterval +import net.consensys.zkevm.domain.ProofIndex +import net.consensys.zkevm.fileio.FileMonitor +import net.consensys.zkevm.fileio.FileReader +import net.consensys.zkevm.fileio.FileWriter +import net.consensys.zkevm.fileio.inProgressFilePattern +import org.apache.logging.log4j.LogManager +import org.apache.logging.log4j.Logger +import tech.pegasys.teku.infrastructure.async.SafeFuture +import java.nio.file.Path +import java.util.concurrent.atomic.AtomicLong +import java.util.function.Supplier +import kotlin.io.path.notExists + +open class GenericFileBasedProverClient( + private val config: FileBasedProverConfig, + private val vertx: Vertx, + private val fileWriter: FileWriter, + private val fileReader: FileReader, + private val requestFileNameProvider: ProverFileNameProvider, + private val responseFileNameProvider: ProverFileNameProvider, + private val fileMonitor: FileMonitor = FileMonitor( + vertx, + FileMonitor.Config(config.pollingInterval, config.pollingTimeout) + ), + private val proofIndexProvider: (Request) -> ProofIndex = ::blockIntervalProofIndex, + private val requestMapper: (Request) -> SafeFuture, + private val responseMapper: (ResponseDto) -> Response, + private val proofTypeLabel: String, + private val log: Logger = LogManager.getLogger(GenericFileBasedProverClient::class.java) +) : Supplier + where Request : BlockInterval, + Response : Any, + RequestDto : Any, + ResponseDto : Any { + + init { + createDirectoryIfNotExists(config.requestsDirectory, log) + 
createDirectoryIfNotExists(config.responsesDirectory, log) + } + + private val responsesWaiting = AtomicLong(0) + override fun get(): Long = responsesWaiting.get() + + fun requestProof(proofRequest: Request): SafeFuture { + val proofIndex = proofIndexProvider(proofRequest) + val requestFileName = requestFileNameProvider.getFileName(proofIndex) + val requestFilePath = config.requestsDirectory.resolve(requestFileName) + val responseFilePath = config.responsesDirectory.resolve(responseFileNameProvider.getFileName(proofIndex)) + + return fileMonitor.fileExists(responseFilePath) + .thenCompose { responseFileExists -> + if (responseFileExists) { + log.debug( + "request already proven: {}={} reusedResponse={}", + proofTypeLabel, + proofIndex.intervalString(), + responseFilePath + ) + SafeFuture.completedFuture(responseFilePath) + } else { + findRequestFileIfAlreadyInFileSystem(requestFileName) + .thenCompose { requestFileFound: String? -> + responsesWaiting.incrementAndGet() + if (requestFileFound != null) { + log.debug( + "request already in file system: {}={} reusedRequest={}", + proofTypeLabel, + proofIndex.intervalString(), + requestFileFound + ) + SafeFuture.completedFuture(Unit) + } else { + requestMapper(proofRequest) + .thenCompose { proofRequestDto -> + fileWriter.write( + proofRequestDto, + requestFilePath, + config.inprogressRequestWritingSuffix + ).thenApply { + Unit + } + } + } + } + .thenCompose { waitForResponse(responseFilePath) } + .thenApply { + responsesWaiting.decrementAndGet() + responseFilePath + } + } + } + .thenCompose { proofResponseFilePath -> parseResponse(proofResponseFilePath, proofIndex) } + .whenException { + log.error( + "Failed to get proof: {}={} errorMessage={}", + proofTypeLabel, + proofIndex.intervalString(), + it.message, + it + ) + } + } + + private fun waitForResponse( + responseFilePath: Path + ): SafeFuture { + return fileMonitor.monitor(responseFilePath).thenCompose { + if (it is Err) { + when (it.error) { + 
FileMonitor.ErrorType.TIMED_OUT -> { + SafeFuture.failedFuture(RuntimeException("Timeout waiting for response file=$responseFilePath")) + } + + else -> { + SafeFuture.failedFuture(RuntimeException("Unexpected error=$it")) + } + } + } else { + SafeFuture.completedFuture(responseFilePath) + } + } + } + + private fun findRequestFileIfAlreadyInFileSystem( + requestFileName: String + ): SafeFuture { + return fileMonitor.findFile( + directory = config.requestsDirectory, + pattern = inProgressFilePattern(requestFileName, config.inprogressProvingSuffixPattern) + ) + } + + open fun parseResponse( + responseFilePath: Path, + proofIndex: ProofIndex + ): SafeFuture { + return fileReader.read(responseFilePath) + .thenCompose { result -> + result + .map { SafeFuture.completedFuture(responseMapper(it)) } + .getOrElse { errorResponse: ErrorResponse -> + when (errorResponse.type) { + FileReader.ErrorType.PARSING_ERROR -> { + log.error( + "Failed to read response file={} errorMessage={}", + responseFilePath, + errorResponse.message + ) + } + } + SafeFuture.failedFuture(errorResponse.asException()) + } + } + } + + companion object { + fun blockIntervalProofIndex(request: R): ProofIndex { + return ProofIndex( + startBlockNumber = request.startBlockNumber, + endBlockNumber = request.endBlockNumber + ) + } + + fun createDirectoryIfNotExists( + directory: Path, + log: Logger = LogManager.getLogger(GenericFileBasedProverClient::class.java) + ) { + try { + if (directory.notExists()) { + val dirCreated = directory.toFile().mkdirs() + if (!dirCreated) { + log.error("Failed to create directory {}!", directory) + throw RuntimeException("Failed to create directory $directory") + } + } + } catch (e: Exception) { + log.error("Failed to create directory {}!", directory, e) + throw e + } + } + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverClientFactory.kt 
b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverClientFactory.kt new file mode 100644 index 000000000..61143aa63 --- /dev/null +++ b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverClientFactory.kt @@ -0,0 +1,112 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import io.vertx.core.Vertx +import net.consensys.linea.contract.Web3JL2MessageServiceLogsClient +import net.consensys.linea.metrics.LineaMetricsCategory +import net.consensys.linea.metrics.MetricsFacade +import net.consensys.linea.metrics.micrometer.GaugeAggregator +import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClientV2 +import net.consensys.zkevm.coordinator.clients.ExecutionProverClientV2 +import net.consensys.zkevm.coordinator.clients.ProofAggregationProverClientV2 +import net.consensys.zkevm.coordinator.clients.ProverClient +import net.consensys.zkevm.domain.BlockInterval +import org.web3j.protocol.Web3j + +class ProverClientFactory( + private val vertx: Vertx, + private val config: ProversConfig, + private val metricsFacade: MetricsFacade +) { + private val executionWaitingResponsesMetric = GaugeAggregator() + private val blobWaitingResponsesMetric = GaugeAggregator() + private val aggregationWaitingResponsesMetric = GaugeAggregator() + + init { + metricsFacade.createGauge( + category = LineaMetricsCategory.BATCH, + name = "prover.waiting", + description = "Number of execution proof waiting responses", + measurementSupplier = executionWaitingResponsesMetric + ) + metricsFacade.createGauge( + category = LineaMetricsCategory.BLOB, + name = "prover.waiting", + description = "Number of blob compression proof waiting responses", + measurementSupplier = blobWaitingResponsesMetric + + ) + metricsFacade.createGauge( + category = LineaMetricsCategory.AGGREGATION, + name = "prover.waiting", + description = "Number of aggregation 
proof waiting responses", + measurementSupplier = aggregationWaitingResponsesMetric + ) + } + + fun executionProverClient( + tracesVersion: String, + stateManagerVersion: String, + l2MessageServiceLogsClient: Web3JL2MessageServiceLogsClient, + l2Web3jClient: Web3j + ): ExecutionProverClientV2 { + return createClient( + proverAConfig = config.proverA.execution, + proverBConfig = config.proverB?.execution, + switchBlockNumberInclusive = config.switchBlockNumberInclusive + ) { proverConfig -> + FileBasedExecutionProverClientV2( + config = proverConfig, + vertx = vertx, + tracesVersion = tracesVersion, + stateManagerVersion = stateManagerVersion, + l2MessageServiceLogsClient = l2MessageServiceLogsClient, + l2Web3jClient = l2Web3jClient + ).also { executionWaitingResponsesMetric.addReporter(it) } + } + } + + fun blobCompressionProverClient(): BlobCompressionProverClientV2 { + return createClient( + proverAConfig = config.proverA.blobCompression, + proverBConfig = config.proverB?.blobCompression, + switchBlockNumberInclusive = config.switchBlockNumberInclusive + ) { proverConfig -> + FileBasedBlobCompressionProverClientV2( + config = proverConfig, + vertx = vertx + ).also { blobWaitingResponsesMetric.addReporter(it) } + } + } + + fun proofAggregationProverClient(): ProofAggregationProverClientV2 { + return createClient( + proverAConfig = config.proverA.proofAggregation, + proverBConfig = config.proverB?.proofAggregation, + switchBlockNumberInclusive = config.switchBlockNumberInclusive + ) { proverConfig -> + FileBasedProofAggregationClientV2( + config = proverConfig, + vertx = vertx + ).also { aggregationWaitingResponsesMetric.addReporter(it) } + } + } + + private fun createClient( + proverAConfig: FileBasedProverConfig, + proverBConfig: FileBasedProverConfig?, + switchBlockNumberInclusive: ULong?, + clientBuilder: (FileBasedProverConfig) -> ProverClient + ): ProverClient + where ProofRequest : BlockInterval { + return if (switchBlockNumberInclusive != null) { + val 
switchPredicate = StartBlockNumberBasedSwitchPredicate(switchBlockNumberInclusive) + ABProverClientRouter( + proverA = clientBuilder(proverAConfig), + proverB = clientBuilder(proverBConfig!!), + switchToProverBPredicate = switchPredicate::invoke + ) + } else { + clientBuilder(proverAConfig) + } + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverResponsesRepository.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverResponsesRepository.kt deleted file mode 100644 index b01bd3bc9..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverResponsesRepository.kt +++ /dev/null @@ -1,9 +0,0 @@ -package net.consensys.zkevm.coordinator.clients.prover - -import net.consensys.zkevm.domain.ProofIndex -import tech.pegasys.teku.infrastructure.async.SafeFuture - -interface ProverResponsesRepository { - fun find(index: ProofIndex): SafeFuture - fun monitor(index: ProofIndex): SafeFuture -} diff --git a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/RequestFileWriter.kt b/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/RequestFileWriter.kt deleted file mode 100644 index c26913626..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/RequestFileWriter.kt +++ /dev/null @@ -1,104 +0,0 @@ -package net.consensys.zkevm.coordinator.clients.prover - -import com.fasterxml.jackson.databind.ObjectMapper -import io.vertx.core.Vertx -import net.consensys.encodeHex -import net.consensys.linea.async.toSafeFuture -import net.consensys.zkevm.coordinator.clients.GenerateTracesResponse -import 
net.consensys.zkevm.coordinator.clients.GetZkEVMStateMerkleProofResponse -import net.consensys.zkevm.domain.BridgeLogsData -import net.consensys.zkevm.domain.ProofIndex -import net.consensys.zkevm.domain.RlpBridgeLogsData -import net.consensys.zkevm.encoding.ExecutionPayloadV1Encoder -import net.consensys.zkevm.encoding.ExecutionPayloadV1RLPEncoderByBesuImplementation -import net.consensys.zkevm.fileio.FileWriter -import net.consensys.zkevm.toULong -import org.apache.logging.log4j.Logger -import tech.pegasys.teku.ethereum.executionclient.schema.ExecutionPayloadV1 -import tech.pegasys.teku.infrastructure.async.SafeFuture -import java.nio.file.Path -import java.util.concurrent.Callable - -internal class RequestFileWriter( - private val vertx: Vertx, - private val proofRequestFileNameProvider: ProverFileNameProvider, - private val config: Config, - private val mapper: ObjectMapper, - private val log: Logger, - private val fileWriter: FileWriter = FileWriter(vertx, mapper), - private val executionPayloadV1Encoder: ExecutionPayloadV1Encoder = ExecutionPayloadV1RLPEncoderByBesuImplementation -) { - - data class Config( - val requestDirectory: Path, - val writingInprogressSuffix: String, - val proverInprogressSuffixPattern: String - ) - - fun write( - blocksAndLogs: List>>, - tracesResponse: GenerateTracesResponse, - type2StateData: GetZkEVMStateMerkleProofResponse, - keccakPreviousStateRootHash: String - ): SafeFuture { - val startBlockNumber = blocksAndLogs.first().first.blockNumber.toULong() - val endBlockNumber = blocksAndLogs.last().first.blockNumber.toULong() - val requestFilePath = config.requestDirectory - .resolve( - proofRequestFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = startBlockNumber, - endBlockNumber = endBlockNumber - ) - ) - ) - - return buildRequest( - blocksAndLogs, - tracesResponse, - type2StateData, - keccakPreviousStateRootHash - ).thenCompose { request -> - writeRequestToFile(requestFilePath, request) - } - } - - private fun 
writeRequestToFile( - requestFilePath: Path, - request: FileBasedExecutionProverClient.GetProofRequest - ): SafeFuture { - return fileWriter.write(request, requestFilePath, config.writingInprogressSuffix) - .thenPeek { - log.debug("execution proof request created: {}", requestFilePath) - } - } - - private fun buildRequest( - blocksAndLogs: List>>, - tracesResponse: GenerateTracesResponse, - type2StateData: GetZkEVMStateMerkleProofResponse, - keccakPreviousStateRootHash: String - ): SafeFuture { - val blocksRlpBridgeLogsDataFuture = - blocksAndLogs.map { (block, bridgeLogs) -> - vertx.executeBlocking( - Callable { - executionPayloadV1Encoder.encode(block).encodeHex() - } - ).toSafeFuture().thenApply { rlp -> - RlpBridgeLogsData(rlp, bridgeLogs) - } - } - return SafeFuture.collectAll(*blocksRlpBridgeLogsDataFuture.toTypedArray()).thenApply { blocksRlpBridgeLogsData -> - FileBasedExecutionProverClient.GetProofRequest( - zkParentStateRootHash = type2StateData.zkParentStateRootHash.toHexString(), - keccakParentStateRootHash = keccakPreviousStateRootHash, - conflatedExecutionTracesFile = tracesResponse.tracesFileName, - tracesEngineVersion = tracesResponse.tracesEngineVersion, - type2StateManagerVersion = type2StateData.zkStateManagerVersion, - zkStateMerkleProof = type2StateData.zkStateMerkleProof, - blocksData = blocksRlpBridgeLogsData - ) - } - } -} diff --git a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedBatchExecutionProverClientTest.kt b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedBatchExecutionProverClientTest.kt deleted file mode 100644 index 789ec1561..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedBatchExecutionProverClientTest.kt +++ /dev/null @@ -1,255 +0,0 @@ -package net.consensys.zkevm.coordinator.clients - -import 
com.fasterxml.jackson.databind.node.ArrayNode -import io.vertx.core.Vertx -import io.vertx.junit5.Timeout -import io.vertx.junit5.VertxExtension -import io.vertx.junit5.VertxTestContext -import net.consensys.linea.contract.Web3JL2MessageServiceLogsClient -import net.consensys.zkevm.coordinator.clients.prover.CommonTestData.bridgeLogs -import net.consensys.zkevm.coordinator.clients.prover.CommonTestData.ethLogs -import net.consensys.zkevm.coordinator.clients.prover.FileBasedExecutionProverClient -import net.consensys.zkevm.coordinator.clients.prover.SimpleFileNameProvider -import net.consensys.zkevm.coordinator.clients.prover.randomExecutionPayloads -import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization.proofResponseMapperV1 -import net.consensys.zkevm.coordinator.clients.prover.validateRequest -import net.consensys.zkevm.toULong -import org.apache.tuweni.bytes.Bytes32 -import org.assertj.core.api.Assertions -import org.junit.jupiter.api.BeforeEach -import org.junit.jupiter.api.extension.ExtendWith -import org.junit.jupiter.api.io.TempDir -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.Arguments -import org.junit.jupiter.params.provider.MethodSource -import org.mockito.Mockito -import org.mockito.kotlin.any -import org.mockito.kotlin.eq -import org.mockito.kotlin.mock -import org.mockito.kotlin.whenever -import org.web3j.protocol.Web3j -import org.web3j.protocol.core.methods.response.EthBlock -import tech.pegasys.teku.infrastructure.async.SafeFuture -import java.io.File -import java.nio.file.Path -import java.util.concurrent.TimeUnit -import java.util.stream.Stream -import kotlin.time.Duration.Companion.milliseconds -import kotlin.time.Duration.Companion.seconds - -@ExtendWith(VertxExtension::class) -class FileBasedBatchExecutionProverClientTest { - private val tracesFileName = "/some/path/1-3-conflated-traces.json.gz" - private val tracesEngineVersion = "0.2.3" - private val 
zkEvmStateManagerVersion = "0.3.4" - private val requestSubdirectory = "request" - private val responseSubdirectory = "response" - private val mapper = proofResponseMapperV1 - private val pollingInterval = 10.milliseconds - private val mockBridgeLogsClient = mock(defaultAnswer = Mockito.RETURNS_DEEP_STUBS) - private val mockL2Client = mock(defaultAnswer = Mockito.RETURNS_DEEP_STUBS) - private val previousStateRoot = Bytes32.random().toHexString() - - private val merkleProofJson: ArrayNode = let { - val testFilePath = "$testdataPath/type2state-manager/state-proof.json" - mapper.readTree(Path.of(testFilePath).toFile()).let { - val merkleProof = it.get("zkStateMerkleProof") - assert(merkleProof.isArray) - merkleProof as ArrayNode - } - } - - companion object { - private val testdataPath = "../../../../testdata" - private val proverOutputs: Array = File("$testdataPath/prover/output/").listFiles()!! - - @JvmStatic - private fun proofFiles(): Stream { - return proverOutputs.map { Arguments.of(it) }.stream() - } - - @JvmStatic - private fun blocksAndProofs(): Stream { - return proverOutputs.mapIndexed { index, file -> Arguments.of(index + 1, file) }.stream() - } - } - - private fun buildProverClient( - vertx: Vertx, - tempDir: Path - ): FileBasedExecutionProverClient { - val responseDirectory = Path.of(tempDir.toString(), responseSubdirectory) - return FileBasedExecutionProverClient( - config = FileBasedExecutionProverClient.Config( - requestDirectory = Path.of(tempDir.toString(), requestSubdirectory), - responseDirectory = responseDirectory, - inprogressProvingSuffixPattern = "\\.inprogress\\.prover.*", - pollingInterval = pollingInterval, - timeout = 1.seconds, - tracesVersion = tracesEngineVersion, - stateManagerVersion = zkEvmStateManagerVersion - ), - l2MessageServiceLogsClient = mockBridgeLogsClient, - vertx = vertx, - l2Web3jClient = mockL2Client, - mapper = mapper, - executionProofRequestFileNameProvider = SimpleFileNameProvider(), - 
executionProofResponseFileNameProvider = SimpleFileNameProvider() - ) - } - - @BeforeEach - fun beforeEach() { - // To warmup assertions otherwise first test may fail - Assertions.assertThat(true).isTrue() - whenever(mockBridgeLogsClient.getBridgeLogs(any())).thenAnswer { SafeFuture.completedFuture(bridgeLogs) } - whenever(mockL2Client.ethGetLogs(any()).send().logs).thenAnswer { ethLogs } - whenever(mockL2Client.ethGetBlockByNumber(any(), eq(false)).sendAsync()).thenAnswer { - val blockResponse = EthBlock() - blockResponse.result = EthBlock.Block() - blockResponse.block.stateRoot = previousStateRoot - SafeFuture.completedFuture(blockResponse) - } - } - - @Timeout(1, timeUnit = TimeUnit.SECONDS) - @ParameterizedTest - @MethodSource("proofFiles") - fun responseFileMonitor_discoversResponseFile( - proofFile: File, - vertx: Vertx, - @TempDir tempDir: Path, - testContext: VertxTestContext - ) { - val outputDirectory = Path.of(tempDir.toString(), responseSubdirectory) - val proverClient = buildProverClient(vertx, tempDir) - - val startBlockNumber = 3123UL - val endBlockNumber = 3129UL - val fileMonitor = proverClient.ResponseFileMonitor(startBlockNumber, endBlockNumber) - fileMonitor - .monitor() - .thenApply { response: Unit -> - testContext - .verify { Assertions.assertThat(response).isEqualTo(Unit) } - .completeNow() - } - .exceptionally { testContext.failNow(it) } - - val outputFileInprogress = - "$outputDirectory/$startBlockNumber-$endBlockNumber-getZkProof.json.inprogress" - val outputFile = "$outputDirectory/$startBlockNumber-$endBlockNumber-getZkProof.json" - Thread.sleep(pollingInterval.inWholeMilliseconds * 2) - val inprogressOutputFile = File(outputFileInprogress) - proofFile.copyTo(inprogressOutputFile, true) - inprogressOutputFile.renameTo(File(outputFile)) - } - - @Timeout(15, timeUnit = TimeUnit.SECONDS) - @ParameterizedTest - @MethodSource("blocksAndProofs") - fun fileBasedProverClient_returnsProofs( - blocksToGenerate: Int, - proofFile: File, - vertx: 
Vertx, - @TempDir tempDir: Path, - testContext: VertxTestContext - ) { - val outputDirectory = Path.of(tempDir.toString(), responseSubdirectory) - val inputDirectory = Path.of(tempDir.toString(), requestSubdirectory) - val proverClient = buildProverClient(vertx, tempDir) - val zkParentStateRootHash = Bytes32.random() - val blocks = randomExecutionPayloads(blocksToGenerate) - val startBlockNumber = blocks.first().blockNumber - val endBlockNumber = blocks.last().blockNumber - val tracesResponse = GenerateTracesResponse(tracesFileName, tracesEngineVersion) - val stateManagerResponse = - GetZkEVMStateMerkleProofResponse( - zkStateMerkleProof = merkleProofJson, - zkParentStateRootHash = zkParentStateRootHash, - zkStateManagerVersion = zkEvmStateManagerVersion, - zkEndStateRootHash = Bytes32.random() - ) - - proverClient - .requestBatchExecutionProof(blocks, tracesResponse, stateManagerResponse) - .thenApply { response -> - testContext - .verify { - val expectedRequestPath = - Path.of( - inputDirectory.toString(), - "$startBlockNumber-$endBlockNumber-getZkProof.json" - ) - Assertions.assertThat(expectedRequestPath).exists() - validateRequest( - mapper, - expectedRequestPath.toFile(), - stateManagerResponse, - blocks, - bridgeLogs, - tracesFileName, - tracesEngineVersion, - previousStateRoot - ) - - Assertions.assertThat(response) - .isEqualTo(GetProofResponse(startBlockNumber.toULong(), endBlockNumber.toULong())) - } - .completeNow() - } - .exceptionally { testContext.failNow(it) } - - val outputFileInprogress = - "$outputDirectory/$startBlockNumber-$endBlockNumber-getZkProof.json.inprogress" - val outputFile = "$outputDirectory/$startBlockNumber-$endBlockNumber-getZkProof.json" - - Thread.sleep(pollingInterval.inWholeMilliseconds * 2) - val inprogressOutputFile = File(outputFileInprogress) - proofFile.copyTo(inprogressOutputFile, true) - inprogressOutputFile.renameTo(File(outputFile)) - } - - @Timeout(2, timeUnit = TimeUnit.SECONDS) - @MethodSource("blocksAndProofs") - 
@ParameterizedTest - fun fileBasedProverClient_reusesAlreadyCreatedProofs_doesntRequestAgain( - blocksToGenerate: Int, - proofFile: File, - vertx: Vertx, - @TempDir tempDir: Path, - testContext: VertxTestContext - ) { - val requestDirectory = Path.of(tempDir.toString(), requestSubdirectory) - val responseDirectory = Path.of(tempDir.toString(), responseSubdirectory) - val proverClient = buildProverClient(vertx, tempDir) - val blocks = randomExecutionPayloads(blocksToGenerate) - val startBlockNumber = blocks.first().blockNumber - val endBlockNumber = blocks.last().blockNumber - - val tracesResponse = GenerateTracesResponse(tracesFileName, tracesEngineVersion) - val stateManagerResponse = - GetZkEVMStateMerkleProofResponse( - zkStateMerkleProof = merkleProofJson, - zkParentStateRootHash = Bytes32.random(), - zkEndStateRootHash = Bytes32.random(), - zkStateManagerVersion = zkEvmStateManagerVersion - ) - - proofFile.copyTo(File("$responseDirectory/$startBlockNumber-$endBlockNumber-getZkProof.json"), true) - - proverClient - .requestBatchExecutionProof(blocks, tracesResponse, stateManagerResponse) - .thenApply { response -> - testContext - .verify { - Assertions.assertThat(requestDirectory).isEmptyDirectory() - Assertions.assertThat(response).isEqualTo( - GetProofResponse(startBlockNumber.toULong(), endBlockNumber.toULong()) - ) - } - .completeNow() - } - .exceptionally { testContext.failNow(it) } - } -} diff --git a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedBlobCompressionProverClientTest.kt b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedBlobCompressionProverClientTest.kt deleted file mode 100644 index a78feae5e..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedBlobCompressionProverClientTest.kt +++ /dev/null @@ -1,423 +0,0 @@ -package 
net.consensys.zkevm.coordinator.clients - -import com.github.michaelbull.result.Err -import com.github.michaelbull.result.Ok -import com.github.michaelbull.result.Result -import io.vertx.core.Vertx -import io.vertx.junit5.Timeout -import io.vertx.junit5.VertxExtension -import io.vertx.junit5.VertxTestContext -import net.consensys.encodeHex -import net.consensys.linea.traces.TracesCountersV1 -import net.consensys.zkevm.coordinator.clients.prover.CompressionProofRequestFileNameProvider -import net.consensys.zkevm.coordinator.clients.prover.FileBasedBlobCompressionProverClient -import net.consensys.zkevm.coordinator.clients.prover.serialization.BlobCompressionProofJsonResponse -import net.consensys.zkevm.domain.BlockIntervals -import net.consensys.zkevm.domain.ConflationCalculationResult -import net.consensys.zkevm.domain.ConflationTrigger -import net.consensys.zkevm.domain.ProofIndex -import net.consensys.zkevm.ethereum.coordination.blob.ShnarfResult -import net.consensys.zkevm.fileio.FileMonitor -import net.consensys.zkevm.fileio.FileReader -import net.consensys.zkevm.fileio.FileWriter -import org.assertj.core.api.Assertions -import org.junit.jupiter.api.BeforeAll -import org.junit.jupiter.api.Test -import org.junit.jupiter.api.TestInstance -import org.junit.jupiter.api.extension.ExtendWith -import org.junit.jupiter.api.io.TempDir -import org.mockito.Mockito -import org.mockito.kotlin.any -import org.mockito.kotlin.mock -import org.mockito.kotlin.whenever -import tech.pegasys.teku.infrastructure.async.SafeFuture -import java.nio.file.Path -import java.util.concurrent.TimeUnit -import kotlin.random.Random -import kotlin.time.Duration.Companion.milliseconds - -@ExtendWith(VertxExtension::class) -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class FileBasedBlobCompressionProverClientTest { - private val requestSubdirectory = "request" - private val responseSubdirectory = "response" - private val pollingInterval = 10.milliseconds - private val mockFileWriter = 
mock(defaultAnswer = Mockito.RETURNS_DEEP_STUBS) - private val mockFileReader = mock>( - defaultAnswer = Mockito.RETURNS_DEEP_STUBS - ) - private val mockFileMonitor = mock(defaultAnswer = Mockito.RETURNS_DEEP_STUBS) - private val conflations = listOf( - ConflationCalculationResult( - startBlockNumber = 100UL, - endBlockNumber = 110UL, - tracesCounters = TracesCountersV1.EMPTY_TRACES_COUNT, /* = Map */ - conflationTrigger = ConflationTrigger.TRACES_LIMIT - ), - ConflationCalculationResult( - startBlockNumber = 111UL, - endBlockNumber = 122UL, - tracesCounters = TracesCountersV1.EMPTY_TRACES_COUNT, /* = Map */ - conflationTrigger = ConflationTrigger.DATA_LIMIT - ) - ) - val parentStateRootHash = Random.nextBytes(32) - val finalStateRootHash = Random.nextBytes(32) - val parentDataHash = Random.nextBytes(32) - val prevShnarf = Random.nextBytes(32) - val expectedShnarfResult = ShnarfResult( - dataHash = Random.nextBytes(32), - snarkHash = Random.nextBytes(32), - expectedX = Random.nextBytes(32), - expectedY = Random.nextBytes(32), - expectedShnarf = Random.nextBytes(32), - commitment = ByteArray(0), - kzgProofContract = ByteArray(0), - kzgProofSideCar = ByteArray(0) - ) - val expectedShnarfResultWithEip4844 = ShnarfResult( - dataHash = Random.nextBytes(32), - snarkHash = Random.nextBytes(32), - expectedX = Random.nextBytes(32), - expectedY = Random.nextBytes(32), - expectedShnarf = Random.nextBytes(32), - commitment = Random.nextBytes(48), - kzgProofContract = Random.nextBytes(48), - kzgProofSideCar = Random.nextBytes(48) - ) - val compressedData = Random.nextBytes(32) - private val blobCompressionProofResponse = BlobCompressionProofJsonResponse( - compressedData = Random.nextBytes(128), - conflationOrder = BlockIntervals(100UL, listOf(110UL, 122UL)), - prevShnarf = Random.nextBytes(32), - parentStateRootHash = Random.nextBytes(32), - finalStateRootHash = Random.nextBytes(32), - parentDataHash = Random.nextBytes(32), - dataHash = Random.nextBytes(32), - snarkHash = 
Random.nextBytes(32), - expectedX = Random.nextBytes(32), - expectedY = Random.nextBytes(32), - expectedShnarf = Random.nextBytes(32), - decompressionProof = Random.nextBytes(512), - proverVersion = "mock-0.0.0", - verifierID = 6789 - ) - private val blobCompressionProofResponseWithEip4844 = BlobCompressionProofJsonResponse( - compressedData = Random.nextBytes(128), - conflationOrder = BlockIntervals(100UL, listOf(110UL, 122UL)), - prevShnarf = Random.nextBytes(32), - parentStateRootHash = Random.nextBytes(32), - finalStateRootHash = Random.nextBytes(32), - parentDataHash = Random.nextBytes(32), - dataHash = Random.nextBytes(32), - snarkHash = Random.nextBytes(32), - expectedX = Random.nextBytes(32), - expectedY = Random.nextBytes(32), - expectedShnarf = Random.nextBytes(32), - decompressionProof = Random.nextBytes(512), - proverVersion = "mock-0.0.0", - verifierID = 6789, - eip4844Enabled = true, - commitment = expectedShnarfResultWithEip4844.commitment, - kzgProofContract = expectedShnarfResultWithEip4844.kzgProofContract, - kzgProofSidecar = expectedShnarfResultWithEip4844.kzgProofSideCar - ) - - private fun buildProverClient( - vertx: Vertx, - requestFolderPath: Path, - responseFolderPath: Path - ): FileBasedBlobCompressionProverClient { - return FileBasedBlobCompressionProverClient( - FileBasedBlobCompressionProverClient.Config( - requestFileDirectory = requestFolderPath, - responseFileDirectory = responseFolderPath, - inprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", - inprogressRequestFileSuffix = ".coordinator_writing_inprogress", - pollingInterval = pollingInterval, - timeout = 100.milliseconds - ), - vertx, - mockFileWriter, - mockFileReader, - mockFileMonitor - ) - } - - @BeforeAll - fun init() { - // To warmup assertions otherwise first test may fail - Assertions.assertThat(true).isTrue() - } - - @Timeout(15, timeUnit = TimeUnit.SECONDS) - @Test - fun fileBasedBlobCompressionProverClient_returnsProofs( - vertx: Vertx, - @TempDir tempDir: 
Path, - testContext: VertxTestContext - ) { - val inputDirectory = Path.of(tempDir.toString(), requestSubdirectory) - val outputDirectory = Path.of(tempDir.toString(), responseSubdirectory) - val proverClient = buildProverClient(vertx, inputDirectory, outputDirectory) - - whenever( - mockFileMonitor.fileExists( - any() - ) - ).thenAnswer { - SafeFuture.completedFuture(false) - } - - whenever( - mockFileMonitor.fileExists( - any(), - any() - ) - ).thenAnswer { - SafeFuture.completedFuture(false) - } - - whenever( - mockFileMonitor.monitor( - any() - ) - ).thenAnswer { - SafeFuture.completedFuture>( - Ok( - it.getArgument(0) - ) - ) - } - - whenever( - mockFileWriter.write( - any(), - any(), - any() - ) - ).thenAnswer { - SafeFuture.completedFuture( - it.getArgument(1) - ) - } - - whenever( - mockFileReader.read( - any() - ) - ).thenAnswer { - SafeFuture.completedFuture( - Ok(blobCompressionProofResponse) - ) - } - - proverClient - .requestBlobCompressionProof( - compressedData = compressedData, - conflations = conflations, - parentStateRootHash = parentStateRootHash, - finalStateRootHash = finalStateRootHash, - parentDataHash = parentDataHash, - prevShnarf = prevShnarf, - expectedShnarfResult = expectedShnarfResult, - commitment = expectedShnarfResult.commitment, - kzgProofContract = expectedShnarfResult.kzgProofContract, - kzgProofSideCar = expectedShnarfResult.kzgProofSideCar - ) - .thenApply { response -> - testContext - .verify { - if (response is Err) { - testContext.failNow(response.error.asException()) - } - Assertions.assertThat(response).isEqualTo(Ok(blobCompressionProofResponse.toDomainObject())) - } - .completeNow() - } - .exceptionally { testContext.failNow(it) } - } - - @Timeout(2, timeUnit = TimeUnit.SECONDS) - @Test - fun fileBasedBlobCompressionProverClient_reusesAlreadyCreatedProofs_doesntRequestAgain( - vertx: Vertx, - @TempDir tempDir: Path, - testContext: VertxTestContext - ) { - val inputDirectory = Path.of(tempDir.toString(), 
requestSubdirectory) - val outputDirectory = Path.of(tempDir.toString(), responseSubdirectory) - val proverClient = buildProverClient(vertx, inputDirectory, outputDirectory) - - whenever( - mockFileWriter.write( - any(), - any(), - any() - ) - ).thenAnswer { - SafeFuture.failedFuture( - Exception("Failed to write request") - ) - } - - whenever( - mockFileMonitor.monitor( - any() - ) - ).thenAnswer { - SafeFuture.failedFuture>( - Exception("Failed to monitor file") - ) - } - - whenever( - mockFileMonitor.fileExists( - any() - ) - ).thenAnswer { - SafeFuture.completedFuture(true) - } - - whenever( - mockFileReader.read( - any() - ) - ).thenAnswer { - SafeFuture.completedFuture( - Ok(blobCompressionProofResponse) - ) - } - - proverClient - .requestBlobCompressionProof( - compressedData = compressedData, - conflations = conflations, - parentStateRootHash = parentStateRootHash, - finalStateRootHash = finalStateRootHash, - parentDataHash = parentDataHash, - prevShnarf = prevShnarf, - expectedShnarfResult = expectedShnarfResult, - commitment = expectedShnarfResult.commitment, - kzgProofContract = expectedShnarfResult.kzgProofContract, - kzgProofSideCar = expectedShnarfResult.kzgProofSideCar - ) - .thenApply { response -> - testContext - .verify { - if (response is Err) { - testContext.failNow(response.error.asException()) - } - - Assertions.assertThat(response).isEqualTo(Ok(blobCompressionProofResponse.toDomainObject())) - } - .completeNow() - } - .exceptionally { testContext.failNow(it) } - } - - @Timeout(15, timeUnit = TimeUnit.SECONDS) - @Test - fun fileBasedBlobCompressionProverClient_requestsWithEip4844( - vertx: Vertx, - @TempDir tempDir: Path, - testContext: VertxTestContext - ) { - val inputDirectory = Path.of(tempDir.toString(), requestSubdirectory) - val outputDirectory = Path.of(tempDir.toString(), responseSubdirectory) - val proverClient = buildProverClient(vertx, inputDirectory, outputDirectory) - - whenever( - mockFileMonitor.fileExists( - any() - ) - 
).thenAnswer { - SafeFuture.completedFuture(false) - } - - whenever( - mockFileMonitor.fileExists( - any(), - any() - ) - ).thenAnswer { - SafeFuture.completedFuture(false) - } - - whenever( - mockFileMonitor.monitor( - any() - ) - ).thenAnswer { - SafeFuture.completedFuture>( - Ok( - it.getArgument(0) - ) - ) - } - - whenever( - mockFileWriter.write( - any(), - any(), - any() - ) - ).thenAnswer { - SafeFuture.completedFuture( - it.getArgument(1) - ) - } - - whenever( - mockFileReader.read( - any() - ) - ).thenAnswer { - SafeFuture.completedFuture( - Ok(blobCompressionProofResponseWithEip4844) - ) - } - - proverClient - .requestBlobCompressionProof( - compressedData = compressedData, - conflations = conflations, - parentStateRootHash = parentStateRootHash, - finalStateRootHash = finalStateRootHash, - parentDataHash = parentDataHash, - prevShnarf = prevShnarf, - expectedShnarfResult = expectedShnarfResultWithEip4844, - commitment = expectedShnarfResultWithEip4844.commitment, - kzgProofContract = expectedShnarfResultWithEip4844.kzgProofContract, - kzgProofSideCar = expectedShnarfResultWithEip4844.kzgProofSideCar - ) - .thenApply { response -> - testContext - .verify { - if (response is Err) { - testContext.failNow(response.error.asException()) - } - - Assertions.assertThat(response).isEqualTo(Ok(blobCompressionProofResponseWithEip4844.toDomainObject())) - } - .completeNow() - } - .exceptionally { testContext.failNow(it) } - } - - @Test - fun `test request filename`() { - val requestHash = expectedShnarfResult.expectedShnarf - val requestHashString = requestHash.encodeHex(prefix = false) - val requestFileName = CompressionProofRequestFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = 1uL, - endBlockNumber = 11uL, - hash = requestHash - ) - ) - Assertions.assertThat(requestFileName).isEqualTo( - "1-11-bcv0.0-ccv0.0-$requestHashString-getZkBlobCompressionProof.json" - ) - } -} diff --git 
a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedProofAggregationClientTest.kt b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedProofAggregationClientTest.kt deleted file mode 100644 index 9c9e23746..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/FileBasedProofAggregationClientTest.kt +++ /dev/null @@ -1,226 +0,0 @@ -package net.consensys.zkevm.coordinator.clients - -import com.github.michaelbull.result.Err -import com.github.michaelbull.result.Ok -import io.vertx.core.Vertx -import io.vertx.junit5.VertxExtension -import kotlinx.datetime.Clock -import kotlinx.datetime.Instant -import net.consensys.encodeHex -import net.consensys.linea.async.get -import net.consensys.linea.errors.ErrorResponse -import net.consensys.trimToSecondPrecision -import net.consensys.zkevm.coordinator.clients.prover.AggregationProofFileNameProvider -import net.consensys.zkevm.coordinator.clients.prover.FileBasedProofAggregationClient -import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization.proofResponseMapperV1 -import net.consensys.zkevm.coordinator.clients.prover.serialization.ProofToFinalizeJsonResponse -import net.consensys.zkevm.domain.BlockIntervals -import net.consensys.zkevm.domain.ProofIndex -import net.consensys.zkevm.domain.ProofToFinalize -import net.consensys.zkevm.domain.ProofsToAggregate -import net.consensys.zkevm.fileio.FileMonitor -import net.consensys.zkevm.fileio.FileWriter -import org.junit.jupiter.api.AfterEach -import org.junit.jupiter.api.Assertions -import org.junit.jupiter.api.BeforeEach -import org.junit.jupiter.api.Test -import org.junit.jupiter.api.extension.ExtendWith -import java.nio.file.Files -import java.nio.file.Path -import kotlin.random.Random -import kotlin.time.DurationUnit -import kotlin.time.toDuration - 
-@ExtendWith(VertxExtension::class) -class FileBasedProofAggregationClientTest { - lateinit var tmpRequestDirectory: Path - lateinit var tmpResponseDirectory: Path - lateinit var config: FileBasedProofAggregationClient.Config - lateinit var aggregation: ProofsToAggregate - lateinit var responseFileName: String - lateinit var fileWriter: FileWriter - lateinit var fileMonitor: FileMonitor - lateinit var fileBasedProofAggregationClient: FileBasedProofAggregationClient - - val sampleResponse = ProofToFinalize( - aggregatedProof = "mock_aggregatedProof".toByteArray(), - aggregatedVerifierIndex = 1, - aggregatedProofPublicInput = "mock_aggregatedProofPublicInput".toByteArray(), - dataHashes = listOf("mock_dataHashes_1".toByteArray()), - dataParentHash = "mock_dataParentHash".toByteArray(), - parentStateRootHash = "mock_parentStateRootHash".toByteArray(), - parentAggregationLastBlockTimestamp = Clock.System.now().trimToSecondPrecision(), - finalTimestamp = Clock.System.now().trimToSecondPrecision(), - firstBlockNumber = 1, - finalBlockNumber = 23, - l1RollingHash = "mock_l1RollingHash".toByteArray(), - l1RollingHashMessageNumber = 4, - l2MerkleRoots = listOf("mock_l2MerkleRoots".toByteArray()), - l2MerkleTreesDepth = 5, - l2MessagingBlocksOffsets = "mock_l2MessagingBlocksOffsets".toByteArray() - ) - - private val requestHash = "request-hash".toByteArray() - - @BeforeEach - fun setup(vertx: Vertx) { - tmpRequestDirectory = - Files.createTempDirectory(FileBasedProofAggregationClientTest::class.toString() + "-request") - tmpResponseDirectory = - Files.createTempDirectory(FileBasedProofAggregationClientTest::class.toString() + "-response") - config = FileBasedProofAggregationClient.Config( - requestFileDirectory = tmpRequestDirectory, - responseFileDirectory = tmpResponseDirectory, - responseFilePollingInterval = 200.toDuration(DurationUnit.MILLISECONDS), - responseFileMonitorTimeout = 2.toDuration(DurationUnit.SECONDS), - inprogressRequestFileSuffix = "inp", - 
proverInProgressSuffixPattern = "\\.prover-inprogress" - ) - - aggregation = ProofsToAggregate( - compressionProofIndexes = listOf( - ProofIndex(11u, 23u, Random.nextBytes(32)), - ProofIndex(24u, 27u, Random.nextBytes(32)) - ), - executionProofs = BlockIntervals(11u, listOf(23u, 27u)), - parentAggregationLastBlockTimestamp = Instant.parse("2024-01-21T16:08:22Z"), - parentAggregationLastL1RollingHashMessageNumber = 1u.toULong(), - parentAggregationLastL1RollingHash = ByteArray(32) - ) - - responseFileName = AggregationProofFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = 11u, - endBlockNumber = 27u, - hash = requestHash - ) - ) - fileWriter = FileWriter(vertx, proofResponseMapperV1) - fileMonitor = FileMonitor( - vertx, - FileMonitor.Config(50.toDuration(DurationUnit.MILLISECONDS), 2.toDuration(DurationUnit.SECONDS)) - ) - - fileBasedProofAggregationClient = FileBasedProofAggregationClient( - vertx = vertx, - config = config, - hashFunction = { _ -> requestHash } - ) - } - - @AfterEach - fun tearDown(vertx: Vertx) { - vertx.fileSystem().deleteRecursiveBlocking(tmpRequestDirectory.toString(), true) - vertx.fileSystem().deleteRecursiveBlocking(tmpResponseDirectory.toString(), true) - val vertxStopFuture = vertx.close() - vertxStopFuture.get() - } - - @Test - fun test_getAggregatedProof_proofExists() { - val responseFile = config.responseFileDirectory.resolve(responseFileName).toFile() - proofResponseMapperV1.writeValue(responseFile, ProofToFinalizeJsonResponse.fromDomainObject(sampleResponse)) - - val result = fileBasedProofAggregationClient.getAggregatedProof(aggregation).get() - Assertions.assertEquals( - Ok(sampleResponse), - result - ) - } - - @Test - fun test_getAggregatedProof_proofCreated() { - fileMonitor.fileExists(config.requestFileDirectory, ".*json").thenApply { - val responseFile = config.responseFileDirectory.resolve(responseFileName).toFile() - proofResponseMapperV1.writeValue(responseFile, 
ProofToFinalizeJsonResponse.fromDomainObject(sampleResponse)) - } - - val result = fileBasedProofAggregationClient.getAggregatedProof(aggregation).get() - Assertions.assertEquals( - Ok(sampleResponse), - result - ) - } - - @Test - fun test_getAggregatedProof_proofNotCreated() { - val error = Err(ErrorResponse(ProverErrorType.ResponseNotFound, "")) - val result = fileBasedProofAggregationClient.getAggregatedProof(aggregation).get() - Assertions.assertEquals(error, result) - } - - @Test - fun test_getAggregatedProof_requestWriteInProgress(vertx: Vertx) { - val requestFileName = fileBasedProofAggregationClient.getZkAggregatedProofRequestFileName( - fileBasedProofAggregationClient.buildRequest(aggregation), - aggregation - ) - val requestFilePath = config.requestFileDirectory.resolve(requestFileName) - val inProgressRequestFilePath = Path.of( - requestFilePath.toAbsolutePath().toString() + ".${config.inprogressRequestFileSuffix}" - ) - inProgressRequestFilePath.toFile().createNewFile() - - vertx.setTimer(500L) { - val responseFile = config.responseFileDirectory.resolve(responseFileName).toFile() - proofResponseMapperV1.writeValue(responseFile, ProofToFinalizeJsonResponse.fromDomainObject(sampleResponse)) - } - - val result = fileBasedProofAggregationClient.getAggregatedProof(aggregation).get() - Assertions.assertEquals( - Ok(sampleResponse), - result - ) - } - - @Test - fun test_getAggregatedProof_provingInProgress(vertx: Vertx) { - val requestFileName = fileBasedProofAggregationClient.getZkAggregatedProofRequestFileName( - fileBasedProofAggregationClient.buildRequest(aggregation), - aggregation - ) - val requestFilePath = config.requestFileDirectory.resolve(requestFileName) - val provingInProgressFilePath = Path.of( - requestFilePath.toAbsolutePath().toString() + ".prover-inprogress" - ) - provingInProgressFilePath.toFile().createNewFile() - - vertx.setTimer(500L) { - val responseFile = config.responseFileDirectory.resolve(responseFileName).toFile() - 
proofResponseMapperV1.writeValue(responseFile, ProofToFinalizeJsonResponse.fromDomainObject(sampleResponse)) - } - - val result = fileBasedProofAggregationClient.getAggregatedProof(aggregation).get() - Assertions.assertEquals( - Ok(sampleResponse), - result - ) - } - - @Test - fun test_getRequestFileName() { - val proofsToAggregate = ProofsToAggregate( - compressionProofIndexes = listOf( - ProofIndex(11u, 20u, Random.nextBytes(32)), - ProofIndex(21u, 27u, Random.nextBytes(32)) - ), - executionProofs = BlockIntervals(startingBlockNumber = 11u, upperBoundaries = listOf(20u, 27u)), - parentAggregationLastBlockTimestamp = Instant.parse("2024-01-21T16:08:22Z"), - parentAggregationLastL1RollingHashMessageNumber = 1u.toULong(), - parentAggregationLastL1RollingHash = ByteArray(32) - ) - - val blockInterval = proofsToAggregate.getStartEndBlockInterval() - - Assertions.assertEquals( - "11-27-${requestHash.encodeHex(prefix = false)}-getZkAggregatedProof.json", - AggregationProofFileNameProvider.getFileName( - ProofIndex( - startBlockNumber = blockInterval.startBlockNumber, - endBlockNumber = blockInterval.endBlockNumber, - hash = requestHash - ) - ) - ) - } -} diff --git a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/CommonFunctionality.kt b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/CommonFunctionality.kt deleted file mode 100644 index 5439dc353..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/CommonFunctionality.kt +++ /dev/null @@ -1,57 +0,0 @@ -package net.consensys.zkevm.coordinator.clients.prover - -import net.consensys.zkevm.domain.BridgeLogsData -import net.consensys.zkevm.domain.ProofIndex -import okhttp3.internal.toHexString -import org.apache.tuweni.bytes.Bytes -import tech.pegasys.teku.ethereum.executionclient.schema.ExecutionPayloadV1 -import 
tech.pegasys.teku.ethereum.executionclient.schema.randomExecutionPayload -import kotlin.random.Random - -class SimpleFileNameProvider : ProverFileNameProvider(FileNameSuffixes.EXECUTION_PROOF_SUFFIX) { - override fun getFileName(proofIndex: ProofIndex): String { - return "${proofIndex.startBlockNumber}-${proofIndex.endBlockNumber}" + - "-${FileNameSuffixes.EXECUTION_PROOF_SUFFIX}" - } -} - -fun randomExecutionPayloads(numberOfBlocks: Int): List { - return (1..numberOfBlocks) - .map { index -> - randomExecutionPayload(listOf(Bytes.fromHexString(CommonTestData.validTransactionRlp)), index.toLong()) - } - .toMutableList() - .apply { this.sortBy { it.blockNumber } } -} - -fun randomBridgeLogsDataList(numberOfBlocks: Int): List> { - return (1..numberOfBlocks) - .map { index -> - listOf( - BridgeLogsData( - removed = false, - logIndex = "0x0", - transactionIndex = "0x0", - transactionHash = "0x" + Random.nextBytes(32).joinToString("") { - java.lang.String.format("%02x", it) - }, - blockHash = "0x" + Random.nextBytes(32).joinToString("") { - java.lang.String.format("%02x", it) - }, - blockNumber = "0x" + index.toHexString(), - address = "0x" + Random.nextBytes(20).joinToString("") { - java.lang.String.format("%02x", it) - }, - data = "0x" + Random.nextBytes(128).joinToString("") { - java.lang.String.format("%02x", it) - }, - topics = listOf( - "0x" + Random.nextBytes(32).joinToString("") { - java.lang.String.format("%02x", it) - } - ) - ) - ) - } - .toMutableList() -} diff --git a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/ExecutionProofRequestDataDecoratorTest.kt b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/ExecutionProofRequestDataDecoratorTest.kt new file mode 100644 index 000000000..f0cbabf07 --- /dev/null +++ 
b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/ExecutionProofRequestDataDecoratorTest.kt @@ -0,0 +1,104 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import com.fasterxml.jackson.databind.node.ArrayNode +import net.consensys.encodeHex +import net.consensys.zkevm.coordinator.clients.BatchExecutionProofRequestV1 +import net.consensys.zkevm.coordinator.clients.GenerateTracesResponse +import net.consensys.zkevm.coordinator.clients.GetZkEVMStateMerkleProofResponse +import net.consensys.zkevm.coordinator.clients.L2MessageServiceLogsClient +import net.consensys.zkevm.domain.RlpBridgeLogsData +import net.consensys.zkevm.encoding.ExecutionPayloadV1Encoder +import org.apache.tuweni.bytes.Bytes32 +import org.assertj.core.api.Assertions.assertThat +import org.junit.jupiter.api.BeforeEach +import org.junit.jupiter.api.Test +import org.mockito.Mockito +import org.mockito.kotlin.any +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.eq +import org.mockito.kotlin.mock +import org.mockito.kotlin.spy +import org.mockito.kotlin.whenever +import org.web3j.protocol.Web3j +import org.web3j.protocol.core.methods.response.EthBlock +import tech.pegasys.teku.ethereum.executionclient.schema.ExecutionPayloadV1 +import tech.pegasys.teku.ethereum.executionclient.schema.executionPayloadV1 +import tech.pegasys.teku.infrastructure.async.SafeFuture +import kotlin.random.Random + +class ExecutionProofRequestDataDecoratorTest { + + private lateinit var l2MessageServiceLogsClient: L2MessageServiceLogsClient + private lateinit var l2Web3jClient: Web3j + private lateinit var encoder: ExecutionPayloadV1Encoder + private lateinit var requestDatDecorator: ExecutionProofRequestDataDecorator + private val fakeEncoder: ExecutionPayloadV1Encoder = object : ExecutionPayloadV1Encoder { + override fun encode(payload: ExecutionPayloadV1): ByteArray { + return payload.blockNumber.toString().toByteArray() + } + } + + 
@BeforeEach + fun beforeEach() { + l2MessageServiceLogsClient = mock(defaultAnswer = Mockito.RETURNS_DEEP_STUBS) + l2Web3jClient = mock(defaultAnswer = Mockito.RETURNS_DEEP_STUBS) + encoder = spy(fakeEncoder) + requestDatDecorator = ExecutionProofRequestDataDecorator(l2MessageServiceLogsClient, l2Web3jClient, encoder) + } + + @Test + fun `should decorate data with bridge logs and parent stateRootHash`() { + val executionPayload1 = executionPayloadV1(blockNumber = 123) + val executionPayload2 = executionPayloadV1(blockNumber = 124) + val type2StateResponse = GetZkEVMStateMerkleProofResponse( + zkStateMerkleProof = ArrayNode(null), + zkParentStateRootHash = Bytes32.random(), + zkEndStateRootHash = Bytes32.random(), + zkStateManagerVersion = "2.0.0" + ) + val generateTracesResponse = GenerateTracesResponse( + tracesFileName = "123-114-conflated-traces.json", + tracesEngineVersion = "1.0.0" + ) + val request = BatchExecutionProofRequestV1( + blocks = listOf(executionPayload1, executionPayload2), + tracesResponse = generateTracesResponse, + type2StateData = type2StateResponse + ) + val stateRoot = Random.nextBytes(32).encodeHex() + whenever(l2Web3jClient.ethGetBlockByNumber(any(), any()).sendAsync()) + .thenAnswer { + val mockedEthBlock = mock(defaultAnswer = Mockito.RETURNS_DEEP_STUBS) { + on { block.stateRoot } doReturn stateRoot + } + SafeFuture.completedFuture(mockedEthBlock) + } + + whenever(l2MessageServiceLogsClient.getBridgeLogs(eq(executionPayload1.blockNumber.longValue()))) + .thenReturn(SafeFuture.completedFuture(listOf(CommonTestData.bridgeLogs[0]))) + whenever(l2MessageServiceLogsClient.getBridgeLogs(eq(executionPayload2.blockNumber.longValue()))) + .thenReturn(SafeFuture.completedFuture(listOf(CommonTestData.bridgeLogs[1]))) + + val requestDto = requestDatDecorator.invoke(request).get() + + assertThat(requestDto.keccakParentStateRootHash).isEqualTo(stateRoot) + 
assertThat(requestDto.zkParentStateRootHash).isEqualTo(type2StateResponse.zkParentStateRootHash.toHexString()) + assertThat(requestDto.conflatedExecutionTracesFile).isEqualTo("123-114-conflated-traces.json") + assertThat(requestDto.tracesEngineVersion).isEqualTo("1.0.0") + assertThat(requestDto.type2StateManagerVersion).isEqualTo("2.0.0") + assertThat(requestDto.zkStateMerkleProof).isEqualTo(type2StateResponse.zkStateMerkleProof) + assertThat(requestDto.blocksData).hasSize(2) + assertThat(requestDto.blocksData[0]).isEqualTo( + RlpBridgeLogsData( + rlp = "123".toByteArray().encodeHex(), + bridgeLogs = listOf(CommonTestData.bridgeLogs[0]) + ) + ) + assertThat(requestDto.blocksData[1]).isEqualTo( + RlpBridgeLogsData( + rlp = "124".toByteArray().encodeHex(), + bridgeLogs = listOf(CommonTestData.bridgeLogs[1]) + ) + ) + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/GenericFileBasedProverClientTest.kt b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/GenericFileBasedProverClientTest.kt new file mode 100644 index 000000000..d945b1b96 --- /dev/null +++ b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/GenericFileBasedProverClientTest.kt @@ -0,0 +1,223 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import io.vertx.core.Vertx +import io.vertx.junit5.VertxExtension +import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization +import net.consensys.zkevm.domain.BlockInterval +import net.consensys.zkevm.domain.ProofIndex +import net.consensys.zkevm.fileio.FileReader +import net.consensys.zkevm.fileio.FileWriter +import org.assertj.core.api.Assertions.assertThat +import org.awaitility.Awaitility.await +import org.junit.jupiter.api.BeforeEach +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.assertThrows 
+import org.junit.jupiter.api.extension.ExtendWith +import org.junit.jupiter.api.io.TempDir +import tech.pegasys.teku.infrastructure.async.SafeFuture +import java.nio.file.Path +import kotlin.time.Duration.Companion.milliseconds +import kotlin.time.Duration.Companion.seconds +import kotlin.time.toJavaDuration + +@ExtendWith(VertxExtension::class) +class GenericFileBasedProverClientTest { + data class ProofRequest(override val startBlockNumber: ULong, override val endBlockNumber: ULong) : BlockInterval + data class ProofResponse(val startBlockNumber: ULong, val endBlockNumber: ULong) + data class ProofRequestDto(val blockNumberStart: ULong, val blockNumberEnd: ULong) { + companion object { + fun fromDomain(request: ProofRequest): ProofRequestDto { + return ProofRequestDto(request.startBlockNumber, request.endBlockNumber) + } + } + } + + // Repeated Dto class for illustration purposes + data class ProofResponseDto(val blockNumberStart: ULong, val blockNumberEnd: ULong) { + companion object { + fun toDomain(request: ProofResponseDto): ProofResponse { + return ProofResponse(request.blockNumberStart, request.blockNumberEnd) + } + } + } + + private val requestFileNameProvider = ProverFileNameProvider("proof-request.json") + private val responseFileNameProvider = ProverFileNameProvider("proof-response.json") + + private lateinit var proverClient: GenericFileBasedProverClient< + ProofRequest, + ProofResponse, + ProofRequestDto, + ProofResponseDto + > + private lateinit var proverConfig: FileBasedProverConfig + + private fun createProverClient( + config: FileBasedProverConfig, + vertx: Vertx + ): GenericFileBasedProverClient { + return GenericFileBasedProverClient( + config = config, + vertx = vertx, + fileWriter = FileWriter(vertx, JsonSerialization.proofResponseMapperV1), + fileReader = FileReader( + vertx, + JsonSerialization.proofResponseMapperV1, + ProofResponseDto::class.java + ), + requestFileNameProvider = requestFileNameProvider, + responseFileNameProvider = 
responseFileNameProvider, + requestMapper = { SafeFuture.completedFuture(ProofRequestDto.fromDomain(it)) }, + proofTypeLabel = "batch", + responseMapper = ProofResponseDto::toDomain + ) + } + + @BeforeEach + fun beforeEach( + vertx: Vertx, + @TempDir tempDir: Path + ) { + proverConfig = FileBasedProverConfig( + requestsDirectory = tempDir.resolve("requests"), + responsesDirectory = tempDir.resolve("responses"), + inprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", + inprogressRequestWritingSuffix = "coordinator_writing_inprogress", + pollingInterval = 100.milliseconds, + pollingTimeout = 2.seconds + ) + + proverClient = createProverClient(proverConfig, vertx) + } + + private fun responseFilePath(proofIndex: ProofIndex): Path { + return proverConfig.responsesDirectory.resolve(responseFileNameProvider.getFileName(proofIndex)) + } + + private fun requestFilePath(proofIndex: ProofIndex): Path { + return proverConfig.requestsDirectory.resolve(requestFileNameProvider.getFileName(proofIndex)) + } + + private fun saveToFile(file: Path, content: Any) { + JsonSerialization.proofResponseMapperV1.writeValue(file.toFile(), content) + } + + private fun readFromFile(file: Path, valueType: Class): T { + return JsonSerialization.proofResponseMapperV1.readValue(file.toFile(), valueType) + } + + @Test + fun `when it cannot create request and response directories shall fail`( + vertx: Vertx + ) { + val dirWithoutWritePermissions = Path.of("/invalid/path") + val invalidConfig = proverConfig.copy( + requestsDirectory = dirWithoutWritePermissions.resolve("requests"), + responsesDirectory = dirWithoutWritePermissions.resolve("responses") + ) + assertThrows { + createProverClient(invalidConfig, vertx) + } + } + + @Test + fun `when request does not exist shall write it and wait for response`() { + val proofIndex = ProofIndex(startBlockNumber = 1u, endBlockNumber = 20u) + val responseFuture = proverClient.requestProof(proofIndex.toRequest()) + // assert it will write the request 
file + val expectedRequestFile = requestFilePath(proofIndex) + val expectedResponseFile = responseFilePath(proofIndex) + + // assert it will wait for the response file + assertThat(responseFuture.isDone).isFalse() + assertThat(responseFuture.isCancelled).isFalse() + assertThat(responseFuture.isCompletedExceptionally).isFalse() + assertThat(responseFuture.isCompletedNormally).isFalse() + assertRequestWaitingCountIs(1) + + // write response + saveToFile(expectedResponseFile, ProofResponseDto(blockNumberStart = 1u, blockNumberEnd = 20u)) + + val response = responseFuture.get() + assertThat(expectedRequestFile).exists() + assertThat(response).isEqualTo(ProofResponse(startBlockNumber = 1u, endBlockNumber = 20u)) + assertThat(proverClient.get()).isEqualTo(0) + } + + @Test + fun `when response already exists, should reuse it`() { + val proofIndex = ProofIndex(startBlockNumber = 2u, endBlockNumber = 22u) + // write response + saveToFile(responseFilePath(proofIndex), ProofResponseDto(blockNumberStart = 2u, blockNumberEnd = 22u)) + + val response = proverClient.requestProof(proofIndex.toRequest()).get() + assertThat(proverClient.get()).isEqualTo(0) + assertThat(response).isEqualTo(ProofResponse(startBlockNumber = 2u, endBlockNumber = 22u)) + assertThat(requestFilePath(proofIndex)).doesNotExist() + } + + @Test + fun `when request already exists shall skip writing it and wait for response`() { + // this is to prevent when coordinator is restarted + // and the request is already in the requests directory and create a duplicated one + + val proofIndex = ProofIndex(startBlockNumber = 3u, endBlockNumber = 33u) + + // write request + // Write with a different block number content to check that it was not overwritten + saveToFile(requestFilePath(proofIndex), ProofRequestDto(blockNumberStart = 3u, blockNumberEnd = 33333u)) + val responseFuture = proverClient.requestProof(proofIndex.toRequest()) + assertRequestWaitingCountIs(1) + + // write response + 
saveToFile(responseFilePath(proofIndex), ProofResponseDto(blockNumberStart = 3u, blockNumberEnd = 33u)) + + responseFuture.get() + + val proofRequest = readFromFile(requestFilePath(proofIndex), ProofRequestDto::class.java) + + // assert original request was not overwritten + assertThat(proofRequest).isEqualTo(ProofRequestDto(blockNumberStart = 3u, blockNumberEnd = 33333u)) + assertThat(proverClient.get()).isEqualTo(0) + } + + @Test + fun `when request is prooving inprogress shall skip writing it and wait for response`() { + // this is to prevent when coordinator is restarted + // and the request is already in the requests directory being proved by the prover, + // we must not create a duplicated one + val proofIndex = ProofIndex(startBlockNumber = 4u, endBlockNumber = 44u) + // example of PROD file name + // 8930088-8930101-etv0.2.0-stv2.2.0-getZkProof.json.inprogress.prover-aggregation-97695c877-vgsfg + val requestProvingInprogressFilePath = proverConfig.requestsDirectory.resolve( + requestFileNameProvider.getFileName(proofIndex) + + "some-midle-str.inprogress.prover-aggregation-97695c877-vgsfg" + ) + // write request with a different block number content to check tha it was not overwritten + saveToFile(requestProvingInprogressFilePath, ProofRequestDto(blockNumberStart = 4u, blockNumberEnd = 44444u)) + val responseFuture = proverClient.requestProof(proofIndex.toRequest()) + assertRequestWaitingCountIs(1) + + // write response + saveToFile(responseFilePath(proofIndex), ProofResponseDto(blockNumberStart = 4u, blockNumberEnd = 44u)) + + responseFuture.get() + + // assert that the request file was not written again + assertThat(requestFilePath(proofIndex)).doesNotExist() + assertThat(proverClient.get()).isEqualTo(0) + } + + private fun assertRequestWaitingCountIs(expectedCount: Int) { + await() + .atMost(5.seconds.toJavaDuration()) + .untilAsserted { + assertThat(proverClient.get()).isEqualTo(expectedCount.toLong()) + } + } + + private fun ProofIndex.toRequest(): 
ProofRequest = ProofRequest(startBlockNumber, endBlockNumber) +} diff --git a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverClientFactoryTest.kt b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverClientFactoryTest.kt new file mode 100644 index 000000000..31e8995fa --- /dev/null +++ b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/ProverClientFactoryTest.kt @@ -0,0 +1,150 @@ +package net.consensys.zkevm.coordinator.clients.prover + +import io.micrometer.core.instrument.MeterRegistry +import io.micrometer.core.instrument.simple.SimpleMeterRegistry +import io.vertx.core.Vertx +import io.vertx.junit5.VertxExtension +import kotlinx.datetime.Clock +import net.consensys.linea.metrics.MetricsFacade +import net.consensys.linea.metrics.micrometer.MicrometerMetricsFacade +import net.consensys.zkevm.domain.BlockIntervals +import net.consensys.zkevm.domain.ProofIndex +import net.consensys.zkevm.domain.ProofsToAggregate +import org.apache.tuweni.bytes.Bytes32 +import org.assertj.core.api.Assertions.assertThat +import org.awaitility.Awaitility.await +import org.junit.jupiter.api.BeforeEach +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.extension.ExtendWith +import org.junit.jupiter.api.io.TempDir +import java.nio.file.Files +import java.nio.file.Path +import kotlin.time.Duration.Companion.milliseconds +import kotlin.time.Duration.Companion.seconds +import kotlin.time.toJavaDuration + +@ExtendWith(VertxExtension::class) +class ProverClientFactoryTest { + private fun buildProversConfig( + tmpDir: Path, + switchBlockNumber: Int? 
= null + ): ProversConfig { + fun buildProverConfig(proverDir: Path): ProverConfig { + return ProverConfig( + execution = FileBasedProverConfig( + requestsDirectory = proverDir.resolve("execution/requests"), + responsesDirectory = proverDir.resolve("execution/responses"), + pollingInterval = 100.milliseconds, + pollingTimeout = 500.milliseconds, + inprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", + inprogressRequestWritingSuffix = ".inprogress_coordinator_writing" + ), + blobCompression = FileBasedProverConfig( + requestsDirectory = proverDir.resolve("compression/requests"), + responsesDirectory = proverDir.resolve("compression/responses"), + pollingInterval = 100.milliseconds, + pollingTimeout = 500.milliseconds, + inprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", + inprogressRequestWritingSuffix = ".inprogress_coordinator_writing" + ), + proofAggregation = FileBasedProverConfig( + requestsDirectory = proverDir.resolve("aggregation/requests"), + responsesDirectory = proverDir.resolve("aggregation/responses"), + pollingInterval = 100.milliseconds, + pollingTimeout = 500.milliseconds, + inprogressProvingSuffixPattern = ".*\\.inprogress\\.prover.*", + inprogressRequestWritingSuffix = ".inprogress_coordinator_writing" + ) + ) + } + + return ProversConfig( + proverA = buildProverConfig(tmpDir.resolve("prover/v2")), + switchBlockNumberInclusive = switchBlockNumber?.toULong(), + proverB = switchBlockNumber?.let { + buildProverConfig(tmpDir.resolve("prover/v3")) + } + ) + } + + private lateinit var meterRegistry: MeterRegistry + private lateinit var metricsFacade: MetricsFacade + private lateinit var proverClientFactory: ProverClientFactory + private lateinit var vertx: Vertx + private lateinit var testTmpDir: Path + + private val request1 = ProofsToAggregate( + compressionProofIndexes = listOf(ProofIndex(startBlockNumber = 1uL, endBlockNumber = 9uL)), + executionProofs = BlockIntervals(startingBlockNumber = 1uL, listOf(9uL)), + 
parentAggregationLastBlockTimestamp = Clock.System.now(), + parentAggregationLastL1RollingHashMessageNumber = 0uL, + parentAggregationLastL1RollingHash = Bytes32.random().toArray() + ) + private val request2 = ProofsToAggregate( + compressionProofIndexes = listOf(ProofIndex(startBlockNumber = 10uL, endBlockNumber = 19uL)), + executionProofs = BlockIntervals(startingBlockNumber = 10uL, listOf(19uL)), + parentAggregationLastBlockTimestamp = Clock.System.now(), + parentAggregationLastL1RollingHashMessageNumber = 9uL, + parentAggregationLastL1RollingHash = Bytes32.random().toArray() + ) + private val request3 = ProofsToAggregate( + compressionProofIndexes = listOf(ProofIndex(startBlockNumber = 300uL, endBlockNumber = 319uL)), + executionProofs = BlockIntervals(startingBlockNumber = 300uL, listOf(319uL)), + parentAggregationLastBlockTimestamp = Clock.System.now(), + parentAggregationLastL1RollingHashMessageNumber = 299uL, + parentAggregationLastL1RollingHash = Bytes32.random().toArray() + ) + + @BeforeEach + fun beforeEach( + vertx: Vertx, + @TempDir tmpDir: Path + ) { + this.vertx = vertx + this.testTmpDir = tmpDir + meterRegistry = SimpleMeterRegistry() + metricsFacade = MicrometerMetricsFacade(registry = meterRegistry, "linea") + proverClientFactory = + ProverClientFactory(vertx, buildProversConfig(testTmpDir, switchBlockNumber = 200), metricsFacade) + } + + @Test + fun `should create a prover with routing when switch is defined`() { + val proverClient = proverClientFactory.proofAggregationProverClient() + assertThat(proverClient).isInstanceOf(ABProverClientRouter::class.java) + + // swallow timeout exception because responses are not available + kotlin.runCatching { proverClient.requestProof(request1).get() } + kotlin.runCatching { proverClient.requestProof(request2).get() } + kotlin.runCatching { proverClient.requestProof(request3).get() } + + await() + .atMost(5.seconds.toJavaDuration()) + .untilAsserted { + 
Files.list(testTmpDir.resolve("prover/v2/aggregation/requests")).use { + assertThat(it.count()).isEqualTo(2) + } + Files.list(testTmpDir.resolve("prover/v3/aggregation/requests")).use { + assertThat(it.count()).isEqualTo(1) + } + } + } + + @Test + fun `should create metrics gauge and aggregate them`() { + val proverClientI1 = proverClientFactory.proofAggregationProverClient() + val proverClientI2 = proverClientFactory.proofAggregationProverClient() + + kotlin.runCatching { proverClientI1.requestProof(request1).get() } + kotlin.runCatching { proverClientI2.requestProof(request2).get() } + kotlin.runCatching { proverClientI1.requestProof(request3).get() } + + assertThat(meterRegistry.find("linea.batch.prover.waiting").gauge()).isNotNull + assertThat(meterRegistry.find("linea.blob.prover.waiting").gauge()).isNotNull + assertThat(meterRegistry.find("linea.aggregation.prover.waiting").gauge()).isNotNull + + assertThat(meterRegistry.find("linea.batch.prover.waiting").gauge()!!.value()).isEqualTo(0.0) + assertThat(meterRegistry.find("linea.blob.prover.waiting").gauge()!!.value()).isEqualTo(0.0) + assertThat(meterRegistry.find("linea.aggregation.prover.waiting").gauge()!!.value()).isEqualTo(3.0) + } +} diff --git a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/RequestFileWriterTest.kt b/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/RequestFileWriterTest.kt deleted file mode 100644 index 150f4957e..000000000 --- a/coordinator/clients/prover-client/file-based-client/src/test/kotlin/net/consensys/zkevm/coordinator/clients/prover/RequestFileWriterTest.kt +++ /dev/null @@ -1,166 +0,0 @@ -package net.consensys.zkevm.coordinator.clients.prover - -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.node.ArrayNode -import io.vertx.core.Vertx -import io.vertx.junit5.Timeout -import 
io.vertx.junit5.VertxExtension -import io.vertx.junit5.VertxTestContext -import net.consensys.zkevm.coordinator.clients.GenerateTracesResponse -import net.consensys.zkevm.coordinator.clients.GetZkEVMStateMerkleProofResponse -import net.consensys.zkevm.coordinator.clients.prover.CommonTestData.bridgeLogs -import net.consensys.zkevm.coordinator.clients.prover.CommonTestData.validTransactionRlp -import net.consensys.zkevm.coordinator.clients.prover.serialization.JsonSerialization.proofResponseMapperV1 -import net.consensys.zkevm.domain.BridgeLogsData -import org.apache.logging.log4j.Logger -import org.apache.tuweni.bytes.Bytes32 -import org.assertj.core.api.Assertions.assertThat -import org.junit.jupiter.api.BeforeEach -import org.junit.jupiter.api.extension.ExtendWith -import org.junit.jupiter.api.io.TempDir -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.Arguments -import org.junit.jupiter.params.provider.MethodSource -import org.mockito.kotlin.mock -import tech.pegasys.teku.ethereum.executionclient.schema.ExecutionPayloadV1 -import java.io.File -import java.nio.file.Path -import java.util.concurrent.TimeUnit -import java.util.stream.Stream - -@ExtendWith(VertxExtension::class) -class RequestFileWriterTest { - private val tracesFileName = "/some/path/1-3-conflated-traces.json.gz" - private val tracesEngineVersion = "0.2.3" - private val zkEvmStateManagerVersion = "0.3.4" - private val mapper = proofResponseMapperV1 - private val previousStateRoot = Bytes32.random().toHexString() - - private val testdataPath = "../../../../testdata" - private val merkleProofJson: ArrayNode = let { - val testFilePath = "$testdataPath/type2state-manager/state-proof.json" - mapper.readTree(Path.of(testFilePath).toFile()).let { - val merkleProof = it.get("zkStateMerkleProof") - assert(merkleProof.isArray) - merkleProof as ArrayNode - } - } - - companion object { - @JvmStatic - private fun blocksToGenerate(): Stream? 
{ - return Stream.of(Arguments.of(1), Arguments.of(5)) - } - } - - @BeforeEach - fun beforeEach() { - // To warmup assertions otherwise first test may fail - assertThat(true).isTrue() - } - - @ParameterizedTest - @Timeout(10, timeUnit = TimeUnit.SECONDS) - @MethodSource("blocksToGenerate") - fun requestFileWriter_writesValidFile( - blocksToGenerate: Int, - vertx: Vertx, - testContext: VertxTestContext, - @TempDir tempDir: Path - ) { - val zkParentStateRootHash = Bytes32.random() - val blocks = randomExecutionPayloads(blocksToGenerate) - val tracesResponse = GenerateTracesResponse(tracesFileName, tracesEngineVersion) - val stateManagerResponse = GetZkEVMStateMerkleProofResponse( - zkStateMerkleProof = merkleProofJson, - zkParentStateRootHash = zkParentStateRootHash, - zkEndStateRootHash = Bytes32.random(), - zkStateManagerVersion = zkEvmStateManagerVersion - ) - - val fileWriter = RequestFileWriter( - vertx, - SimpleFileNameProvider(), - RequestFileWriter.Config( - requestDirectory = tempDir, - writingInprogressSuffix = "coordinator_writting_inprogress", - proverInprogressSuffixPattern = "\\.inprogress\\.prover.*" - ), - mapper = mapper, - log = mock() - ) - fileWriter - .write( - blocks.map { it to bridgeLogs }, - tracesResponse, - stateManagerResponse, - previousStateRoot - ) - .thenApply { requestFilePath -> - testContext - .verify { - assertThat(requestFilePath.toString()) - .isEqualTo( - Path.of( - tempDir.toString(), - "${blocks.first().blockNumber}-${blocks.last().blockNumber}-getZkProof.json" - ) - .toString() - ) - assertThat(requestFilePath).exists() - validateRequest( - mapper, - requestFilePath.toFile(), - stateManagerResponse, - blocks, - bridgeLogs, - tracesFileName, - tracesEngineVersion, - previousStateRoot - ) - } - .completeNow() - } - .exceptionally { testContext.failNow(it) } - } -} - -fun validateRequest( - mapper: ObjectMapper, - requestFilePath: File, - stateManagerResponse: GetZkEVMStateMerkleProofResponse?, - blocks: List, - bridgeLogs: 
List, - expectedTracesFileName: String, - expectedTracesVersion: String, - expectedPreviousStateRoot: String -) { - val writtenRequest = - mapper.readValue(requestFilePath, FileBasedExecutionProverClient.GetProofRequest::class.java) - assertThat(writtenRequest).isNotNull - assertThat(writtenRequest.conflatedExecutionTracesFile).isEqualTo(expectedTracesFileName) - assertThat(writtenRequest.tracesEngineVersion).isEqualTo(expectedTracesVersion) - stateManagerResponse?.run { - assertThat(writtenRequest.zkParentStateRootHash) - .isEqualTo(stateManagerResponse.zkParentStateRootHash.toHexString()) - assertThat(writtenRequest.type2StateManagerVersion) - .isEqualTo(stateManagerResponse.zkStateManagerVersion) - assertThat(writtenRequest.zkStateMerkleProof) - .isEqualTo(stateManagerResponse.zkStateMerkleProof) - } - assertThat(writtenRequest.keccakParentStateRootHash).isEqualTo(expectedPreviousStateRoot) - assertThat(writtenRequest.blocksData).hasSameSizeAs(blocks) - writtenRequest.blocksData.zip(blocks).forEach { pair -> - val (rlpBridgeLogData, expected) = pair - assertThat(rlpBridgeLogData.rlp).contains(validTransactionRlp.removeRange(0, 2)) - assertThat(rlpBridgeLogData.rlp) - .contains(expected.parentHash.toUnprefixedHexString()) - assertThat(rlpBridgeLogData.rlp) - .contains(expected.stateRoot.toUnprefixedHexString()) - assertThat(rlpBridgeLogData.rlp) - .contains(expected.receiptsRoot.toUnprefixedHexString()) - assertThat(rlpBridgeLogData.rlp) - .contains(expected.logsBloom.toUnprefixedHexString()) - assertThat(rlpBridgeLogData.bridgeLogs).containsAll(bridgeLogs) - } -} diff --git a/coordinator/clients/prover-client/serialization/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/serialization/BlobJsonFileRequestResponse.kt b/coordinator/clients/prover-client/serialization/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/serialization/BlobJsonFileRequestResponse.kt index b26cbe344..d050c09cf 100644 --- 
a/coordinator/clients/prover-client/serialization/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/serialization/BlobJsonFileRequestResponse.kt +++ b/coordinator/clients/prover-client/serialization/src/main/kotlin/net/consensys/zkevm/coordinator/clients/prover/serialization/BlobJsonFileRequestResponse.kt @@ -12,6 +12,7 @@ import com.fasterxml.jackson.databind.annotation.JsonSerialize import net.consensys.decodeHex import net.consensys.encodeHex import net.consensys.zkevm.coordinator.clients.BlobCompressionProof +import net.consensys.zkevm.coordinator.clients.BlobCompressionProofRequest import net.consensys.zkevm.domain.BlockIntervals internal class ByteArrayDeserializer : JsonDeserializer() { @@ -66,7 +67,33 @@ data class BlobCompressionProofJsonRequest( @JsonProperty("kzgProofSidecar") @JsonSerialize(using = ByteArraySerializer::class) val kzgProofSidecar: ByteArray -) +) { + companion object { + fun fromDomainObject( + request: BlobCompressionProofRequest + ): BlobCompressionProofJsonRequest { + return BlobCompressionProofJsonRequest( + compressedData = request.compressedData, + conflationOrder = BlockIntervals( + startingBlockNumber = request.conflations.first().startBlockNumber, + upperBoundaries = request.conflations.map { it.endBlockNumber } + ), + prevShnarf = request.prevShnarf, + parentStateRootHash = request.parentStateRootHash, + finalStateRootHash = request.finalStateRootHash, + parentDataHash = request.parentDataHash, + dataHash = request.expectedShnarfResult.dataHash, + snarkHash = request.expectedShnarfResult.snarkHash, + expectedX = request.expectedShnarfResult.expectedX, + expectedY = request.expectedShnarfResult.expectedY, + expectedShnarf = request.expectedShnarfResult.expectedShnarf, + commitment = request.commitment, + kzgProofContract = request.kzgProofContract, + kzgProofSidecar = request.kzgProofSideCar + ) + } + } +} data class BlobCompressionProofJsonResponse( val compressedData: ByteArray, // The data that are explicitly 
sent in the blob (i.e. after compression) diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BatchExecutionProverRequestResponse.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BatchExecutionProverRequestResponse.kt new file mode 100644 index 000000000..900827efd --- /dev/null +++ b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BatchExecutionProverRequestResponse.kt @@ -0,0 +1,21 @@ +package net.consensys.zkevm.coordinator.clients + +import net.consensys.zkevm.domain.BlockInterval +import net.consensys.zkevm.toULong +import tech.pegasys.teku.ethereum.executionclient.schema.ExecutionPayloadV1 + +data class BatchExecutionProofRequestV1( + val blocks: List, + val tracesResponse: GenerateTracesResponse, + val type2StateData: GetZkEVMStateMerkleProofResponse +) : BlockInterval { + override val startBlockNumber: ULong + get() = blocks.first().blockNumber.toULong() + override val endBlockNumber: ULong + get() = blocks.last().blockNumber.toULong() +} + +data class BatchExecutionProofResponse( + override val startBlockNumber: ULong, + override val endBlockNumber: ULong +) : BlockInterval diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BlobCompressionProverClient.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BlobCompressionProverRequestResponse.kt similarity index 61% rename from coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BlobCompressionProverClient.kt rename to coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BlobCompressionProverRequestResponse.kt index 656cfc860..a2dc06ca6 100644 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BlobCompressionProverClient.kt +++ b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/BlobCompressionProverRequestResponse.kt @@ -1,11 +1,61 @@ package 
net.consensys.zkevm.coordinator.clients -import com.github.michaelbull.result.Result -import net.consensys.linea.errors.ErrorResponse +import net.consensys.zkevm.domain.BlockInterval import net.consensys.zkevm.domain.BlockIntervals import net.consensys.zkevm.domain.ConflationCalculationResult import net.consensys.zkevm.ethereum.coordination.blob.ShnarfResult -import tech.pegasys.teku.infrastructure.async.SafeFuture + +data class BlobCompressionProofRequest( + val compressedData: ByteArray, + val conflations: List, + val parentStateRootHash: ByteArray, + val finalStateRootHash: ByteArray, + val parentDataHash: ByteArray, + val prevShnarf: ByteArray, + val expectedShnarfResult: ShnarfResult, + val commitment: ByteArray, + val kzgProofContract: ByteArray, + val kzgProofSideCar: ByteArray +) : BlockInterval { + override val startBlockNumber: ULong + get() = conflations.first().startBlockNumber + override val endBlockNumber: ULong + get() = conflations.last().endBlockNumber + + override fun equals(other: Any?): Boolean { + if (this === other) return true + if (javaClass != other?.javaClass) return false + + other as BlobCompressionProofRequest + + if (!compressedData.contentEquals(other.compressedData)) return false + if (conflations != other.conflations) return false + if (!parentStateRootHash.contentEquals(other.parentStateRootHash)) return false + if (!finalStateRootHash.contentEquals(other.finalStateRootHash)) return false + if (!parentDataHash.contentEquals(other.parentDataHash)) return false + if (!prevShnarf.contentEquals(other.prevShnarf)) return false + if (expectedShnarfResult != other.expectedShnarfResult) return false + if (!commitment.contentEquals(other.commitment)) return false + if (!kzgProofContract.contentEquals(other.kzgProofContract)) return false + if (!kzgProofSideCar.contentEquals(other.kzgProofSideCar)) return false + + return true + } + + override fun hashCode(): Int { + var result = compressedData.contentHashCode() + result = 31 * result + 
conflations.hashCode() + result = 31 * result + parentStateRootHash.contentHashCode() + result = 31 * result + finalStateRootHash.contentHashCode() + result = 31 * result + parentDataHash.contentHashCode() + result = 31 * result + prevShnarf.contentHashCode() + result = 31 * result + expectedShnarfResult.hashCode() + result = 31 * result + commitment.contentHashCode() + result = 31 * result + kzgProofContract.contentHashCode() + result = 31 * result + kzgProofSideCar.contentHashCode() + return result + } +} // It only needs to parse a subset of the data to send to L1 or populate the DB. data class BlobCompressionProof( @@ -77,18 +127,3 @@ data class BlobCompressionProof( return result } } - -interface BlobCompressionProverClient { - fun requestBlobCompressionProof( - compressedData: ByteArray, - conflations: List, - parentStateRootHash: ByteArray, - finalStateRootHash: ByteArray, - parentDataHash: ByteArray, - prevShnarf: ByteArray, - expectedShnarfResult: ShnarfResult, - commitment: ByteArray, - kzgProofContract: ByteArray, - kzgProofSideCar: ByteArray - ): SafeFuture>> -} diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ExecutionProverClient.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ExecutionProverClient.kt deleted file mode 100644 index 99a0b8e0d..000000000 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ExecutionProverClient.kt +++ /dev/null @@ -1,21 +0,0 @@ -package net.consensys.zkevm.coordinator.clients - -import net.consensys.zkevm.domain.BlockInterval -import tech.pegasys.teku.ethereum.executionclient.schema.ExecutionPayloadV1 -import tech.pegasys.teku.infrastructure.async.SafeFuture - -data class GetProofResponse( - override val startBlockNumber: ULong, - override val endBlockNumber: ULong -) : BlockInterval - -interface ExecutionProverClient { - /** - * Creates a batch execution proof request and returns a future that will be completed when the proof 
is ready. - */ - fun requestBatchExecutionProof( - blocks: List, - tracesResponse: GenerateTracesResponse, - type2StateData: GetZkEVMStateMerkleProofResponse - ): SafeFuture -} diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProofAggregationClient.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProofAggregationClient.kt deleted file mode 100644 index 1c476d452..000000000 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProofAggregationClient.kt +++ /dev/null @@ -1,12 +0,0 @@ -package net.consensys.zkevm.coordinator.clients - -import com.github.michaelbull.result.Result -import net.consensys.linea.errors.ErrorResponse -import net.consensys.zkevm.domain.ProofToFinalize -import net.consensys.zkevm.domain.ProofsToAggregate -import tech.pegasys.teku.infrastructure.async.SafeFuture - -interface ProofAggregationClient { - fun getAggregatedProof(aggregation: ProofsToAggregate): - SafeFuture>> -} diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProverClient.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProverClient.kt new file mode 100644 index 000000000..94bdfb5c3 --- /dev/null +++ b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProverClient.kt @@ -0,0 +1,13 @@ +package net.consensys.zkevm.coordinator.clients + +import net.consensys.zkevm.domain.ProofToFinalize +import net.consensys.zkevm.domain.ProofsToAggregate +import tech.pegasys.teku.infrastructure.async.SafeFuture + +interface ProverClient { + fun requestProof(proofRequest: ProofRequest): SafeFuture +} + +typealias BlobCompressionProverClientV2 = ProverClient +typealias ProofAggregationProverClientV2 = ProverClient +typealias ExecutionProverClientV2 = ProverClient diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProverErrorType.kt 
b/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProverErrorType.kt deleted file mode 100644 index 8ccb04fec..000000000 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/coordinator/clients/ProverErrorType.kt +++ /dev/null @@ -1,9 +0,0 @@ -package net.consensys.zkevm.coordinator.clients - -enum class ProverErrorType { - // to complete as we go - ResponseNotFound, - ParseError, - ResponseTimeout, - Unknown -} diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/domain/Aggregation.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/domain/Aggregation.kt index 8389253c0..90f765de5 100644 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/domain/Aggregation.kt +++ b/coordinator/core/src/main/kotlin/net/consensys/zkevm/domain/Aggregation.kt @@ -14,7 +14,10 @@ data class ProofsToAggregate( val parentAggregationLastBlockTimestamp: Instant, val parentAggregationLastL1RollingHashMessageNumber: ULong, val parentAggregationLastL1RollingHash: ByteArray -) { +) : BlockInterval { + override val startBlockNumber = compressionProofIndexes.first().startBlockNumber + override val endBlockNumber = compressionProofIndexes.last().endBlockNumber + fun getStartEndBlockInterval(): BlockInterval { val startBlockNumber = compressionProofIndexes.first().startBlockNumber val endBlockNumber = compressionProofIndexes.last().endBlockNumber diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/domain/Conflation.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/domain/Conflation.kt index bd62fcae7..70bf2dd47 100644 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/domain/Conflation.kt +++ b/coordinator/core/src/main/kotlin/net/consensys/zkevm/domain/Conflation.kt @@ -4,6 +4,7 @@ import kotlinx.datetime.Instant import net.consensys.isSortedBy import net.consensys.linea.CommonDomainFunctions import net.consensys.linea.traces.TracesCounters +import net.consensys.zkevm.toULong import 
tech.pegasys.teku.ethereum.executionclient.schema.ExecutionPayloadV1 /** @@ -53,10 +54,15 @@ fun assertConsecutiveIntervals(intervals: List) { data class BlocksConflation( val blocks: List, val conflationResult: ConflationCalculationResult -) { +) : BlockInterval { init { require(blocks.isSortedBy { it.blockNumber }) { "Blocks list must be sorted by blockNumber" } } + + override val startBlockNumber: ULong + get() = blocks.first().blockNumber.toULong() + override val endBlockNumber: ULong + get() = blocks.last().blockNumber.toULong() } data class Batch( diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/aggregation/ProofAggregationCoordinatorService.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/aggregation/ProofAggregationCoordinatorService.kt index fdf84ed03..9b2fc81d3 100644 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/aggregation/ProofAggregationCoordinatorService.kt +++ b/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/aggregation/ProofAggregationCoordinatorService.kt @@ -1,14 +1,12 @@ package net.consensys.zkevm.ethereum.coordination.aggregation -import com.github.michaelbull.result.Err -import com.github.michaelbull.result.Ok import io.vertx.core.Vertx import kotlinx.datetime.Clock import net.consensys.linea.metrics.MetricsFacade import net.consensys.zkevm.LongRunningService import net.consensys.zkevm.PeriodicPollingService import net.consensys.zkevm.coordinator.clients.L2MessageServiceClient -import net.consensys.zkevm.coordinator.clients.ProofAggregationClient +import net.consensys.zkevm.coordinator.clients.ProofAggregationProverClientV2 import net.consensys.zkevm.domain.Aggregation import net.consensys.zkevm.domain.BlobAndBatchCounters import net.consensys.zkevm.domain.BlobsToAggregate @@ -33,7 +31,7 @@ class ProofAggregationCoordinatorService( private val aggregationCalculator: AggregationCalculator, private val 
aggregationsRepository: AggregationsRepository, private val consecutiveProvenBlobsProvider: ConsecutiveProvenBlobsProvider, - private val proofAggregationClient: ProofAggregationClient, + private val proofAggregationClient: ProofAggregationProverClientV2, private val aggregationL2StateProvider: AggregationL2StateProvider, private val log: Logger = LogManager.getLogger(ProofAggregationCoordinatorService::class.java), private val provenAggregationEndBlockNumberConsumer: Consumer = Consumer { } @@ -167,9 +165,7 @@ class ProofAggregationCoordinatorService( parentAggregationLastL1RollingHash = rollingInfo.parentAggregationLastL1RollingHash ) } - .thenCompose { proofsToAggregate -> - proofAggregationClient.getAggregatedProof(proofsToAggregate) - } + .thenCompose(proofAggregationClient::requestProof) .whenException { log.error( "Error getting aggregation proof: aggregation={} errorMessage={}", @@ -178,34 +174,27 @@ class ProofAggregationCoordinatorService( it ) } - .thenCompose { - when (it) { - is Ok -> { - val aggregation = Aggregation( - startBlockNumber = blobsToAggregate.startBlockNumber, - endBlockNumber = blobsToAggregate.endBlockNumber, - status = Aggregation.Status.Proven, - batchCount = batchCount.toULong(), - aggregationProof = it.value - ) - aggregationsRepository.saveNewAggregation(aggregation = aggregation) - .thenPeek { - provenAggregationEndBlockNumberConsumer.accept(aggregation.endBlockNumber) - } - .whenException { - log.error( - "Error saving proven aggregation to DB: aggregation={} errorMessage={}", - blobsToAggregate.intervalString(), - it.message, - it - ) - } + .thenCompose { aggregationProof -> + val aggregation = Aggregation( + startBlockNumber = blobsToAggregate.startBlockNumber, + endBlockNumber = blobsToAggregate.endBlockNumber, + status = Aggregation.Status.Proven, + batchCount = batchCount.toULong(), + aggregationProof = aggregationProof + ) + aggregationsRepository + .saveNewAggregation(aggregation = aggregation) + .thenPeek { + 
provenAggregationEndBlockNumberConsumer.accept(aggregation.endBlockNumber) } - is Err -> { - log.error(it.error) - SafeFuture.failedFuture(it.error.asException()) + .whenException { + log.error( + "Error saving proven aggregation to DB: aggregation={} errorMessage={}", + blobsToAggregate.intervalString(), + it.message, + it + ) } - } } } @@ -220,7 +209,7 @@ class ProofAggregationCoordinatorService( startBlockNumberInclusive: ULong, aggregationsRepository: AggregationsRepository, consecutiveProvenBlobsProvider: ConsecutiveProvenBlobsProvider, - proofAggregationClient: ProofAggregationClient, + proofAggregationClient: ProofAggregationProverClientV2, l2web3jClient: Web3j, l2MessageServiceClient: L2MessageServiceClient, aggregationDeadlineDelay: Duration, diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/blob/BlobCompressionProofCoordinator.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/blob/BlobCompressionProofCoordinator.kt index c3b55de48..58fcabd3e 100644 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/blob/BlobCompressionProofCoordinator.kt +++ b/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/blob/BlobCompressionProofCoordinator.kt @@ -1,14 +1,13 @@ package net.consensys.zkevm.ethereum.coordination.blob -import com.github.michaelbull.result.Err -import com.github.michaelbull.result.Ok import io.vertx.core.Handler import io.vertx.core.Vertx import kotlinx.datetime.Instant import net.consensys.linea.metrics.LineaMetricsCategory import net.consensys.linea.metrics.MetricsFacade import net.consensys.zkevm.LongRunningService -import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClient +import net.consensys.zkevm.coordinator.clients.BlobCompressionProofRequest +import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClientV2 import net.consensys.zkevm.domain.Blob import net.consensys.zkevm.domain.BlobRecord import 
net.consensys.zkevm.domain.BlobStatus @@ -28,7 +27,7 @@ import kotlin.time.Duration class BlobCompressionProofCoordinator( private val vertx: Vertx, private val blobsRepository: BlobsRepository, - private val blobCompressionProverClient: BlobCompressionProverClient, + private val blobCompressionProverClient: BlobCompressionProverClientV2, private val rollingBlobShnarfCalculator: RollingBlobShnarfCalculator, private val blobZkStateProvider: BlobZkStateProvider, private val config: Config, @@ -61,10 +60,10 @@ class BlobCompressionProofCoordinator( @Synchronized private fun sendBlobToCompressionProver(blob: Blob): SafeFuture { - log.debug( - "Going to create the blob compression proof for ${blob.intervalString()}" - ) - val blobZkSateAndRollingShnarfFuture = blobZkStateProvider.getBlobZKState(blob.blocksRange) + log.debug("Preparing compression proof request for blob={}", blob.intervalString()) + + val blobZkSateAndRollingShnarfFuture = blobZkStateProvider + .getBlobZKState(blob.blocksRange) .thenCompose { blobZkState -> rollingBlobShnarfCalculator.calculateShnarf( compressedData = blob.compressedData, @@ -121,7 +120,7 @@ class BlobCompressionProofCoordinator( blobStartBlockTime: Instant, blobEndBlockTime: Instant ): SafeFuture { - return blobCompressionProverClient.requestBlobCompressionProof( + val proofRequest = BlobCompressionProofRequest( compressedData = compressedData, conflations = conflations, parentStateRootHash = parentStateRootHash, @@ -132,11 +131,9 @@ class BlobCompressionProofCoordinator( commitment = commitment, kzgProofContract = kzgProofContract, kzgProofSideCar = kzgProofSideCar - ).thenCompose { result -> - if (result is Err) { - SafeFuture.failedFuture(result.error.asException()) - } else { - val blobCompressionProof = (result as Ok).value + ) + return blobCompressionProverClient.requestProof(proofRequest) + .thenCompose { blobCompressionProof -> val blobRecord = BlobRecord( startBlockNumber = conflations.first().startBlockNumber, endBlockNumber = 
conflations.last().endBlockNumber, @@ -161,7 +158,6 @@ class BlobCompressionProofCoordinator( ) ).thenApply {} } - } } @Synchronized diff --git a/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/proofcreation/ZkProofCreationCoordinatorImpl.kt b/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/proofcreation/ZkProofCreationCoordinatorImpl.kt index ccc509594..1f1f00dda 100644 --- a/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/proofcreation/ZkProofCreationCoordinatorImpl.kt +++ b/coordinator/core/src/main/kotlin/net/consensys/zkevm/ethereum/coordination/proofcreation/ZkProofCreationCoordinatorImpl.kt @@ -1,6 +1,7 @@ package net.consensys.zkevm.ethereum.coordination.proofcreation -import net.consensys.zkevm.coordinator.clients.ExecutionProverClient +import net.consensys.zkevm.coordinator.clients.BatchExecutionProofRequestV1 +import net.consensys.zkevm.coordinator.clients.ExecutionProverClientV2 import net.consensys.zkevm.domain.Batch import net.consensys.zkevm.domain.BlocksConflation import net.consensys.zkevm.ethereum.coordination.conflation.BlocksTracesConflated @@ -10,7 +11,7 @@ import org.apache.logging.log4j.Logger import tech.pegasys.teku.infrastructure.async.SafeFuture class ZkProofCreationCoordinatorImpl( - private val executionProverClient: ExecutionProverClient + private val executionProverClient: ExecutionProverClientV2 ) : ZkProofCreationCoordinator { private val log: Logger = LogManager.getLogger(this::class.java) @@ -20,9 +21,10 @@ class ZkProofCreationCoordinatorImpl( ): SafeFuture { val startBlockNumber = blocksConflation.blocks.first().blockNumber.toULong() val endBlockNumber = blocksConflation.blocks.last().blockNumber.toULong() + val blocksConflationInterval = blocksConflation.intervalString() return executionProverClient - .requestBatchExecutionProof(blocksConflation.blocks, traces.tracesResponse, traces.zkStateTraces) + 
.requestProof(BatchExecutionProofRequestV1(blocksConflation.blocks, traces.tracesResponse, traces.zkStateTraces)) .thenApply { Batch( startBlockNumber = startBlockNumber, @@ -31,7 +33,7 @@ class ZkProofCreationCoordinatorImpl( ) } .whenException { - log.error("Prover returned error: errorMessage={}", it.message, it) + log.error("Prover returned for batch={} errorMessage={}", blocksConflationInterval, it.message, it) } } } diff --git a/coordinator/core/src/test/kotlin/net/consensys/zkevm/ethereum/coordination/aggregation/ProofAggregationCoordinatorServiceTest.kt b/coordinator/core/src/test/kotlin/net/consensys/zkevm/ethereum/coordination/aggregation/ProofAggregationCoordinatorServiceTest.kt index 53d0e326d..29370e5c8 100644 --- a/coordinator/core/src/test/kotlin/net/consensys/zkevm/ethereum/coordination/aggregation/ProofAggregationCoordinatorServiceTest.kt +++ b/coordinator/core/src/test/kotlin/net/consensys/zkevm/ethereum/coordination/aggregation/ProofAggregationCoordinatorServiceTest.kt @@ -1,11 +1,10 @@ package net.consensys.zkevm.ethereum.coordination.aggregation -import com.github.michaelbull.result.Ok import io.vertx.core.Vertx import kotlinx.datetime.Clock import kotlinx.datetime.Instant import net.consensys.trimToSecondPrecision -import net.consensys.zkevm.coordinator.clients.ProofAggregationClient +import net.consensys.zkevm.coordinator.clients.ProofAggregationProverClientV2 import net.consensys.zkevm.domain.Aggregation import net.consensys.zkevm.domain.BlobAndBatchCounters import net.consensys.zkevm.domain.BlobCounters @@ -72,7 +71,7 @@ class ProofAggregationCoordinatorServiceTest { // FIXME this it's only happy path, with should cover other scenarios val mockAggregationCalculator = mock() val mockAggregationsRepository = mock() - val mockProofAggregationClient = mock() + val mockProofAggregationClient = mock() val aggregationL2StateProvider = mock() val config = ProofAggregationCoordinatorService.Config( @@ -197,24 +196,16 @@ class 
ProofAggregationCoordinatorServiceTest { aggregationProof = aggregationProof2 ) - val proofAggregationResponse1 = Ok(aggregationProof1) - val proofAggregationResponse2 = Ok(aggregationProof2) - - whenever( - mockProofAggregationClient.getAggregatedProof( - argThat { - this == proofsToAggregate1 || this == proofsToAggregate2 + whenever(mockProofAggregationClient.requestProof(any())) + .thenAnswer { + if (it.getArgument(0) == proofsToAggregate1) { + SafeFuture.completedFuture(aggregationProof1) + } else if (it.getArgument(0) == proofsToAggregate2) { + SafeFuture.completedFuture(aggregationProof2) + } else { + throw IllegalStateException() } - ) - ).thenAnswer { - if (it.getArgument(0) == proofsToAggregate1) { - SafeFuture.completedFuture(proofAggregationResponse1) - } else if (it.getArgument(0) == proofsToAggregate2) { - SafeFuture.completedFuture(proofAggregationResponse2) - } else { - throw IllegalStateException() } - } whenever( mockAggregationsRepository.saveNewAggregation( @@ -228,14 +219,14 @@ class ProofAggregationCoordinatorServiceTest { // First aggregation should Trigger proofAggregationCoordinatorService.action().get() - verify(mockProofAggregationClient).getAggregatedProof(proofsToAggregate1) + verify(mockProofAggregationClient).requestProof(proofsToAggregate1) verify(mockAggregationsRepository).saveNewAggregation(aggregation1) assertThat(provenAggregation).isEqualTo(aggregation1.endBlockNumber) // Second aggregation should Trigger proofAggregationCoordinatorService.action().get() - verify(mockProofAggregationClient).getAggregatedProof(proofsToAggregate2) + verify(mockProofAggregationClient).requestProof(proofsToAggregate2) verify(mockAggregationsRepository).saveNewAggregation(aggregation2) assertThat(provenAggregation).isEqualTo(aggregation2.endBlockNumber) diff --git a/coordinator/ethereum/blob-submitter/src/main/kotlin/net/consensys/zkevm/ethereum/finalization/AggregationFinalizationCoordinator.kt 
b/coordinator/ethereum/blob-submitter/src/main/kotlin/net/consensys/zkevm/ethereum/finalization/AggregationFinalizationCoordinator.kt index d9d7a6bd0..5a1be3fa8 100644 --- a/coordinator/ethereum/blob-submitter/src/main/kotlin/net/consensys/zkevm/ethereum/finalization/AggregationFinalizationCoordinator.kt +++ b/coordinator/ethereum/blob-submitter/src/main/kotlin/net/consensys/zkevm/ethereum/finalization/AggregationFinalizationCoordinator.kt @@ -3,6 +3,7 @@ package net.consensys.zkevm.ethereum.finalization import io.vertx.core.Vertx import kotlinx.datetime.Clock import net.consensys.linea.async.AsyncFilter +import net.consensys.trimToMinutePrecision import net.consensys.zkevm.PeriodicPollingService import net.consensys.zkevm.coordinator.clients.smartcontract.LineaRollupSmartContractClient import net.consensys.zkevm.domain.Aggregation @@ -43,7 +44,7 @@ class AggregationFinalizationCoordinator( return lineaRollup.updateNonceAndReferenceBlockToLastL1Block() .thenComposeCombined(lineaRollup.finalizedL2BlockNumber()) { _, lastFinalizedBlock -> log.debug("fetching aggregation proofs for finalization: lastFinalizedBlock={}", lastFinalizedBlock) - val endBlockCreatedBefore = clock.now().minus(config.proofSubmissionDelay) + val endBlockCreatedBefore = clock.now().minus(config.proofSubmissionDelay).trimToMinutePrecision() fetchAggregationData(lastFinalizedBlock) .thenCompose { aggregationData -> if (aggregationData == null) { @@ -134,11 +135,11 @@ class AggregationFinalizationCoordinator( parentAggregationProof?.let { SafeFuture.completedFuture( AggregationData( - aggregationProof, - aggregationEndBlob, - aggregationStartBlob.blobCompressionProof!!.prevShnarf, - parentAggregationProof.l1RollingHash, - parentAggregationProof.l1RollingHashMessageNumber + aggregationProof = aggregationProof, + aggregationEndBlob = aggregationEndBlob, + parentShnarf = aggregationStartBlob.blobCompressionProof!!.prevShnarf, + parentL1RollingHash = parentAggregationProof.l1RollingHash, + 
parentL1RollingHashMessageNumber = parentAggregationProof.l1RollingHashMessageNumber ) ) } ?: run { diff --git a/coordinator/persistence/blob/src/integrationTest/kotlin/net/consensys/zkevm/ethereum/coordination/blob/BlobCompressionProofCoordinatorIntTest.kt b/coordinator/persistence/blob/src/integrationTest/kotlin/net/consensys/zkevm/ethereum/coordination/blob/BlobCompressionProofCoordinatorIntTest.kt index ac63e7f55..b2afbce6f 100644 --- a/coordinator/persistence/blob/src/integrationTest/kotlin/net/consensys/zkevm/ethereum/coordination/blob/BlobCompressionProofCoordinatorIntTest.kt +++ b/coordinator/persistence/blob/src/integrationTest/kotlin/net/consensys/zkevm/ethereum/coordination/blob/BlobCompressionProofCoordinatorIntTest.kt @@ -10,7 +10,8 @@ import kotlinx.datetime.Clock import kotlinx.datetime.Instant import net.consensys.linea.traces.TracesCountersV1 import net.consensys.zkevm.coordinator.clients.BlobCompressionProof -import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClient +import net.consensys.zkevm.coordinator.clients.BlobCompressionProofRequest +import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClientV2 import net.consensys.zkevm.coordinator.clients.GetZkEVMStateMerkleProofResponse import net.consensys.zkevm.coordinator.clients.Type2StateManagerClient import net.consensys.zkevm.domain.Blob @@ -70,7 +71,7 @@ class BlobCompressionProofCoordinatorIntTest : CleanDbTestSuiteParallel() { private var expectedBlobCompressionProofResponse: BlobCompressionProof? 
= null private val zkStateClientMock = mock() - private val blobCompressionProverClientMock = mock() + private val blobCompressionProverClientMock = mock() private val blobZkStateProvider = mock() private lateinit var mockShnarfCalculator: BlobShnarfCalculator private lateinit var blobsRepositorySpy: BlobsRepository @@ -112,37 +113,32 @@ class BlobCompressionProofCoordinatorIntTest : CleanDbTestSuiteParallel() { ) ) ) - whenever( - blobCompressionProverClientMock - .requestBlobCompressionProof(any(), any(), any(), any(), any(), any(), any(), any(), any(), any()) - ) - .thenAnswer { i -> + whenever(blobCompressionProverClientMock.requestProof(any())) + .thenAnswer { invocationMock -> + val proofReq = invocationMock.arguments[0] as BlobCompressionProofRequest expectedBlobCompressionProofResponse = BlobCompressionProof( - compressedData = i.getArgument(0), + compressedData = proofReq.compressedData, conflationOrder = BlockIntervals( - startingBlockNumber = - i.getArgument>(1).first().startBlockNumber, - upperBoundaries = - i.getArgument>(1).map { it.endBlockNumber } + startingBlockNumber = proofReq.startBlockNumber, + upperBoundaries = proofReq.conflations.map { it.endBlockNumber } ), - prevShnarf = i.getArgument(5), - parentStateRootHash = i.getArgument(2), - finalStateRootHash = i.getArgument(3), - parentDataHash = i.getArgument(4), - dataHash = i.getArgument(6).dataHash, - snarkHash = i.getArgument(6).snarkHash, - expectedX = i.getArgument(6).expectedX, - expectedY = i.getArgument(6).expectedY, - expectedShnarf = i.getArgument(6).expectedShnarf, + prevShnarf = proofReq.prevShnarf, + parentStateRootHash = proofReq.parentStateRootHash, + finalStateRootHash = proofReq.finalStateRootHash, + parentDataHash = proofReq.parentDataHash, + dataHash = proofReq.expectedShnarfResult.dataHash, + snarkHash = proofReq.expectedShnarfResult.snarkHash, + expectedX = proofReq.expectedShnarfResult.expectedX, + expectedY = proofReq.expectedShnarfResult.expectedY, + expectedShnarf = 
proofReq.expectedShnarfResult.expectedShnarf, decompressionProof = Random.nextBytes(512), proverVersion = "mock-0.0.0", verifierID = 6789, commitment = Random.nextBytes(48), kzgProofContract = Random.nextBytes(48), kzgProofSidecar = Random.nextBytes(48) - ) - SafeFuture.completedFuture(Ok(expectedBlobCompressionProofResponse)) + SafeFuture.completedFuture(expectedBlobCompressionProofResponse) } mockShnarfCalculator = spy(FakeBlobShnarfCalculator()) @@ -265,12 +261,8 @@ class BlobCompressionProofCoordinatorIntTest : CleanDbTestSuiteParallel() { assertThat(actualBlobs[1].blobHash).isEqualTo(blobCompressionProof?.dataHash) assertThat(blobCompressionProof?.parentDataHash).isEqualTo(prevBlobRecord.blobHash) assertThat(blobCompressionProof?.prevShnarf).isEqualTo(prevBlobRecord.expectedShnarf) - verify(mockShnarfCalculator) - .calculateShnarf(any(), any(), any(), any(), any()) - verify(blobCompressionProverClientMock) - .requestBlobCompressionProof( - any(), any(), any(), any(), any(), any(), any(), any(), any(), any() - ) + verify(mockShnarfCalculator).calculateShnarf(any(), any(), any(), any(), any()) + verify(blobCompressionProverClientMock).requestProof(any()) } testContext.completeNow() } diff --git a/coordinator/persistence/blob/src/test/kotlin/net/consensys/zkevm/ethereum/coordinator/blob/BlobCompressionProofCoordinatorTest.kt b/coordinator/persistence/blob/src/test/kotlin/net/consensys/zkevm/ethereum/coordinator/blob/BlobCompressionProofCoordinatorTest.kt index 84f0fd8d8..b53dc1d14 100644 --- a/coordinator/persistence/blob/src/test/kotlin/net/consensys/zkevm/ethereum/coordinator/blob/BlobCompressionProofCoordinatorTest.kt +++ b/coordinator/persistence/blob/src/test/kotlin/net/consensys/zkevm/ethereum/coordinator/blob/BlobCompressionProofCoordinatorTest.kt @@ -1,12 +1,12 @@ package net.consensys.zkevm.ethereum.coordinator.blob -import com.github.michaelbull.result.Ok import io.vertx.core.Vertx import io.vertx.junit5.VertxExtension import 
net.consensys.FakeFixedClock import net.consensys.linea.traces.TracesCountersV1 import net.consensys.zkevm.coordinator.clients.BlobCompressionProof -import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClient +import net.consensys.zkevm.coordinator.clients.BlobCompressionProofRequest +import net.consensys.zkevm.coordinator.clients.BlobCompressionProverClientV2 import net.consensys.zkevm.domain.Blob import net.consensys.zkevm.domain.BlockIntervals import net.consensys.zkevm.domain.ConflationCalculationResult @@ -25,7 +25,6 @@ import org.junit.jupiter.api.TestInstance import org.junit.jupiter.api.extension.ExtendWith import org.mockito.Mockito import org.mockito.kotlin.any -import org.mockito.kotlin.eq import org.mockito.kotlin.mock import org.mockito.kotlin.times import org.mockito.kotlin.verify @@ -47,7 +46,7 @@ class BlobCompressionProofCoordinatorTest { private val expectedStartBlock = 1UL private val expectedEndBlock = 100UL - private val blobCompressionProverClient = mock().also { + private val blobCompressionProverClient = mock().also { val expectedBlobCompressionProofResponse = BlobCompressionProof( compressedData = Random.nextBytes(32), conflationOrder = BlockIntervals( @@ -73,10 +72,8 @@ class BlobCompressionProofCoordinatorTest { kzgProofSidecar = Random.nextBytes(48) ) - whenever( - it.requestBlobCompressionProof(any(), any(), any(), any(), any(), any(), any(), any(), any(), any()) - ) - .thenReturn(SafeFuture.completedFuture(Ok(expectedBlobCompressionProofResponse))) + whenever(it.requestProof(any())) + .thenReturn(SafeFuture.completedFuture(expectedBlobCompressionProofResponse)) } private val blobZkStateProvider = mock() private val blobsRepository = mock() @@ -157,17 +154,19 @@ class BlobCompressionProofCoordinatorTest { await() .untilAsserted { verify(blobCompressionProverClient) - .requestBlobCompressionProof( - compressedData = eq(blob.compressedData), - conflations = eq(blob.conflations), - parentStateRootHash = 
eq(parentStateRootHash), - finalStateRootHash = eq(finalStateRootHash), - parentDataHash = eq(expectedParentDataHash), - prevShnarf = eq(expectedPrevShnarf), - expectedShnarfResult = eq(shnarfResult), - commitment = eq(shnarfResult.commitment), - kzgProofContract = eq(shnarfResult.kzgProofContract), - kzgProofSideCar = eq(shnarfResult.kzgProofSideCar) + .requestProof( + BlobCompressionProofRequest( + compressedData = blob.compressedData, + conflations = blob.conflations, + parentStateRootHash = parentStateRootHash, + finalStateRootHash = finalStateRootHash, + parentDataHash = expectedParentDataHash, + prevShnarf = expectedPrevShnarf, + expectedShnarfResult = shnarfResult, + commitment = shnarfResult.commitment, + kzgProofContract = shnarfResult.kzgProofContract, + kzgProofSideCar = shnarfResult.kzgProofSideCar + ) ) } } @@ -253,30 +252,34 @@ class BlobCompressionProofCoordinatorTest { await() .untilAsserted { verify(blobCompressionProverClient, times(1)) - .requestBlobCompressionProof( - compressedData = eq(blob1.compressedData), - conflations = eq(blob1.conflations), - parentStateRootHash = eq(parentStateRootHash), - finalStateRootHash = eq(finalStateRootHash), - parentDataHash = eq(expectedParentDataHash), - prevShnarf = eq(expectedPrevShnarf), - expectedShnarfResult = eq(shnarfResult), - commitment = eq(shnarfResult.commitment), - kzgProofContract = eq(shnarfResult.kzgProofContract), - kzgProofSideCar = eq(shnarfResult.kzgProofSideCar) + .requestProof( + BlobCompressionProofRequest( + compressedData = blob1.compressedData, + conflations = blob1.conflations, + parentStateRootHash = parentStateRootHash, + finalStateRootHash = finalStateRootHash, + parentDataHash = expectedParentDataHash, + prevShnarf = expectedPrevShnarf, + expectedShnarfResult = shnarfResult, + commitment = shnarfResult.commitment, + kzgProofContract = shnarfResult.kzgProofContract, + kzgProofSideCar = shnarfResult.kzgProofSideCar + ) ) verify(blobCompressionProverClient, times(1)) - 
.requestBlobCompressionProof( - compressedData = eq(blob2.compressedData), - conflations = eq(blob2.conflations), - parentStateRootHash = eq(parentStateRootHash), - finalStateRootHash = eq(finalStateRootHash), - parentDataHash = eq(expectedParentDataHash), - prevShnarf = eq(expectedPrevShnarf), - expectedShnarfResult = eq(shnarfResult), - commitment = eq(shnarfResult.commitment), - kzgProofContract = eq(shnarfResult.kzgProofContract), - kzgProofSideCar = eq(shnarfResult.kzgProofSideCar) + .requestProof( + BlobCompressionProofRequest( + compressedData = blob2.compressedData, + conflations = blob2.conflations, + parentStateRootHash = parentStateRootHash, + finalStateRootHash = finalStateRootHash, + parentDataHash = expectedParentDataHash, + prevShnarf = expectedPrevShnarf, + expectedShnarfResult = shnarfResult, + commitment = shnarfResult.commitment, + kzgProofContract = shnarfResult.kzgProofContract, + kzgProofSideCar = shnarfResult.kzgProofSideCar + ) ) } } diff --git a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/DirectoryCleaner.kt b/coordinator/utilities/src/main/kotlin/net/consensys/zkevm/fileio/DirectoryCleaner.kt similarity index 97% rename from coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/DirectoryCleaner.kt rename to coordinator/utilities/src/main/kotlin/net/consensys/zkevm/fileio/DirectoryCleaner.kt index 6544bb8cc..e50d1655e 100644 --- a/coordinator/app/src/main/kotlin/net/consensys/zkevm/coordinator/app/DirectoryCleaner.kt +++ b/coordinator/utilities/src/main/kotlin/net/consensys/zkevm/fileio/DirectoryCleaner.kt @@ -1,4 +1,4 @@ -package net.consensys.zkevm.coordinator.app +package net.consensys.zkevm.fileio import io.vertx.core.Vertx import net.consensys.linea.async.toSafeFuture diff --git a/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/DirectoryCleanerTest.kt b/coordinator/utilities/src/test/kotlin/net/consensys/zkevm/fileio/DirectoryCleanerTest.kt similarity index 99% rename from 
coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/DirectoryCleanerTest.kt rename to coordinator/utilities/src/test/kotlin/net/consensys/zkevm/fileio/DirectoryCleanerTest.kt index d67636cd0..65ac9e220 100644 --- a/coordinator/app/src/test/kotlin/net/consensys/zkevm/coordinator/app/DirectoryCleanerTest.kt +++ b/coordinator/utilities/src/test/kotlin/net/consensys/zkevm/fileio/DirectoryCleanerTest.kt @@ -1,4 +1,4 @@ -package net.consensys.zkevm.coordinator.app +package net.consensys.zkevm.fileio import io.vertx.core.Vertx import io.vertx.junit5.VertxExtension diff --git a/docker/compose.yml b/docker/compose.yml index f1adccf27..5d544644d 100644 --- a/docker/compose.yml +++ b/docker/compose.yml @@ -75,7 +75,7 @@ services: volumes: - ./scripts/file-downloader.sh:/file-downloader.sh:ro - ../tmp/linea-besu-sequencer/plugins:/linea-besu-sequencer/ - + linea-besu-sequencer-finalized-tag-updater-plugin-downloader: image: badouralix/curl-jq command: [ "sh", "/finalized-tag-updater-jar-downloader.sh", "${GITHUB_TOKEN}", "0.0.1", "/linea-besu-sequencer" ] @@ -257,7 +257,7 @@ services: coordinator: hostname: coordinator container_name: coordinator - image: consensys/linea-coordinator:${COORDINATOR_TAG:-5a6fdf3} + image: consensys/linea-coordinator:${COORDINATOR_TAG:-8a5690e} platform: linux/amd64 profiles: [ "l2", "debug" ] depends_on: diff --git a/docker/config/prover/v2/prover-aggregation.config.toml b/docker/config/prover/v2/prover-aggregation.config.toml index a2d2065da..7bb76e318 100644 --- a/docker/config/prover/v2/prover-aggregation.config.toml +++ b/docker/config/prover/v2/prover-aggregation.config.toml @@ -1,7 +1,7 @@ dev_mode = true version = "v2.0.0" -exec_proof_dir = "/data/prover-execution/v2/responses" -decompress_proof_dir = "/data/prover-compression/v2/responses" +exec_proof_dir = "/data/prover/v2/execution/responses" +decompress_proof_dir = "/data/prover/v2/compression/responses" [bn254_circuit] verifier_id = 0 diff --git 
a/docker/config/prover/v2/prover-controller.config.toml b/docker/config/prover/v2/prover-controller.config.toml index 38a479799..d614943b3 100644 --- a/docker/config/prover/v2/prover-controller.config.toml +++ b/docker/config/prover/v2/prover-controller.config.toml @@ -61,13 +61,13 @@ port = 9090 conf_file = "/opt/linea/prover/config/executor/execution.config.toml" # The directory where we read the files to handle - dir_from = "/data/prover-execution/v2/requests" + dir_from = "/data/prover/v2/execution/requests" # The directory where we write the generated file - dir_to = "/data/prover-execution/v2/responses" + dir_to = "/data/prover/v2/execution/responses" # The directory where we move the files when they have been handled. - dir_done = "/data/prover-execution/v2/requests-done" + dir_done = "/data/prover/v2/execution/requests-done" [compression] @@ -86,13 +86,13 @@ port = 9090 conf_file = "/opt/linea/prover/config/executor/decompression.config.toml" # The directory where we read the files to handle - dir_from = "/data/prover-compression/v2/requests" + dir_from = "/data/prover/v2/compression/requests" # The directory where we write the generated file - dir_to = "/data/prover-compression/v2/responses" + dir_to = "/data/prover/v2/compression/responses" # The directory where we move the files when they have been handled. - dir_done = "/data/prover-compression/v2/requests-done" + dir_done = "/data/prover/v2/compression/requests-done" [aggregation] @@ -111,10 +111,10 @@ port = 9090 conf_file = "/opt/linea/prover/config/executor/aggregation.config.toml" # The directory where we read the files to handle - dir_from = "/data/prover-aggregation/v2/requests" + dir_from = "/data/prover/v2/aggregation/requests" # The directory where we write the generated file - dir_to = "/data/prover-aggregation/v2/responses" + dir_to = "/data/prover/v2/aggregation/responses" # The directory where we move the files when they have been handled. 
- dir_done = "/data/prover-aggregation/v2/requests-done" + dir_done = "/data/prover/v2/aggregation/requests-done" diff --git a/docker/config/prover/v3/prover-config.toml b/docker/config/prover/v3/prover-config.toml index 449cd778c..6bd484d66 100644 --- a/docker/config/prover/v3/prover-config.toml +++ b/docker/config/prover/v3/prover-config.toml @@ -1,18 +1,18 @@ environment = "integration-development" version = "3.0.0" assets_dir = "/opt/linea/prover/prover-assets" -log_level = 4 # TODO @gbotrel will be refactored with new logger. +log_level = 4 # TODO @gbotrel will be refactored with new logger. [controller] retry_delays = [0, 1] [execution] prover_mode = "dev" -requests_root_dir = "/data/prover-execution/v3" +requests_root_dir = "/data/prover/v3/execution" [blob_decompression] prover_mode = "dev" -requests_root_dir = "/data/prover-compression/v3" +requests_root_dir = "/data/prover/v3/compression" [aggregation] prover_mode = "dev" @@ -23,7 +23,7 @@ allowed_inputs = [ "blob-decompression-v0", ] verifier_id = 0 -requests_root_dir = "/data/prover-aggregation/v3" +requests_root_dir = "/data/prover/v3/aggregation" [layer2] message_service_contract = "0xe537D669CA013d86EBeF1D64e40fC74CADC91987" diff --git a/jvm-libs/metrics/micrometer/src/main/kotlin/net/consensys/linea/metrics/micrometer/GaugeAggregator.kt b/jvm-libs/metrics/micrometer/src/main/kotlin/net/consensys/linea/metrics/micrometer/GaugeAggregator.kt new file mode 100644 index 000000000..565fce76c --- /dev/null +++ b/jvm-libs/metrics/micrometer/src/main/kotlin/net/consensys/linea/metrics/micrometer/GaugeAggregator.kt @@ -0,0 +1,26 @@ +package net.consensys.linea.metrics.micrometer + +import java.util.function.Supplier + +/** + * Util class to aggregate multiple counters/gauges into a single one. 
+ * Useful for gauges where the total counting needs to come from multiple sources + * + * Note: it was considered using a WeakHashMap to store the reporters, + * but if the supplier is a lambda, it will be garbage collected and the value will be lost. + * Reporters are expected to be long-lived objects for the whole application lifespan + * so it should not be a problem. + */ +class GaugeAggregator : Supplier { + private val reporters = mutableSetOf>() + + @Synchronized + fun addReporter(reporter: Supplier) { + reporters.add(reporter) + } + + @Synchronized + override fun get(): Number { + return reporters.sumOf { it.get().toLong() } + } +} diff --git a/jvm-libs/metrics/micrometer/src/test/kotlin/net/consensys/linea/metrics/micrometer/GaugeAggregatorTest.kt b/jvm-libs/metrics/micrometer/src/test/kotlin/net/consensys/linea/metrics/micrometer/GaugeAggregatorTest.kt new file mode 100644 index 000000000..4f5a5b33e --- /dev/null +++ b/jvm-libs/metrics/micrometer/src/test/kotlin/net/consensys/linea/metrics/micrometer/GaugeAggregatorTest.kt @@ -0,0 +1,23 @@ +package net.consensys.linea.metrics.micrometer + +import org.assertj.core.api.Assertions.assertThat +import org.junit.jupiter.api.Test +import java.util.concurrent.atomic.AtomicInteger + +class GaugeAggregatorTest { + + @Test + fun `should aggregate multiple counters`() { + val counterA = AtomicInteger(1) + val counterB = AtomicInteger(2) + val aggregator = GaugeAggregator() + + aggregator.addReporter(counterA::get) + aggregator.addReporter(counterB::get) + + assertThat(aggregator.get()).isEqualTo(3L) + + counterB.set(10) + assertThat(aggregator.get()).isEqualTo(11L) + } +}