From dc2c476c19b16201f8b409caeedbc690e9d7e53e Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 4 Dec 2024 14:48:10 +0200 Subject: [PATCH 1/5] test: Benchmark ERC20 token transfers (#3281) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Benchmarks ERC20 token transfers. ## Why ❔ It would be useful to compare their performance to base token transfers. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- .../contracts/transfer/ERC20.sol | 11 +++++++ core/lib/test_contracts/src/contracts.rs | 7 ++++ core/tests/vm-benchmark/benches/batch.rs | 12 +++++-- core/tests/vm-benchmark/src/lib.rs | 6 ++-- core/tests/vm-benchmark/src/transaction.rs | 32 +++++++++++++++++++ core/tests/vm-benchmark/src/vm.rs | 16 +++++++++- 6 files changed, 77 insertions(+), 7 deletions(-) create mode 100644 core/lib/test_contracts/contracts/transfer/ERC20.sol diff --git a/core/lib/test_contracts/contracts/transfer/ERC20.sol b/core/lib/test_contracts/contracts/transfer/ERC20.sol new file mode 100644 index 000000000000..aad741e66a56 --- /dev/null +++ b/core/lib/test_contracts/contracts/transfer/ERC20.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: UNLICENSED + +pragma solidity ^0.8.0; + +import "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +contract TestERC20 is ERC20("Test", "TEST") { + constructor(uint256 _toMint) { + _mint(msg.sender, _toMint); + } +} diff --git a/core/lib/test_contracts/src/contracts.rs b/core/lib/test_contracts/src/contracts.rs index 09a0535824df..36d758c46de2 100644 --- a/core/lib/test_contracts/src/contracts.rs +++ b/core/lib/test_contracts/src/contracts.rs @@ -171,6 +171,13 @@ impl TestContract { &CONTRACT } + /// Returns a test ERC20 token implementation. + pub fn test_erc20() -> &'static Self { + static CONTRACT: Lazy = + Lazy::new(|| TestContract::new(raw::transfer::TestERC20)); + &CONTRACT + } + /// Returns a mock version of `ContractDeployer`. 
pub fn mock_deployer() -> &'static Self { static CONTRACT: Lazy = diff --git a/core/tests/vm-benchmark/benches/batch.rs b/core/tests/vm-benchmark/benches/batch.rs index 608f6be6d089..f4151c39a6f8 100644 --- a/core/tests/vm-benchmark/benches/batch.rs +++ b/core/tests/vm-benchmark/benches/batch.rs @@ -18,9 +18,9 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughpu use rand::{rngs::StdRng, Rng, SeedableRng}; use vm_benchmark::{ criterion::{is_test_mode, BenchmarkGroup, BenchmarkId, CriterionExt, MeteredTime}, - get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, get_load_test_deploy_tx, - get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, BenchmarkingVm, - BenchmarkingVmFactory, Bytecode, Fast, Legacy, LoadTestParams, + get_deploy_tx_with_gas_limit, get_erc20_deploy_tx, get_erc20_transfer_tx, + get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, + get_transfer_tx, BenchmarkingVm, BenchmarkingVmFactory, Bytecode, Fast, Legacy, LoadTestParams, }; use zksync_types::Transaction; @@ -146,6 +146,12 @@ fn bench_fill_bootloader( run_vm::(&mut group, "load_test_heavy", &txs); drop(txs); + // ERC-20 token transfers + let txs = (1..=max_txs).map(get_erc20_transfer_tx); + let txs: Vec<_> = iter::once(get_erc20_deploy_tx()).chain(txs).collect(); + run_vm::(&mut group, "erc20_transfer", &txs); + drop(txs); + // Base token transfers let txs: Vec<_> = (0..max_txs).map(get_transfer_tx).collect(); run_vm::(&mut group, "transfer", &txs); diff --git a/core/tests/vm-benchmark/src/lib.rs b/core/tests/vm-benchmark/src/lib.rs index dbe2fdb808db..8f43f61b28b6 100644 --- a/core/tests/vm-benchmark/src/lib.rs +++ b/core/tests/vm-benchmark/src/lib.rs @@ -2,9 +2,9 @@ use zksync_types::Transaction; pub use crate::{ transaction::{ - get_deploy_tx, get_deploy_tx_with_gas_limit, get_heavy_load_test_tx, - get_load_test_deploy_tx, get_load_test_tx, get_realistic_load_test_tx, get_transfer_tx, - LoadTestParams, + get_deploy_tx, get_deploy_tx_with_gas_limit, get_erc20_deploy_tx, get_erc20_transfer_tx, + get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, + get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, }, vm::{BenchmarkingVm, BenchmarkingVmFactory, CountInstructions, Fast, Legacy, VmLabel}, }; diff --git a/core/tests/vm-benchmark/src/transaction.rs b/core/tests/vm-benchmark/src/transaction.rs index 5c1824e6ffa2..e50f40a06ef1 100644 --- a/core/tests/vm-benchmark/src/transaction.rs +++ b/core/tests/vm-benchmark/src/transaction.rs @@ -56,6 +56,38 @@ pub fn get_transfer_tx(nonce: u32) -> Transaction { signed.into() } +pub fn get_erc20_transfer_tx(nonce: u32) -> Transaction { + let transfer_fn = TestContract::test_erc20().function("transfer"); + let calldata = transfer_fn + .encode_input(&[ + Token::Address(Address::from_low_u64_be(nonce.into())), // send tokens to unique addresses + Token::Uint(1.into()), + ]) + .unwrap(); + + let mut signed = L2Tx::new_signed( + Some(*LOAD_TEST_CONTRACT_ADDRESS), + calldata, + Nonce(nonce), + tx_fee(1_000_000), + 0.into(), // value + L2ChainId::from(270), + &PRIVATE_KEY, + vec![], // factory deps + Default::default(), // paymaster params + ) + .expect("should create a signed execute transaction"); + + signed.set_input(H256::random().as_bytes().to_vec(), H256::random()); + signed.into() +} + +pub fn get_erc20_deploy_tx() -> Transaction { + let calldata = [Token::Uint(U256::one() << 128)]; // initial token amount minted to the deployer + let execute = 
TestContract::test_erc20().deploy_payload(&calldata);
+    Account::new(PRIVATE_KEY.clone()).get_l2_tx_for_execute(execute, Some(tx_fee(500_000_000)))
+}
+
 pub fn get_load_test_deploy_tx() -> Transaction {
     let calldata = [Token::Uint(LOAD_TEST_MAX_READS.into())];
     let execute = TestContract::load_test().deploy_payload(&calldata);
diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs
index 4bd7d7eb1aa6..e69e7ca1e909 100644
--- a/core/tests/vm-benchmark/src/vm.rs
+++ b/core/tests/vm-benchmark/src/vm.rs
@@ -240,7 +240,9 @@ mod tests {
     use super::*;
     use crate::{
         get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx,
-        get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, BYTECODES,
+        get_realistic_load_test_tx, get_transfer_tx,
+        transaction::{get_erc20_deploy_tx, get_erc20_transfer_tx},
+        LoadTestParams, BYTECODES,
     };

     #[test]
@@ -259,6 +261,18 @@ mod tests {
         assert_matches!(res.result, ExecutionResult::Success { .. });
     }

+    #[test]
+    fn can_erc20_transfer() {
+        let mut vm = BenchmarkingVm::new();
+        let res = vm.run_transaction(&get_erc20_deploy_tx());
+        assert_matches!(res.result, ExecutionResult::Success { .. });
+
+        for nonce in 1..=5 {
+            let res = vm.run_transaction(&get_erc20_transfer_tx(nonce));
+            assert_matches!(res.result, ExecutionResult::Success { .. });
+        }
+    }
+
     #[test]
     fn can_load_test() {
         let mut vm = BenchmarkingVm::new();

From b12da8d1fddc7870bf17d5e08312d20773815269 Mon Sep 17 00:00:00 2001
From: Patrick
Date: Wed, 4 Dec 2024 16:24:50 +0100
Subject: [PATCH 2/5] fix(tee): correct previous fix for race condition in batch locking (#3358)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Commit a7dc0ed5007f6b2f789f4c61cb3d137843151860 (PR #3342) was supposed to fix a race condition in batch locking by introducing SQL row-locking, but it [didn't work][2] as expected.

![Screenshot From 2024-12-04 11-32-32](https://github.com/user-attachments/assets/959ffc3c-593f-409a-87ab-68ec197040a0)

Now we are switching back to coarser-grained table-level locking, as [originally suggested][1] by Harald. The original fix was hard to test unless deployed to `stage` due to the nondeterministic nature of the problem, so we needed to merge it into the `main` branch to properly test it.

[1]: https://github.com/matter-labs/zksync-era/pull/3342#issuecomment-2514573386
[2]: https://grafana.matterlabs.dev/goto/AhEd5FVNg?orgId=1

## Why ❔

To fix a bug that only manifests when running `zksync-tee-prover` on multiple instances.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`.
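
For reference, a minimal sketch of the locking flow this PR adopts (simplified; the real query in `tee_proof_generation_dal.rs` below adds TEE-type, status, and staleness filters, and the picked batch is then marked inside the same transaction):

```sql
-- Simplified sketch, not the literal production query.
BEGIN;
-- EXCLUSIVE mode conflicts with itself and with all write locks, so two
-- prover instances cannot reach the SELECT below concurrently; plain reads
-- of the table keep working.
LOCK TABLE tee_proof_generation_details IN EXCLUSIVE MODE;
SELECT p.l1_batch_number
FROM proof_generation_details p
LEFT JOIN tee_proof_generation_details tee
    ON p.l1_batch_number = tee.l1_batch_number
WHERE tee.l1_batch_number IS NULL
LIMIT 1;
-- ...insert/update the picked batch here, then release the lock...
COMMIT;
```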
--- ...6822a6e92698fcfcf5d0a9252d84b75459b2664.json} | 4 ++-- core/lib/dal/src/tee_proof_generation_dal.rs | 16 ++++++++++------ 2 files changed, 12 insertions(+), 8 deletions(-) rename core/lib/dal/.sqlx/{query-8ead57cdda5909348f31f8c4d989f73e353da3bc6af7ecb81102c4194df631aa.json => query-b6961d273f833f8babaf16f256822a6e92698fcfcf5d0a9252d84b75459b2664.json} (87%) diff --git a/core/lib/dal/.sqlx/query-8ead57cdda5909348f31f8c4d989f73e353da3bc6af7ecb81102c4194df631aa.json b/core/lib/dal/.sqlx/query-b6961d273f833f8babaf16f256822a6e92698fcfcf5d0a9252d84b75459b2664.json similarity index 87% rename from core/lib/dal/.sqlx/query-8ead57cdda5909348f31f8c4d989f73e353da3bc6af7ecb81102c4194df631aa.json rename to core/lib/dal/.sqlx/query-b6961d273f833f8babaf16f256822a6e92698fcfcf5d0a9252d84b75459b2664.json index 6266f93e6545..b206d337201b 100644 --- a/core/lib/dal/.sqlx/query-8ead57cdda5909348f31f8c4d989f73e353da3bc6af7ecb81102c4194df631aa.json +++ b/core/lib/dal/.sqlx/query-b6961d273f833f8babaf16f256822a6e92698fcfcf5d0a9252d84b75459b2664.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n (tee.status = $2 OR tee.status = $3)\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n LIMIT 1\n FOR UPDATE OF p\n SKIP LOCKED\n ", + "query": "\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n (tee.status = $2 OR tee.status = $3)\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n LIMIT 1\n ", "describe": { "columns": [ { @@ -22,5 +22,5 @@ false ] }, - "hash": "8ead57cdda5909348f31f8c4d989f73e353da3bc6af7ecb81102c4194df631aa" + "hash": "b6961d273f833f8babaf16f256822a6e92698fcfcf5d0a9252d84b75459b2664" } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index 61a9e23ffea5..12761a3d6d34 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -66,10 +66,16 @@ impl TeeProofGenerationDal<'_, '_> { let min_batch_number = i64::from(min_batch_number.0); let mut transaction = self.storage.start_transaction().await?; - // Lock rows in the proof_generation_details table to prevent race conditions. The - // tee_proof_generation_details table does not have corresponding entries yet if this is the - // first time the query is invoked for a batch. Locking rows in proof_generation_details - // ensures that two different TEE prover instances will not try to prove the same batch. + // Lock the entire tee_proof_generation_details table in EXCLUSIVE mode to prevent race + // conditions. Locking the table ensures that two different TEE prover instances will not + // try to prove the same batch. 
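+        // (In Postgres, EXCLUSIVE mode conflicts with every lock mode except
+        // ACCESS SHARE, so concurrent provers serialize here while plain
+        // reads of the table keep working.)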
+ sqlx::query("LOCK TABLE tee_proof_generation_details IN EXCLUSIVE MODE") + .instrument("lock_batch_for_proving#lock_table") + .execute(&mut transaction) + .await?; + + // The tee_proof_generation_details table does not have corresponding entries yet if this is + // the first time the query is invoked for a batch. let batch_number = sqlx::query!( r#" SELECT @@ -95,8 +101,6 @@ impl TeeProofGenerationDal<'_, '_> { ) ) LIMIT 1 - FOR UPDATE OF p - SKIP LOCKED "#, tee_type.to_string(), TeeProofGenerationJobStatus::PickedByProver.to_string(), From cf458a048540f401ecf946c710b694bb332a6692 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Thu, 5 Dec 2024 02:27:13 +0000 Subject: [PATCH 3/5] docs(consensus): update decentralization guide (#3343) Update decentralization guide with more complete instructions. --- core/bin/snapshots_creator/README.md | 9 +- core/lib/merkle_tree/README.md | 6 +- core/tests/recovery-test/src/index.ts | 5 +- core/tests/ts-integration/src/l1-provider.ts | 5 +- .../ts-integration/src/retry-provider.ts | 5 +- core/tests/ts-integration/src/utils.ts | 6 +- .../ts-integration/tests/api/web3.test.ts | 5 +- .../tests/ts-integration/tests/system.test.ts | 8 +- core/tests/upgrade-test/tests/utils.ts | 28 +-- docs/src/guides/advanced/13_zk_intuition.md | 16 +- .../guides/advanced/16_decentralization.md | 166 +++++++++++++----- package.json | 4 +- prover/crates/bin/prover_cli/README.md | 4 +- yarn.lock | 36 +--- 14 files changed, 196 insertions(+), 107 deletions(-) diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md index 26ebbb6d652a..31d9cc09210f 100644 --- a/core/bin/snapshots_creator/README.md +++ b/core/bin/snapshots_creator/README.md @@ -43,9 +43,8 @@ repository root. The storage location can be configured using the object store c filesystem, or Google Cloud Storage (GCS). Beware that for end-to-end testing of snapshot recovery, changes applied to the main node configuration must be reflected in the external node configuration. -Creating a snapshot is a part of the [snapshot recovery integration test]. You can run the test using -`yarn recovery-test snapshot-recovery-test`. It requires the main node to be launched with a command like -`zk server --components api,tree,eth,state_keeper,commitment_generator`. +Creating a snapshot is a part of the [snapshot recovery integration test]. You can run the test using `yarn recovery-test snapshot-recovery-test`. +It requires the main node to be launched with a command like `zk server --components api,tree,eth,state_keeper,commitment_generator`. ## Snapshots format @@ -59,8 +58,8 @@ Each snapshot consists of three types of data (see [`snapshots.rs`] for exact de enumeration index; both are used to restore the contents of the `initial_writes` table. Chunking storage logs is motivated by their parallel generation; each chunk corresponds to a distinct non-overlapping range of hashed storage keys. (This should be considered an implementation detail for the purposes of snapshot recovery; recovery must not - rely on any particular key distribution among chunks.) Stored as gzipped Protobuf messages in an [object store]; each - chunk is a separate object. + rely on any particular key distribution among chunks.) Stored as gzipped Protobuf messages in an [object store]; each chunk + is a separate object. - **Factory dependencies:** All bytecodes deployed on L2 at the time the snapshot is made. Stored as a single gzipped Protobuf message in an object store. 
diff --git a/core/lib/merkle_tree/README.md b/core/lib/merkle_tree/README.md index b3c8a31c9980..ed31ac4cbf80 100644 --- a/core/lib/merkle_tree/README.md +++ b/core/lib/merkle_tree/README.md @@ -1,8 +1,8 @@ # Merkle Tree -Binary Merkle tree implementation based on amortized radix-16 Merkle tree (AR16MT) described in the [Jellyfish Merkle -tree] white paper. Unlike Jellyfish Merkle tree, our construction uses vanilla binary tree hashing algorithm to make it -easier for the circuit creation. The depth of the tree is 256, and Blake2 is used as the hashing function. +Binary Merkle tree implementation based on amortized radix-16 Merkle tree (AR16MT) described in the [Jellyfish +Merkle tree] white paper. Unlike Jellyfish Merkle tree, our construction uses vanilla binary tree hashing algorithm to +make it easier for the circuit creation. The depth of the tree is 256, and Blake2 is used as the hashing function. ## Snapshot tests diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 8567be6d6d30..d4eaa476b838 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -193,7 +193,10 @@ export class NodeProcess { return new NodeProcess(childProcess, logs); } - private constructor(private childProcess: ChildProcess, readonly logs: FileHandle) {} + private constructor( + private childProcess: ChildProcess, + readonly logs: FileHandle + ) {} exitCode() { return this.childProcess.exitCode; diff --git a/core/tests/ts-integration/src/l1-provider.ts b/core/tests/ts-integration/src/l1-provider.ts index 39b0397cd069..de41349951c4 100644 --- a/core/tests/ts-integration/src/l1-provider.ts +++ b/core/tests/ts-integration/src/l1-provider.ts @@ -28,7 +28,10 @@ class L1TransactionResponse extends ethers.TransactionResponse implements Augmen private isWaitingReported: boolean = false; private isReceiptReported: boolean = false; - constructor(base: ethers.TransactionResponse, public readonly reporter: Reporter) { + constructor( + base: ethers.TransactionResponse, + public readonly reporter: Reporter + ) { super(base, base.provider); } diff --git a/core/tests/ts-integration/src/retry-provider.ts b/core/tests/ts-integration/src/retry-provider.ts index 51d88357c6c3..4c89e0407b9e 100644 --- a/core/tests/ts-integration/src/retry-provider.ts +++ b/core/tests/ts-integration/src/retry-provider.ts @@ -81,7 +81,10 @@ class L2TransactionResponse extends zksync.types.TransactionResponse implements private isWaitingReported: boolean = false; private isReceiptReported: boolean = false; - constructor(base: zksync.types.TransactionResponse, public readonly reporter: Reporter) { + constructor( + base: zksync.types.TransactionResponse, + public readonly reporter: Reporter + ) { super(base, base.provider); } diff --git a/core/tests/ts-integration/src/utils.ts b/core/tests/ts-integration/src/utils.ts index f8378c8dff01..7088c9d4ee51 100644 --- a/core/tests/ts-integration/src/utils.ts +++ b/core/tests/ts-integration/src/utils.ts @@ -57,7 +57,11 @@ export enum NodeType { } export class Node { - constructor(public proc: ChildProcessWithoutNullStreams, public l2NodeUrl: string, private readonly type: TYPE) {} + constructor( + public proc: ChildProcessWithoutNullStreams, + public l2NodeUrl: string, + private readonly type: TYPE + ) {} public async terminate() { try { diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index ceed9654df91..16e712bb9255 100644 --- 
a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -1232,7 +1232,10 @@ export class MockMetamask { readonly isMetaMask: boolean = true; readonly chainId: string; - constructor(readonly wallet: zksync.Wallet, readonly networkVersion: bigint) { + constructor( + readonly wallet: zksync.Wallet, + readonly networkVersion: bigint + ) { this.chainId = ethers.toBeHex(networkVersion); } diff --git a/core/tests/ts-integration/tests/system.test.ts b/core/tests/ts-integration/tests/system.test.ts index 7ce2f69acd6a..38b21c5839ae 100644 --- a/core/tests/ts-integration/tests/system.test.ts +++ b/core/tests/ts-integration/tests/system.test.ts @@ -371,9 +371,11 @@ describe('System behavior checks', () => { function bootloaderUtilsContract() { const BOOTLOADER_UTILS_ADDRESS = '0x000000000000000000000000000000000000800c'; const BOOTLOADER_UTILS = new ethers.Interface( - require(`${ - testMaster.environment().pathToHome - }/contracts/system-contracts/zkout/BootloaderUtilities.sol/BootloaderUtilities.json`).abi + require( + `${ + testMaster.environment().pathToHome + }/contracts/system-contracts/zkout/BootloaderUtilities.sol/BootloaderUtilities.json` + ).abi ); return new ethers.Contract(BOOTLOADER_UTILS_ADDRESS, BOOTLOADER_UTILS, alice); diff --git a/core/tests/upgrade-test/tests/utils.ts b/core/tests/upgrade-test/tests/utils.ts index 2972f8411f5f..c8aae9d4ab0b 100644 --- a/core/tests/upgrade-test/tests/utils.ts +++ b/core/tests/upgrade-test/tests/utils.ts @@ -93,11 +93,13 @@ export function initContracts(pathToHome: string, zkStack: boolean): Contracts { complexUpgraderAbi: new ethers.Interface( require(`${CONTRACTS_FOLDER}/system-contracts/zkout/ComplexUpgrader.sol/ComplexUpgrader.json`).abi ), - counterBytecode: - require(`${pathToHome}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json`) - .deployedBytecode, + counterBytecode: require( + `${pathToHome}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json` + ).deployedBytecode, stateTransitonManager: new ethers.Interface( - require(`${CONTRACTS_FOLDER}/l1-contracts/out/StateTransitionManager.sol/StateTransitionManager.json`).abi + require( + `${CONTRACTS_FOLDER}/l1-contracts/out/StateTransitionManager.sol/StateTransitionManager.json` + ).abi ) }; } else { @@ -116,16 +118,22 @@ export function initContracts(pathToHome: string, zkStack: boolean): Contracts { require(`${L1_CONTRACTS_FOLDER}/governance/ChainAdmin.sol/ChainAdmin.json`).abi ), l2ForceDeployUpgraderAbi: new ethers.Interface( - require(`${pathToHome}/contracts/l2-contracts/artifacts-zk/contracts/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi + require( + `${pathToHome}/contracts/l2-contracts/artifacts-zk/contracts/ForceDeployUpgrader.sol/ForceDeployUpgrader.json` + ).abi ), complexUpgraderAbi: new ethers.Interface( - require(`${pathToHome}/contracts/system-contracts/artifacts-zk/contracts-preprocessed/ComplexUpgrader.sol/ComplexUpgrader.json`).abi + require( + `${pathToHome}/contracts/system-contracts/artifacts-zk/contracts-preprocessed/ComplexUpgrader.sol/ComplexUpgrader.json` + ).abi ), - counterBytecode: - require(`${pathToHome}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json`) - .deployedBytecode, + counterBytecode: require( + `${pathToHome}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json` + ).deployedBytecode, stateTransitonManager: new ethers.Interface( - 
require(`${L1_CONTRACTS_FOLDER}/state-transition/StateTransitionManager.sol/StateTransitionManager.json`).abi + require( + `${L1_CONTRACTS_FOLDER}/state-transition/StateTransitionManager.sol/StateTransitionManager.json` + ).abi ) }; } diff --git a/docs/src/guides/advanced/13_zk_intuition.md b/docs/src/guides/advanced/13_zk_intuition.md index 6e0224a3237f..cee4dcfd1797 100644 --- a/docs/src/guides/advanced/13_zk_intuition.md +++ b/docs/src/guides/advanced/13_zk_intuition.md @@ -85,8 +85,8 @@ located in a module [zksync core witness]. However, for the new proof system, th new location called [separate witness binary]. Inside this new location, after the necessary data is fetched from storage, the witness generator calls another piece of -code from [zkevm_test_harness witness] named `run_with_fixed_params`. This code is responsible for creating the -witnesses themselves (which can get really HUGE). +code from [zkevm_test_harness witness] named `run_with_fixed_params`. This code is responsible for creating the witnesses +themselves (which can get really HUGE). ## Generating the Proof @@ -96,9 +96,9 @@ The main goal of this step is to take an operation (for example, a calculation c into smaller pieces. Then, we represent this information as a special mathematical expression called a polynomial. To construct these polynomials, we use something called a `ConstraintSystem`. The specific type that we use is called -zkSNARK, and our custom version of it is named bellman. You can find our code for this in the [bellman repo]. -Additionally, we have an optimized version that's designed to run faster on certain types of hardware (using CUDA -technology), which you can find in the [bellman cuda repo]. +zkSNARK, and our custom version of it is named bellman. You can find our code for this in the [bellman repo]. Additionally, +we have an optimized version that's designed to run faster on certain types of hardware (using CUDA technology), which you +can find in the [bellman cuda repo]. An [example ecrecover circuit] might give you a clearer picture of what this looks like in practice. @@ -107,9 +107,9 @@ heavy calculations, we use GPUs to speed things up. ### Where is the Code -The main code that utilizes the GPUs to create proofs is located in a repository named [heavy_ops_service repo]. This -code combines elements from the [bellman cuda repo] that we mentioned earlier, along with a huge amount of data -generated by the witness, to produce the final proofs. +The main code that utilizes the GPUs to create proofs is located in a repository named [heavy_ops_service repo]. This code +combines elements from the [bellman cuda repo] that we mentioned earlier, along with a huge amount of data generated by the +witness, to produce the final proofs. 
## What Does "Verify Proof on L1" Mean diff --git a/docs/src/guides/advanced/16_decentralization.md b/docs/src/guides/advanced/16_decentralization.md index a5f889a813d0..7e1ff9b71cb5 100644 --- a/docs/src/guides/advanced/16_decentralization.md +++ b/docs/src/guides/advanced/16_decentralization.md @@ -8,13 +8,62 @@ and enabled as follows: Run the following to generate consensus secrets: ``` -docker run --entrypoint /usr/bin/zksync_external_node "matterlabs/external-node:2.0-v25.0.0" generate-secrets > consensus_secrets.yaml -chmod 600 consensus_secrets.yaml +docker run --entrypoint /usr/bin/zksync_external_node "matterlabs/external-node:2.0-v25.0.0" generate-secrets +``` + +That will output something like this (but with different keys obviously): + +``` +#validator:public:bls12_381:84fe19a96b6443ca7ce...98dec0870f6d8aa95c8164102f0d62e4c47e3566c4e5c32354d +validator_key: validator:secret:bls12_381:1de85683e6decbfcf6c12aa42a5c8bfa98d7ae796dee068ae73dc784a58f5213 +# attester:public:secp256k1:02e262af8c97536b9e479c6d60f213920e759faf4086d8352e98bc25d06b4142e3 +attester_key: attester:secret:secp256k1:1111eb31c2389613f3ceb4288eadda35780e98df4cabb2b7663882262f72e422 +# node:public:ed25519:acb7e350cf53e3b4c2042e2c8044734384cee51f58a0fa052fd7e0c9c3f4b20d +node_key: node:secret:ed25519:0effb1d7c335d23606f656ca1ba87566144d5af2984bd7486379d4f83a204ba2 +``` + +You then have two different paths depending if your main node is using file-based or env-based configuration. + +## Configuring consensus + +### File-based configuration + +If you are using the recommended file-based configuration then you'll need to add the following information to your +`general.yaml` config file (see [Ecosystem Configuration](../launch.md#ecosystem-configuration)): + +```yaml +consensus: + server_addr: '0.0.0.0:3054' + public_addr: + # Address under which the node is accessible to the other nodes. + # It can be a public domain, like `example.com:3054`, in case the main node is accessible from the internet, + # or it can be a kubernetes cluster domain, like `server-v2-core..svc.cluster.local:3054` in + # case the main node should be only accessible within the cluster. + debug_page_addr: '0.0.0.0:5000' + max_payload_size: 3200000 + gossip_dynamic_inbound_limit: 10 + genesis_spec: + chain_id: ??? # chain id + protocol_version: 1 # consensus protocol version + validators: + - key: validator:public:??? # public key of the main node (copy this PUBLIC key from the secrets you generated) + weight: 1 + leader: validator:public:??? # same as above - main node will be the only validator and the only leader. ``` -## Preparing the consensus config +And the secrets you generated to your `secrets.yaml` config file: -Create `consensus_config.yaml` file with the following content (remember to replace the placeholders): +```yaml +consensus: + validator_key: validator:secret:??? + attester_key: attester:secret:??? + node_key: node:secret:??? +``` + +### Env-based configuration + +If you are using the env-based configuration you'll need to create a `consensus_config.yaml` file with the following +content: ```yaml server_addr: '0.0.0.0:3054' @@ -27,52 +76,80 @@ debug_page_addr: '0.0.0.0:5000' max_payload_size: 3200000 gossip_dynamic_inbound_limit: 10 genesis_spec: - chain_id: # chain id + chain_id: ??? # chain id protocol_version: 1 # consensus protocol version validators: - - key: validator:public:??? # public key of the main node (copy this PUBLIC key from consensus_secrets.yaml) + - key: validator:public:??? 
# public key of the main node (copy this PUBLIC key from the secrets you generated)
       weight: 1
   leader: validator:public:??? # same as above - main node will be the only validator and the only leader.
 ```
 
-## Providing the configuration to the `zksync_server`
+And a `consensus_secrets.yaml` file with the secrets you generated previously:
 
-To enable consensus component for the main node you need to append
-`--components=,consensus` to the `zksync_server` command line arguments.
-In addition to that, you need to provide the configuration (from the files `consensus_config.yaml` and
-`consensus_secrets.yaml` that we have just prepared) to the `zksync_server` binary. There are 2 ways (hopefully not for
-long) to achieve that:
+```yaml
+validator_key: validator:secret:???
+attester_key: attester:secret:???
+node_key: node:secret:???
+```
+
+Don't forget to set secure permissions on it:
 
-- In file-based configuration system, the consensus config is embedded in the
-  [general config](https://github.com/matter-labs/zksync-era/blob/1edcabe0c6a02d5b6700c29c0d9f6220ec6fb03c/core/lib/config/src/configs/general.rs#L58),
-  and the consensus secrets are embedded in the
-  [secrets config](https://github.com/matter-labs/zksync-era/blob/main/core/bin/zksync_server/src/main.rs). Paste the
-  content of the generated `consensus_secrets.yaml` file to the `secrets` config, and prepared config to the `general`
-  config.
+```
+chmod 600 consensus_secrets.yaml
+```
 
-- In env-var-based configuration system, the consensus config and consensus secrets files are passed as standalone
-  files. The paths to these files need to be passed as env vars `CONSENSUS_CONFIG_PATH` and `CONSENSUS_SECRETS_PATH`.
+Then you'll need to pass the paths to these files as env vars `CONSENSUS_CONFIG_PATH` and `CONSENSUS_SECRETS_PATH`.
+
+## Running the `zksync_server`
+
+Finally, to enable the consensus component for the main node you just need to append
+`--components=,consensus` to the `zksync_server` command line arguments.
 
 ## Gitops repo config
 
-If you are using the matterlabs gitops repo to configure the main node, it is even more complicated because the
-`consensus_config.yaml` file is rendered from a helm chart. See the
-[example](https://github.com/matter-labs/gitops-kubernetes/blob/main/apps/environments/mainnet2/server-v2/server-v2-core.yaml),
-to see where you have to paste the content of the `consensus_config.yaml` file.
+If you are using the matterlabs gitops repo to configure the main node, you'll need to add this information to your
+kubernetes config for the core server, the `server-v2-core.yaml` file (see
+[example](https://github.com/matter-labs/gitops-kubernetes/blob/177dcd575c6ab446e70b9a9ced8024766095b516/apps/environments/era-stage-proofs/server-v2/server-v2-core.yaml#L23-L35)):
+
+```yaml
+spec:
+  values:
+    args:
+      - --components=state_keeper,consensus
+    service:
+      main:
+        ports:
+          consensus:
+            enabled: true
+            port: 3054
+```
+
+Then again, you have two paths depending on whether the deployment uses file-based or env-based configuration,
+though by default you should be using file-based configuration.
 
-You need to embed the `consensus_secrets.yaml` file into a kubernetes config:
+### File-based configuration
+
+Just like before, you'll add the consensus config information to the `general.yaml` config file (see
+[example](https://github.com/matter-labs/gitops-kubernetes/blob/177dcd575c6ab446e70b9a9ced8024766095b516/apps/environments/era-stage-proofs/server-v2-config/general.yaml#L353-L368)).
+ +And the secrets you generated to your whatever secrets managing system you are using (see an example +[here](https://github.com/matter-labs/gitops-kubernetes/blob/177dcd575c6ab446e70b9a9ced8024766095b516/apps/clusters/era-stage-proofs/stage2/secrets/server-v2-secrets.yaml) +using SOPS). ```yaml -apiVersion: v1 -kind: Secret -metadata: - name: consensus-secrets -type: Opaque -stringData: - .consensus_secrets.yaml: +consensus: + validator_key: validator:secret:??? + attester_key: attester:secret:??? + node_key: node:secret:??? ``` -You need to add the following sections to your kubernetes config for the core server: +### Env-based configuration + +It is even more complicated because the `consensus_config.yaml` file is rendered from a helm chart. See the +[example](https://github.com/matter-labs/gitops-kubernetes/blob/177dcd575c6ab446e70b9a9ced8024766095b516/apps/environments/mainnet2/server-v2/server-v2-core.yaml#L37-L92), +to see where you have to paste the content of the `consensus_config.yaml` file. + +You also need to add the following sections to your `server-v2-core.yaml` file: ```yaml spec: @@ -83,14 +160,6 @@ spec: enabled: true type: secret mountPath: '/etc/consensus_secrets/' - args: - - --components=state_keeper,consensus - service: - main: - ports: - consensus: - enabled: true - port: 3054 configMap: consensus: enabled: true @@ -102,3 +171,18 @@ spec: - name: CONSENSUS_SECRETS_PATH value: /etc/consensus_secrets/.consensus_secrets.yaml ``` + +You need to embed the `consensus_secrets.yaml` file into a kubernetes config (see how to do it +[here](https://github.com/matter-labs/gitops-kubernetes/blob/177dcd575c6ab446e70b9a9ced8024766095b516/apps/environments/mainnet2/zksync-v2-secret/kustomization.yaml#L3-L4) +and +[here](https://github.com/matter-labs/gitops-kubernetes/blob/177dcd575c6ab446e70b9a9ced8024766095b516/apps/environments/mainnet2/zksync-v2-secret/consensus_secrets.yaml)): + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: consensus-secrets +type: Opaque +stringData: + .consensus_secrets.yaml: +``` diff --git a/package.json b/package.json index 9e3428e614cc..b293bedd8f69 100644 --- a/package.json +++ b/package.json @@ -39,12 +39,12 @@ "@typescript-eslint/eslint-plugin": "^6.7.4", "@typescript-eslint/parser": "^4.10.0", "babel-eslint": "^10.1.0", - "eslint-config-alloy": "^3.8.2", "eslint": "^7.16.0", + "eslint-config-alloy": "^3.8.2", "markdownlint-cli": "^0.24.0", "npm-run-all": "^4.1.5", + "prettier": "^3.3.3", "prettier-plugin-solidity": "=1.0.0-dev.22", - "prettier": "^2.3.2", "solhint": "^3.3.2", "sql-formatter": "^13.1.0" } diff --git a/prover/crates/bin/prover_cli/README.md b/prover/crates/bin/prover_cli/README.md index e0dd1697bf6d..af629dedc972 100644 --- a/prover/crates/bin/prover_cli/README.md +++ b/prover/crates/bin/prover_cli/README.md @@ -257,12 +257,12 @@ TODO | | | `-rt, --recursion-tip` | 🏗️ | | | | `-s, --scheduler` | 🏗️ | | | | `-c, --compressor` | 🏗️ | -| | | `-f, --failed` | 🏗 | +| | | `-f, --failed` | 🏗 | | `delete` | | `-n ` | ✅️️ | | | | `-a, --all` | ️️✅️️️️️️ | | `requeue` | | `—b, --batch ` | ✅️ | | | | `-a, --all` | ✅️️ | -| `config` | | `--db-url ` | 🏗 | +| `config` | | `--db-url ` | 🏗 | | `debug-proof` | | `--file ` | ✅️ | | `file-info` | | `--file-path ` | ✅️ | | `stats` | | `--period ` | ✅️ | diff --git a/yarn.lock b/yarn.lock index 15fb8bb7d967..5df8cb570e0f 100644 --- a/yarn.lock +++ b/yarn.lock @@ -9131,6 +9131,11 @@ prettier@^3.0.3: resolved 
"https://registry.yarnpkg.com/prettier/-/prettier-3.2.5.tgz#e52bc3090586e824964a8813b09aba6233b28368" integrity sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A== +prettier@^3.3.3: + version "3.3.3" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.3.3.tgz#30c54fe0be0d8d12e6ae61dbb10109ea00d53105" + integrity sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew== + pretty-format@^29.0.0, pretty-format@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.7.0.tgz#ca42c758310f365bfa71a0bda0a807160b776812" @@ -10191,7 +10196,7 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" -"string-width-cjs@npm:string-width@^4.2.0": +"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -10208,15 +10213,6 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - string-width@^5.0.1, string-width@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" @@ -10283,7 +10279,7 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1": +"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -10304,13 +10300,6 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - strip-ansi@^7.0.1: version "7.1.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" @@ -11161,16 +11150,7 @@ workerpool@6.2.1: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" - integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== - dependencies: - ansi-styles "^4.0.0" - string-width "^4.1.0" - strip-ansi "^6.0.0" - 
-wrap-ansi@^7.0.0: +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== From d0078df02529cae841a8d9469a4caae26ecd9697 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 5 Dec 2024 09:49:30 +0200 Subject: [PATCH 4/5] =?UTF-8?q?refactor(api):=20Add=20more=20components=20?= =?UTF-8?q?to=20healthcheck=20=E2=80=93=20follow-ups=20(#3337)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Various minor follow-ups after https://github.com/matter-labs/zksync-era/pull/3193: - Rework app-level health details. - Fix `execution_time` unit of measurement for the database health check details. - Rework the database health check: do not hold a DB connection all the time; make it reactive. ## Why ❔ Makes the dependency graph lighter; simplifies maintenance. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. --- Cargo.lock | 15 +-- Cargo.toml | 1 - core/bin/external_node/Cargo.toml | 1 - .../external_node/src/metrics/framework.rs | 3 - core/lib/bin_metadata/Cargo.toml | 18 --- core/lib/dal/src/system_dal.rs | 8 +- core/lib/health_check/Cargo.toml | 1 - core/lib/health_check/src/binary.rs | 21 ---- core/lib/health_check/src/lib.rs | 19 ++- core/lib/health_check/src/tests.rs | 1 + core/node/node_framework/Cargo.toml | 1 - .../layers/healtcheck_server.rs | 15 ++- .../src/implementations/layers/postgres.rs | 117 +++++++++--------- core/node/shared_metrics/Cargo.toml | 4 +- .../shared_metrics}/build.rs | 0 core/node/shared_metrics/src/lib.rs | 9 +- .../shared_metrics/src/metadata.rs} | 30 +++-- 17 files changed, 111 insertions(+), 153 deletions(-) delete mode 100644 core/lib/bin_metadata/Cargo.toml delete mode 100644 core/lib/health_check/src/binary.rs rename core/{lib/bin_metadata => node/shared_metrics}/build.rs (100%) rename core/{lib/bin_metadata/src/lib.rs => node/shared_metrics/src/metadata.rs} (72%) diff --git a/Cargo.lock b/Cargo.lock index f9e754902f61..bb4399d1ae78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11157,16 +11157,6 @@ dependencies = [ "zksync_pairing", ] -[[package]] -name = "zksync_bin_metadata" -version = "0.1.0" -dependencies = [ - "rustc_version 0.4.1", - "serde", - "tracing", - "vise", -] - [[package]] name = "zksync_block_reverter" version = "0.1.0" @@ -11848,7 +11838,6 @@ dependencies = [ "zksync_object_store", "zksync_protobuf_config", "zksync_reorg_detector", - "zksync_shared_metrics", "zksync_snapshots_applier", "zksync_state", "zksync_state_keeper", @@ -11938,7 +11927,6 @@ dependencies = [ "tokio", "tracing", "vise", - "zksync_bin_metadata", ] [[package]] @@ -12275,7 +12263,6 @@ dependencies = [ "tracing", "trybuild", "zksync_base_token_adjuster", - "zksync_bin_metadata", "zksync_block_reverter", "zksync_circuit_breaker", "zksync_commitment_generator", @@ -12622,10 +12609,10 @@ dependencies = [ name = "zksync_shared_metrics" version = "0.1.0" dependencies = [ + "rustc_version 0.4.1", "serde", "tracing", "vise", - "zksync_bin_metadata", "zksync_dal", "zksync_types", ] diff --git a/Cargo.toml b/Cargo.toml index 60099874cf39..80a1e4104265 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,7 +79,6 @@ members = [ # Test 
infrastructure "core/tests/loadnext", "core/tests/vm-benchmark", - "core/lib/bin_metadata", ] resolver = "2" diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index a69fdf263794..91bdcefa2ec0 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -29,7 +29,6 @@ zksync_health_check.workspace = true zksync_web3_decl.workspace = true zksync_types.workspace = true zksync_block_reverter.workspace = true -zksync_shared_metrics.workspace = true zksync_node_genesis.workspace = true zksync_node_fee_model.workspace = true zksync_node_db_pruner.workspace = true diff --git a/core/bin/external_node/src/metrics/framework.rs b/core/bin/external_node/src/metrics/framework.rs index 228af8aa0417..81c9e57d9b9a 100644 --- a/core/bin/external_node/src/metrics/framework.rs +++ b/core/bin/external_node/src/metrics/framework.rs @@ -5,7 +5,6 @@ use zksync_node_framework::{ implementations::resources::pools::{MasterPool, PoolResource}, FromContext, IntoContext, StopReceiver, Task, TaskId, WiringError, WiringLayer, }; -use zksync_shared_metrics::{GIT_METRICS, RUST_METRICS}; use zksync_types::{L1ChainId, L2ChainId, SLChainId}; use super::EN_METRICS; @@ -39,8 +38,6 @@ impl WiringLayer for ExternalNodeMetricsLayer { } async fn wire(self, input: Self::Input) -> Result { - RUST_METRICS.initialize(); - GIT_METRICS.initialize(); EN_METRICS.observe_config( self.l1_chain_id, self.sl_chain_id, diff --git a/core/lib/bin_metadata/Cargo.toml b/core/lib/bin_metadata/Cargo.toml deleted file mode 100644 index e529ecfb49a7..000000000000 --- a/core/lib/bin_metadata/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "zksync_bin_metadata" -version.workspace = true -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true - -[dependencies] -serde.workspace = true -vise.workspace = true -tracing.workspace = true - -[build-dependencies] -rustc_version.workspace = true diff --git a/core/lib/dal/src/system_dal.rs b/core/lib/dal/src/system_dal.rs index 6f2e64b1c1c5..f4de4faf8eb3 100644 --- a/core/lib/dal/src/system_dal.rs +++ b/core/lib/dal/src/system_dal.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, time::Duration}; -use chrono::DateTime; +use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; @@ -14,11 +14,11 @@ pub(crate) struct TableSize { pub total_size: u64, } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct DatabaseMigration { pub version: i64, pub description: String, - pub installed_on: DateTime, + pub installed_on: DateTime, pub success: bool, pub checksum: String, pub execution_time: Duration, @@ -118,7 +118,7 @@ impl SystemDal<'_, '_> { installed_on: row.installed_on, success: row.success, checksum: hex::encode(row.checksum), - execution_time: Duration::from_millis(u64::try_from(row.execution_time).unwrap_or(0)), + execution_time: Duration::from_nanos(u64::try_from(row.execution_time).unwrap_or(0)), }) } } diff --git a/core/lib/health_check/Cargo.toml b/core/lib/health_check/Cargo.toml index 0e823c848ce5..6f1d863d8cec 100644 --- a/core/lib/health_check/Cargo.toml +++ b/core/lib/health_check/Cargo.toml @@ -20,7 +20,6 @@ serde_json.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["sync", "time"] } tracing.workspace = true 
-zksync_bin_metadata.workspace = true [dev-dependencies] assert_matches.workspace = true diff --git a/core/lib/health_check/src/binary.rs b/core/lib/health_check/src/binary.rs deleted file mode 100644 index b14ed2ed9392..000000000000 --- a/core/lib/health_check/src/binary.rs +++ /dev/null @@ -1,21 +0,0 @@ -use async_trait::async_trait; -use zksync_bin_metadata::BinMetadata; - -use crate::{CheckHealth, Health, HealthStatus}; - -impl From<&BinMetadata> for Health { - fn from(details: &BinMetadata) -> Self { - Self::from(HealthStatus::Ready).with_details(details) - } -} - -#[async_trait] -impl CheckHealth for BinMetadata { - fn name(&self) -> &'static str { - "metadata" - } - - async fn check_health(&self) -> Health { - self.into() - } -} diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs index 7dcdb47aa2f9..76b1c4d8b0ff 100644 --- a/core/lib/health_check/src/lib.rs +++ b/core/lib/health_check/src/lib.rs @@ -11,12 +11,9 @@ pub use async_trait::async_trait; use futures::future; use serde::Serialize; use tokio::sync::watch; -use zksync_bin_metadata::BIN_METADATA; -use self::metrics::{CheckResult, METRICS}; -use crate::metrics::AppHealthCheckConfig; +use crate::metrics::{AppHealthCheckConfig, CheckResult, METRICS}; -mod binary; mod metrics; #[cfg(test)] @@ -114,6 +111,8 @@ pub struct AppHealthCheck { #[derive(Debug, Clone)] struct AppHealthCheckInner { + /// Application-level health details. + app_details: Option, components: Vec>, slow_time_limit: Duration, hard_time_limit: Duration, @@ -136,6 +135,7 @@ impl AppHealthCheck { let inner = AppHealthCheckInner { components: Vec::default(), + app_details: None, slow_time_limit, hard_time_limit, }; @@ -181,6 +181,13 @@ impl AppHealthCheck { } } + /// Sets app-level health details. They can include build info etc. + pub fn set_details(&self, details: impl Serialize) { + let details = serde_json::to_value(details).expect("failed serializing app details"); + let mut inner = self.inner.lock().expect("`AppHealthCheck` is poisoned"); + inner.app_details = Some(details); + } + /// Inserts health check for a component. /// /// # Errors @@ -220,6 +227,7 @@ impl AppHealthCheck { // Clone `inner` so that we don't hold a lock for them across a wait point. 
let AppHealthCheckInner { components, + app_details, slow_time_limit, hard_time_limit, } = self @@ -238,7 +246,8 @@ impl AppHealthCheck { .map(|health| health.status) .max_by_key(|status| status.priority_for_aggregation()) .unwrap_or(HealthStatus::Ready); - let inner = Health::with_details(aggregated_status.into(), BIN_METADATA); + let mut inner = Health::from(aggregated_status); + inner.details = app_details.clone(); let health = AppHealth { inner, components }; if !health.inner.status.is_healthy() { diff --git a/core/lib/health_check/src/tests.rs b/core/lib/health_check/src/tests.rs index 14c610e9fd83..76863db05415 100644 --- a/core/lib/health_check/src/tests.rs +++ b/core/lib/health_check/src/tests.rs @@ -82,6 +82,7 @@ async fn aggregating_health_checks() { let (first_check, first_updater) = ReactiveHealthCheck::new("first"); let (second_check, second_updater) = ReactiveHealthCheck::new("second"); let inner = AppHealthCheckInner { + app_details: None, components: vec![Arc::new(first_check), Arc::new(second_check)], slow_time_limit: AppHealthCheck::DEFAULT_SLOW_TIME_LIMIT, hard_time_limit: AppHealthCheck::DEFAULT_HARD_TIME_LIMIT, diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 6334495885f3..eec9b8ef4b7a 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -41,7 +41,6 @@ zksync_vm_executor.workspace = true zksync_state_keeper.workspace = true zksync_consistency_checker.workspace = true zksync_metadata_calculator.workspace = true -zksync_bin_metadata.workspace = true zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs index 83a74c63cb45..3a4e3ca11569 100644 --- a/core/node/node_framework/src/implementations/layers/healtcheck_server.rs +++ b/core/node/node_framework/src/implementations/layers/healtcheck_server.rs @@ -3,6 +3,8 @@ use std::sync::Arc; use zksync_config::configs::api::HealthCheckConfig; use zksync_health_check::AppHealthCheck; use zksync_node_api_server::healthcheck::HealthCheckHandle; +use zksync_shared_metrics::metadata::{GitMetadata, RustMetadata, GIT_METRICS, RUST_METRICS}; +use zksync_web3_decl::jsonrpsee::core::Serialize; use crate::{ implementations::resources::healthcheck::AppHealthCheckResource, @@ -12,6 +14,13 @@ use crate::{ FromContext, IntoContext, }; +/// Full metadata of the compiled binary. 
+#[derive(Debug, Serialize)] +pub struct BinMetadata { + pub rust: &'static RustMetadata, + pub git: &'static GitMetadata, +} + /// Wiring layer for health check server /// /// Expects other layers to insert different components' health checks @@ -73,8 +82,12 @@ impl Task for HealthCheckTask { } async fn run(mut self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + self.app_health_check.set_details(BinMetadata { + rust: RUST_METRICS.initialize(), + git: GIT_METRICS.initialize(), + }); let handle = - HealthCheckHandle::spawn_server(self.config.bind_addr(), self.app_health_check.clone()); + HealthCheckHandle::spawn_server(self.config.bind_addr(), self.app_health_check); stop_receiver.0.changed().await?; handle.stop().await; diff --git a/core/node/node_framework/src/implementations/layers/postgres.rs b/core/node/node_framework/src/implementations/layers/postgres.rs index 8a81b8709895..bf602f1de631 100644 --- a/core/node/node_framework/src/implementations/layers/postgres.rs +++ b/core/node/node_framework/src/implementations/layers/postgres.rs @@ -1,11 +1,15 @@ -use std::time::Duration; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; -use serde::{Deserialize, Serialize}; -use tokio::sync::watch; +use async_trait::async_trait; +use serde::Serialize; +use tokio::sync::RwLock; use zksync_dal::{ metrics::PostgresMetrics, system_dal::DatabaseMigration, ConnectionPool, Core, CoreDal, }; -use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; +use zksync_health_check::{CheckHealth, Health, HealthStatus}; use crate::{ implementations::resources::{ @@ -38,8 +42,6 @@ pub struct Input { pub struct Output { #[context(task)] pub metrics_task: PostgresMetricsScrapingTask, - #[context(task)] - pub health_task: DatabaseHealthTask, } #[async_trait::async_trait] @@ -58,16 +60,15 @@ impl WiringLayer for PostgresLayer { }; let app_health = input.app_health.0; - let health_task = DatabaseHealthTask::new(pool); - app_health - .insert_component(health_task.health_check()) + .insert_custom_component(Arc::new(DatabaseHealthCheck { + polling_interval: TASK_EXECUTION_INTERVAL, + pool, + cached: RwLock::default(), + })) .map_err(WiringError::internal)?; - Ok(Output { - metrics_task, - health_task, - }) + Ok(Output { metrics_task }) } } @@ -99,7 +100,7 @@ impl Task for PostgresMetricsScrapingTask { } } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize)] pub struct DatabaseInfo { last_migration: DatabaseMigration, } @@ -111,62 +112,60 @@ impl From for Health { } #[derive(Debug)] -pub struct DatabaseHealthTask { +struct DatabaseHealthCheck { polling_interval: Duration, - connection_pool: ConnectionPool, - updater: HealthUpdater, + pool: ConnectionPool, + cached: RwLock>, } -impl DatabaseHealthTask { - fn new(connection_pool: ConnectionPool) -> Self { - Self { - polling_interval: TASK_EXECUTION_INTERVAL, - connection_pool, - updater: ReactiveHealthCheck::new("database").1, - } +impl DatabaseHealthCheck { + async fn update(&self) -> anyhow::Result { + let mut conn = self.pool.connection_tagged("postgres_healthcheck").await?; + let last_migration = conn.system_dal().get_last_migration().await?; + Ok(DatabaseInfo { last_migration }) } - async fn run(self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> - where - Self: Sized, - { - let timeout = self.polling_interval; - let mut conn = self - .connection_pool - .connection_tagged("postgres_healthcheck") - .await?; - - tracing::info!("Starting database healthcheck with frequency: 
{timeout:?}",); - - while !*stop_receiver.borrow_and_update() { - let last_migration = conn.system_dal().get_last_migration().await?; - self.updater.update(DatabaseInfo { last_migration }.into()); - - // Error here corresponds to a timeout w/o `stop_receiver` changed; we're OK with this. - tokio::time::timeout(timeout, stop_receiver.changed()) - .await - .ok(); + fn validate_cache(&self, cache: Option<&(DatabaseInfo, Instant)>) -> Option { + let now = Instant::now(); + if let Some((cached, cached_at)) = cache { + let elapsed = now + .checked_duration_since(*cached_at) + .unwrap_or(Duration::ZERO); + (elapsed <= self.polling_interval).then(|| cached.clone()) + } else { + None } - tracing::info!("Stop signal received; database healthcheck is shut down"); - Ok(()) - } - - pub fn health_check(&self) -> ReactiveHealthCheck { - self.updater.subscribe() } } -#[async_trait::async_trait] -impl Task for DatabaseHealthTask { - fn kind(&self) -> TaskKind { - TaskKind::UnconstrainedTask +#[async_trait] +impl CheckHealth for DatabaseHealthCheck { + fn name(&self) -> &'static str { + "database" } - fn id(&self) -> TaskId { - "database_health".into() - } + // If the DB malfunctions, this method would time out, which would lead to the health check marked as failed. + async fn check_health(&self) -> Health { + let cached = self.cached.read().await.clone(); + if let Some(cache) = self.validate_cache(cached.as_ref()) { + return cache.into(); + } - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await + let mut cached_lock = self.cached.write().await; + // The cached value may have been updated by another task. + if let Some(cache) = self.validate_cache(cached_lock.as_ref()) { + return cache.into(); + } + + match self.update().await { + Ok(info) => { + *cached_lock = Some((info.clone(), Instant::now())); + info.into() + } + Err(err) => { + tracing::warn!("Error updating database health: {err:#}"); + cached.map_or_else(|| HealthStatus::Affected.into(), |(info, _)| info.into()) + } + } } } diff --git a/core/node/shared_metrics/Cargo.toml b/core/node/shared_metrics/Cargo.toml index 23c669b4f963..618888ffddc0 100644 --- a/core/node/shared_metrics/Cargo.toml +++ b/core/node/shared_metrics/Cargo.toml @@ -16,4 +16,6 @@ vise.workspace = true tracing.workspace = true zksync_types.workspace = true zksync_dal.workspace = true -zksync_bin_metadata.workspace = true + +[build-dependencies] +rustc_version.workspace = true diff --git a/core/lib/bin_metadata/build.rs b/core/node/shared_metrics/build.rs similarity index 100% rename from core/lib/bin_metadata/build.rs rename to core/node/shared_metrics/build.rs diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index e37764c5a6d7..001293a72bc2 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -5,10 +5,11 @@ use std::{fmt, time::Duration}; use vise::{ Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit, }; -use zksync_bin_metadata::{GitMetrics, RustMetrics}; use zksync_dal::transactions_dal::L2TxSubmissionResult; use zksync_types::aggregated_operations::AggregatedActionType; +pub mod metadata; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub enum SnapshotRecoveryStage { @@ -195,9 +196,3 @@ pub struct ExternalNodeMetrics { #[vise::register] pub static EN_METRICS: vise::Global = vise::Global::new(); - 
-#[vise::register]
-pub static RUST_METRICS: vise::Global<RustMetrics> = vise::Global::new();
-
-#[vise::register]
-pub static GIT_METRICS: vise::Global<GitMetrics> = vise::Global::new();
diff --git a/core/lib/bin_metadata/src/lib.rs b/core/node/shared_metrics/src/metadata.rs
similarity index 72%
rename from core/lib/bin_metadata/src/lib.rs
rename to core/node/shared_metrics/src/metadata.rs
index d8a5221e4775..bc7e52ae1e97 100644
--- a/core/lib/bin_metadata/src/lib.rs
+++ b/core/node/shared_metrics/src/metadata.rs
@@ -3,24 +3,12 @@ use vise::{EncodeLabelSet, Info, Metrics};
 
 use self::values::{GIT_METADATA, RUST_METADATA};
 
-pub mod values {
+mod values {
     use super::{GitMetadata, RustMetadata};
 
     include!(concat!(env!("OUT_DIR"), "/metadata_values.rs"));
 }
 
-pub const BIN_METADATA: BinMetadata = BinMetadata {
-    rust: RUST_METADATA,
-    git: GIT_METADATA,
-};
-
-/// Metadata of the compiled binary.
-#[derive(Debug, Serialize)]
-pub struct BinMetadata {
-    pub rust: RustMetadata,
-    pub git: GitMetadata,
-}
-
 /// Rust metadata of the compiled binary.
 #[derive(Debug, EncodeLabelSet, Serialize)]
 pub struct RustMetadata {
@@ -47,22 +35,32 @@ pub struct RustMetrics {
 }
 
 impl RustMetrics {
-    pub fn initialize(&self) {
+    pub fn initialize(&self) -> &RustMetadata {
         tracing::info!("Rust metadata for this binary: {RUST_METADATA:?}");
         self.info.set(RUST_METADATA).ok();
+        // `unwrap()` is safe since the value was set just above
+        self.info.get().unwrap()
     }
 }
 
 #[derive(Debug, Metrics)]
-#[metrics(prefix = "git_info")]
+#[metrics(prefix = "git")]
 pub struct GitMetrics {
     /// General information about the compiled binary.
     info: Info<GitMetadata>,
 }
 
 impl GitMetrics {
-    pub fn initialize(&self) {
+    pub fn initialize(&self) -> &GitMetadata {
         tracing::info!("Git metadata for this binary: {GIT_METADATA:?}");
         self.info.set(GIT_METADATA).ok();
+        // `unwrap()` is safe since the value was set just above
+        self.info.get().unwrap()
     }
 }
+
+#[vise::register]
+pub static RUST_METRICS: vise::Global<RustMetrics> = vise::Global::new();
+
+#[vise::register]
+pub static GIT_METRICS: vise::Global<GitMetrics> = vise::Global::new();

From 7ace594fb3140212bd94ffd6bffcac99805cf4b1 Mon Sep 17 00:00:00 2001
From: Daniyar Itegulov
Date: Fri, 6 Dec 2024 01:30:55 +1100
Subject: [PATCH 5/5] fix(api): batch fee input scaling for `debug_traceCall`
 (#3344)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Changes the `debug_traceCall` handler to use the gas price scale factor instead of the gas estimation scale factor. Additionally, removes call sites that invoked `get_batch_fee_input_scaled` with the default scaling factor (1.0).

## Why ❔

Previously, `debug_traceCall` used an incorrect gas scaling factor (see the sketch after the checklist).

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`.
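To make the change concrete, here is a minimal, self-contained sketch of the scaling arithmetic. `FeeInput` is a simplified stand-in for the real batch fee input type (only two of its components are modeled); the factors are the values from the config diff below, and both components are deliberately scaled by the same coefficient, matching the comment in `tx_sender/mod.rs`:

```rust
/// Simplified stand-in for the batch fee input; only the two scaled
/// components are modeled here.
#[derive(Debug, Clone, Copy)]
struct FeeInput {
    l1_gas_price: u64,
    fair_pubdata_price: u64,
}

impl FeeInput {
    /// Scales both components by the same coefficient, mirroring
    /// `get_batch_fee_input_scaled` being called with equal factors.
    fn scaled(self, factor: f64) -> Self {
        Self {
            l1_gas_price: (self.l1_gas_price as f64 * factor) as u64,
            fair_pubdata_price: (self.fair_pubdata_price as f64 * factor) as u64,
        }
    }
}

fn main() {
    let base = FeeInput {
        l1_gas_price: 1_000,
        fair_pubdata_price: 500,
    };
    // Before this patch, `debug_traceCall` scaled by `estimate_gas_scale_factor`
    // (1.5 after the config change below).
    let before = base.scaled(1.5);
    // After it, the handler delegates to `scaled_batch_fee_input()`, which uses
    // `gas_price_scale_factor` (1.2 in the same config) instead.
    let after = base.scaled(1.2);
    println!("before: {before:?}, after: {after:?}");
}
```

The interesting part is not the numbers but which knob applies: the estimation factor is larger, presumably to leave head room for gas estimation, so using it for call tracing overprices the fee input.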
---
 core/lib/config/src/configs/api.rs            |  2 +-
 core/node/api_server/src/tx_sender/mod.rs     |  2 +-
 .../api_server/src/tx_sender/tests/send_tx.rs | 23 ++++++++-----------
 .../api_server/src/web3/namespaces/debug.rs   |  7 +-----
 4 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs
index ce0d96129584..1321f25e7604 100644
--- a/core/lib/config/src/configs/api.rs
+++ b/core/lib/config/src/configs/api.rs
@@ -243,7 +243,7 @@ impl Web3JsonRpcConfig {
             pubsub_polling_interval: Some(200),
             max_nonce_ahead: 50,
             gas_price_scale_factor: 1.2,
-            estimate_gas_scale_factor: 1.2,
+            estimate_gas_scale_factor: 1.5,
             estimate_gas_acceptable_overestimation: 1000,
             estimate_gas_optimize_search: false,
             max_tx_size: 1000000,
diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs
index 76cfb83aec54..4c98fc7c455c 100644
--- a/core/node/api_server/src/tx_sender/mod.rs
+++ b/core/node/api_server/src/tx_sender/mod.rs
@@ -588,7 +588,7 @@ impl TxSender {
     }
 
     // For now, both L1 gas price and pubdata price are scaled with the same coefficient
-    async fn scaled_batch_fee_input(&self) -> anyhow::Result<BatchFeeInput> {
+    pub(crate) async fn scaled_batch_fee_input(&self) -> anyhow::Result<BatchFeeInput> {
         self.0
             .batch_fee_input_provider
             .get_batch_fee_input_scaled(
diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs
index c861f04a832e..c0b02e45ad89 100644
--- a/core/node/api_server/src/tx_sender/tests/send_tx.rs
+++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs
@@ -6,7 +6,7 @@ use assert_matches::assert_matches;
 use chrono::NaiveDateTime;
 use test_casing::test_casing;
 use zksync_multivm::interface::{tracer::ValidationTraces, ExecutionResult};
-use zksync_node_fee_model::MockBatchFeeParamsProvider;
+use zksync_node_fee_model::{BatchFeeModelInputProvider, MockBatchFeeParamsProvider};
 use zksync_node_test_utils::create_l2_transaction;
 use zksync_types::K256PrivateKey;
 
@@ -22,10 +22,9 @@ async fn submitting_tx_requires_one_connection() {
         .unwrap();
     let l2_chain_id = L2ChainId::default();
 
-    let fee_input = MockBatchFeeParamsProvider::default()
-        .get_batch_fee_input_scaled(1.0, 1.0)
-        .await
-        .unwrap();
+    let fee_params_provider: &dyn BatchFeeModelInputProvider =
+        &MockBatchFeeParamsProvider::default();
+    let fee_input = fee_params_provider.get_batch_fee_input().await.unwrap();
     let (base_fee, gas_per_pubdata) =
         derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into());
     let tx = create_l2_transaction(base_fee, gas_per_pubdata);
@@ -130,10 +129,9 @@ async fn fee_validation_errors() {
     let l2_chain_id = L2ChainId::default();
     let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await;
     let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await;
-    let fee_input = MockBatchFeeParamsProvider::default()
-        .get_batch_fee_input_scaled(1.0, 1.0)
-        .await
-        .unwrap();
+    let fee_params_provider: &dyn BatchFeeModelInputProvider =
+        &MockBatchFeeParamsProvider::default();
+    let fee_input = fee_params_provider.get_batch_fee_input().await.unwrap();
     let (base_fee, gas_per_pubdata) =
         derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into());
     let tx = create_l2_transaction(base_fee, gas_per_pubdata);
@@ -322,10 +320,9 @@ async fn submit_tx_with_validation_traces(actual_range: Range, expected_ran
         .unwrap();
     let l2_chain_id = L2ChainId::default();
 
-    let fee_input = MockBatchFeeParamsProvider::default()
-        .get_batch_fee_input_scaled(1.0, 1.0)
-        .await
-        .unwrap();
+    let fee_params_provider: &dyn BatchFeeModelInputProvider =
+        &MockBatchFeeParamsProvider::default();
+    let fee_input = fee_params_provider.get_batch_fee_input().await.unwrap();
     let (base_fee, gas_per_pubdata) =
         derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into());
     let tx = create_l2_transaction(base_fee, gas_per_pubdata);
diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs
index 4fd32c1b5223..d96c1e659541 100644
--- a/core/node/api_server/src/web3/namespaces/debug.rs
+++ b/core/node/api_server/src/web3/namespaces/debug.rs
@@ -244,12 +244,7 @@ impl DebugNamespace {
             // It is important to drop a DB connection before calling the provider, since it acquires a connection internally
             // on the main node.
             drop(connection);
-            let scale_factor = self.state.api_config.estimate_gas_scale_factor;
-            let fee_input_provider = &self.state.tx_sender.0.batch_fee_input_provider;
-            // For now, the same scaling is used for both the L1 gas price and the pubdata price
-            fee_input_provider
-                .get_batch_fee_input_scaled(scale_factor, scale_factor)
-                .await?
+            self.state.tx_sender.scaled_batch_fee_input().await?
         } else {
            let fee_input = block_args.historical_fee_input(&mut connection).await?;
            drop(connection);
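
A closing note on the `DatabaseHealthCheck` rewrite in PATCH 4/5 above: `check_health()` takes a read lock first and only upgrades to a write lock on a cache miss, re-validating the cache after acquiring it. That read-then-write double check is a generic pattern for a tokio `RwLock`-guarded cache; a minimal standalone sketch follows (all names here are hypothetical, not part of the patches, and it assumes only the `tokio` crate with the `sync` and `macros` features):

```rust
use std::{
    future::Future,
    time::{Duration, Instant},
};

use tokio::sync::RwLock;

/// Hypothetical time-based cache around a single value.
struct Cached<T: Clone> {
    ttl: Duration,
    slot: RwLock<Option<(T, Instant)>>,
}

impl<T: Clone> Cached<T> {
    fn new(ttl: Duration) -> Self {
        Self {
            ttl,
            slot: RwLock::new(None),
        }
    }

    /// Returns the stored value if it is younger than `ttl`.
    fn fresh(&self, entry: Option<&(T, Instant)>) -> Option<T> {
        let (value, produced_at) = entry?;
        (produced_at.elapsed() <= self.ttl).then(|| value.clone())
    }

    async fn get_or_update<F, Fut>(&self, refresh: F) -> T
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = T>,
    {
        // Fast path: concurrent callers only contend on a read lock.
        if let Some(value) = self.fresh(self.slot.read().await.as_ref()) {
            return value;
        }
        let mut slot = self.slot.write().await;
        // Double check: another task may have refreshed the value while
        // this one was waiting for the write lock.
        if let Some(value) = self.fresh(slot.as_ref()) {
            return value;
        }
        let value = refresh().await;
        *slot = Some((value.clone(), Instant::now()));
        value
    }
}

#[tokio::main]
async fn main() {
    let cache = Cached::new(Duration::from_millis(500));
    // The closure stands in for an expensive computation, e.g. a DB round trip.
    let value = cache.get_or_update(|| async { 42_u32 }).await;
    assert_eq!(value, 42);
}
```

The second freshness check is what prevents a thundering herd: of the tasks that raced past the read check, only the first to obtain the write lock performs the expensive refresh, and the rest observe its result. `DatabaseHealthCheck` adds one more wrinkle on top of this pattern: when the refresh fails, it degrades to the stale cached value (or `HealthStatus::Affected` if there is none) instead of propagating the error.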