From d82fa3d6edbdc9f9fdca9af8183fe4936978404f Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 21 Nov 2023 08:38:05 +0100 Subject: [PATCH 01/60] Restrict best LC update collection to canonical blocks Currently, the best LC update for a sync committee period may refer to blocks that have later been orphaned, if they rank better than canonical blocks according to `is_better_update`. This was done because the most important task of the light client sync protocol is to track the correct `next_sync_committee`. However, practical implementation is quite tricky because existing infrastructure such as fork choice modules can only be reused in limited form when collecting light client data. Furthermore, it becomes impossible to deterministically obtain the absolute best LC update available for any given sync committee period, because orphaned blocks may become unavailable. For these reasons, `LightClientUpdate` should only be served if they refer to data from the canonical chain as selected by fork choice. This also assists efforts for a reliable backward sync in the future. --- specs/altair/light-client/full-node.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/light-client/full-node.md b/specs/altair/light-client/full-node.md index 27651af01f..9a69b253f5 100644 --- a/specs/altair/light-client/full-node.md +++ b/specs/altair/light-client/full-node.md @@ -143,7 +143,7 @@ Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to ` - `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.beacon.slot` - `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(update.signature_slot)` -- Only `LightClientUpdate` with `next_sync_committee` as selected by fork choice are provided, regardless of ranking by `is_better_update`. To uniquely identify a non-finalized sync committee fork, all of `period`, `current_sync_committee` and `next_sync_committee` need to be incorporated, as sync committees may reappear over time. +- Only `LightClientUpdate` with `sync_aggregate` from blocks on the canonical chain as selected by fork choice are considered, regardless of ranking by `is_better_update`. `LightClientUpdate` referring to orphaned blocks SHOULD NOT be provided. 
### `create_light_client_finality_update` From be2984156bb086d9e73445245ad046a0e8054228 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 6 Feb 2024 13:00:17 +0100 Subject: [PATCH 02/60] Add canonical data collection test infrastructure --- .../light_client/test_data_collection.py | 934 ++++++++++++++++++ tests/formats/light_client/README.md | 1 + tests/formats/light_client/data_collection.md | 76 ++ tests/generators/light_client/main.py | 1 + 4 files changed, 1012 insertions(+) create mode 100644 tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py create mode 100644 tests/formats/light_client/data_collection.md diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py new file mode 100644 index 0000000000..264c654810 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -0,0 +1,934 @@ +from typing import (Any, Dict, List, Set) +from dataclasses import dataclass + +from eth_utils import encode_hex +from eth2spec.test.context import ( + spec_state_test_with_matching_config, + with_presets, + with_light_client, +) +from eth2spec.test.helpers.constants import ( + ALTAIR, + MINIMAL, +) +from eth2spec.test.helpers.fork_transition import ( + transition_across_forks, +) +from eth2spec.test.helpers.forks import ( + is_post_altair, +) +from eth2spec.test.helpers.light_client import ( + compute_start_slot_at_sync_committee_period, + get_sync_aggregate, + upgrade_lc_header_to_new_spec, + upgrade_lc_update_to_new_spec, +) + + +def next_epoch_boundary_slot(spec, slot): + ## Compute the first possible epoch boundary state slot of a `Checkpoint` + ## referring to a block at given slot. 
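+    # Illustrative example (minimal preset, SLOTS_PER_EPOCH == 8): a block at
+    # slot 5 is rounded up to the epoch 1 boundary, i.e. slot 8, while a block
+    # already sitting on a boundary (e.g. slot 8) maps to slot 8 itself.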
+ epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) + return spec.compute_start_slot_at_epoch(epoch) + + +@dataclass(frozen=True) +class BlockId(object): + slot: Any + root: Any + + +def block_to_block_id(block): + return BlockId( + slot=block.message.slot, + root=block.message.hash_tree_root(), + ) + + +def state_to_block_id(state): + parent_header = state.latest_block_header.copy() + parent_header.state_root = state.hash_tree_root() + return BlockId(slot=parent_header.slot, root=parent_header.hash_tree_root()) + + +def bootstrap_bid(bootstrap): + return BlockId( + slot=bootstrap.header.beacon.slot, + root=bootstrap.header.beacon.hash_tree_root(), + ) + + +def update_attested_bid(update): + return BlockId( + slot=update.attested_header.beacon.slot, + root=update.attested_header.beacon.hash_tree_root(), + ) + + +@dataclass +class ForkedBeaconState(object): + spec: Any + data: Any + + +@dataclass +class ForkedSignedBeaconBlock(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientHeader(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientBootstrap(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientFinalityUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientOptimisticUpdate(object): + spec: Any + data: Any + + +@dataclass +class CachedLightClientData(object): + # Sync committee branches at block's post-state + current_sync_committee_branch: Any # CurrentSyncCommitteeBranch + next_sync_committee_branch: Any # NextSyncCommitteeBranch + + # Finality information at block's post-state + finalized_slot: Any # Slot + finality_branch: Any # FinalityBranch + + # Best / latest light client data + current_period_best_update: ForkedLightClientUpdate + latest_signature_slot: Any # Slot + + +@dataclass +class LightClientDataCache(object): + # Cached data for creating future `LightClientUpdate` instances. + # Key is the block ID of which the post state was used to get the data. + # Data stored for the finalized head block and all non-finalized blocks. + data: Dict[BlockId, CachedLightClientData] + + # Light client data for the latest slot that was signed by at least + # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. 
May be older than head + latest: ForkedLightClientFinalityUpdate + + # The earliest slot for which light client data is imported + tail_slot: Any # Slot + + +@dataclass +class LightClientDataDb(object): + headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader + current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch + sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee + best_updates: Dict[Any, ForkedLightClientUpdate] # SyncCommitteePeriod -> ForkedLightClientUpdate + + +@dataclass +class LightClientDataStore(object): + # Cached data to accelerate creating light client data + cache: LightClientDataCache + + # Persistent light client data + db: LightClientDataDb + + +@dataclass +class LightClientDataCollectionTest(object): + steps: List[Dict[str, Any]] + files: Set[str] + + # Fork schedule + spec: Any + phases: Any + + # History access + blocks: Dict[Any, ForkedSignedBeaconBlock] # Block root -> ForkedSignedBeaconBlock + finalized_block_roots: Dict[Any, Any] # Slot -> Root + states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + latest_finalized_epoch: Any # Epoch + latest_finalized_bid: BlockId + historical_tail_slot: Any # Slot + + # Light client data + lc_data_store: LightClientDataStore + + +def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockId] + try: + block = test.blocks[bid.root] + while True: + if block.data.message.slot <= slot: + return block_to_block_id(block.data) + + block = test.blocks[block.data.message.parent_root] + except KeyError: + return None + + +def block_id_at_finalized_slot(test, slot): # -> Optional[BlockId] + while slot >= test.historical_tail_slot: + try: + return BlockId(slot=slot, root=test.finalized_block_roots[slot]) + except KeyError: + slot = slot - 1 + return None + + +def get_current_sync_committee_for_finalized_period(test, period): # -> Optional[SyncCommittee] + low_slot = max( + test.historical_tail_slot, + test.spec.compute_start_slot_at_epoch(test.spec.config.ALTAIR_FORK_EPOCH) + ) + if period < test.spec.compute_sync_committee_period_at_slot(low_slot): + return None + period_start_slot = compute_start_slot_at_sync_committee_period(test.spec, period) + sync_committee_slot = max(period_start_slot, low_slot) + bid = block_id_at_finalized_slot(test, sync_committee_slot) + if bid is None: + return None + block = test.blocks[bid.root] + state = test.finalized_checkpoint_states[block.data.message.state_root] + if sync_committee_slot > state.data.slot: + state.spec, state.data, _ = transition_across_forks(state.spec, state.data, sync_committee_slot, phases=test.phases) + assert is_post_altair(state.spec) + return state.data.current_sync_committee + + +def light_client_header_for_block(test, block): # -> ForkedLightClientHeader + if not is_post_altair(block.spec): + spec = test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def light_client_header_for_block_id(test, bid): # -> ForkedLightClientHeader + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + spec = test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + return None + 
return block.data.message.body.sync_aggregate + + +def get_light_client_data(lc_data_store, bid): # -> CachedLightClientData + ## Fetch cached light client data about a given block. + ## Data must be cached (`cache_light_client_data`) before calling this function. + try: + return lc_data_store.cache.data[bid] + except KeyError: + raise ValueError("Trying to get light client data that was not cached") + + +def cache_light_client_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): + ## Cache data for a given block and its post-state to speed up creating future + ## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this + ## block and state. + cached_data = CachedLightClientData( + current_sync_committee_branch=spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_GINDEX), + next_sync_committee_branch=spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_GINDEX), + finalized_slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + finality_branch=spec.compute_merkle_proof(state, spec.FINALIZED_ROOT_GINDEX), + current_period_best_update=current_period_best_update, + latest_signature_slot=latest_signature_slot, + ) + if bid in lc_data_store.cache.data: + raise ValueError("Redundant `cache_light_client_data` call") + lc_data_store.cache.data[bid] = cached_data + + +def delete_light_client_data(lc_data_store, bid): + ## Delete cached light client data for a given block. This needs to be called + ## when a block becomes unreachable due to finalization of a different fork. + del lc_data_store.cache.data[bid] + + +def create_light_client_finality_update_from_light_client_data(test, + attested_bid, + signature_slot, + sync_aggregate): # -> ForkedLightClientFinalityUpdate + attested_header = light_client_header_for_block_id(test, attested_bid) + attested_data = get_light_client_data(test.lc_data_store, attested_bid) + finalized_bid = block_id_at_finalized_slot(test, attested_data.finalized_slot) + if finalized_bid is not None: + if finalized_bid.slot != attested_data.finalized_slot: + # Empty slots at end of epoch, update cache for latest block slot + attested_data.finalized_slot = finalized_bid.slot + if finalized_bid.slot == attested_header.spec.GENESIS_SLOT: + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=attested_header.spec.LightClientHeader(), + ) + else: + finalized_header = light_client_header_for_block_id(test, finalized_bid) + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=upgrade_lc_header_to_new_spec( + finalized_header.spec, + attested_header.spec, + finalized_header.data, + ) + ) + finality_branch = attested_data.finality_branch + return ForkedLightClientFinalityUpdate( + spec=attested_header.spec, + data=attested_header.spec.LightClientFinalityUpdate( + attested_header=attested_header.data, + finalized_header=finalized_header.data, + finality_branch=finality_branch, + sync_aggregate=sync_aggregate, + signature_slot=signature_slot, + ), + ) + + +def create_light_client_update_from_light_client_data(test, + attested_bid, + signature_slot, + sync_aggregate, + next_sync_committee): # -> ForkedLightClientUpdate + finality_update = create_light_client_finality_update_from_light_client_data( + test, attested_bid, signature_slot, sync_aggregate) + attested_data = get_light_client_data(test.lc_data_store, attested_bid) + return ForkedLightClientUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientUpdate( + 
attested_header=finality_update.data.attested_header, + next_sync_committee=next_sync_committee, + next_sync_committee_branch=attested_data.next_sync_committee_branch, + finalized_header=finality_update.data.finalized_header, + finality_branch=finality_update.data.finality_branch, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ) + ) + + +def create_light_client_update(test, spec, state, block, parent_bid): + ## Create `LightClientUpdate` instances for a given block and its post-state, + ## and keep track of best / latest ones. Data about the parent block's + ## post-state must be cached (`cache_light_client_data`) before calling this. + + # Verify attested block (parent) is recent enough and that state is available + attested_bid = parent_bid + attested_slot = attested_bid.slot + if attested_slot < test.lc_data_store.cache.tail_slot: + cache_light_client_data( + test.lc_data_store, + spec, + state, + block_to_block_id(block), + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + return + + # If sync committee period changed, reset `best` + attested_period = spec.compute_sync_committee_period_at_slot(attested_slot) + signature_slot = block.message.slot + signature_period = spec.compute_sync_committee_period_at_slot(signature_slot) + attested_data = get_light_client_data(test.lc_data_store, attested_bid) + if attested_period != signature_period: + best = ForkedLightClientUpdate(spec=None, data=None) + else: + best = attested_data.current_period_best_update + + # If sync committee does not have sufficient participants, do not bump latest + sync_aggregate = block.message.body.sync_aggregate + num_active_participants = sum(sync_aggregate.sync_committee_bits) + if num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS: + latest_signature_slot = attested_data.latest_signature_slot + else: + latest_signature_slot = signature_slot + + # To update `best`, sync committee must have sufficient participants, and + # `signature_slot` must be in `attested_slot`'s sync committee period + if ( + num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + or attested_period != signature_period + ): + cache_light_client_data( + test.lc_data_store, + spec, + state, + block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + return + + # Check if light client data improved + update = create_light_client_update_from_light_client_data( + test, attested_bid, signature_slot, sync_aggregate, state.next_sync_committee) + is_better = ( + best.spec is None + or spec.is_better_update(update.data, upgrade_lc_update_to_new_spec(best.spec, update.spec, best.data)) + ) + + # Update best light client data for current sync committee period + if is_better: + best = update + cache_light_client_data( + test.lc_data_store, + spec, + state, + block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + + +def create_light_client_bootstrap(test, spec, bid): + block = test.blocks[bid.root] + period = spec.compute_sync_committee_period_at_slot(bid.slot) + if period not in test.lc_data_store.db.sync_committees: + test.lc_data_store.db.sync_committees[period] = \ + get_current_sync_committee_for_finalized_period(test, period) + test.lc_data_store.db.headers[bid.root] = ForkedLightClientHeader( + spec=block.spec, data=block.spec.block_to_light_client_header(block.data)) + 
test.lc_data_store.db.current_branches[bid.slot] = \ + get_light_client_data(test.lc_data_store, bid).current_sync_committee_branch + + +def process_new_block_for_light_client(test, spec, state, block, parent_bid): + ## Update light client data with information from a new block. + if block.message.slot < test.lc_data_store.cache.tail_slot: + return + + if is_post_altair(spec): + create_light_client_update(test, spec, state, block, parent_bid) + else: + raise ValueError("`tail_slot` cannot be before Altair") + + +def process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): + ## Update light client data to account for a new head block. + ## Note that `old_finalized_bid` is not yet updated when this is called. + if head_bid.slot < test.lc_data_store.cache.tail_slot: + return + + # Commit best light client data for non-finalized periods + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + low_slot = max(test.lc_data_store.cache.tail_slot, old_finalized_bid.slot) + low_period = spec.compute_sync_committee_period_at_slot(low_slot) + bid = head_bid + for period in reversed(range(low_period, head_period + 1)): + period_end_slot = compute_start_slot_at_sync_committee_period(spec, period + 1) - 1 + bid = get_ancestor_of_block_id(test, bid, period_end_slot) + if bid is None or bid.slot < low_slot: + break + best = get_light_client_data(test.lc_data_store, bid).current_period_best_update + if ( + best.spec is None + or sum(best.data.sync_aggregate.sync_committee_bits) < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + ): + test.lc_data_store.db.best_updates.pop(period, None) + else: + test.lc_data_store.db.best_updates[period] = best + + # Update latest light client data + head_data = get_light_client_data(test.lc_data_store, head_bid) + signature_slot = head_data.latest_signature_slot + if signature_slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + signature_bid = get_ancestor_of_block_id(test, head_bid, signature_slot) + if signature_bid is None or signature_bid.slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + attested_bid = get_ancestor_of_block_id(test, signature_bid, signature_bid.slot - 1) + if attested_bid is None or attested_bid.slot < low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + sync_aggregate = sync_aggregate_for_block_id(test, signature_bid) + assert sync_aggregate is not None + test.lc_data_store.cache.latest = create_light_client_finality_update_from_light_client_data( + test, attested_bid, signature_slot, sync_aggregate) + + +def process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): + ## Prune cached data that is no longer useful for creating future + ## `LightClientUpdate` and `LightClientBootstrap` instances. + ## This needs to be called whenever `finalized_checkpoint` changes. 
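+    # Illustrative walk-through (minimal preset, blocks at every slot): when
+    # finalization advances from slot 8 to slot 32, the boundary loop below
+    # visits slots 32, 24 and 16, caching a `LightClientBootstrap` for each,
+    # and cached per-block data older than slot 32 is then pruned.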
+ finalized_slot = finalized_bid.slot + if finalized_slot < test.lc_data_store.cache.tail_slot: + return + + # Cache `LightClientBootstrap` for newly finalized epoch boundary blocks + first_new_slot = old_finalized_bid.slot + 1 + low_slot = max(first_new_slot, test.lc_data_store.cache.tail_slot) + boundary_slot = finalized_slot + while boundary_slot >= low_slot: + bid = block_id_at_finalized_slot(test, boundary_slot) + if bid is None: + break + if bid.slot >= low_slot: + create_light_client_bootstrap(test, spec, bid) + boundary_slot = next_epoch_boundary_slot(spec, bid.slot) + if boundary_slot < spec.SLOTS_PER_EPOCH: + break + boundary_slot = boundary_slot - spec.SLOTS_PER_EPOCH + + # Prune light client data that is no longer referrable by future updates + bids_to_delete = [] + for bid in test.lc_data_store.cache.data: + if bid.slot >= finalized_bid.slot: + continue + bids_to_delete.append(bid) + for bid in bids_to_delete: + delete_light_client_data(test.lc_data_store, bid) + + +def get_light_client_bootstrap(test, block_root): # -> ForkedLightClientBootstrap + try: + header = test.lc_data_store.db.headers[block_root] + except KeyError: + return ForkedLightClientBootstrap(spec=None, data=None) + + slot = header.data.beacon.slot + period = header.spec.compute_sync_committee_period_at_slot(slot) + return ForkedLightClientBootstrap( + spec=header.spec, + data=header.spec.LightClientBootstrap( + header=header.data, + current_sync_committee=test.lc_data_store.db.sync_committees[period], + current_sync_committee_branch=test.lc_data_store.db.current_branches[slot], + ) + ) + + +def get_light_client_update_for_period(test, period): # -> ForkedLightClientUpdate + try: + return test.lc_data_store.db.best_updates[period] + except KeyError: + return ForkedLightClientUpdate(spec=None, data=None) + + +def get_light_client_finality_update(test): # -> ForkedLightClientFinalityUpdate + return test.lc_data_store.cache.latest + + +def get_light_client_optimistic_update(test): # -> ForkedLightClientOptimisticUpdate + finality_update = get_light_client_finality_update(test) + if finality_update.spec is None: + return ForkedLightClientOptimisticUpdate(spec=None, data=None) + return ForkedLightClientOptimisticUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientOptimisticUpdate( + attested_header=finality_update.data.attested_header, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ), + ) + + +def setup_test(spec, state, phases=None): + assert spec.compute_slots_since_epoch_start(state.slot) == 0 + + test = LightClientDataCollectionTest( + steps=[], + files=set(), + spec=spec, + phases=phases, + blocks={}, + finalized_block_roots={}, + states={}, + finalized_checkpoint_states={}, + latest_finalized_epoch=state.finalized_checkpoint.epoch, + latest_finalized_bid=BlockId( + slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + root=state.finalized_checkpoint.root, + ), + historical_tail_slot=state.slot, + lc_data_store=LightClientDataStore( + cache=LightClientDataCache( + data={}, + latest=ForkedLightClientFinalityUpdate(spec=None, data=None), + tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), + ), + db=LightClientDataDb( + headers={}, + current_branches={}, + sync_committees={}, + best_updates={}, + ), + ), + ) + bid = state_to_block_id(state) + yield "initial_state", state + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=spec.SignedBeaconBlock( + 
message=spec.BeaconBlock(state_root=state.hash_tree_root()), + )) + test.finalized_block_roots[bid.slot] = bid.root + test.states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + test.finalized_checkpoint_states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + cache_light_client_data( + test.lc_data_store, spec, state, bid, + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + create_light_client_bootstrap(test, spec, bid) + + return test + + +def finish_test(test): + yield "steps", test.steps + + +def encode_object(test, prefix, obj, slot, genesis_validators_root): + yield from [] # Consistently enable `yield from` syntax in calling tests + + file_name = f"{prefix}_{slot}_{encode_hex(obj.data.hash_tree_root())}" + if file_name not in test.files: + test.files.add(file_name) + yield file_name, obj.data + return { + "fork_digest": encode_hex(obj.spec.compute_fork_digest( + obj.spec.compute_fork_version(obj.spec.compute_epoch_at_slot(slot)), + genesis_validators_root, + )), + "data": file_name, + } + + +def add_new_block(test, spec, state, slot=None, num_sync_participants=0): + if slot is None: + slot = state.slot + 1 + assert slot > state.slot + parent_bid = state_to_block_id(state) + + # Advance to target slot - 1 to ensure sync aggregate can be efficiently computed + if state.slot < slot - 1: + spec, state, _ = transition_across_forks(spec, state, slot - 1, phases=test.phases) + + # Compute sync aggregate, using: + # - sync committee based on target slot + # - fork digest based on target slot - 1 + # - signed data based on parent_bid.slot + # All three slots may be from different forks + sync_aggregate, signature_slot = get_sync_aggregate( + spec, state, num_participants=num_sync_participants, phases=test.phases) + assert signature_slot == slot + + # Apply final block with computed sync aggregate + spec, state, block = transition_across_forks( + spec, state, slot, phases=test.phases, with_block=True, sync_aggregate=sync_aggregate) + bid = block_to_block_id(block) + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=block) + test.states[block.message.state_root] = ForkedBeaconState(spec=spec, data=state) + process_new_block_for_light_client(test, spec, state, block, parent_bid) + block_obj = yield from encode_object( + test, "block", ForkedSignedBeaconBlock(spec=spec, data=block), block.message.slot, + state.genesis_validators_root, + ) + test.steps.append({ + "new_block": block_obj + }) + return spec, state, bid + + +def select_new_head(test, spec, head_bid): + old_finalized_bid = test.latest_finalized_bid + process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid) + + # Process finalization + block = test.blocks[head_bid.root] + state = test.states[block.data.message.state_root] + if state.data.finalized_checkpoint.epoch != spec.GENESIS_EPOCH: + block = test.blocks[state.data.finalized_checkpoint.root] + bid = block_to_block_id(block.data) + new_finalized_bid = bid + if new_finalized_bid.slot > old_finalized_bid.slot: + old_finalized_epoch = None + new_finalized_epoch = state.data.finalized_checkpoint.epoch + while bid.slot > test.latest_finalized_bid.slot: + test.finalized_block_roots[bid.slot] = bid.root + finalized_epoch = spec.compute_epoch_at_slot(bid.slot + spec.SLOTS_PER_EPOCH - 1) + if finalized_epoch != old_finalized_epoch: + state = test.states[block.data.message.state_root] + 
test.finalized_checkpoint_states[block.data.message.state_root] = state + old_finalized_epoch = finalized_epoch + block = test.blocks[block.data.message.parent_root] + bid = block_to_block_id(block.data) + test.latest_finalized_epoch = new_finalized_epoch + test.latest_finalized_bid = new_finalized_bid + process_finalization_for_light_client(test, spec, new_finalized_bid, old_finalized_bid) + + blocks_to_delete = [] + for block_root, block in test.blocks.items(): + if block.data.message.slot < new_finalized_bid.slot: + blocks_to_delete.append(block_root) + for block_root in blocks_to_delete: + del test.blocks[block_root] + states_to_delete = [] + for state_root, state in test.states.items(): + if state.data.slot < new_finalized_bid.slot: + states_to_delete.append(state_root) + for state_root in states_to_delete: + del test.states[state_root] + + yield from [] # Consistently enable `yield from` syntax in calling tests + + bootstraps = [] + for state in test.finalized_checkpoint_states.values(): + bid = state_to_block_id(state.data) + entry = { + "block_root": encode_hex(bid.root), + } + bootstrap = get_light_client_bootstrap(test, bid.root) + if bootstrap.spec is not None: + bootstrap_obj = yield from encode_object( + test, "bootstrap", bootstrap, bootstrap.data.header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["bootstrap"] = bootstrap_obj + bootstraps.append(entry) + + best_updates = [] + low_period = spec.compute_sync_committee_period_at_slot(test.lc_data_store.cache.tail_slot) + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + for period in range(low_period, head_period + 1): + entry = { + "period": int(period), + } + update = get_light_client_update_for_period(test, period) + if update.spec is not None: + update_obj = yield from encode_object( + test, "update", update, update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["update"] = update_obj + best_updates.append(entry) + + checks = { + "latest_finalized_checkpoint": { + "epoch": int(test.latest_finalized_epoch), + "root": encode_hex(test.latest_finalized_bid.root), + }, + "bootstraps": bootstraps, + "best_updates": best_updates, + } + finality_update = get_light_client_finality_update(test) + if finality_update.spec is not None: + finality_update_obj = yield from encode_object( + test, "finality_update", finality_update, finality_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_finality_update"] = finality_update_obj + optimistic_update = get_light_client_finality_update(test) + if optimistic_update.spec is not None: + optimistic_update_obj = yield from encode_object( + test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_finality_update"] = optimistic_update_obj + + test.steps.append({ + "new_head": { + "head_block_root": encode_hex(head_bid.root), + "checks": checks, + } + }) + + +@with_light_client +@spec_state_test_with_matching_config +@with_presets([MINIMAL], reason="too slow") +def test_light_client_data_collection(spec, state): + # Start test + test = yield from setup_test(spec, state) + + # Genesis block is post Altair and is finalized, so can be used as bootstrap + genesis_bid = BlockId(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid + + # No blocks have 
been imported, so no other light client data is available + period = spec.compute_sync_committee_period_at_slot(state.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # Start branch A with a block that has an empty sync aggregate + spec_a, state_a, bid_1 = yield from add_new_block(test, spec, state, slot=1) + yield from select_new_head(test, spec_a, bid_1) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # Start branch B with a block that has 1 participant + spec_b, state_b, bid_2 = yield from add_new_block(test, spec, state, slot=2, num_sync_participants=1) + yield from select_new_head(test, spec_b, bid_2) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid + assert update_attested_bid(get_light_client_finality_update(test).data) == genesis_bid + assert update_attested_bid(get_light_client_optimistic_update(test).data) == genesis_bid + + # Build on branch A, once more with an empty sync aggregate + spec_a, state_a, bid_3 = yield from add_new_block(test, spec_a, state_a, slot=3) + yield from select_new_head(test, spec_a, bid_3) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # Build on branch B, this time with an empty sync aggregate + spec_b, state_b, bid_4 = yield from add_new_block(test, spec_b, state_b, slot=4) + yield from select_new_head(test, spec_b, bid_4) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid + assert update_attested_bid(get_light_client_finality_update(test).data) == genesis_bid + assert update_attested_bid(get_light_client_optimistic_update(test).data) == genesis_bid + + # Build on branch B, once more with 1 participant + spec_b, state_b, bid_5 = yield from add_new_block(test, spec_b, state_b, slot=5, num_sync_participants=1) + yield from select_new_head(test, spec_b, bid_5) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_4 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_4 + + # Build on branch B, this time with 3 participants + spec_b, state_b, bid_6 = yield from add_new_block(test, spec_b, state_b, slot=6, num_sync_participants=3) + yield from select_new_head(test, spec_b, bid_6) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_5 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_5 + + # Build on branch A, with 2 participants + spec_a, state_a, bid_7 = yield from add_new_block(test, spec_a, 
state_a, slot=7, num_sync_participants=2) + yield from select_new_head(test, spec_a, bid_7) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_3 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3 + + # Branch A: epoch 1, slot 5 + slot = spec_a.compute_start_slot_at_epoch(1) + 5 + spec_a, state_a, bid_1_5 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=4) + yield from select_new_head(test, spec_a, bid_1_5) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_7 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_7 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_7 + + # Branch B: epoch 2, slot 4 + slot = spec_b.compute_start_slot_at_epoch(2) + 4 + spec_b, state_b, bid_2_4 = yield from add_new_block(test, spec_b, state_b, slot=slot, num_sync_participants=5) + yield from select_new_head(test, spec_b, bid_2_4) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_6 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_6 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_6 + + # Branch A: epoch 3, slot 0 + slot = spec_a.compute_start_slot_at_epoch(3) + 0 + spec_a, state_a, bid_3_0 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_3_0) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_1_5 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_1_5 + + # Branch A: fill epoch + for i in range(1, spec_a.SLOTS_PER_EPOCH): + spec_a, state_a, bid_a = yield from add_new_block(test, spec_a, state_a) + yield from select_new_head(test, spec_a, bid_a) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_1_5 + assert 
update_attested_bid(get_light_client_optimistic_update(test).data) == bid_1_5 + assert state_a.slot == spec_a.compute_start_slot_at_epoch(4) - 1 + bid_3_n = bid_a + + # Branch A: epoch 4, slot 0 + slot = spec_a.compute_start_slot_at_epoch(4) + 0 + spec_a, state_a, bid_4_0 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_4_0) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3_n + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3_n + + # Branch A: fill epoch + for i in range(1, spec_a.SLOTS_PER_EPOCH): + spec_a, state_a, bid_a = yield from add_new_block(test, spec_a, state_a) + yield from select_new_head(test, spec_a, bid_a) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3_n + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3_n + assert state_a.slot == spec_a.compute_start_slot_at_epoch(5) - 1 + bid_4_n = bid_a + + # Branch A: epoch 6, slot 2 + slot = spec_a.compute_start_slot_at_epoch(6) + 2 + spec_a, state_a, bid_6_2 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_6_2) + assert bootstrap_bid(get_light_client_bootstrap(test, bid_7.root).data) == bid_7 + assert bootstrap_bid(get_light_client_bootstrap(test, bid_1_5.root).data) == bid_1_5 + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert bootstrap_bid(get_light_client_bootstrap(test, bid_3_0.root).data) == bid_3_0 + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_4_n + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_4_n + + # Finish test + yield from finish_test(test) diff --git a/tests/formats/light_client/README.md b/tests/formats/light_client/README.md index 505b416019..050b406f0b 100644 --- a/tests/formats/light_client/README.md +++ b/tests/formats/light_client/README.md @@ -3,6 +3,7 @@ This series of tests provides reference test vectors for the light client sync protocol spec. 
Handlers: +- `data_collection`: see [Light client data collection test format](./data_collection.md) - `single_merkle_proof`: see [Single leaf merkle proof test format](./single_merkle_proof.md) - `sync`: see [Light client sync test format](./sync.md) - `update_ranking`: see [`LightClientUpdate` ranking test format](./update_ranking.md) diff --git a/tests/formats/light_client/data_collection.md b/tests/formats/light_client/data_collection.md new file mode 100644 index 0000000000..d8f13e5ed0 --- /dev/null +++ b/tests/formats/light_client/data_collection.md @@ -0,0 +1,76 @@ +# Light client data collection tests + +This series of tests provies reference test vectors for validating that a full node collects canonical data for serving to light clients implementing the light client sync protocol to sync to the latest block header. + +## Test case format + +### `initial_state.ssz_snappy` + +An SSZ-snappy encoded object of type `BeaconState` to initialize the blockchain from. The state's `slot` is epoch aligned. + +### `steps.yaml` + +The steps to execute in sequence. + +#### `new_block` execution step + +The new block described by the test step should be imported, but does not become head yet. + +```yaml +{ + fork_digest: string -- Encoded `ForkDigest`-context of `block` + data: string -- name of the `*.ssz_snappy` file to load + as a `SignedBeaconBlock` object +} +``` + +#### `new_head` execution step + +The given block (previously imported) should become head, leading to potential updates to: + +- The best `LightClientUpdate` for non-finalized sync committee periods. +- The latest `LightClientFinalityUpdate` and `LightClientOptimisticUpdate`. +- The latest finalized `Checkpoint` (across all branches). +- The available `LightClientBootstrap` instances for newly finalized `Checkpoint`s. + +```yaml +{ + head_block_root: Bytes32 -- string, hex encoded, with 0x prefix + checks: { + latest_finalized_checkpoint: { -- tracked across all branches + epoch: int -- integer, decimal + root: Bytes32 -- string, hex encoded, with 0x prefix + } + bootstraps: [ -- one entry per `LightClientBootstrap` + block_root: Bytes32 -- string, hex encoded, with 0x prefix + bootstrap: { -- only exists if a `LightClientBootstrap` is available + fork_digest: string -- Encoded `ForkDigest`-context of `data` + data: string -- name of the `*.ssz_snappy` file to load + as a `LightClientBootstrap` object + } + ] + best_updates: [ -- one entry per sync committee period + period: int, -- integer, decimal + update: { -- only exists if a best `LightClientUpdate` is available + fork_digest: string -- Encoded `ForkDigest`-context of `data` + data: string -- name of the `*.ssz_snappy` file to load + as a `LightClientUpdate` object + } + ] + latest_finality_update: { -- only exists if a `LightClientFinalityUpdate` is available + fork_digest: string -- Encoded `ForkDigest`-context of `data` + data: string -- name of the `*.ssz_snappy` file to load + as a `LightClientFinalityUpdate` object + } + latest_optimistic_update: { -- only exists if a `LightClientOptimisticUpdate` is available + fork_digest: string -- Encoded `ForkDigest`-context of `data` + data: string -- name of the `*.ssz_snappy` file to load + as a `LightClientOptimisticUpdate` object + } + } +} +``` + +## Condition + +A test-runner should initialize a simplified blockchain from `initial_state`. An external signal is used to control fork choice. The test-runner should then proceed to execute all the test steps in sequence, collecting light client data during execution. 
After each `new_head` step, it should verify that the collected light client data matches the provided `checks`. diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index cfe34aee4b..341321a2ae 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -4,6 +4,7 @@ if __name__ == "__main__": altair_mods = {key: 'eth2spec.test.altair.light_client.test_' + key for key in [ + 'data_collection', 'single_merkle_proof', 'sync', 'update_ranking', From 2154298e080ff30d8adecc34be7ee204f64174f9 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 6 Feb 2024 13:01:58 +0100 Subject: [PATCH 03/60] Typo --- tests/formats/light_client/data_collection.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/formats/light_client/data_collection.md b/tests/formats/light_client/data_collection.md index d8f13e5ed0..f9c1fa7a0e 100644 --- a/tests/formats/light_client/data_collection.md +++ b/tests/formats/light_client/data_collection.md @@ -1,6 +1,6 @@ # Light client data collection tests -This series of tests provies reference test vectors for validating that a full node collects canonical data for serving to light clients implementing the light client sync protocol to sync to the latest block header. +This series of tests provides reference test vectors for validating that a full node collects canonical data for serving to light clients implementing the light client sync protocol to sync to the latest block header. ## Test case format From 248f32b59a81d44e33612cfd5800f00a5973b119 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 6 Feb 2024 13:49:21 +0100 Subject: [PATCH 04/60] Lint --- .../light_client/test_data_collection.py | 39 ++++++++++--------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 264c654810..2cc39131c1 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -26,8 +26,8 @@ def next_epoch_boundary_slot(spec, slot): - ## Compute the first possible epoch boundary state slot of a `Checkpoint` - ## referring to a block at given slot. + # Compute the first possible epoch boundary state slot of a `Checkpoint` + # referring to a block at given slot. epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) return spec.compute_start_slot_at_epoch(epoch) @@ -212,7 +212,8 @@ def get_current_sync_committee_for_finalized_period(test, period): # -> Optiona block = test.blocks[bid.root] state = test.finalized_checkpoint_states[block.data.message.state_root] if sync_committee_slot > state.data.slot: - state.spec, state.data, _ = transition_across_forks(state.spec, state.data, sync_committee_slot, phases=test.phases) + state.spec, state.data, _ = transition_across_forks( + state.spec, state.data, sync_committee_slot, phases=test.phases) assert is_post_altair(state.spec) return state.data.current_sync_committee @@ -242,8 +243,8 @@ def sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] def get_light_client_data(lc_data_store, bid): # -> CachedLightClientData - ## Fetch cached light client data about a given block. - ## Data must be cached (`cache_light_client_data`) before calling this function. + # Fetch cached light client data about a given block. 
+ # Data must be cached (`cache_light_client_data`) before calling this function. try: return lc_data_store.cache.data[bid] except KeyError: @@ -251,9 +252,9 @@ def get_light_client_data(lc_data_store, bid): # -> CachedLightClientData def cache_light_client_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): - ## Cache data for a given block and its post-state to speed up creating future - ## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this - ## block and state. + # Cache data for a given block and its post-state to speed up creating future + # `LightClientUpdate` and `LightClientBootstrap` instances that refer to this + # block and state. cached_data = CachedLightClientData( current_sync_committee_branch=spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_GINDEX), next_sync_committee_branch=spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_GINDEX), @@ -268,8 +269,8 @@ def cache_light_client_data(lc_data_store, spec, state, bid, current_period_best def delete_light_client_data(lc_data_store, bid): - ## Delete cached light client data for a given block. This needs to be called - ## when a block becomes unreachable due to finalization of a different fork. + # Delete cached light client data for a given block. This needs to be called + # when a block becomes unreachable due to finalization of a different fork. del lc_data_store.cache.data[bid] @@ -335,9 +336,9 @@ def create_light_client_update_from_light_client_data(test, def create_light_client_update(test, spec, state, block, parent_bid): - ## Create `LightClientUpdate` instances for a given block and its post-state, - ## and keep track of best / latest ones. Data about the parent block's - ## post-state must be cached (`cache_light_client_data`) before calling this. + # Create `LightClientUpdate` instances for a given block and its post-state, + # and keep track of best / latest ones. Data about the parent block's + # post-state must be cached (`cache_light_client_data`) before calling this. # Verify attested block (parent) is recent enough and that state is available attested_bid = parent_bid @@ -421,7 +422,7 @@ def create_light_client_bootstrap(test, spec, bid): def process_new_block_for_light_client(test, spec, state, block, parent_bid): - ## Update light client data with information from a new block. + # Update light client data with information from a new block. if block.message.slot < test.lc_data_store.cache.tail_slot: return @@ -432,8 +433,8 @@ def process_new_block_for_light_client(test, spec, state, block, parent_bid): def process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): - ## Update light client data to account for a new head block. - ## Note that `old_finalized_bid` is not yet updated when this is called. + # Update light client data to account for a new head block. + # Note that `old_finalized_bid` is not yet updated when this is called. if head_bid.slot < test.lc_data_store.cache.tail_slot: return @@ -477,9 +478,9 @@ def process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid def process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): - ## Prune cached data that is no longer useful for creating future - ## `LightClientUpdate` and `LightClientBootstrap` instances. - ## This needs to be called whenever `finalized_checkpoint` changes. + # Prune cached data that is no longer useful for creating future + # `LightClientUpdate` and `LightClientBootstrap` instances. 
+ # This needs to be called whenever `finalized_checkpoint` changes. finalized_slot = finalized_bid.slot if finalized_slot < test.lc_data_store.cache.tail_slot: return From c0d037f1b4648738683538aa30ca8ef77bb1a600 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Fri, 23 Feb 2024 13:56:56 +0100 Subject: [PATCH 05/60] Fix missing `optimistc_update` in new tests --- .../eth2spec/test/altair/light_client/test_data_collection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 2cc39131c1..8cd32e40a1 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -747,13 +747,13 @@ def select_new_head(test, spec, head_bid): state.data.genesis_validators_root, ) checks["latest_finality_update"] = finality_update_obj - optimistic_update = get_light_client_finality_update(test) + optimistic_update = get_light_client_optimistic_update(test) if optimistic_update.spec is not None: optimistic_update_obj = yield from encode_object( test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, state.data.genesis_validators_root, ) - checks["latest_finality_update"] = optimistic_update_obj + checks["latest_optimistic_update"] = optimistic_update_obj test.steps.append({ "new_head": { From b8f0ddcf78da9da31e99162648b17f82b709a29c Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Sun, 3 Mar 2024 20:49:37 +0100 Subject: [PATCH 06/60] Add more tests for multi-period reorgs --- .../light_client/test_data_collection.py | 157 +++++++++++++++++- 1 file changed, 156 insertions(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 8cd32e40a1..55ee5a74be 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -4,11 +4,16 @@ from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test_with_matching_config, + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, with_presets, + with_state, with_light_client, ) from eth2spec.test.helpers.constants import ( - ALTAIR, + ALTAIR, BELLATRIX, CAPELLA, DENEB, MINIMAL, ) from eth2spec.test.helpers.fork_transition import ( @@ -933,3 +938,153 @@ def test_light_client_data_collection(spec, state): # Finish test yield from finish_test(test) + + +def run_test_multi_fork(spec, phases, state, fork_1, fork_2): + # Start test + test = yield from setup_test(spec, state, phases=phases) + + # Genesis block is post Altair and is finalized, so can be used as bootstrap + genesis_bid = BlockId(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid + + # Shared history up to final epoch of period before `fork_1` + fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') + fork_1_period = spec.compute_sync_committee_period(fork_1_epoch) + slot = compute_start_slot_at_sync_committee_period(spec, fork_1_period) - spec.SLOTS_PER_EPOCH + spec, state, bid = yield from add_new_block(test, spec, state, slot=slot, 
num_sync_participants=1) + yield from select_new_head(test, spec, bid) + assert get_light_client_bootstrap(test, bid.root).spec is None + slot_period = spec.compute_sync_committee_period_at_slot(slot) + if slot_period == 0: + assert update_attested_bid(get_light_client_update_for_period(test, 0).data) == genesis_bid + else: + for period in range(0, slot_period): + assert get_light_client_update_for_period(test, period).spec is None # attested period != signature period + state_period = spec.compute_sync_committee_period_at_slot(state.slot) + + # Branch A: Advance past `fork_2`, having blocks at slots 0 and 4 of each epoch + spec_a = spec + state_a = state + slot_a = state_a.slot + bids_a = [bid] + num_sync_participants_a = 1 + fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') + while spec_a.get_current_epoch(state_a) <= fork_2_epoch: + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a += 4 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert update_attested_bid(get_light_client_finality_update(test).data) == bids_a[-1] + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Branch B: Advance past `fork_2`, having blocks at slots 1 and 5 of each epoch but no sync participation + spec_b = spec + state_b = state + slot_b = state_b.slot + bids_b = [bid] + while spec_b.get_current_epoch(state_b) <= fork_2_epoch: + slot_b += 4 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b) + # Simulate that this does not become head yet, e.g., this branch was withheld + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + bids_b.append(bid_b) + + # Branch B: Another block that becomes head + attested_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + slot_b += 1 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + num_sync_participants_b = 1 + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b, num_sync_participants=num_sync_participants_b) + yield from select_new_head(test, spec_b, bid_b) + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_b[-1] + else: + assert signature_period == attested_period + 1 + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_b[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert 
update_attested_bid(get_light_client_finality_update(test).data) == bids_b[-1] + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_b[-1] + bids_b.append(bid_b) + + # All data for periods between the common ancestor of the two branches should have reorged. + # As there was no sync participation on branch B, that means it is deleted. + state_b_period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + for period in range(state_period + 1, state_b_period): + assert get_light_client_update_for_period(test, period).spec is None + + # Branch A: Another block, reorging branch B once more + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a = slot_b + 1 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert update_attested_bid(get_light_client_finality_update(test).data) == bids_a[-1] + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Data has been restored + state_a_period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + for period in range(state_period + 1, state_a_period): + assert get_light_client_update_for_period(test, period).spec is not None + + # Finish test + yield from finish_test(test) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_aligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_unaligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) From 337cd1edbdd9b6889648fce989838b38925f645a Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Thu, 8 Aug 2024 17:13:06 +0200 Subject: [PATCH 07/60] ENR structure: Add `tcp6`, `quic6` and `udp6`. As discussed in ACDC #139. 
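For illustration only (not part of the patch): with the `tcp6`, `quic6` and `udp6` entries added, a dual-stack node advertising both address families could end up with ENR key-value pairs roughly like the sketch below. The key names follow EIP-778 and the spec change in this commit; the addresses and ports are placeholder values, not recommendations.

```python
# Illustrative sketch of ENR key-value entries for a dual-stack consensus node.
# Key names follow EIP-778 / the p2p-interface spec; addresses and ports are
# placeholders chosen for the example.
enr_entries = {
    "ip": "192.0.2.1",      # IPv4 address
    "tcp": 9000,            # IPv4 libp2p TCP listening port
    "quic": 9001,           # IPv4 libp2p QUIC (UDP) listening port
    "udp": 9000,            # IPv4 discv5 listening port
    "ip6": "2001:db8::1",   # IPv6 address
    "tcp6": 9000,           # corresponding IPv6 libp2p TCP port (`tcp6`)
    "quic6": 9001,          # corresponding IPv6 libp2p QUIC (UDP) port (`quic6`)
    "udp6": 9000,           # corresponding IPv6 discv5 port (`udp6`)
}
```
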
--- specs/phase0/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index e8c2ce9d63..fa569573f3 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -959,9 +959,9 @@ The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the The ENR MAY contain the following entries: - An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field). -- A TCP port (`tcp` field) representing the local libp2p TCP listening port. -- A QUIC port (`quic` field) representing the local libp2p QUIC (UDP) listening port. -- A UDP port (`udp` field) representing the local discv5 listening port. +- An IPv4 TCP port (`tcp` field) representing the local libp2p TCP listening port and/or the corresponding IPv6 port (`tcp6` field). +- An IPv4 QUIC port (`quic` field) representing the local libp2p QUIC (UDP) listening port and/or the corresponding IPv6 port (`quic6` field). +- An IPv4 UDP port (`udp` field) representing the local discv5 listening port and/or the corresponding IPv6 port (`udp6` field). Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778). From 946849637f89c8c182c71f5f8a16ac0fe6d216dc Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Fri, 22 Nov 2024 07:20:53 -0600 Subject: [PATCH 08/60] Fix nits in data_collection format --- tests/formats/light_client/data_collection.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/formats/light_client/data_collection.md b/tests/formats/light_client/data_collection.md index f9c1fa7a0e..b0d17a68e9 100644 --- a/tests/formats/light_client/data_collection.md +++ b/tests/formats/light_client/data_collection.md @@ -18,7 +18,7 @@ The new block described by the test step should be imported, but does not become ```yaml { - fork_digest: string -- Encoded `ForkDigest`-context of `block` + fork_digest: string -- encoded `ForkDigest`-context of `block` data: string -- name of the `*.ssz_snappy` file to load as a `SignedBeaconBlock` object } @@ -44,26 +44,26 @@ The given block (previously imported) should become head, leading to potential u bootstraps: [ -- one entry per `LightClientBootstrap` block_root: Bytes32 -- string, hex encoded, with 0x prefix bootstrap: { -- only exists if a `LightClientBootstrap` is available - fork_digest: string -- Encoded `ForkDigest`-context of `data` + fork_digest: string -- encoded `ForkDigest`-context of `data` data: string -- name of the `*.ssz_snappy` file to load as a `LightClientBootstrap` object } ] best_updates: [ -- one entry per sync committee period - period: int, -- integer, decimal + period: int -- integer, decimal update: { -- only exists if a best `LightClientUpdate` is available - fork_digest: string -- Encoded `ForkDigest`-context of `data` + fork_digest: string -- encoded `ForkDigest`-context of `data` data: string -- name of the `*.ssz_snappy` file to load as a `LightClientUpdate` object } ] latest_finality_update: { -- only exists if a `LightClientFinalityUpdate` is available - fork_digest: string -- Encoded `ForkDigest`-context of `data` + fork_digest: string -- encoded `ForkDigest`-context of `data` data: string -- name of the `*.ssz_snappy` file to load as a `LightClientFinalityUpdate` object } latest_optimistic_update: { -- only exists if a `LightClientOptimisticUpdate` is available - fork_digest: string -- Encoded `ForkDigest`-context of 
`data` + fork_digest: string -- encoded `ForkDigest`-context of `data` data: string -- name of the `*.ssz_snappy` file to load as a `LightClientOptimisticUpdate` object } From 5639ca69d6ae13ffbaeafd29561e5fce448394fe Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Fri, 22 Nov 2024 09:45:56 -0600 Subject: [PATCH 09/60] Rename two classes for consistency --- .../light_client/test_data_collection.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 27e8e5437c..57a7183077 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -42,13 +42,13 @@ def next_epoch_boundary_slot(spec, slot): @dataclass(frozen=True) -class BlockId(object): +class BlockID(object): slot: Any root: Any def block_to_block_id(block): - return BlockId( + return BlockID( slot=block.message.slot, root=block.message.hash_tree_root(), ) @@ -57,18 +57,18 @@ def block_to_block_id(block): def state_to_block_id(state): parent_header = state.latest_block_header.copy() parent_header.state_root = state.hash_tree_root() - return BlockId(slot=parent_header.slot, root=parent_header.hash_tree_root()) + return BlockID(slot=parent_header.slot, root=parent_header.hash_tree_root()) def bootstrap_bid(bootstrap): - return BlockId( + return BlockID( slot=bootstrap.header.beacon.slot, root=bootstrap.header.beacon.hash_tree_root(), ) def update_attested_bid(update): - return BlockId( + return BlockID( slot=update.attested_header.beacon.slot, root=update.attested_header.beacon.hash_tree_root(), ) @@ -136,7 +136,7 @@ class LightClientDataCache(object): # Cached data for creating future `LightClientUpdate` instances. # Key is the block ID of which the post state was used to get the data. # Data stored for the finalized head block and all non-finalized blocks. - data: Dict[BlockId, CachedLightClientData] + data: Dict[BlockID, CachedLightClientData] # Light client data for the latest slot that was signed by at least # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. 
May be older than head @@ -147,7 +147,7 @@ class LightClientDataCache(object): @dataclass -class LightClientDataDb(object): +class LightClientDataDB(object): headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee @@ -162,7 +162,7 @@ class LightClientDataStore(object): cache: LightClientDataCache # Persistent light client data - db: LightClientDataDb + db: LightClientDataDB @dataclass @@ -179,14 +179,14 @@ class LightClientDataCollectionTest(object): states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState latest_finalized_epoch: Any # Epoch - latest_finalized_bid: BlockId + latest_finalized_bid: BlockID historical_tail_slot: Any # Slot # Light client data lc_data_store: LightClientDataStore -def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockId] +def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockID] try: block = test.blocks[bid.root] while True: @@ -198,10 +198,10 @@ def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockId] return None -def block_id_at_finalized_slot(test, slot): # -> Optional[BlockId] +def block_id_at_finalized_slot(test, slot): # -> Optional[BlockID] while slot >= test.historical_tail_slot: try: - return BlockId(slot=slot, root=test.finalized_block_roots[slot]) + return BlockID(slot=slot, root=test.finalized_block_roots[slot]) except KeyError: slot = slot - 1 return None @@ -586,7 +586,7 @@ def setup_test(spec, state, phases=None): states={}, finalized_checkpoint_states={}, latest_finalized_epoch=state.finalized_checkpoint.epoch, - latest_finalized_bid=BlockId( + latest_finalized_bid=BlockID( slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), root=state.finalized_checkpoint.root, ), @@ -598,7 +598,7 @@ def setup_test(spec, state, phases=None): latest=ForkedLightClientFinalityUpdate(spec=None, data=None), tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), ), - db=LightClientDataDb( + db=LightClientDataDB( headers={}, current_branches={}, sync_committees={}, @@ -792,7 +792,7 @@ def test_light_client_data_collection(spec, state): test = yield from setup_test(spec, state) # Genesis block is post Altair and is finalized, so can be used as bootstrap - genesis_bid = BlockId(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid # No blocks have been imported, so no other light client data is available @@ -961,7 +961,7 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2): test = yield from setup_test(spec, state, phases=phases) # Genesis block is post Altair and is finalized, so can be used as bootstrap - genesis_bid = BlockId(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid # Shared history up to final epoch of period before `fork_1` From aff4e348354cab9be3ffadb90a4ac78eeb41cf82 Mon Sep 17 00:00:00 2001 From: 
Justin Traglia Date: Fri, 22 Nov 2024 10:43:05 -0600 Subject: [PATCH 10/60] Move bellatrix/capella tests into respective dirs --- .../test/bellatrix/light_client/__init__.py | 0 .../light_client/test_data_collection.py | 41 +++++++++++++++++++ .../light_client/test_data_collection.py | 40 ++++++++++++++++++ tests/generators/light_client/main.py | 8 +++- 4 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py create mode 100644 tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py new file mode 100644 index 0000000000..dced8d0b3e --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py @@ -0,0 +1,41 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + BELLATRIX, CAPELLA, DENEB, + MINIMAL, +) +from eth2spec.test.altair.light_client.test_data_collection import ( + run_test_multi_fork +) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_aligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_unaligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py new file mode 100644 index 0000000000..7911f1c320 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py @@ -0,0 +1,40 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.altair.light_client.test_data_collection import ( + run_test_multi_fork +) + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def 
test_deneb_electra_reorg_aligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_reorg_unaligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index 04d1d423be..2501773ac5 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -9,12 +9,18 @@ 'sync', 'update_ranking', ]} - bellatrix_mods = altair_mods + + _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [ + 'data_collection', + ]} + bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ + 'data_collection', 'single_merkle_proof', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) + deneb_mods = capella_mods electra_mods = deneb_mods From b6259a9fd7f6bca6ae89dc09f04f2f0d61638469 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Fri, 22 Nov 2024 10:59:05 -0600 Subject: [PATCH 11/60] Revert "Move bellatrix/capella tests into respective dirs" This reverts commit aff4e348354cab9be3ffadb90a4ac78eeb41cf82. --- .../test/bellatrix/light_client/__init__.py | 0 .../light_client/test_data_collection.py | 41 ------------------- .../light_client/test_data_collection.py | 40 ------------------ tests/generators/light_client/main.py | 8 +--- 4 files changed, 1 insertion(+), 88 deletions(-) delete mode 100644 tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py delete mode 100644 tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py delete mode 100644 tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py deleted file mode 100644 index dced8d0b3e..0000000000 --- a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py +++ /dev/null @@ -1,41 +0,0 @@ -from eth2spec.test.context import ( - spec_test, - with_config_overrides, - with_matching_spec_config, - with_phases, - with_presets, - with_state, -) -from eth2spec.test.helpers.constants import ( - BELLATRIX, CAPELLA, DENEB, - MINIMAL, -) -from eth2spec.test.altair.light_client.test_data_collection import ( - run_test_multi_fork -) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 - 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_reorg_aligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - 
-@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) - 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_reorg_unaligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py deleted file mode 100644 index 7911f1c320..0000000000 --- a/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py +++ /dev/null @@ -1,40 +0,0 @@ -from eth2spec.test.context import ( - spec_test, - with_config_overrides, - with_matching_spec_config, - with_phases, - with_presets, - with_state, -) -from eth2spec.test.helpers.constants import ( - CAPELLA, DENEB, ELECTRA, - MINIMAL, -) -from eth2spec.test.altair.light_client.test_data_collection import ( - run_test_multi_fork -) - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 - 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_reorg_aligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) - 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_reorg_unaligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index 2501773ac5..04d1d423be 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -9,18 +9,12 @@ 'sync', 'update_ranking', ]} - - _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [ - 'data_collection', - ]} - bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) + bellatrix_mods = altair_mods _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ - 'data_collection', 'single_merkle_proof', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - deneb_mods = capella_mods electra_mods = deneb_mods From e00e866b84cb5b1b3a5fd25ef9af6d43088cb479 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 13:15:08 +0100 Subject: [PATCH 12/60] Synchronise capitalization change request across files --- tests/formats/light_client/sync.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/formats/light_client/sync.md b/tests/formats/light_client/sync.md index 1706b4c162..c6e62a7c8b 100644 --- a/tests/formats/light_client/sync.md +++ b/tests/formats/light_client/sync.md @@ -9,8 +9,8 @@ This series of tests provides reference test vectors for validating that a light ```yaml genesis_validators_root: Bytes32 -- string, hex encoded, with 0x 
prefix trusted_block_root: Bytes32 -- string, hex encoded, with 0x prefix -bootstrap_fork_digest: string -- Encoded `ForkDigest`-context of `bootstrap` -store_fork_digest: string -- Encoded `ForkDigest`-context of `store` object being tested +bootstrap_fork_digest: string -- encoded `ForkDigest`-context of `bootstrap` +store_fork_digest: string -- encoded `ForkDigest`-context of `store` object being tested ``` ### `bootstrap.ssz_snappy` @@ -60,7 +60,7 @@ The function `process_light_client_update(store, update, current_slot, genesis_v ```yaml { - update_fork_digest: string -- Encoded `ForkDigest`-context of `update` + update_fork_digest: string -- encoded `ForkDigest`-context of `update` update: string -- name of the `*.ssz_snappy` file to load as a `LightClientUpdate` object current_slot: int -- integer, decimal @@ -78,7 +78,7 @@ The `store` should be upgraded to reflect the new `store_fork_digest`: ```yaml { - store_fork_digest: string -- Encoded `ForkDigest`-context of `store` + store_fork_digest: string -- encoded `ForkDigest`-context of `store` checks: {: value} -- the assertions. } ``` From 84bef3c6881edaa4892362461433a2de3f848e52 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 13:50:37 +0100 Subject: [PATCH 13/60] Split LC sync test into multiple files --- .../test/altair/light_client/test_sync.py | 460 +----------------- .../test/capella/light_client/test_sync.py | 36 ++ .../test/deneb/light_client/__init__.py | 0 .../test/deneb/light_client/test_sync.py | 50 ++ .../test/electra/light_client/__init__.py | 0 .../test/electra/light_client/test_sync.py | 64 +++ .../test/helpers/light_client_sync.py | 342 +++++++++++++ 7 files changed, 505 insertions(+), 447 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py create mode 100644 tests/core/pyspec/eth2spec/test/deneb/light_client/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py create mode 100644 tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py create mode 100644 tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 45c7d77887..8000ceb799 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -1,14 +1,6 @@ -from typing import (Any, Dict, List) - -from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test_with_matching_config, - spec_test, - with_config_overrides, - with_matching_spec_config, - with_phases, with_presets, - with_state, with_light_client, ) from eth2spec.test.helpers.attestations import ( @@ -16,23 +8,17 @@ state_transition_with_full_block, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, MINIMAL, ) -from eth2spec.test.helpers.fork_transition import ( - do_fork, - transition_across_forks, -) -from eth2spec.test.helpers.forks import ( - get_spec_for_fork_version, - is_post_capella, is_post_deneb, is_post_electra, -) from eth2spec.test.helpers.light_client import ( - compute_start_slot_at_next_sync_committee_period, get_sync_aggregate, - upgrade_lc_bootstrap_to_new_spec, - upgrade_lc_update_to_new_spec, - upgrade_lc_store_to_new_spec, + compute_start_slot_at_next_sync_committee_period, +) +from 
eth2spec.test.helpers.light_client_sync import ( + emit_force_update, + emit_update, + finish_lc_sync_test, + setup_lc_sync_test, ) from eth2spec.test.helpers.state import ( next_slots, @@ -40,162 +26,12 @@ ) -class LightClientSyncTest(object): - steps: List[Dict[str, Any]] - genesis_validators_root: Any - s_spec: Any - store: Any - - -def get_store_fork_version(s_spec): - if is_post_electra(s_spec): - return s_spec.config.ELECTRA_FORK_VERSION - if is_post_deneb(s_spec): - return s_spec.config.DENEB_FORK_VERSION - if is_post_capella(s_spec): - return s_spec.config.CAPELLA_FORK_VERSION - return s_spec.config.ALTAIR_FORK_VERSION - - -def setup_test(spec, state, s_spec=None, phases=None): - test = LightClientSyncTest() - test.steps = [] - - if s_spec is None: - s_spec = spec - if phases is None: - phases = { - spec.fork: spec, - s_spec.fork: s_spec, - } - test.s_spec = s_spec - - yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex() - test.genesis_validators_root = state.genesis_validators_root - - next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1) - trusted_block = state_transition_with_full_block(spec, state, True, True) - trusted_block_root = trusted_block.message.hash_tree_root() - yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex() - - data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(trusted_block.message.slot)) - data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) - d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) - data = d_spec.create_light_client_bootstrap(state, trusted_block) - yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest) - yield "bootstrap", data - - upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data, phases) - test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded) - store_fork_version = get_store_fork_version(test.s_spec) - store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) - yield "store_fork_digest", "meta", encode_hex(store_fork_digest) - - return test - - -def finish_test(test): - yield "steps", test.steps - - -def get_update_file_name(d_spec, update): - if d_spec.is_sync_committee_update(update): - suffix1 = "s" - else: - suffix1 = "x" - if d_spec.is_finality_update(update): - suffix2 = "f" - else: - suffix2 = "x" - return f"update_{encode_hex(update.attested_header.beacon.hash_tree_root())}_{suffix1}{suffix2}" - - -def get_checks(s_spec, store): - if is_post_capella(s_spec): - return { - "finalized_header": { - 'slot': int(store.finalized_header.beacon.slot), - 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), - 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.finalized_header)), - }, - "optimistic_header": { - 'slot': int(store.optimistic_header.beacon.slot), - 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), - 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.optimistic_header)), - }, - } - - return { - "finalized_header": { - 'slot': int(store.finalized_header.beacon.slot), - 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), - }, - "optimistic_header": { - 'slot': int(store.optimistic_header.beacon.slot), - 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), - }, - } - - -def emit_force_update(test, spec, state): - current_slot = state.slot - 
test.s_spec.process_light_client_store_force_update(test.store, current_slot) - - yield from [] # Consistently enable `yield from` syntax in calling tests - test.steps.append({ - "force_update": { - "current_slot": int(current_slot), - "checks": get_checks(test.s_spec, test.store), - } - }) - - -def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True, phases=None): - data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(attested_block.message.slot)) - data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) - d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) - data = d_spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block) - if not with_next: - data.next_sync_committee = spec.SyncCommittee() - data.next_sync_committee_branch = spec.NextSyncCommitteeBranch() - current_slot = state.slot - - upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data, phases) - test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root) - - yield get_update_file_name(d_spec, data), data - test.steps.append({ - "process_update": { - "update_fork_digest": encode_hex(data_fork_digest), - "update": get_update_file_name(d_spec, data), - "current_slot": int(current_slot), - "checks": get_checks(test.s_spec, test.store), - } - }) - return upgraded - - -def emit_upgrade_store(test, new_s_spec, phases=None): - test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store, phases) - test.s_spec = new_s_spec - store_fork_version = get_store_fork_version(test.s_spec) - store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) - - yield from [] # Consistently enable `yield from` syntax in calling tests - test.steps.append({ - "upgrade_store": { - "store_fork_digest": encode_hex(store_fork_digest), - "checks": get_checks(test.s_spec, test.store), - } - }) - - @with_light_client @spec_state_test_with_matching_config @with_presets([MINIMAL], reason="too slow") def test_light_client_sync(spec, state): # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) # Initial `LightClientUpdate`, populating `store.next_sync_committee` # ``` @@ -409,7 +245,7 @@ def test_light_client_sync(spec, state): assert test.store.optimistic_header.beacon.slot == attested_state.slot # Finish test - yield from finish_test(test) + yield from finish_lc_sync_test(test) @with_light_client @@ -428,7 +264,7 @@ def test_supply_sync_committee_from_past_update(spec, state): past_state = state.copy() # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) assert not spec.is_next_sync_committee_known(test.store) # Apply `LightClientUpdate` from the past, populating `store.next_sync_committee` @@ -439,7 +275,7 @@ def test_supply_sync_committee_from_past_update(spec, state): assert test.store.optimistic_header.beacon.slot == state.slot # Finish test - yield from finish_test(test) + yield from finish_lc_sync_test(test) @with_light_client @@ -447,7 +283,7 @@ def test_supply_sync_committee_from_past_update(spec, state): @with_presets([MINIMAL], reason="too slow") def test_advance_finality_without_sync_committee(spec, state): # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) # Initial `LightClientUpdate`, populating 
`store.next_sync_committee` next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1) @@ -515,274 +351,4 @@ def test_advance_finality_without_sync_committee(spec, state): assert test.store.optimistic_header.beacon.slot == attested_state.slot # Finish test - yield from finish_test(test) - - -def run_test_single_fork(spec, phases, state, fork): - # Start test - test = yield from setup_test(spec, state, phases=phases) - - # Initial `LightClientUpdate` - finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Jump to two slots before fork - fork_epoch = getattr(phases[fork].config, fork.upper() + '_FORK_EPOCH') - transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch) - 4) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - update = yield from emit_update( - test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Perform `LightClientStore` upgrade - yield from emit_upgrade_store(test, phases[fork], phases=phases) - update = test.store.best_valid_update - - # Final slot before fork, check that importing the pre-fork format still works - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Upgrade to post-fork spec, attested block is still before the fork - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate) - spec = phases[fork] - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee 
- assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Another block after the fork, this time attested block is after the fork - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Jump to next epoch - transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch + 1) - 2) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finalize the fork - finalized_block = block.copy() - finalized_state = state.copy() - _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_test(test) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=CAPELLA) -@with_presets([MINIMAL], reason="too slow") -def test_capella_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, CAPELLA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, DENEB) - - -@with_phases(phases=[DENEB], other_phases=[ELECTRA]) -@spec_test -@with_config_overrides({ - 'ELECTRA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") 
-def test_electra_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, ELECTRA) - - -def run_test_multi_fork(spec, phases, state, fork_1, fork_2): - # Start test - test = yield from setup_test(spec, state, phases[fork_2], phases) - - # Set up so that finalized is from `spec`, ... - finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - - # ..., attested is from `fork_1`, ... - fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') - spec, state, attested_block = transition_across_forks( - spec, - state, - spec.compute_start_slot_at_epoch(fork_1_epoch), - phases, - with_block=True, - ) - attested_state = state.copy() - - # ..., and signature is from `fork_2` - fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') - spec, state, _ = transition_across_forks( - spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1, phases) - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - spec, state, block = transition_across_forks( - spec, - state, - spec.compute_start_slot_at_epoch(fork_2_epoch), - phases, - with_block=True, - sync_aggregate=sync_aggregate, - ) - - # Check that update applies - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_test(test) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'DENEB_FORK_EPOCH': 4, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'DENEB_FORK_EPOCH': 4, - 'ELECTRA_FORK_EPOCH': 5, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_capella_electra_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'ELECTRA_FORK_EPOCH': 4, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) - - -def run_test_upgraded_store_with_legacy_data(spec, phases, state, fork): - # Start test (Legacy bootstrap with an upgraded store) - test = yield from setup_test(spec, state, phases[fork], phases) - - # Initial `LightClientUpdate` (check that the upgraded store can process it) - finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - attested_block = state_transition_with_full_block(spec, 
state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_test(test) - - -@with_phases(phases=[ALTAIR, BELLATRIX], other_phases=[CAPELLA]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=CAPELLA) -@with_presets([MINIMAL], reason="too slow") -def test_capella_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) - - -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) - - -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA, DENEB], other_phases=[CAPELLA, DENEB, ELECTRA]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_electra_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) + yield from finish_lc_sync_test(test) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py new file mode 100644 index 0000000000..3958900be5 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py @@ -0,0 +1,36 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + ALTAIR, BELLATRIX, CAPELLA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_single_fork, + run_lc_sync_test_upgraded_store_with_legacy_data, +) + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=CAPELLA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, CAPELLA) + + +@with_phases(phases=[ALTAIR, BELLATRIX], other_phases=[CAPELLA]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=CAPELLA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py new file mode 100644 index 0000000000..d19e1e0238 --- 
/dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py @@ -0,0 +1,50 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + ALTAIR, BELLATRIX, CAPELLA, DENEB, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_multi_fork, + run_lc_sync_test_single_fork, + run_lc_sync_test_upgraded_store_with_legacy_data, +) + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 4, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py new file mode 100644 index 0000000000..2b20552d6b --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py @@ -0,0 +1,64 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_multi_fork, + run_lc_sync_test_single_fork, + run_lc_sync_test_upgraded_store_with_legacy_data, +) + +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@spec_test +@with_config_overrides({ + 'ELECTRA_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_electra_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, ELECTRA) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 4, + 'ELECTRA_FORK_EPOCH': 5, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) + + +@with_phases(phases=[CAPELLA], 
other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'ELECTRA_FORK_EPOCH': 4, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + + +@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA, DENEB], other_phases=[CAPELLA, DENEB, ELECTRA]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_electra_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py new file mode 100644 index 0000000000..e64b0a2eca --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py @@ -0,0 +1,342 @@ +from typing import (Any, Dict, List) + +from eth_utils import encode_hex +from eth2spec.test.helpers.attestations import ( + next_slots_with_attestations, + state_transition_with_full_block, +) +from eth2spec.test.helpers.fork_transition import ( + do_fork, + transition_across_forks, +) +from eth2spec.test.helpers.forks import ( + get_spec_for_fork_version, + is_post_capella, is_post_deneb, is_post_electra, +) +from eth2spec.test.helpers.light_client import ( + get_sync_aggregate, + upgrade_lc_bootstrap_to_new_spec, + upgrade_lc_update_to_new_spec, + upgrade_lc_store_to_new_spec, +) +from eth2spec.test.helpers.state import ( + next_slots, + transition_to, +) + + +class LightClientSyncTest(object): + steps: List[Dict[str, Any]] + genesis_validators_root: Any + s_spec: Any + store: Any + + +def _get_store_fork_version(s_spec): + if is_post_electra(s_spec): + return s_spec.config.ELECTRA_FORK_VERSION + if is_post_deneb(s_spec): + return s_spec.config.DENEB_FORK_VERSION + if is_post_capella(s_spec): + return s_spec.config.CAPELLA_FORK_VERSION + return s_spec.config.ALTAIR_FORK_VERSION + + +def setup_lc_sync_test(spec, state, s_spec=None, phases=None): + test = LightClientSyncTest() + test.steps = [] + + if s_spec is None: + s_spec = spec + if phases is None: + phases = { + spec.fork: spec, + s_spec.fork: s_spec, + } + test.s_spec = s_spec + + yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex() + test.genesis_validators_root = state.genesis_validators_root + + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1) + trusted_block = state_transition_with_full_block(spec, state, True, True) + trusted_block_root = trusted_block.message.hash_tree_root() + yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex() + + data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(trusted_block.message.slot)) + data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) + d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) + data = d_spec.create_light_client_bootstrap(state, trusted_block) + yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest) + yield "bootstrap", data + + upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data, phases) + test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded) + store_fork_version = _get_store_fork_version(test.s_spec) + store_fork_digest = 
test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) + yield "store_fork_digest", "meta", encode_hex(store_fork_digest) + + return test + + +def finish_lc_sync_test(test): + yield "steps", test.steps + + +def _get_update_file_name(d_spec, update): + if d_spec.is_sync_committee_update(update): + suffix1 = "s" + else: + suffix1 = "x" + if d_spec.is_finality_update(update): + suffix2 = "f" + else: + suffix2 = "x" + return f"update_{encode_hex(update.attested_header.beacon.hash_tree_root())}_{suffix1}{suffix2}" + + +def _get_checks(s_spec, store): + if is_post_capella(s_spec): + return { + "finalized_header": { + 'slot': int(store.finalized_header.beacon.slot), + 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), + 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.finalized_header)), + }, + "optimistic_header": { + 'slot': int(store.optimistic_header.beacon.slot), + 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), + 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.optimistic_header)), + }, + } + + return { + "finalized_header": { + 'slot': int(store.finalized_header.beacon.slot), + 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), + }, + "optimistic_header": { + 'slot': int(store.optimistic_header.beacon.slot), + 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), + }, + } + + +def emit_force_update(test, spec, state): + current_slot = state.slot + test.s_spec.process_light_client_store_force_update(test.store, current_slot) + + yield from [] # Consistently enable `yield from` syntax in calling tests + test.steps.append({ + "force_update": { + "current_slot": int(current_slot), + "checks": _get_checks(test.s_spec, test.store), + } + }) + + +def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True, phases=None): + data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(attested_block.message.slot)) + data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) + d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) + data = d_spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block) + if not with_next: + data.next_sync_committee = spec.SyncCommittee() + data.next_sync_committee_branch = spec.NextSyncCommitteeBranch() + current_slot = state.slot + + upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data, phases) + test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root) + + yield _get_update_file_name(d_spec, data), data + test.steps.append({ + "process_update": { + "update_fork_digest": encode_hex(data_fork_digest), + "update": _get_update_file_name(d_spec, data), + "current_slot": int(current_slot), + "checks": _get_checks(test.s_spec, test.store), + } + }) + return upgraded + + +def _emit_upgrade_store(test, new_s_spec, phases=None): + test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store, phases) + test.s_spec = new_s_spec + store_fork_version = _get_store_fork_version(test.s_spec) + store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) + + yield from [] # Consistently enable `yield from` syntax in calling tests + test.steps.append({ + "upgrade_store": { + "store_fork_digest": encode_hex(store_fork_digest), + "checks": _get_checks(test.s_spec, test.store), + } 
+ }) + + +def run_lc_sync_test_single_fork(spec, phases, state, fork): + # Start test + test = yield from setup_lc_sync_test(spec, state, phases=phases) + + # Initial `LightClientUpdate` + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Jump to two slots before fork + fork_epoch = getattr(phases[fork].config, fork.upper() + '_FORK_EPOCH') + transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch) - 4) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + update = yield from emit_update( + test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Perform `LightClientStore` upgrade + yield from _emit_upgrade_store(test, phases[fork], phases=phases) + update = test.store.best_valid_update + + # Final slot before fork, check that importing the pre-fork format still works + attested_block = block.copy() + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Upgrade to post-fork spec, attested block is still before the fork + attested_block = block.copy() + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate) + spec = phases[fork] + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Another block after the fork, this time attested block is after the fork + attested_block = block.copy() + attested_state = 
state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Jump to next epoch + transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch + 1) - 2) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finalize the fork + finalized_block = block.copy() + finalized_state = state.copy() + _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) + + +def run_lc_sync_test_multi_fork(spec, phases, state, fork_1, fork_2): + # Start test + test = yield from setup_lc_sync_test(spec, state, phases[fork_2], phases) + + # Set up so that finalized is from `spec`, ... + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + + # ..., attested is from `fork_1`, ... 
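+    # (The attested block will be the first block of `fork_1`; `emit_update` therefore
+    # creates the update under the `fork_1` spec and upgrades it to the store's `fork_2`
+    # spec before it is processed.)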
+ fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') + spec, state, attested_block = transition_across_forks( + spec, + state, + spec.compute_start_slot_at_epoch(fork_1_epoch), + phases, + with_block=True, + ) + attested_state = state.copy() + + # ..., and signature is from `fork_2` + fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') + spec, state, _ = transition_across_forks( + spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1, phases) + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + spec, state, block = transition_across_forks( + spec, + state, + spec.compute_start_slot_at_epoch(fork_2_epoch), + phases, + with_block=True, + sync_aggregate=sync_aggregate, + ) + + # Check that update applies + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) + + +def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): + # Start test (Legacy bootstrap with an upgraded store) + test = yield from setup_lc_sync_test(spec, state, phases[fork], phases) + + # Initial `LightClientUpdate` (check that the upgraded store can process it) + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) From 75c65e63bf1636011166fb65db50fc3a1830bcb4 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 14:25:20 +0100 Subject: [PATCH 14/60] Split LC data collection test into multiple files --- .../light_client/test_data_collection.py | 1047 +---------------- .../light_client/test_data_collection.py | 40 + .../light_client/test_data_collection.py | 41 + .../helpers/light_client_data_collection.py | 897 ++++++++++++++ 4 files changed, 1032 insertions(+), 993 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py create mode 100644 tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py create mode 100644 tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 57a7183077..af73b26345 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -1,799 +1,36 @@ -from typing import 
(Any, Dict, List, Set) -from dataclasses import dataclass - -from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test_with_matching_config, - spec_test, - with_config_overrides, - with_matching_spec_config, - with_phases, with_presets, - with_state, with_light_client, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, MINIMAL, ) -from eth2spec.test.helpers.fork_transition import ( - transition_across_forks, -) -from eth2spec.test.helpers.forks import ( - is_post_altair, -) -from eth2spec.test.helpers.light_client import ( - compute_start_slot_at_sync_committee_period, - get_sync_aggregate, - latest_current_sync_committee_gindex, - latest_finalized_root_gindex, - latest_next_sync_committee_gindex, - latest_normalize_merkle_branch, - upgrade_lc_header_to_new_spec, - upgrade_lc_update_to_new_spec, +from eth2spec.test.helpers.light_client_data_collection import ( + add_new_block, + finish_lc_data_collection_test, + get_lc_bootstrap_block_id, + get_lc_update_attested_block_id, + get_light_client_bootstrap, + get_light_client_finality_update, + get_light_client_optimistic_update, + get_light_client_update_for_period, + select_new_head, + setup_lc_data_collection_test, + BlockID, ) -def next_epoch_boundary_slot(spec, slot): - # Compute the first possible epoch boundary state slot of a `Checkpoint` - # referring to a block at given slot. - epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) - return spec.compute_start_slot_at_epoch(epoch) - - -@dataclass(frozen=True) -class BlockID(object): - slot: Any - root: Any - - -def block_to_block_id(block): - return BlockID( - slot=block.message.slot, - root=block.message.hash_tree_root(), - ) - - -def state_to_block_id(state): - parent_header = state.latest_block_header.copy() - parent_header.state_root = state.hash_tree_root() - return BlockID(slot=parent_header.slot, root=parent_header.hash_tree_root()) - - -def bootstrap_bid(bootstrap): - return BlockID( - slot=bootstrap.header.beacon.slot, - root=bootstrap.header.beacon.hash_tree_root(), - ) - - -def update_attested_bid(update): - return BlockID( - slot=update.attested_header.beacon.slot, - root=update.attested_header.beacon.hash_tree_root(), - ) - - -@dataclass -class ForkedBeaconState(object): - spec: Any - data: Any - - -@dataclass -class ForkedSignedBeaconBlock(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientHeader(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientBootstrap(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientUpdate(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientFinalityUpdate(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientOptimisticUpdate(object): - spec: Any - data: Any - - -@dataclass -class CachedLightClientData(object): - # Sync committee branches at block's post-state - current_sync_committee_branch: Any # CurrentSyncCommitteeBranch - next_sync_committee_branch: Any # NextSyncCommitteeBranch - - # Finality information at block's post-state - finalized_slot: Any # Slot - finality_branch: Any # FinalityBranch - - # Best / latest light client data - current_period_best_update: ForkedLightClientUpdate - latest_signature_slot: Any # Slot - - -@dataclass -class LightClientDataCache(object): - # Cached data for creating future `LightClientUpdate` instances. - # Key is the block ID of which the post state was used to get the data. 
- # Data stored for the finalized head block and all non-finalized blocks. - data: Dict[BlockID, CachedLightClientData] - - # Light client data for the latest slot that was signed by at least - # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. May be older than head - latest: ForkedLightClientFinalityUpdate - - # The earliest slot for which light client data is imported - tail_slot: Any # Slot - - -@dataclass -class LightClientDataDB(object): - headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader - current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch - sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee - best_updates: Dict[Any, ForkedLightClientUpdate] # SyncCommitteePeriod -> ForkedLightClientUpdate - - -@dataclass -class LightClientDataStore(object): - spec: Any - - # Cached data to accelerate creating light client data - cache: LightClientDataCache - - # Persistent light client data - db: LightClientDataDB - - -@dataclass -class LightClientDataCollectionTest(object): - steps: List[Dict[str, Any]] - files: Set[str] - - # Fork schedule - phases: Any - - # History access - blocks: Dict[Any, ForkedSignedBeaconBlock] # Block root -> ForkedSignedBeaconBlock - finalized_block_roots: Dict[Any, Any] # Slot -> Root - states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState - finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState - latest_finalized_epoch: Any # Epoch - latest_finalized_bid: BlockID - historical_tail_slot: Any # Slot - - # Light client data - lc_data_store: LightClientDataStore - - -def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockID] - try: - block = test.blocks[bid.root] - while True: - if block.data.message.slot <= slot: - return block_to_block_id(block.data) - - block = test.blocks[block.data.message.parent_root] - except KeyError: - return None - - -def block_id_at_finalized_slot(test, slot): # -> Optional[BlockID] - while slot >= test.historical_tail_slot: - try: - return BlockID(slot=slot, root=test.finalized_block_roots[slot]) - except KeyError: - slot = slot - 1 - return None - - -def get_current_sync_committee_for_finalized_period(test, period): # -> Optional[SyncCommittee] - low_slot = max( - test.historical_tail_slot, - test.lc_data_store.spec.compute_start_slot_at_epoch( - test.lc_data_store.spec.config.ALTAIR_FORK_EPOCH) - ) - if period < test.lc_data_store.spec.compute_sync_committee_period_at_slot(low_slot): - return None - period_start_slot = compute_start_slot_at_sync_committee_period(test.lc_data_store.spec, period) - sync_committee_slot = max(period_start_slot, low_slot) - bid = block_id_at_finalized_slot(test, sync_committee_slot) - if bid is None: - return None - block = test.blocks[bid.root] - state = test.finalized_checkpoint_states[block.data.message.state_root] - if sync_committee_slot > state.data.slot: - state.spec, state.data, _ = transition_across_forks( - state.spec, state.data, sync_committee_slot, phases=test.phases) - assert is_post_altair(state.spec) - return state.data.current_sync_committee - - -def light_client_header_for_block(test, block): # -> ForkedLightClientHeader - if not is_post_altair(block.spec): - spec = test.phases[ALTAIR] - else: - spec = block.spec - return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) - - -def light_client_header_for_block_id(test, bid): # -> ForkedLightClientHeader - block = test.blocks[bid.root] - if not is_post_altair(block.spec): - spec = 
test.phases[ALTAIR] - else: - spec = block.spec - return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) - - -def sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] - block = test.blocks[bid.root] - if not is_post_altair(block.spec): - return None - return block.data.message.body.sync_aggregate - - -def get_light_client_data(lc_data_store, bid): # -> CachedLightClientData - # Fetch cached light client data about a given block. - # Data must be cached (`cache_light_client_data`) before calling this function. - try: - return lc_data_store.cache.data[bid] - except KeyError: - raise ValueError("Trying to get light client data that was not cached") - - -def cache_light_client_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): - # Cache data for a given block and its post-state to speed up creating future - # `LightClientUpdate` and `LightClientBootstrap` instances that refer to this - # block and state. - cached_data = CachedLightClientData( - current_sync_committee_branch=latest_normalize_merkle_branch( - lc_data_store.spec, - spec.compute_merkle_proof(state, spec.current_sync_committee_gindex_at_slot(state.slot)), - latest_current_sync_committee_gindex(lc_data_store.spec)), - next_sync_committee_branch=latest_normalize_merkle_branch( - lc_data_store.spec, - spec.compute_merkle_proof(state, spec.next_sync_committee_gindex_at_slot(state.slot)), - latest_next_sync_committee_gindex(lc_data_store.spec)), - finalized_slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), - finality_branch=latest_normalize_merkle_branch( - lc_data_store.spec, - spec.compute_merkle_proof(state, spec.finalized_root_gindex_at_slot(state.slot)), - latest_finalized_root_gindex(lc_data_store.spec)), - current_period_best_update=current_period_best_update, - latest_signature_slot=latest_signature_slot, - ) - if bid in lc_data_store.cache.data: - raise ValueError("Redundant `cache_light_client_data` call") - lc_data_store.cache.data[bid] = cached_data - - -def delete_light_client_data(lc_data_store, bid): - # Delete cached light client data for a given block. This needs to be called - # when a block becomes unreachable due to finalization of a different fork. 
- del lc_data_store.cache.data[bid] - - -def create_light_client_finality_update_from_light_client_data(test, - attested_bid, - signature_slot, - sync_aggregate): # -> ForkedLightClientFinalityUpdate - attested_header = light_client_header_for_block_id(test, attested_bid) - attested_data = get_light_client_data(test.lc_data_store, attested_bid) - finalized_bid = block_id_at_finalized_slot(test, attested_data.finalized_slot) - if finalized_bid is not None: - if finalized_bid.slot != attested_data.finalized_slot: - # Empty slots at end of epoch, update cache for latest block slot - attested_data.finalized_slot = finalized_bid.slot - if finalized_bid.slot == attested_header.spec.GENESIS_SLOT: - finalized_header = ForkedLightClientHeader( - spec=attested_header.spec, - data=attested_header.spec.LightClientHeader(), - ) - else: - finalized_header = light_client_header_for_block_id(test, finalized_bid) - finalized_header = ForkedLightClientHeader( - spec=attested_header.spec, - data=upgrade_lc_header_to_new_spec( - finalized_header.spec, - attested_header.spec, - finalized_header.data, - ) - ) - finality_branch = attested_data.finality_branch - return ForkedLightClientFinalityUpdate( - spec=attested_header.spec, - data=attested_header.spec.LightClientFinalityUpdate( - attested_header=attested_header.data, - finalized_header=finalized_header.data, - finality_branch=finality_branch, - sync_aggregate=sync_aggregate, - signature_slot=signature_slot, - ), - ) - - -def create_light_client_update_from_light_client_data(test, - attested_bid, - signature_slot, - sync_aggregate, - next_sync_committee): # -> ForkedLightClientUpdate - finality_update = create_light_client_finality_update_from_light_client_data( - test, attested_bid, signature_slot, sync_aggregate) - attested_data = get_light_client_data(test.lc_data_store, attested_bid) - return ForkedLightClientUpdate( - spec=finality_update.spec, - data=finality_update.spec.LightClientUpdate( - attested_header=finality_update.data.attested_header, - next_sync_committee=next_sync_committee, - next_sync_committee_branch=attested_data.next_sync_committee_branch, - finalized_header=finality_update.data.finalized_header, - finality_branch=finality_update.data.finality_branch, - sync_aggregate=finality_update.data.sync_aggregate, - signature_slot=finality_update.data.signature_slot, - ) - ) - - -def create_light_client_update(test, spec, state, block, parent_bid): - # Create `LightClientUpdate` instances for a given block and its post-state, - # and keep track of best / latest ones. Data about the parent block's - # post-state must be cached (`cache_light_client_data`) before calling this. 
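    # Note: throughout these helpers, a `Forked*` wrapper with `spec=None` (for example
    # `ForkedLightClientUpdate(spec=None, data=None)`) is the "no data available" sentinel;
    # consumers check `.spec is None` rather than comparing the wrapper itself to `None`.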
- - # Verify attested block (parent) is recent enough and that state is available - attested_bid = parent_bid - attested_slot = attested_bid.slot - if attested_slot < test.lc_data_store.cache.tail_slot: - cache_light_client_data( - test.lc_data_store, - spec, - state, - block_to_block_id(block), - current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), - latest_signature_slot=spec.GENESIS_SLOT, - ) - return - - # If sync committee period changed, reset `best` - attested_period = spec.compute_sync_committee_period_at_slot(attested_slot) - signature_slot = block.message.slot - signature_period = spec.compute_sync_committee_period_at_slot(signature_slot) - attested_data = get_light_client_data(test.lc_data_store, attested_bid) - if attested_period != signature_period: - best = ForkedLightClientUpdate(spec=None, data=None) - else: - best = attested_data.current_period_best_update - - # If sync committee does not have sufficient participants, do not bump latest - sync_aggregate = block.message.body.sync_aggregate - num_active_participants = sum(sync_aggregate.sync_committee_bits) - if num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS: - latest_signature_slot = attested_data.latest_signature_slot - else: - latest_signature_slot = signature_slot - - # To update `best`, sync committee must have sufficient participants, and - # `signature_slot` must be in `attested_slot`'s sync committee period - if ( - num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS - or attested_period != signature_period - ): - cache_light_client_data( - test.lc_data_store, - spec, - state, - block_to_block_id(block), - current_period_best_update=best, - latest_signature_slot=latest_signature_slot, - ) - return - - # Check if light client data improved - update = create_light_client_update_from_light_client_data( - test, attested_bid, signature_slot, sync_aggregate, state.next_sync_committee) - is_better = ( - best.spec is None - or spec.is_better_update(update.data, upgrade_lc_update_to_new_spec( - best.spec, update.spec, best.data, test.phases)) - ) - - # Update best light client data for current sync committee period - if is_better: - best = update - cache_light_client_data( - test.lc_data_store, - spec, - state, - block_to_block_id(block), - current_period_best_update=best, - latest_signature_slot=latest_signature_slot, - ) - - -def create_light_client_bootstrap(test, spec, bid): - block = test.blocks[bid.root] - period = spec.compute_sync_committee_period_at_slot(bid.slot) - if period not in test.lc_data_store.db.sync_committees: - test.lc_data_store.db.sync_committees[period] = \ - get_current_sync_committee_for_finalized_period(test, period) - test.lc_data_store.db.headers[bid.root] = ForkedLightClientHeader( - spec=block.spec, data=block.spec.block_to_light_client_header(block.data)) - test.lc_data_store.db.current_branches[bid.slot] = \ - get_light_client_data(test.lc_data_store, bid).current_sync_committee_branch - - -def process_new_block_for_light_client(test, spec, state, block, parent_bid): - # Update light client data with information from a new block. - if block.message.slot < test.lc_data_store.cache.tail_slot: - return - - if is_post_altair(spec): - create_light_client_update(test, spec, state, block, parent_bid) - else: - raise ValueError("`tail_slot` cannot be before Altair") - - -def process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): - # Update light client data to account for a new head block. 
- # Note that `old_finalized_bid` is not yet updated when this is called. - if head_bid.slot < test.lc_data_store.cache.tail_slot: - return - - # Commit best light client data for non-finalized periods - head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) - low_slot = max(test.lc_data_store.cache.tail_slot, old_finalized_bid.slot) - low_period = spec.compute_sync_committee_period_at_slot(low_slot) - bid = head_bid - for period in reversed(range(low_period, head_period + 1)): - period_end_slot = compute_start_slot_at_sync_committee_period(spec, period + 1) - 1 - bid = get_ancestor_of_block_id(test, bid, period_end_slot) - if bid is None or bid.slot < low_slot: - break - best = get_light_client_data(test.lc_data_store, bid).current_period_best_update - if ( - best.spec is None - or sum(best.data.sync_aggregate.sync_committee_bits) < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS - ): - test.lc_data_store.db.best_updates.pop(period, None) - else: - test.lc_data_store.db.best_updates[period] = best - - # Update latest light client data - head_data = get_light_client_data(test.lc_data_store, head_bid) - signature_slot = head_data.latest_signature_slot - if signature_slot <= low_slot: - test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) - return - signature_bid = get_ancestor_of_block_id(test, head_bid, signature_slot) - if signature_bid is None or signature_bid.slot <= low_slot: - test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) - return - attested_bid = get_ancestor_of_block_id(test, signature_bid, signature_bid.slot - 1) - if attested_bid is None or attested_bid.slot < low_slot: - test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) - return - sync_aggregate = sync_aggregate_for_block_id(test, signature_bid) - assert sync_aggregate is not None - test.lc_data_store.cache.latest = create_light_client_finality_update_from_light_client_data( - test, attested_bid, signature_slot, sync_aggregate) - - -def process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): - # Prune cached data that is no longer useful for creating future - # `LightClientUpdate` and `LightClientBootstrap` instances. - # This needs to be called whenever `finalized_checkpoint` changes. 
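    # Illustration of the epoch boundary walk below, assuming the minimal preset's
    # SLOTS_PER_EPOCH of 8 and an (arbitrary) finalized block at slot 21:
    #   next_epoch_boundary_slot(spec, 21) == compute_start_slot_at_epoch(3) == 24,
    # so the loop caches a bootstrap for the highest canonical block at or below the
    # finalized slot, then revisits earlier boundaries (16, 8, ...) until it would
    # drop below `low_slot`.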
- finalized_slot = finalized_bid.slot - if finalized_slot < test.lc_data_store.cache.tail_slot: - return - - # Cache `LightClientBootstrap` for newly finalized epoch boundary blocks - first_new_slot = old_finalized_bid.slot + 1 - low_slot = max(first_new_slot, test.lc_data_store.cache.tail_slot) - boundary_slot = finalized_slot - while boundary_slot >= low_slot: - bid = block_id_at_finalized_slot(test, boundary_slot) - if bid is None: - break - if bid.slot >= low_slot: - create_light_client_bootstrap(test, spec, bid) - boundary_slot = next_epoch_boundary_slot(spec, bid.slot) - if boundary_slot < spec.SLOTS_PER_EPOCH: - break - boundary_slot = boundary_slot - spec.SLOTS_PER_EPOCH - - # Prune light client data that is no longer referrable by future updates - bids_to_delete = [] - for bid in test.lc_data_store.cache.data: - if bid.slot >= finalized_bid.slot: - continue - bids_to_delete.append(bid) - for bid in bids_to_delete: - delete_light_client_data(test.lc_data_store, bid) - - -def get_light_client_bootstrap(test, block_root): # -> ForkedLightClientBootstrap - try: - header = test.lc_data_store.db.headers[block_root] - except KeyError: - return ForkedLightClientBootstrap(spec=None, data=None) - - slot = header.data.beacon.slot - period = header.spec.compute_sync_committee_period_at_slot(slot) - return ForkedLightClientBootstrap( - spec=header.spec, - data=header.spec.LightClientBootstrap( - header=header.data, - current_sync_committee=test.lc_data_store.db.sync_committees[period], - current_sync_committee_branch=test.lc_data_store.db.current_branches[slot], - ) - ) - - -def get_light_client_update_for_period(test, period): # -> ForkedLightClientUpdate - try: - return test.lc_data_store.db.best_updates[period] - except KeyError: - return ForkedLightClientUpdate(spec=None, data=None) - - -def get_light_client_finality_update(test): # -> ForkedLightClientFinalityUpdate - return test.lc_data_store.cache.latest - - -def get_light_client_optimistic_update(test): # -> ForkedLightClientOptimisticUpdate - finality_update = get_light_client_finality_update(test) - if finality_update.spec is None: - return ForkedLightClientOptimisticUpdate(spec=None, data=None) - return ForkedLightClientOptimisticUpdate( - spec=finality_update.spec, - data=finality_update.spec.LightClientOptimisticUpdate( - attested_header=finality_update.data.attested_header, - sync_aggregate=finality_update.data.sync_aggregate, - signature_slot=finality_update.data.signature_slot, - ), - ) - - -def setup_test(spec, state, phases=None): - assert spec.compute_slots_since_epoch_start(state.slot) == 0 - - test = LightClientDataCollectionTest( - steps=[], - files=set(), - phases=phases, - blocks={}, - finalized_block_roots={}, - states={}, - finalized_checkpoint_states={}, - latest_finalized_epoch=state.finalized_checkpoint.epoch, - latest_finalized_bid=BlockID( - slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), - root=state.finalized_checkpoint.root, - ), - historical_tail_slot=state.slot, - lc_data_store=LightClientDataStore( - spec=spec, - cache=LightClientDataCache( - data={}, - latest=ForkedLightClientFinalityUpdate(spec=None, data=None), - tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), - ), - db=LightClientDataDB( - headers={}, - current_branches={}, - sync_committees={}, - best_updates={}, - ), - ), - ) - bid = state_to_block_id(state) - yield "initial_state", state - test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=spec.SignedBeaconBlock( - 
message=spec.BeaconBlock(state_root=state.hash_tree_root()), - )) - test.finalized_block_roots[bid.slot] = bid.root - test.states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) - test.finalized_checkpoint_states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) - cache_light_client_data( - test.lc_data_store, spec, state, bid, - current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), - latest_signature_slot=spec.GENESIS_SLOT, - ) - create_light_client_bootstrap(test, spec, bid) - - return test - - -def finish_test(test): - yield "steps", test.steps - - -def encode_object(test, prefix, obj, slot, genesis_validators_root): - yield from [] # Consistently enable `yield from` syntax in calling tests - - file_name = f"{prefix}_{slot}_{encode_hex(obj.data.hash_tree_root())}" - if file_name not in test.files: - test.files.add(file_name) - yield file_name, obj.data - return { - "fork_digest": encode_hex(obj.spec.compute_fork_digest( - obj.spec.compute_fork_version(obj.spec.compute_epoch_at_slot(slot)), - genesis_validators_root, - )), - "data": file_name, - } - - -def add_new_block(test, spec, state, slot=None, num_sync_participants=0): - if slot is None: - slot = state.slot + 1 - assert slot > state.slot - parent_bid = state_to_block_id(state) - - # Advance to target slot - 1 to ensure sync aggregate can be efficiently computed - if state.slot < slot - 1: - spec, state, _ = transition_across_forks(spec, state, slot - 1, phases=test.phases) - - # Compute sync aggregate, using: - # - sync committee based on target slot - # - fork digest based on target slot - 1 - # - signed data based on parent_bid.slot - # All three slots may be from different forks - sync_aggregate, signature_slot = get_sync_aggregate( - spec, state, num_participants=num_sync_participants, phases=test.phases) - assert signature_slot == slot - - # Apply final block with computed sync aggregate - spec, state, block = transition_across_forks( - spec, state, slot, phases=test.phases, with_block=True, sync_aggregate=sync_aggregate) - bid = block_to_block_id(block) - test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=block) - test.states[block.message.state_root] = ForkedBeaconState(spec=spec, data=state) - process_new_block_for_light_client(test, spec, state, block, parent_bid) - block_obj = yield from encode_object( - test, "block", ForkedSignedBeaconBlock(spec=spec, data=block), block.message.slot, - state.genesis_validators_root, - ) - test.steps.append({ - "new_block": block_obj - }) - return spec, state, bid - - -def select_new_head(test, spec, head_bid): - old_finalized_bid = test.latest_finalized_bid - process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid) - - # Process finalization - block = test.blocks[head_bid.root] - state = test.states[block.data.message.state_root] - if state.data.finalized_checkpoint.epoch != spec.GENESIS_EPOCH: - block = test.blocks[state.data.finalized_checkpoint.root] - bid = block_to_block_id(block.data) - new_finalized_bid = bid - if new_finalized_bid.slot > old_finalized_bid.slot: - old_finalized_epoch = None - new_finalized_epoch = state.data.finalized_checkpoint.epoch - while bid.slot > test.latest_finalized_bid.slot: - test.finalized_block_roots[bid.slot] = bid.root - finalized_epoch = spec.compute_epoch_at_slot(bid.slot + spec.SLOTS_PER_EPOCH - 1) - if finalized_epoch != old_finalized_epoch: - state = test.states[block.data.message.state_root] - 
test.finalized_checkpoint_states[block.data.message.state_root] = state - old_finalized_epoch = finalized_epoch - block = test.blocks[block.data.message.parent_root] - bid = block_to_block_id(block.data) - test.latest_finalized_epoch = new_finalized_epoch - test.latest_finalized_bid = new_finalized_bid - process_finalization_for_light_client(test, spec, new_finalized_bid, old_finalized_bid) - - blocks_to_delete = [] - for block_root, block in test.blocks.items(): - if block.data.message.slot < new_finalized_bid.slot: - blocks_to_delete.append(block_root) - for block_root in blocks_to_delete: - del test.blocks[block_root] - states_to_delete = [] - for state_root, state in test.states.items(): - if state.data.slot < new_finalized_bid.slot: - states_to_delete.append(state_root) - for state_root in states_to_delete: - del test.states[state_root] - - yield from [] # Consistently enable `yield from` syntax in calling tests - - bootstraps = [] - for state in test.finalized_checkpoint_states.values(): - bid = state_to_block_id(state.data) - entry = { - "block_root": encode_hex(bid.root), - } - bootstrap = get_light_client_bootstrap(test, bid.root) - if bootstrap.spec is not None: - bootstrap_obj = yield from encode_object( - test, "bootstrap", bootstrap, bootstrap.data.header.beacon.slot, - state.data.genesis_validators_root, - ) - entry["bootstrap"] = bootstrap_obj - bootstraps.append(entry) - - best_updates = [] - low_period = spec.compute_sync_committee_period_at_slot(test.lc_data_store.cache.tail_slot) - head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) - for period in range(low_period, head_period + 1): - entry = { - "period": int(period), - } - update = get_light_client_update_for_period(test, period) - if update.spec is not None: - update_obj = yield from encode_object( - test, "update", update, update.data.attested_header.beacon.slot, - state.data.genesis_validators_root, - ) - entry["update"] = update_obj - best_updates.append(entry) - - checks = { - "latest_finalized_checkpoint": { - "epoch": int(test.latest_finalized_epoch), - "root": encode_hex(test.latest_finalized_bid.root), - }, - "bootstraps": bootstraps, - "best_updates": best_updates, - } - finality_update = get_light_client_finality_update(test) - if finality_update.spec is not None: - finality_update_obj = yield from encode_object( - test, "finality_update", finality_update, finality_update.data.attested_header.beacon.slot, - state.data.genesis_validators_root, - ) - checks["latest_finality_update"] = finality_update_obj - optimistic_update = get_light_client_optimistic_update(test) - if optimistic_update.spec is not None: - optimistic_update_obj = yield from encode_object( - test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, - state.data.genesis_validators_root, - ) - checks["latest_optimistic_update"] = optimistic_update_obj - - test.steps.append({ - "new_head": { - "head_block_root": encode_hex(head_bid.root), - "checks": checks, - } - }) - - @with_light_client @spec_state_test_with_matching_config @with_presets([MINIMAL], reason="too slow") def test_light_client_data_collection(spec, state): # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_data_collection_test(spec, state) # Genesis block is post Altair and is finalized, so can be used as bootstrap genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) - assert bootstrap_bid(get_light_client_bootstrap(test, 
genesis_bid.root).data) == genesis_bid + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid # No blocks have been imported, so no other light client data is available period = spec.compute_sync_committee_period_at_slot(state.slot) @@ -813,9 +50,9 @@ def test_light_client_data_collection(spec, state): spec_b, state_b, bid_2 = yield from add_new_block(test, spec, state, slot=2, num_sync_participants=1) yield from select_new_head(test, spec_b, bid_2) period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid - assert update_attested_bid(get_light_client_finality_update(test).data) == genesis_bid - assert update_attested_bid(get_light_client_optimistic_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == genesis_bid # Build on branch A, once more with an empty sync aggregate spec_a, state_a, bid_3 = yield from add_new_block(test, spec_a, state_a, slot=3) @@ -829,33 +66,33 @@ def test_light_client_data_collection(spec, state): spec_b, state_b, bid_4 = yield from add_new_block(test, spec_b, state_b, slot=4) yield from select_new_head(test, spec_b, bid_4) period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid - assert update_attested_bid(get_light_client_finality_update(test).data) == genesis_bid - assert update_attested_bid(get_light_client_optimistic_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == genesis_bid # Build on branch B, once more with 1 participant spec_b, state_b, bid_5 = yield from add_new_block(test, spec_b, state_b, slot=5, num_sync_participants=1) yield from select_new_head(test, spec_b, bid_5) period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_4 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_4 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_4 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_4 # Build on branch B, this time with 3 participants spec_b, state_b, bid_6 = yield from add_new_block(test, spec_b, state_b, slot=6, num_sync_participants=3) yield from select_new_head(test, spec_b, bid_6) period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_5 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_5 + 
assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_5 # Build on branch A, with 2 participants spec_a, state_a, bid_7 = yield from add_new_block(test, spec_a, state_a, slot=7, num_sync_participants=2) yield from select_new_head(test, spec_a, bid_7) period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_3 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_3 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3 # Branch A: epoch 1, slot 5 slot = spec_a.compute_start_slot_at_epoch(1) + 5 @@ -864,9 +101,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_7.root).spec is None assert get_light_client_bootstrap(test, bid_1_5.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_7 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_7 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_7 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_7 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_7 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_7 # Branch B: epoch 2, slot 4 slot = spec_b.compute_start_slot_at_epoch(2) + 4 @@ -876,9 +113,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_1_5.root).spec is None assert get_light_client_bootstrap(test, bid_2_4.root).spec is None period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_6 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_6 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_6 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_6 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_6 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_6 # Branch A: epoch 3, slot 0 slot = spec_a.compute_start_slot_at_epoch(3) + 0 @@ -889,9 +126,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_2_4.root).spec is None assert get_light_client_bootstrap(test, bid_3_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_1_5 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_1_5 + 
assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_1_5 # Branch A: fill epoch for i in range(1, spec_a.SLOTS_PER_EPOCH): @@ -902,9 +139,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_2_4.root).spec is None assert get_light_client_bootstrap(test, bid_3_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_1_5 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_1_5 assert state_a.slot == spec_a.compute_start_slot_at_epoch(4) - 1 bid_3_n = bid_a @@ -918,9 +155,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_3_0.root).spec is None assert get_light_client_bootstrap(test, bid_4_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3_n - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3_n # Branch A: fill epoch for i in range(1, spec_a.SLOTS_PER_EPOCH): @@ -932,9 +169,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_3_0.root).spec is None assert get_light_client_bootstrap(test, bid_4_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3_n - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3_n assert state_a.slot == spec_a.compute_start_slot_at_epoch(5) - 1 bid_4_n = bid_a @@ -942,191 +179,15 @@ def test_light_client_data_collection(spec, state): slot = spec_a.compute_start_slot_at_epoch(6) + 2 spec_a, state_a, bid_6_2 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) yield from select_new_head(test, spec_a, bid_6_2) - assert bootstrap_bid(get_light_client_bootstrap(test, bid_7.root).data) == bid_7 - assert bootstrap_bid(get_light_client_bootstrap(test, 
bid_1_5.root).data) == bid_1_5 + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_7.root).data) == bid_7 + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_1_5.root).data) == bid_1_5 assert get_light_client_bootstrap(test, bid_2_4.root).spec is None - assert bootstrap_bid(get_light_client_bootstrap(test, bid_3_0.root).data) == bid_3_0 + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_3_0.root).data) == bid_3_0 assert get_light_client_bootstrap(test, bid_4_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_4_n - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_4_n - - # Finish test - yield from finish_test(test) - - -def run_test_multi_fork(spec, phases, state, fork_1, fork_2): - # Start test - test = yield from setup_test(spec, state, phases=phases) - - # Genesis block is post Altair and is finalized, so can be used as bootstrap - genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) - assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid - - # Shared history up to final epoch of period before `fork_1` - fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') - fork_1_period = spec.compute_sync_committee_period(fork_1_epoch) - slot = compute_start_slot_at_sync_committee_period(spec, fork_1_period) - spec.SLOTS_PER_EPOCH - spec, state, bid = yield from add_new_block(test, spec, state, slot=slot, num_sync_participants=1) - yield from select_new_head(test, spec, bid) - assert get_light_client_bootstrap(test, bid.root).spec is None - slot_period = spec.compute_sync_committee_period_at_slot(slot) - if slot_period == 0: - assert update_attested_bid(get_light_client_update_for_period(test, 0).data) == genesis_bid - else: - for period in range(0, slot_period): - assert get_light_client_update_for_period(test, period).spec is None # attested period != signature period - state_period = spec.compute_sync_committee_period_at_slot(state.slot) - - # Branch A: Advance past `fork_2`, having blocks at slots 0 and 4 of each epoch - spec_a = spec - state_a = state - slot_a = state_a.slot - bids_a = [bid] - num_sync_participants_a = 1 - fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') - while spec_a.get_current_epoch(state_a) <= fork_2_epoch: - attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) - slot_a += 4 - signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) - if signature_period != attested_period: - num_sync_participants_a = 0 - num_sync_participants_a += 1 - spec_a, state_a, bid_a = yield from add_new_block( - test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) - yield from select_new_head(test, spec_a, bid_a) - for bid in bids_a: - assert get_light_client_bootstrap(test, bid.root).spec is None - if attested_period == signature_period: - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] - else: - assert signature_period == attested_period + 1 - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] - assert get_light_client_update_for_period(test, signature_period).spec is None - 
assert update_attested_bid(get_light_client_finality_update(test).data) == bids_a[-1] - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_a[-1] - bids_a.append(bid_a) - - # Branch B: Advance past `fork_2`, having blocks at slots 1 and 5 of each epoch but no sync participation - spec_b = spec - state_b = state - slot_b = state_b.slot - bids_b = [bid] - while spec_b.get_current_epoch(state_b) <= fork_2_epoch: - slot_b += 4 - signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) - spec_b, state_b, bid_b = yield from add_new_block( - test, spec_b, state_b, slot=slot_b) - # Simulate that this does not become head yet, e.g., this branch was withheld - for bid in bids_b: - assert get_light_client_bootstrap(test, bid.root).spec is None - bids_b.append(bid_b) - - # Branch B: Another block that becomes head - attested_period = spec_b.compute_sync_committee_period_at_slot(slot_b) - slot_b += 1 - signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) - num_sync_participants_b = 1 - spec_b, state_b, bid_b = yield from add_new_block( - test, spec_b, state_b, slot=slot_b, num_sync_participants=num_sync_participants_b) - yield from select_new_head(test, spec_b, bid_b) - for bid in bids_b: - assert get_light_client_bootstrap(test, bid.root).spec is None - if attested_period == signature_period: - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_b[-1] - else: - assert signature_period == attested_period + 1 - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_b[-2] - assert get_light_client_update_for_period(test, signature_period).spec is None - assert update_attested_bid(get_light_client_finality_update(test).data) == bids_b[-1] - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_b[-1] - bids_b.append(bid_b) - - # All data for periods between the common ancestor of the two branches should have reorged. - # As there was no sync participation on branch B, that means it is deleted. 
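    # (The updates for these periods are pruned by `process_head_change_for_light_client`,
    # which pops `best_updates[period]` when there is no best update for the period or it
    # has fewer than `MIN_SYNC_COMMITTEE_PARTICIPANTS` sync committee participants.)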
- state_b_period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - for period in range(state_period + 1, state_b_period): - assert get_light_client_update_for_period(test, period).spec is None - - # Branch A: Another block, reorging branch B once more - attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) - slot_a = slot_b + 1 - signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) - if signature_period != attested_period: - num_sync_participants_a = 0 - num_sync_participants_a += 1 - spec_a, state_a, bid_a = yield from add_new_block( - test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) - yield from select_new_head(test, spec_a, bid_a) - for bid in bids_a: - assert get_light_client_bootstrap(test, bid.root).spec is None - if attested_period == signature_period: - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] - else: - assert signature_period == attested_period + 1 - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] - assert get_light_client_update_for_period(test, signature_period).spec is None - assert update_attested_bid(get_light_client_finality_update(test).data) == bids_a[-1] - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_a[-1] - bids_a.append(bid_a) - - # Data has been restored - state_a_period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - for period in range(state_period + 1, state_a_period): - assert get_light_client_update_for_period(test, period).spec is not None + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_4_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_4_n # Finish test - yield from finish_test(test) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 - 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_reorg_aligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) - 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_reorg_unaligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 - 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_reorg_aligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 
'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) - 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_reorg_unaligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + yield from finish_lc_data_collection_test(test) diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py new file mode 100644 index 0000000000..03b7286988 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py @@ -0,0 +1,40 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + BELLATRIX, CAPELLA, DENEB, + MINIMAL, +) +from eth2spec.test.helpers.light_client_data_collection import ( + run_lc_data_collection_test_multi_fork, +) + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_aligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_unaligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, CAPELLA, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py new file mode 100644 index 0000000000..d85b0dfda1 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py @@ -0,0 +1,41 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_data_collection import ( + run_lc_data_collection_test_multi_fork, +) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_reorg_aligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, 
emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_reorg_unaligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py new file mode 100644 index 0000000000..d56ea05310 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py @@ -0,0 +1,897 @@ +from typing import (Any, Dict, List, Set) +from dataclasses import dataclass + +from eth_utils import encode_hex +from eth2spec.test.helpers.constants import ( + ALTAIR, +) +from eth2spec.test.helpers.fork_transition import ( + transition_across_forks, +) +from eth2spec.test.helpers.forks import ( + is_post_altair, +) +from eth2spec.test.helpers.light_client import ( + compute_start_slot_at_sync_committee_period, + get_sync_aggregate, + latest_current_sync_committee_gindex, + latest_finalized_root_gindex, + latest_next_sync_committee_gindex, + latest_normalize_merkle_branch, + upgrade_lc_header_to_new_spec, + upgrade_lc_update_to_new_spec, +) + + +def _next_epoch_boundary_slot(spec, slot): + # Compute the first possible epoch boundary state slot of a `Checkpoint` + # referring to a block at given slot. + epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) + return spec.compute_start_slot_at_epoch(epoch) + + +@dataclass(frozen=True) +class BlockID(object): + slot: Any + root: Any + + +def _block_to_block_id(block): + return BlockID( + slot=block.message.slot, + root=block.message.hash_tree_root(), + ) + + +def _state_to_block_id(state): + parent_header = state.latest_block_header.copy() + parent_header.state_root = state.hash_tree_root() + return BlockID(slot=parent_header.slot, root=parent_header.hash_tree_root()) + + +def get_lc_bootstrap_block_id(bootstrap): + return BlockID( + slot=bootstrap.header.beacon.slot, + root=bootstrap.header.beacon.hash_tree_root(), + ) + + +def get_lc_update_attested_block_id(update): + return BlockID( + slot=update.attested_header.beacon.slot, + root=update.attested_header.beacon.hash_tree_root(), + ) + + +@dataclass +class ForkedBeaconState(object): + spec: Any + data: Any + + +@dataclass +class ForkedSignedBeaconBlock(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientHeader(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientBootstrap(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientFinalityUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientOptimisticUpdate(object): + spec: Any + data: Any + + +@dataclass +class CachedLightClientData(object): + # Sync committee branches at block's post-state + current_sync_committee_branch: Any # CurrentSyncCommitteeBranch + next_sync_committee_branch: Any # NextSyncCommitteeBranch + + # Finality information at block's post-state + finalized_slot: Any # Slot + finality_branch: Any # FinalityBranch + + # Best / latest light client data + current_period_best_update: ForkedLightClientUpdate + latest_signature_slot: Any # Slot + + +@dataclass +class LightClientDataCache(object): + # Cached data for creating future `LightClientUpdate` instances. + # Key is the block ID of which the post state was used to get the data. 
+ # Data stored for the finalized head block and all non-finalized blocks. + data: Dict[BlockID, CachedLightClientData] + + # Light client data for the latest slot that was signed by at least + # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. May be older than head + latest: ForkedLightClientFinalityUpdate + + # The earliest slot for which light client data is imported + tail_slot: Any # Slot + + +@dataclass +class LightClientDataDB(object): + headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader + current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch + sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee + best_updates: Dict[Any, ForkedLightClientUpdate] # SyncCommitteePeriod -> ForkedLightClientUpdate + + +@dataclass +class LightClientDataStore(object): + spec: Any + + # Cached data to accelerate creating light client data + cache: LightClientDataCache + + # Persistent light client data + db: LightClientDataDB + + +@dataclass +class LightClientDataCollectionTest(object): + steps: List[Dict[str, Any]] + files: Set[str] + + # Fork schedule + phases: Any + + # History access + blocks: Dict[Any, ForkedSignedBeaconBlock] # Block root -> ForkedSignedBeaconBlock + finalized_block_roots: Dict[Any, Any] # Slot -> Root + states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + latest_finalized_epoch: Any # Epoch + latest_finalized_bid: BlockID + historical_tail_slot: Any # Slot + + # Light client data + lc_data_store: LightClientDataStore + + +def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockID] + try: + block = test.blocks[bid.root] + while True: + if block.data.message.slot <= slot: + return _block_to_block_id(block.data) + + block = test.blocks[block.data.message.parent_root] + except KeyError: + return None + + +def _block_id_at_finalized_slot(test, slot): # -> Optional[BlockID] + while slot >= test.historical_tail_slot: + try: + return BlockID(slot=slot, root=test.finalized_block_roots[slot]) + except KeyError: + slot = slot - 1 + return None + + +def _get_current_sync_committee_for_finalized_period(test, period): # -> Optional[SyncCommittee] + low_slot = max( + test.historical_tail_slot, + test.lc_data_store.spec.compute_start_slot_at_epoch( + test.lc_data_store.spec.config.ALTAIR_FORK_EPOCH) + ) + if period < test.lc_data_store.spec.compute_sync_committee_period_at_slot(low_slot): + return None + period_start_slot = compute_start_slot_at_sync_committee_period(test.lc_data_store.spec, period) + sync_committee_slot = max(period_start_slot, low_slot) + bid = _block_id_at_finalized_slot(test, sync_committee_slot) + if bid is None: + return None + block = test.blocks[bid.root] + state = test.finalized_checkpoint_states[block.data.message.state_root] + if sync_committee_slot > state.data.slot: + state.spec, state.data, _ = transition_across_forks( + state.spec, state.data, sync_committee_slot, phases=test.phases) + assert is_post_altair(state.spec) + return state.data.current_sync_committee + + +def _light_client_header_for_block(test, block): # -> ForkedLightClientHeader + if not is_post_altair(block.spec): + spec = test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def _light_client_header_for_block_id(test, bid): # -> ForkedLightClientHeader + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + spec = 
test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def _sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + return None + return block.data.message.body.sync_aggregate + + +def _get_light_client_data(lc_data_store, bid): # -> CachedLightClientData + # Fetch cached light client data about a given block. + # Data must be cached (`_cache_lc_data`) before calling this function. + try: + return lc_data_store.cache.data[bid] + except KeyError: + raise ValueError("Trying to get light client data that was not cached") + + +def _cache_lc_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): + # Cache data for a given block and its post-state to speed up creating future + # `LightClientUpdate` and `LightClientBootstrap` instances that refer to this + # block and state. + cached_data = CachedLightClientData( + current_sync_committee_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.current_sync_committee_gindex_at_slot(state.slot)), + latest_current_sync_committee_gindex(lc_data_store.spec)), + next_sync_committee_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.next_sync_committee_gindex_at_slot(state.slot)), + latest_next_sync_committee_gindex(lc_data_store.spec)), + finalized_slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + finality_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.finalized_root_gindex_at_slot(state.slot)), + latest_finalized_root_gindex(lc_data_store.spec)), + current_period_best_update=current_period_best_update, + latest_signature_slot=latest_signature_slot, + ) + if bid in lc_data_store.cache.data: + raise ValueError("Redundant `_cache_lc_data` call") + lc_data_store.cache.data[bid] = cached_data + + +def _delete_light_client_data(lc_data_store, bid): + # Delete cached light client data for a given block. This needs to be called + # when a block becomes unreachable due to finalization of a different fork. 
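+    # Entries are removed both for orphaned blocks and for canonical ancestors:
+    # once a new `finalized_checkpoint` is selected,
+    # `_process_finalization_for_light_client` calls this for every cached block
+    # below the finalized slot, as no later block can refer to such an old
+    # block as its attested (parent) block anymore.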
+ del lc_data_store.cache.data[bid] + + +def _create_lc_finality_update_from_lc_data(test, + attested_bid, + signature_slot, + sync_aggregate): # -> ForkedLightClientFinalityUpdate + attested_header = _light_client_header_for_block_id(test, attested_bid) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + finalized_bid = _block_id_at_finalized_slot(test, attested_data.finalized_slot) + if finalized_bid is not None: + if finalized_bid.slot != attested_data.finalized_slot: + # Empty slots at end of epoch, update cache for latest block slot + attested_data.finalized_slot = finalized_bid.slot + if finalized_bid.slot == attested_header.spec.GENESIS_SLOT: + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=attested_header.spec.LightClientHeader(), + ) + else: + finalized_header = _light_client_header_for_block_id(test, finalized_bid) + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=upgrade_lc_header_to_new_spec( + finalized_header.spec, + attested_header.spec, + finalized_header.data, + ) + ) + finality_branch = attested_data.finality_branch + return ForkedLightClientFinalityUpdate( + spec=attested_header.spec, + data=attested_header.spec.LightClientFinalityUpdate( + attested_header=attested_header.data, + finalized_header=finalized_header.data, + finality_branch=finality_branch, + sync_aggregate=sync_aggregate, + signature_slot=signature_slot, + ), + ) + + +def _create_lc_update_from_lc_data(test, + attested_bid, + signature_slot, + sync_aggregate, + next_sync_committee): # -> ForkedLightClientUpdate + finality_update = _create_lc_finality_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + return ForkedLightClientUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientUpdate( + attested_header=finality_update.data.attested_header, + next_sync_committee=next_sync_committee, + next_sync_committee_branch=attested_data.next_sync_committee_branch, + finalized_header=finality_update.data.finalized_header, + finality_branch=finality_update.data.finality_branch, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ) + ) + + +def _create_lc_update(test, spec, state, block, parent_bid): + # Create `LightClientUpdate` instances for a given block and its post-state, + # and keep track of best / latest ones. Data about the parent block's + # post-state must be cached (`_cache_lc_data`) before calling this. 
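+    # Example with the minimal preset (SLOTS_PER_EPOCH = 8,
+    # EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 8, i.e. 64-slot periods): a block at
+    # slot 65 whose sync aggregate signs its parent at slot 64 has matching
+    # attested and signature periods (both 1), so with sufficient participation
+    # its update may become `current_period_best_update` for period 1. If the
+    # parent were instead at slot 63 (period 0), only `latest_signature_slot`
+    # would advance and `best` would start out empty for the new period.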
+ + # Verify attested block (parent) is recent enough and that state is available + attested_bid = parent_bid + attested_slot = attested_bid.slot + if attested_slot < test.lc_data_store.cache.tail_slot: + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + return + + # If sync committee period changed, reset `best` + attested_period = spec.compute_sync_committee_period_at_slot(attested_slot) + signature_slot = block.message.slot + signature_period = spec.compute_sync_committee_period_at_slot(signature_slot) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + if attested_period != signature_period: + best = ForkedLightClientUpdate(spec=None, data=None) + else: + best = attested_data.current_period_best_update + + # If sync committee does not have sufficient participants, do not bump latest + sync_aggregate = block.message.body.sync_aggregate + num_active_participants = sum(sync_aggregate.sync_committee_bits) + if num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS: + latest_signature_slot = attested_data.latest_signature_slot + else: + latest_signature_slot = signature_slot + + # To update `best`, sync committee must have sufficient participants, and + # `signature_slot` must be in `attested_slot`'s sync committee period + if ( + num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + or attested_period != signature_period + ): + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + return + + # Check if light client data improved + update = _create_lc_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate, state.next_sync_committee) + is_better = ( + best.spec is None + or spec.is_better_update(update.data, upgrade_lc_update_to_new_spec( + best.spec, update.spec, best.data, test.phases)) + ) + + # Update best light client data for current sync committee period + if is_better: + best = update + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + + +def _create_lc_bootstrap(test, spec, bid): + block = test.blocks[bid.root] + period = spec.compute_sync_committee_period_at_slot(bid.slot) + if period not in test.lc_data_store.db.sync_committees: + test.lc_data_store.db.sync_committees[period] = \ + _get_current_sync_committee_for_finalized_period(test, period) + test.lc_data_store.db.headers[bid.root] = ForkedLightClientHeader( + spec=block.spec, data=block.spec.block_to_light_client_header(block.data)) + test.lc_data_store.db.current_branches[bid.slot] = \ + _get_light_client_data(test.lc_data_store, bid).current_sync_committee_branch + + +def _process_new_block_for_light_client(test, spec, state, block, parent_bid): + # Update light client data with information from a new block. + if block.message.slot < test.lc_data_store.cache.tail_slot: + return + + if is_post_altair(spec): + _create_lc_update(test, spec, state, block, parent_bid) + else: + raise ValueError("`tail_slot` cannot be before Altair") + + +def _process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): + # Update light client data to account for a new head block. + # Note that `old_finalized_bid` is not yet updated when this is called. 
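+    # Rough flow: walk the new head's ancestry once per non-finalized sync
+    # committee period and re-commit that branch's `current_period_best_update`
+    # into `db.best_updates` (deleting the entry if the branch has no update
+    # with sufficient participation), then rebuild `cache.latest` from the
+    # newest ancestor whose block carries a usable sync aggregate.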
+ if head_bid.slot < test.lc_data_store.cache.tail_slot: + return + + # Commit best light client data for non-finalized periods + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + low_slot = max(test.lc_data_store.cache.tail_slot, old_finalized_bid.slot) + low_period = spec.compute_sync_committee_period_at_slot(low_slot) + bid = head_bid + for period in reversed(range(low_period, head_period + 1)): + period_end_slot = compute_start_slot_at_sync_committee_period(spec, period + 1) - 1 + bid = get_ancestor_of_block_id(test, bid, period_end_slot) + if bid is None or bid.slot < low_slot: + break + best = _get_light_client_data(test.lc_data_store, bid).current_period_best_update + if ( + best.spec is None + or sum(best.data.sync_aggregate.sync_committee_bits) < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + ): + test.lc_data_store.db.best_updates.pop(period, None) + else: + test.lc_data_store.db.best_updates[period] = best + + # Update latest light client data + head_data = _get_light_client_data(test.lc_data_store, head_bid) + signature_slot = head_data.latest_signature_slot + if signature_slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + signature_bid = get_ancestor_of_block_id(test, head_bid, signature_slot) + if signature_bid is None or signature_bid.slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + attested_bid = get_ancestor_of_block_id(test, signature_bid, signature_bid.slot - 1) + if attested_bid is None or attested_bid.slot < low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + sync_aggregate = _sync_aggregate_for_block_id(test, signature_bid) + assert sync_aggregate is not None + test.lc_data_store.cache.latest = _create_lc_finality_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate) + + +def _process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): + # Prune cached data that is no longer useful for creating future + # `LightClientUpdate` and `LightClientBootstrap` instances. + # This needs to be called whenever `finalized_checkpoint` changes. 
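+    # Two effects: persist `LightClientBootstrap` data for every newly
+    # finalized epoch boundary block, and drop cached entries below the new
+    # finalized slot, since future blocks can only attest to blocks at or
+    # above it.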
+ finalized_slot = finalized_bid.slot + if finalized_slot < test.lc_data_store.cache.tail_slot: + return + + # Cache `LightClientBootstrap` for newly finalized epoch boundary blocks + first_new_slot = old_finalized_bid.slot + 1 + low_slot = max(first_new_slot, test.lc_data_store.cache.tail_slot) + boundary_slot = finalized_slot + while boundary_slot >= low_slot: + bid = _block_id_at_finalized_slot(test, boundary_slot) + if bid is None: + break + if bid.slot >= low_slot: + _create_lc_bootstrap(test, spec, bid) + boundary_slot = _next_epoch_boundary_slot(spec, bid.slot) + if boundary_slot < spec.SLOTS_PER_EPOCH: + break + boundary_slot = boundary_slot - spec.SLOTS_PER_EPOCH + + # Prune light client data that is no longer referrable by future updates + bids_to_delete = [] + for bid in test.lc_data_store.cache.data: + if bid.slot >= finalized_bid.slot: + continue + bids_to_delete.append(bid) + for bid in bids_to_delete: + _delete_light_client_data(test.lc_data_store, bid) + + +def get_light_client_bootstrap(test, block_root): # -> ForkedLightClientBootstrap + try: + header = test.lc_data_store.db.headers[block_root] + except KeyError: + return ForkedLightClientBootstrap(spec=None, data=None) + + slot = header.data.beacon.slot + period = header.spec.compute_sync_committee_period_at_slot(slot) + return ForkedLightClientBootstrap( + spec=header.spec, + data=header.spec.LightClientBootstrap( + header=header.data, + current_sync_committee=test.lc_data_store.db.sync_committees[period], + current_sync_committee_branch=test.lc_data_store.db.current_branches[slot], + ) + ) + + +def get_light_client_update_for_period(test, period): # -> ForkedLightClientUpdate + try: + return test.lc_data_store.db.best_updates[period] + except KeyError: + return ForkedLightClientUpdate(spec=None, data=None) + + +def get_light_client_finality_update(test): # -> ForkedLightClientFinalityUpdate + return test.lc_data_store.cache.latest + + +def get_light_client_optimistic_update(test): # -> ForkedLightClientOptimisticUpdate + finality_update = get_light_client_finality_update(test) + if finality_update.spec is None: + return ForkedLightClientOptimisticUpdate(spec=None, data=None) + return ForkedLightClientOptimisticUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientOptimisticUpdate( + attested_header=finality_update.data.attested_header, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ), + ) + + +def setup_lc_data_collection_test(spec, state, phases=None): + assert spec.compute_slots_since_epoch_start(state.slot) == 0 + + test = LightClientDataCollectionTest( + steps=[], + files=set(), + phases=phases, + blocks={}, + finalized_block_roots={}, + states={}, + finalized_checkpoint_states={}, + latest_finalized_epoch=state.finalized_checkpoint.epoch, + latest_finalized_bid=BlockID( + slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + root=state.finalized_checkpoint.root, + ), + historical_tail_slot=state.slot, + lc_data_store=LightClientDataStore( + spec=spec, + cache=LightClientDataCache( + data={}, + latest=ForkedLightClientFinalityUpdate(spec=None, data=None), + tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), + ), + db=LightClientDataDB( + headers={}, + current_branches={}, + sync_committees={}, + best_updates={}, + ), + ), + ) + bid = _state_to_block_id(state) + yield "initial_state", state + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, 
data=spec.SignedBeaconBlock( + message=spec.BeaconBlock(state_root=state.hash_tree_root()), + )) + test.finalized_block_roots[bid.slot] = bid.root + test.states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + test.finalized_checkpoint_states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + _cache_lc_data( + test.lc_data_store, spec, state, bid, + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + _create_lc_bootstrap(test, spec, bid) + + return test + + +def finish_lc_data_collection_test(test): + yield "steps", test.steps + + +def _encode_lc_object(test, prefix, obj, slot, genesis_validators_root): + yield from [] # Consistently enable `yield from` syntax in calling tests + + file_name = f"{prefix}_{slot}_{encode_hex(obj.data.hash_tree_root())}" + if file_name not in test.files: + test.files.add(file_name) + yield file_name, obj.data + return { + "fork_digest": encode_hex(obj.spec.compute_fork_digest( + obj.spec.compute_fork_version(obj.spec.compute_epoch_at_slot(slot)), + genesis_validators_root, + )), + "data": file_name, + } + + +def add_new_block(test, spec, state, slot=None, num_sync_participants=0): + if slot is None: + slot = state.slot + 1 + assert slot > state.slot + parent_bid = _state_to_block_id(state) + + # Advance to target slot - 1 to ensure sync aggregate can be efficiently computed + if state.slot < slot - 1: + spec, state, _ = transition_across_forks(spec, state, slot - 1, phases=test.phases) + + # Compute sync aggregate, using: + # - sync committee based on target slot + # - fork digest based on target slot - 1 + # - signed data based on parent_bid.slot + # All three slots may be from different forks + sync_aggregate, signature_slot = get_sync_aggregate( + spec, state, num_participants=num_sync_participants, phases=test.phases) + assert signature_slot == slot + + # Apply final block with computed sync aggregate + spec, state, block = transition_across_forks( + spec, state, slot, phases=test.phases, with_block=True, sync_aggregate=sync_aggregate) + bid = _block_to_block_id(block) + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=block) + test.states[block.message.state_root] = ForkedBeaconState(spec=spec, data=state) + _process_new_block_for_light_client(test, spec, state, block, parent_bid) + block_obj = yield from _encode_lc_object( + test, "block", ForkedSignedBeaconBlock(spec=spec, data=block), block.message.slot, + state.genesis_validators_root, + ) + test.steps.append({ + "new_block": block_obj + }) + return spec, state, bid + + +def select_new_head(test, spec, head_bid): + old_finalized_bid = test.latest_finalized_bid + _process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid) + + # Process finalization + block = test.blocks[head_bid.root] + state = test.states[block.data.message.state_root] + if state.data.finalized_checkpoint.epoch != spec.GENESIS_EPOCH: + block = test.blocks[state.data.finalized_checkpoint.root] + bid = _block_to_block_id(block.data) + new_finalized_bid = bid + if new_finalized_bid.slot > old_finalized_bid.slot: + old_finalized_epoch = None + new_finalized_epoch = state.data.finalized_checkpoint.epoch + while bid.slot > test.latest_finalized_bid.slot: + test.finalized_block_roots[bid.slot] = bid.root + finalized_epoch = spec.compute_epoch_at_slot(bid.slot + spec.SLOTS_PER_EPOCH - 1) + if finalized_epoch != old_finalized_epoch: + state = test.states[block.data.message.state_root] + 
test.finalized_checkpoint_states[block.data.message.state_root] = state + old_finalized_epoch = finalized_epoch + block = test.blocks[block.data.message.parent_root] + bid = _block_to_block_id(block.data) + test.latest_finalized_epoch = new_finalized_epoch + test.latest_finalized_bid = new_finalized_bid + _process_finalization_for_light_client(test, spec, new_finalized_bid, old_finalized_bid) + + blocks_to_delete = [] + for block_root, block in test.blocks.items(): + if block.data.message.slot < new_finalized_bid.slot: + blocks_to_delete.append(block_root) + for block_root in blocks_to_delete: + del test.blocks[block_root] + states_to_delete = [] + for state_root, state in test.states.items(): + if state.data.slot < new_finalized_bid.slot: + states_to_delete.append(state_root) + for state_root in states_to_delete: + del test.states[state_root] + + yield from [] # Consistently enable `yield from` syntax in calling tests + + bootstraps = [] + for state in test.finalized_checkpoint_states.values(): + bid = _state_to_block_id(state.data) + entry = { + "block_root": encode_hex(bid.root), + } + bootstrap = get_light_client_bootstrap(test, bid.root) + if bootstrap.spec is not None: + bootstrap_obj = yield from _encode_lc_object( + test, "bootstrap", bootstrap, bootstrap.data.header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["bootstrap"] = bootstrap_obj + bootstraps.append(entry) + + best_updates = [] + low_period = spec.compute_sync_committee_period_at_slot(test.lc_data_store.cache.tail_slot) + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + for period in range(low_period, head_period + 1): + entry = { + "period": int(period), + } + update = get_light_client_update_for_period(test, period) + if update.spec is not None: + update_obj = yield from _encode_lc_object( + test, "update", update, update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["update"] = update_obj + best_updates.append(entry) + + checks = { + "latest_finalized_checkpoint": { + "epoch": int(test.latest_finalized_epoch), + "root": encode_hex(test.latest_finalized_bid.root), + }, + "bootstraps": bootstraps, + "best_updates": best_updates, + } + finality_update = get_light_client_finality_update(test) + if finality_update.spec is not None: + finality_update_obj = yield from _encode_lc_object( + test, "finality_update", finality_update, finality_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_finality_update"] = finality_update_obj + optimistic_update = get_light_client_optimistic_update(test) + if optimistic_update.spec is not None: + optimistic_update_obj = yield from _encode_lc_object( + test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_optimistic_update"] = optimistic_update_obj + + test.steps.append({ + "new_head": { + "head_block_root": encode_hex(head_bid.root), + "checks": checks, + } + }) + + +def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): + # Start test + test = yield from setup_lc_data_collection_test(spec, state, phases=phases) + + # Genesis block is post Altair and is finalized, so can be used as bootstrap + genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid + + # Shared history up to 
final epoch of period before `fork_1` + fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') + fork_1_period = spec.compute_sync_committee_period(fork_1_epoch) + slot = compute_start_slot_at_sync_committee_period(spec, fork_1_period) - spec.SLOTS_PER_EPOCH + spec, state, bid = yield from add_new_block(test, spec, state, slot=slot, num_sync_participants=1) + yield from select_new_head(test, spec, bid) + assert get_light_client_bootstrap(test, bid.root).spec is None + slot_period = spec.compute_sync_committee_period_at_slot(slot) + if slot_period == 0: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, 0).data) == genesis_bid + else: + for period in range(0, slot_period): + assert get_light_client_update_for_period(test, period).spec is None # attested period != signature period + state_period = spec.compute_sync_committee_period_at_slot(state.slot) + + # Branch A: Advance past `fork_2`, having blocks at slots 0 and 4 of each epoch + spec_a = spec + state_a = state + slot_a = state_a.slot + bids_a = [bid] + num_sync_participants_a = 1 + fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') + while spec_a.get_current_epoch(state_a) <= fork_2_epoch: + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a += 4 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Branch B: Advance past `fork_2`, having blocks at slots 1 and 5 of each epoch but no sync participation + spec_b = spec + state_b = state + slot_b = state_b.slot + bids_b = [bid] + while spec_b.get_current_epoch(state_b) <= fork_2_epoch: + slot_b += 4 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b) + # Simulate that this does not become head yet, e.g., this branch was withheld + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + bids_b.append(bid_b) + + # Branch B: Another block that becomes head + attested_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + slot_b += 1 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + num_sync_participants_b = 1 + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b, num_sync_participants=num_sync_participants_b) + yield from select_new_head(test, spec_b, bid_b) + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + if 
attested_period == signature_period: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_b[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_b[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_b[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_b[-1] + bids_b.append(bid_b) + + # All data for periods between the common ancestor of the two branches should have reorged. + # As there was no sync participation on branch B, that means it is deleted. + state_b_period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + for period in range(state_period + 1, state_b_period): + assert get_light_client_update_for_period(test, period).spec is None + + # Branch A: Another block, reorging branch B once more + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a = slot_b + 1 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Data has been restored + state_a_period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + for period in range(state_period + 1, state_a_period): + assert get_light_client_update_for_period(test, period).spec is not None + + # Finish test + yield from finish_lc_data_collection_test(test) From 24dffad1af31fe2dbda3b78a043de4b7445f9a2c Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 14:28:19 +0100 Subject: [PATCH 15/60] Link tests with generator --- tests/generators/light_client/main.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index 6534524fe3..a5775b1cbe 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -12,11 +12,23 @@ bellatrix_mods = altair_mods _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ + 'data_collection', 'single_merkle_proof', + 'sync', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - deneb_mods = capella_mods - electra_mods = deneb_mods + + _new_deneb_mods = {key: 'eth2spec.test.deneb.light_client.test_' + key for key in [ + 'data_collection', + 'sync', + ]} + deneb_mods = 
combine_mods(_new_deneb_mods, capella_mods) + + _new_electra_mods = {key: 'eth2spec.test.electra.light_client.test_' + key for key in [ + 'data_collection', + 'sync', + ]} + electra_mods = combine_mods(_new_electra_mods, deneb_mods) all_mods = { ALTAIR: altair_mods, From eaed600263d10c1e7f15f2a98d09fb2bfffd5a73 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 14:29:44 +0100 Subject: [PATCH 16/60] Lint --- .../test/capella/light_client/test_sync.py | 1 + .../light_client/test_data_collection.py | 1 + .../test/deneb/light_client/test_sync.py | 1 + .../test/electra/light_client/test_sync.py | 1 + .../helpers/light_client_data_collection.py | 24 ++++++++++++++----- 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py index 3958900be5..99a56f96e0 100644 --- a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py @@ -15,6 +15,7 @@ run_lc_sync_test_upgraded_store_with_legacy_data, ) + @with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) @spec_test @with_config_overrides({ diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py index 03b7286988..5e894a5d13 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py @@ -14,6 +14,7 @@ run_lc_data_collection_test_multi_fork, ) + @with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) @spec_test @with_config_overrides({ diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py index d19e1e0238..45a8ff2c8f 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py @@ -16,6 +16,7 @@ run_lc_sync_test_upgraded_store_with_legacy_data, ) + @with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_config_overrides({ diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py index 2b20552d6b..c37e8b21e1 100644 --- a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py @@ -16,6 +16,7 @@ run_lc_sync_test_upgraded_store_with_legacy_data, ) + @with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_config_overrides({ diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py index d56ea05310..5de9b37c61 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py @@ -816,10 +816,14 @@ def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): for bid in bids_a: assert get_light_client_bootstrap(test, bid.root).spec is None if attested_period == signature_period: - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-1] else: 
assert signature_period == attested_period + 1 - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-2] assert get_light_client_update_for_period(test, signature_period).spec is None assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] @@ -851,10 +855,14 @@ def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): for bid in bids_b: assert get_light_client_bootstrap(test, bid.root).spec is None if attested_period == signature_period: - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_b[-1] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_b[-1] else: assert signature_period == attested_period + 1 - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_b[-2] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_b[-2] assert get_light_client_update_for_period(test, signature_period).spec is None assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_b[-1] assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_b[-1] @@ -879,10 +887,14 @@ def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): for bid in bids_a: assert get_light_client_bootstrap(test, bid.root).spec is None if attested_period == signature_period: - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-1] else: assert signature_period == attested_period + 1 - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-2] assert get_light_client_update_for_period(test, signature_period).spec is None assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] From 531a0b08862d3c7b937802e07479b2d4dc8764bb Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 15:43:26 +0100 Subject: [PATCH 17/60] Fix module list --- tests/generators/light_client/main.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index a5775b1cbe..a6174b277d 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -12,7 +12,6 @@ bellatrix_mods = altair_mods _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ - 'data_collection', 'single_merkle_proof', 'sync', ]} From 12401a5be5867b7fe219a27954e5690a5bc5439e Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 28 Nov 2024 13:02:12 +0100 Subject: [PATCH 18/60] Move fork tests to origin rather than destination to fix issues --- 
.../test/altair/light_client/test_sync.py | 56 ++++++++++++++++++- .../light_client/__init__.py | 0 .../light_client/test_data_collection.py | 0 .../light_client/test_sync.py | 42 ++++++-------- .../light_client/test_data_collection.py | 0 .../test/capella/light_client/test_sync.py | 26 +++++---- tests/core/pyspec/eth2spec/test/context.py | 9 +++ .../test/deneb/light_client/test_sync.py | 36 ++---------- .../test/helpers/light_client_sync.py | 22 -------- tests/generators/light_client/main.py | 15 +++-- 10 files changed, 108 insertions(+), 98 deletions(-) rename tests/core/pyspec/eth2spec/test/{electra => bellatrix}/light_client/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{deneb => bellatrix}/light_client/test_data_collection.py (100%) rename tests/core/pyspec/eth2spec/test/{electra => bellatrix}/light_client/test_sync.py (55%) rename tests/core/pyspec/eth2spec/test/{electra => capella}/light_client/test_data_collection.py (100%) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 8000ceb799..1c77e648ab 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -1,13 +1,18 @@ from eth2spec.test.context import ( spec_state_test_with_matching_config, - with_presets, + spec_test, + with_all_phases_to, with_light_client, + with_matching_spec_config, + with_presets, + with_state, ) from eth2spec.test.helpers.attestations import ( next_slots_with_attestations, state_transition_with_full_block, ) from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client import ( @@ -352,3 +357,52 @@ def test_advance_finality_without_sync_committee(spec, state): # Finish test yield from finish_lc_sync_test(test) + + +def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): + # Start test (Legacy bootstrap with an upgraded store) + test = yield from setup_lc_sync_test(spec, state, phases[fork], phases) + + # Initial `LightClientUpdate` (check that the upgraded store can process it) + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) + + +@with_all_phases_to(CAPELLA, other_phases=[CAPELLA]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=CAPELLA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) + + +@with_all_phases_to(DENEB, other_phases=[CAPELLA, DENEB]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def 
test_deneb_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) + + +@with_all_phases_to(ELECTRA, other_phases=[CAPELLA, DENEB, ELECTRA]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_electra_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py rename to tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py rename to tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py similarity index 55% rename from tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py rename to tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py index c37e8b21e1..81b44d8749 100644 --- a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py @@ -7,59 +7,49 @@ with_state, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, + BELLATRIX, CAPELLA, DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client_sync import ( run_lc_sync_test_multi_fork, run_lc_sync_test_single_fork, - run_lc_sync_test_upgraded_store_with_legacy_data, ) -@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) @spec_test @with_config_overrides({ - 'ELECTRA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=ELECTRA) +@with_matching_spec_config(emitted_fork=CAPELLA) @with_presets([MINIMAL], reason="too slow") -def test_electra_fork(spec, phases, state): - yield from run_lc_sync_test_single_fork(spec, phases, state, ELECTRA) +def test_capella_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, CAPELLA) -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA]) +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) @spec_test @with_config_overrides({ 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 'DENEB_FORK_EPOCH': 4, - 'ELECTRA_FORK_EPOCH': 5, }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=ELECTRA) +@with_matching_spec_config(emitted_fork=DENEB) @with_presets([MINIMAL], reason="too slow") -def test_capella_electra_fork(spec, phases, state): - yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) +def test_capella_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, DENEB) -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, 
ELECTRA]) @spec_test @with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 - 'ELECTRA_FORK_EPOCH': 4, + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 4, + 'ELECTRA_FORK_EPOCH': 5, }, emit=False) @with_state @with_matching_spec_config(emitted_fork=ELECTRA) @with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_fork(spec, phases, state): - yield from run_lc_sync_test_multi_fork(spec, phases, state, DENEB, ELECTRA) - - -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA, DENEB], other_phases=[CAPELLA, DENEB, ELECTRA]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_electra_store_with_legacy_data(spec, phases, state): - yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) +def test_capella_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py rename to tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py index 99a56f96e0..faa727d6d2 100644 --- a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py @@ -7,31 +7,35 @@ with_state, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, + CAPELLA, DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_multi_fork, run_lc_sync_test_single_fork, - run_lc_sync_test_upgraded_store_with_legacy_data, ) -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=CAPELLA) +@with_matching_spec_config(emitted_fork=DENEB) @with_presets([MINIMAL], reason="too slow") -def test_capella_fork(spec, phases, state): - yield from run_lc_sync_test_single_fork(spec, phases, state, CAPELLA) +def test_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, DENEB) -@with_phases(phases=[ALTAIR, BELLATRIX], other_phases=[CAPELLA]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) @spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'ELECTRA_FORK_EPOCH': 4, +}, emit=False) @with_state -@with_matching_spec_config(emitted_fork=CAPELLA) +@with_matching_spec_config(emitted_fork=ELECTRA) @with_presets([MINIMAL], reason="too slow") -def test_capella_store_with_legacy_data(spec, phases, state): - yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) +def test_deneb_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 8b2e8de6d3..f2298d297b 100644 --- 
a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -436,6 +436,15 @@ def with_all_phases_from_except(earliest_phase, except_phases=None): return with_all_phases_from(earliest_phase, [phase for phase in ALL_PHASES if phase not in except_phases]) +def with_all_phases_to(next_phase, all_phases=ALL_PHASES): + """ + A decorator factory for running a tests with every phase except the ones listed + """ + def decorator(fn): + return with_phases([phase for phase in all_phases if is_post_fork(next_phase, phase)])(fn) + return decorator + + def with_all_phases_except(exclusion_phases): """ A decorator factory for running a tests with every phase except the ones listed diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py index 45a8ff2c8f..2a2b4db118 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py @@ -7,45 +7,21 @@ with_state, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, + DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client_sync import ( - run_lc_sync_test_multi_fork, run_lc_sync_test_single_fork, - run_lc_sync_test_upgraded_store_with_legacy_data, ) -@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'ELECTRA_FORK_EPOCH': 3, # Test setup advances to epoch 2 }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=DENEB) +@with_matching_spec_config(emitted_fork=ELECTRA) @with_presets([MINIMAL], reason="too slow") -def test_deneb_fork(spec, phases, state): - yield from run_lc_sync_test_single_fork(spec, phases, state, DENEB) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 - 'DENEB_FORK_EPOCH': 4, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_fork(spec, phases, state): - yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_store_with_legacy_data(spec, phases, state): - yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) +def test_electra_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py index e64b0a2eca..54a5c0f970 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py @@ -318,25 +318,3 @@ def run_lc_sync_test_multi_fork(spec, phases, state, fork_1, fork_2): # Finish test yield from finish_lc_sync_test(test) - - -def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): - # Start test (Legacy bootstrap with an upgraded store) - test = yield from setup_lc_sync_test(spec, state, phases[fork], phases) - - # Initial `LightClientUpdate` (check that the upgraded store can process it) - 
finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_lc_sync_test(test) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index a6174b277d..e362c6b4c0 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -9,25 +9,24 @@ 'sync', 'update_ranking', ]} - bellatrix_mods = altair_mods + + _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [ + 'data_collection', + ]} + bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ + 'data_collection', 'single_merkle_proof', 'sync', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) _new_deneb_mods = {key: 'eth2spec.test.deneb.light_client.test_' + key for key in [ - 'data_collection', 'sync', ]} deneb_mods = combine_mods(_new_deneb_mods, capella_mods) - - _new_electra_mods = {key: 'eth2spec.test.electra.light_client.test_' + key for key in [ - 'data_collection', - 'sync', - ]} - electra_mods = combine_mods(_new_electra_mods, deneb_mods) + electra_mods = deneb_mods all_mods = { ALTAIR: altair_mods, From 30bed615ffde18429dc349ee07b7fbcc715b9a79 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 28 Nov 2024 13:06:19 +0100 Subject: [PATCH 19/60] Add missing mod --- tests/generators/light_client/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index e362c6b4c0..6420382240 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -12,6 +12,7 @@ _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [ 'data_collection', + 'sync', ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) From a52a82c11e1e8ab544b5ecbb4be7297ed6b3a164 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 28 Nov 2024 14:36:58 +0100 Subject: [PATCH 20/60] Extend decorator factory to support `other_phases` --- tests/core/pyspec/eth2spec/test/context.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index f2298d297b..16149bb861 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -436,12 +436,15 @@ def with_all_phases_from_except(earliest_phase, except_phases=None): return with_all_phases_from(earliest_phase, [phase for phase in ALL_PHASES if phase not in except_phases]) -def with_all_phases_to(next_phase, all_phases=ALL_PHASES): +def with_all_phases_to(next_phase, other_phases=None, all_phases=ALL_PHASES): """ - A decorator factory for running a tests 
with every phase except the ones listed + A decorator factory for running a tests with every phase up to and excluding the one listed """ def decorator(fn): - return with_phases([phase for phase in all_phases if is_post_fork(next_phase, phase)])(fn) + return with_phases( + [phase for phase in all_phases if is_post_fork(next_phase, phase)], + other_phases=other_phases, + )(fn) return decorator From 09e8f013105e40f487bfbd060f2a5732d9fd1ebe Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 28 Nov 2024 15:17:00 +0100 Subject: [PATCH 21/60] Make `from` -> `to` bounds explicit --- .../eth2spec/test/altair/light_client/test_sync.py | 10 +++++----- tests/core/pyspec/eth2spec/test/context.py | 10 +++++++--- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 1c77e648ab..15437f0959 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( spec_state_test_with_matching_config, spec_test, - with_all_phases_to, + with_all_phases_from_to, with_light_client, with_matching_spec_config, with_presets, @@ -12,7 +12,7 @@ state_transition_with_full_block, ) from eth2spec.test.helpers.constants import ( - CAPELLA, DENEB, ELECTRA, + ALTAIR, CAPELLA, DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client import ( @@ -381,7 +381,7 @@ def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): yield from finish_lc_sync_test(test) -@with_all_phases_to(CAPELLA, other_phases=[CAPELLA]) +@with_all_phases_from_to(ALTAIR, CAPELLA, other_phases=[CAPELLA]) @spec_test @with_state @with_matching_spec_config(emitted_fork=CAPELLA) @@ -390,7 +390,7 @@ def test_capella_store_with_legacy_data(spec, phases, state): yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) -@with_all_phases_to(DENEB, other_phases=[CAPELLA, DENEB]) +@with_all_phases_from_to(ALTAIR, DENEB, other_phases=[CAPELLA, DENEB]) @spec_test @with_state @with_matching_spec_config(emitted_fork=DENEB) @@ -399,7 +399,7 @@ def test_deneb_store_with_legacy_data(spec, phases, state): yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) -@with_all_phases_to(ELECTRA, other_phases=[CAPELLA, DENEB, ELECTRA]) +@with_all_phases_from_to(ALTAIR, ELECTRA, other_phases=[CAPELLA, DENEB, ELECTRA]) @spec_test @with_state @with_matching_spec_config(emitted_fork=ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 16149bb861..a90190287d 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -436,13 +436,17 @@ def with_all_phases_from_except(earliest_phase, except_phases=None): return with_all_phases_from(earliest_phase, [phase for phase in ALL_PHASES if phase not in except_phases]) -def with_all_phases_to(next_phase, other_phases=None, all_phases=ALL_PHASES): +def with_all_phases_from_to(from_phase, to_phase, other_phases=None, all_phases=ALL_PHASES): """ - A decorator factory for running a tests with every phase up to and excluding the one listed + A decorator factory for running a tests with every phase + from a given start phase up to and excluding a given end phase """ def decorator(fn): return with_phases( - [phase for phase in all_phases if is_post_fork(next_phase, 
phase)], + [phase for phase in all_phases if ( + phase != to_phase and is_post_fork(to_phase, phase) + and is_post_fork(phase, from_phase) + )], other_phases=other_phases, )(fn) return decorator From 8ab7bc60a5c5f7d709951e1486a9556d471e3ffb Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Thu, 12 Dec 2024 10:19:49 +0100 Subject: [PATCH 22/60] Address jxs comment. --- specs/phase0/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index fa569573f3..48228f0ff0 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -123,8 +123,8 @@ This section outlines the specification for the networking stack in Ethereum con Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability. -All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). -The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously). +All implementations MUST support the TCP libp2p transport, MAY support the QUIC (UDP) libp2p transport, and MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). +The libp2p TCP and QUIC (UDP) transports support listening on IPv4 and IPv6 addresses (and on multiple simultaneously). Clients must support listening on at least one of IPv4 or IPv6. Clients that do _not_ have support for listening on IPv4 SHOULD be cognizant of the potential disadvantages in terms of From 85eff0c67eff2ebf9da4a9bb3d489bdfd8826df8 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 12 Dec 2024 14:58:02 +0100 Subject: [PATCH 23/60] Clarify gossip limits In the gossip specification, the `GOSSIP_MAX_SIZE` constant is specified for the uncompressed payload size in the gossipsub message. This PR clarifies how this limit applies to the various fields of the gossipsub message and provides additional limits derived from it that allow clients to more aggressively discard messages. In particular, clients are allowed to impose stricter limits on topics such as attestation and aggregates - an `Attestation` for example takes no more than `~228` bytes (to be verified!), far below the 10 MiB limit, though implicitly clients should already see these limits imposed as rejections by their SSZ decoder - this clarification mainly highlights the possibility to perform this check earlier in the process. --- specs/phase0/p2p-interface.md | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 396e4671b8..5fd2771829 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -268,12 +268,22 @@ This defines both the type of data being sent on the topic and how the data fiel - `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encodings) section for further details. +Clients MUST reject messages with unknown topic. + *Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known.
-Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. -Clients MUST reject (fail validation) messages that are over this size limit. -Likewise, clients MUST NOT emit or propagate messages larger than this limit. +The uncompressed payload in the [`data`](https://github.com/libp2p/go-libp2p-pubsub/blob/c06df2f9a38e9382e644b241adf0e96e5ca00955/pb/rpc.proto#L19) +must have has a size no greater than `GOSSIP_MAX_SIZE`. + +After compression, the payload in the `data` field must have a size no greater than +`32 + GOSSIP_MAX_SIZE + GOSSIP_MAX_SIZE / 6` (rounded down), as given by the +[snappy maximum compressed size function](https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47). + +Clients MUST reject (fail validation) messages with payloads that are over these size limits. +Likewise, clients MUST NOT emit or propagate messages larger than these limits. + +Clients MAY use [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) to determine the payload size limit, when this size is lower than `GOSSIP_MAX_SIZE`. The optional `from` (1), `seqno` (3), `signature` (5) and `key` (6) protobuf fields are omitted from the message, since messages are identified by content, anonymous, and signed where necessary in the application layer. @@ -288,6 +298,10 @@ The `message-id` of a gossipsub message MUST be the following 20 byte value comp the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data, i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]`. +Where relevant, clients MUST reject messages with `message-id` sizes other than 20 bytes. + +Clients MAY reject messages whose protobuf-encoded size exceeds the maximum possible size based on the limits above. + *Note*: The above logic handles two exceptional cases: (1) multiple snappy `data` can decompress to the same value, and (2) some message `data` can fail to snappy decompress altogether. 
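To make the size arithmetic in this patch concrete, the following is a minimal sketch of the payload-size and `message-id` rules described above. It assumes the phase0 value `GOSSIP_MAX_SIZE = 10 * 2**20`; the helper names (`max_compressed_payload_len`, `payload_size_ok`, `message_id_for_invalid_snappy`) are illustrative only, not spec or client definitions.

```python
import hashlib

GOSSIP_MAX_SIZE = 10 * 2**20  # 10 MiB, per the phase0 configuration table
MESSAGE_DOMAIN_INVALID_SNAPPY = bytes.fromhex("00000000")  # 4-byte domain


def max_compressed_payload_len(n: int) -> int:
    # Worst-case snappy output for an n-byte input: 32 + n + n/6, rounded down.
    return 32 + n + n // 6


def payload_size_ok(compressed_data: bytes, uncompressed_size: int) -> bool:
    # Reject (fail validation) if either the compressed `data` field or the
    # uncompressed payload exceeds its limit.
    return (len(compressed_data) <= max_compressed_payload_len(GOSSIP_MAX_SIZE)
            and uncompressed_size <= GOSSIP_MAX_SIZE)


def message_id_for_invalid_snappy(data: bytes) -> bytes:
    # message-id when `data` fails to snappy-decompress:
    # SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + data)[:20]
    return hashlib.sha256(MESSAGE_DOMAIN_INVALID_SNAPPY + data).digest()[:20]


# With the 10 MiB limit, the compressed bound works out to
# 32 + 10485760 + 10485760 // 6 = 12233418 bytes.
assert max_compressed_payload_len(GOSSIP_MAX_SIZE) == 12233418
assert len(message_id_for_invalid_snappy(b"\xff")) == 20
```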
From 9c4447bdde7f12dbe033facf0ee7b4d369c24427 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Fri, 13 Dec 2024 15:22:05 -0600 Subject: [PATCH 24/60] Pepper in some lru_cache decorators --- setup.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 0bc90ae787..55f1d0e344 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ import copy from collections import OrderedDict import json -from functools import reduce +from functools import lru_cache from pysetup.constants import ( # code names @@ -70,6 +70,7 @@ def installPackage(package: str): from marko.ext.gfm.elements import Table +@lru_cache(maxsize=None) def _get_name_from_heading(heading: Heading) -> Optional[str]: last_child = heading.children[-1] if isinstance(last_child, CodeSpan): @@ -77,15 +78,18 @@ def _get_name_from_heading(heading: Heading) -> Optional[str]: return None +@lru_cache(maxsize=None) def _get_source_from_code_block(block: FencedCode) -> str: return block.children[0].children.strip() +@lru_cache(maxsize=None) def _get_function_name_from_source(source: str) -> str: fn = ast.parse(source).body[0] return fn.name +@lru_cache(maxsize=None) def _get_self_type_from_source(source: str) -> Optional[str]: fn = ast.parse(source).body[0] args = fn.args.args @@ -98,6 +102,7 @@ def _get_self_type_from_source(source: str) -> Optional[str]: return args[0].annotation.id +@lru_cache(maxsize=None) def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]: class_def = ast.parse(source).body[0] base = class_def.bases[0] @@ -113,12 +118,14 @@ def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]: return class_def.name, parent_class +@lru_cache(maxsize=None) def _is_constant_id(name: str) -> bool: if name[0] not in string.ascii_uppercase + '_': return False return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:])) +@lru_cache(maxsize=None) def _load_kzg_trusted_setups(preset_name): trusted_setups_file_path = str(Path(__file__).parent) + '/presets/' + preset_name + '/trusted_setups/trusted_setup_4096.json' @@ -130,6 +137,7 @@ def _load_kzg_trusted_setups(preset_name): return trusted_setup_G1_monomial, trusted_setup_G1_lagrange, trusted_setup_G2_monomial +@lru_cache(maxsize=None) def _load_curdleproofs_crs(preset_name): """ NOTE: File generated from https://github.com/asn-d6/curdleproofs/blob/8e8bf6d4191fb6a844002f75666fb7009716319b/tests/crs.rs#L53-L67 @@ -153,6 +161,7 @@ def _load_curdleproofs_crs(preset_name): } +@lru_cache(maxsize=None) def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]: _, _, title = child._parse_info if not (title[0] == "(" and title[len(title)-1] == ")"): @@ -163,6 +172,7 @@ def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]: return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip() +@lru_cache(maxsize=None) def _parse_value(name: str, typed_value: str, type_hint: Optional[str] = None) -> VariableDefinition: comment = None if name in ("ROOT_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_REDUCED"): @@ -185,6 +195,11 @@ def _update_constant_vars_with_kzg_setups(constant_vars, preset_name): constant_vars['KZG_SETUP_G2_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G2_MONOMIAL'].value, str(kzg_setups[2]), comment, None) +@lru_cache(maxsize=None) +def parse_markdown(content: str): + return gfm.parse(content) + + def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name=str) -> SpecObject: functions: Dict[str, str] = 
{} protocols: Dict[str, ProtocolDefinition] = {} @@ -198,7 +213,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr custom_types: Dict[str, str] = {} with open(file_name) as source_file: - document = gfm.parse(source_file.read()) + document = parse_markdown(source_file.read()) current_name = None should_skip = False @@ -326,6 +341,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr ) +@lru_cache(maxsize=None) def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]: """ Loads the a directory of preset files, merges the result into one preset. @@ -344,6 +360,7 @@ def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]: return parse_config_vars(preset) +@lru_cache(maxsize=None) def load_config(config_path: Path) -> Dict[str, str]: """ Loads the given configuration file. @@ -358,7 +375,7 @@ def build_spec(fork: str, source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str: - preset = load_preset(preset_files) + preset = load_preset(tuple(preset_files)) config = load_config(config_file) all_specs = [get_spec(spec, preset, config, preset_name) for spec in source_files] From a58b1f52ddbf1daed66cac210256272694f8bb79 Mon Sep 17 00:00:00 2001 From: Paul Harris Date: Sun, 15 Dec 2024 08:37:22 +1000 Subject: [PATCH 25/60] clarify gossip sources wording --- specs/_features/eip7732/p2p-interface.md | 4 ++-- specs/deneb/p2p-interface.md | 2 +- specs/fulu/p2p-interface.md | 2 +- specs/phase0/p2p-interface.md | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/specs/_features/eip7732/p2p-interface.md b/specs/_features/eip7732/p2p-interface.md index df02cc2382..22b0ba7ede 100644 --- a/specs/_features/eip7732/p2p-interface.md +++ b/specs/_features/eip7732/p2p-interface.md @@ -151,7 +151,7 @@ This topic is used to propagate execution payload messages as `SignedExecutionPa The following validations MUST pass before forwarding the `signed_execution_payload_envelope` on the network, assuming the alias `envelope = signed_execution_payload_envelope.message`, `payload = payload_envelope.payload`: -- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue payload for processing once the block is retrieved). +- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue payload for processing once the block is retrieved). - _[IGNORE]_ The node has not seen another valid `SignedExecutionPayloadEnvelope` for this block root from this builder. Let `block` be the block with `envelope.beacon_block_root`. @@ -171,7 +171,7 @@ The following validations MUST pass before forwarding the `payload_attestation_m - _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `data.slot == current_slot`. - _[REJECT]_ The message's payload status is a valid status, i.e. `data.payload_status < PAYLOAD_INVALID_STATUS`. - _[IGNORE]_ The `payload_attestation_message` is the first valid message received from the validator with index `payload_attestation_message.validate_index`. -- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after). 
+- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after). - _[REJECT]_ The message's block `data.beacon_block_root` passes validation. - _[REJECT]_ The message's validator index is within the payload committee in `get_ptc(state, data.slot)`. The `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice. - _[REJECT]_ The message's signature of `payload_attestation_message.signature` is valid with respect to the validator index. diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 5f71bc854a..b3edc9d5bf 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -181,7 +181,7 @@ The following validations MUST pass before forwarding the `blob_sidecar` on the - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)` - _[REJECT]_ The proposer signature of `blob_sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey. -- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). +- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via gossip or non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`). - _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index abebbffecc..26227a7eda 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -195,7 +195,7 @@ The following validations MUST pass before forwarding the `sidecar: DataColumnSi - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[REJECT]_ The proposer signature of `sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey. -- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). 
+- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via gossip or non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`). - _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 396e4671b8..4f7749a007 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -338,7 +338,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block` - _[IGNORE]_ The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`. - _[REJECT]_ The proposer signature, `signed_beacon_block.signature`, is valid with respect to the `proposer_index` pubkey. - _[IGNORE]_ The block's parent (defined by `block.parent_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). - _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation. - _[REJECT]_ The block is from a higher slot than its parent. @@ -387,7 +387,7 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_ - _[REJECT]_ The aggregator signature, `signed_aggregate_and_proof.signature`, is valid. - _[REJECT]_ The signature of `aggregate` is valid. - _[IGNORE]_ The block being voted for (`aggregate.data.beacon_block_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue aggregates for processing once block is retrieved). - _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation. - _[REJECT]_ The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e. @@ -462,7 +462,7 @@ The following validations MUST pass before forwarding the `attestation` on the s that has an identical `attestation.data.target.epoch` and participating validator index. - _[REJECT]_ The signature of `attestation` is valid. - _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue attestations for processing once block is retrieved). - _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation. - _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e. 
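The reworded validations above all follow the same pattern: consult one set of block roots that have been seen via any source, and queue (rather than reject) messages whose referenced block is still unknown. The sketch below illustrates that pattern under stated assumptions; the data structures and function names are invented for illustration and are not client or spec code.

```python
from collections import defaultdict
from typing import Any, Dict, List, Set

seen_block_roots: Set[bytes] = set()
pending_by_block_root: Dict[bytes, List[Any]] = defaultdict(list)


def process(obj: Any) -> None:
    # Placeholder for the remaining gossip validations / fork-choice import.
    print("processing", obj)


def on_gossip_message(block_root: bytes, obj: Any) -> None:
    # [IGNORE] condition: the referenced block must have been seen, whether it
    # arrived via gossip or a non-gossip source (req/resp, checkpoint sync, ...).
    if block_root in seen_block_roots:
        process(obj)
    else:
        # The client MAY queue the message and revisit it once the block is retrieved.
        pending_by_block_root[block_root].append(obj)


def on_block_seen(block_root: bytes) -> None:
    seen_block_roots.add(block_root)
    for obj in pending_by_block_root.pop(block_root, []):
        process(obj)


# Example: an aggregate referencing an unknown block is queued, then processed
# once the block shows up from any source.
on_gossip_message(b"\x01" * 32, "aggregate A")  # queued
on_block_seen(b"\x01" * 32)                     # prints: processing aggregate A
```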
From 022bb22c777dd9b050c8eb6a1686101bfe2f5013 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 17 Dec 2024 11:31:35 +0100 Subject: [PATCH 26/60] Use single constant for gossip/req/resp, clarify encoded sizes --- specs/phase0/p2p-interface.md | 70 +++++++++++++++++++++++------------ 1 file changed, 47 insertions(+), 23 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 5fd2771829..29624e4fa8 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -193,11 +193,10 @@ This section outlines configurations that are used in this spec. | Name | Value | Description | |---|---|---| -| `GOSSIP_MAX_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed gossip messages. | +| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages / RPC chunks. | | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) | | `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks | -| `MAX_CHUNK_SIZE` | `10 * 2**20` (=10485760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. | | `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. | | `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500` | The maximum **milliseconds** of clock disparity assumed between honest nodes. | | `MESSAGE_DOMAIN_INVALID_SNAPPY` | `DomainType('0x00000000')` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages | @@ -229,6 +228,21 @@ Where is entirely independent of the ENR sequence number, and will in most cases be out of sync with the ENR sequence number. +### Maximum message sizes + +Maximum message sizes are derived from the maximum payload size that the network can carry according to the following functions: + +```python +def max_compressed_len(n): + # Worst-case compressed length for a given payload of size n when using snappy + # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 + return int(32 + n + n / 6) + +def max_message_size(): + # Allow 1024 bytes for framing and encoding overhead but at least 1MB in case MAX_PAYLOAD_SIZE is small. + return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024*1024) +``` + ### The gossip domain: gossipsub Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol @@ -273,18 +287,6 @@ Clients MUST reject messages with unknown topic. *Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known. -The uncompressed payload in the [`data`](https://github.com/libp2p/go-libp2p-pubsub/blob/c06df2f9a38e9382e644b241adf0e96e5ca00955/pb/rpc.proto#L19) -must have has a size no greater than `GOSSIP_MAX_SIZE`. 
- -After compression, the payload in the `data` field must have a size no greater than -`32 + GOSSIP_MAX_SIZE + GOSSIP_MAX_SIZE / 6` (rounded down), as given by the -[snappy maximum compressed size function](https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47). - -Clients MUST reject (fail validation) messages with payloads that are over these size limits. -Likewise, clients MUST NOT emit or propagate messages larger than these limits. - -Clients MAY use [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) to determine the payload size limit, when this size is lower than `GOSSIP_MAX_SIZE`. - The optional `from` (1), `seqno` (3), `signature` (5) and `key` (6) protobuf fields are omitted from the message, since messages are identified by content, anonymous, and signed where necessary in the application layer. Starting from Gossipsub v1.1, clients MUST enforce this by applying the `StrictNoSign` @@ -300,8 +302,6 @@ The `message-id` of a gossipsub message MUST be the following 20 byte value comp Where relevant, clients MUST reject messages with `message-id` sizes other than 20 bytes. -Clients MAY reject messages whose protobuf-encoded size exceeds the maximum possible size based on the limits above. - *Note*: The above logic handles two exceptional cases: (1) multiple snappy `data` can decompress to the same value, and (2) some message `data` can fail to snappy decompress altogether. @@ -516,6 +516,16 @@ so [basic snappy block compression](https://github.com/google/snappy/blob/master Implementations MUST use a single encoding for gossip. Changing an encoding will require coordination between participating implementations. +#### Gossipsub size limits + +Size limits are placed both on the [`RPCMsg`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-rpc) frame as well as the encoded payload in each [`Message`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-message). + +Clients MUST reject and MUST NOT emit or propagate messages whose size exceed the following limits: + +* the size of the encoded `RPCMsg`, including control messages and framing, must not exceed `max_message_size()` +* the size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. +* the size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. + ### The Req/Resp domain #### Protocol identification @@ -565,7 +575,7 @@ All other response types (non-Lists) send a single `response_chunk`. For both `request`s and `response`s, the `encoding-dependent-header` MUST be valid, and the `encoded-payload` must be valid within the constraints of the `encoding-dependent-header`. This includes type-specific bounds on payload size for some encoding strategies. -Regardless of these type specific bounds, a global maximum uncompressed byte size of `MAX_CHUNK_SIZE` MUST be applied to all method response chunks. +Regardless of these type specific bounds, a global maximum uncompressed byte size of `MAX_PAYLOAD_SIZE` MUST be applied to all method response chunks. Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. 
@@ -679,15 +689,13 @@ When snappy is applied, it can be passed through a buffered Snappy reader to dec Before reading the payload, the header MUST be validated: - The unsigned protobuf varint used for the length-prefix MUST not be longer than 10 bytes, which is sufficient for any `uint64`. -- The length-prefix is within the expected [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds). +- The length-prefix is within the expected [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) or `MAX_PAYLOAD_SIZE`, whichever is smaller. After reading a valid header, the payload MAY be read, while maintaining the size constraints from the header. -A reader SHOULD NOT read more than `max_encoded_len(n)` bytes after reading the SSZ length-prefix `n` from the header. -- For `ssz_snappy` this is: `32 + n + n // 6`. - This is considered the [worst-case compression result](https://github.com/google/snappy/blob/537f4ad6240e586970fe554614542e9717df7902/snappy.cc#L98) by Snappy. +A reader MUST NOT read more than `max_compressed_len(n)` bytes after reading the SSZ length-prefix `n` from the header. -A reader SHOULD consider the following cases as invalid input: +A reader MUST consider the following cases as invalid input: - Any remaining bytes, after having read the `n` SSZ bytes. An EOF is expected if more bytes are read than required. - An early EOF, before fully reading the declared length-prefix worth of SSZ bytes. @@ -1444,7 +1452,7 @@ Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed wi * Alignment with protocols like gRPC over HTTP/2 that prefix with length * Sanity checking of message length, and enabling much stricter message length limiting based on SSZ type information, to provide even more DOS protection than the global message length already does. - E.g. a small `Status` message does not nearly require `MAX_CHUNK_SIZE` bytes. + E.g. a small `Status` message does not nearly require `MAX_PAYLOAD_SIZE` bytes. [Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length (unsigned here) ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte. @@ -1693,6 +1701,22 @@ Other types are static, they have a fixed size: no dynamic-length content is inv For reference, the type bounds can be computed ahead of time, [as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e). It is advisable to derive these lengths from the SSZ type definitions in use, to ensure that version changes do not cause out-of-sync type bounds. +#### Why is the message size defined in terms of application payload? + +When transmitting messages over gossipsub and / or req/resp, we want to ensure that the same payload sizes are supported no matter the underlying transport, decoupling the consensus layer from libp2p-induced overhead and the particular transmission strategy. + +To derive "encoded size limits" from desired application sizes we take into account snappy compression and framing overhead. 
+ +In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame - the limit is set such that at least one max-sized application-level message together with a small amount (1kb) of gossipsub overhead is allowed - implementations are free to pack multiple smaller application messages into a single gossipsub frame, and / or combine it with control messages as they see fit. + +The limit is set on the uncompressed payload size in particular to protect against decompression bombs - although + +#### Why is there a limit on message sizes at all? + +The message size limit protects against several forms of DoS and network-based amplification attacks and provide upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability. + +In particular, blocks which at the time of writing is the only message type without a practical SSZ-derived upper bound on size cannot be fully verified synchronously as part of gossipsub validity checks meaning that there exist cases where invalid messages signed by a validator may be amplified by the network. + ## libp2p implementations matrix This section will soon contain a matrix showing the maturity/state of the libp2p features required From 44ab11d1551dc5e3d017d9a98ef37d339a437d18 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 17 Dec 2024 11:44:26 +0100 Subject: [PATCH 27/60] doctoc --- specs/phase0/p2p-interface.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 29624e4fa8..8bc05844c1 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -16,6 +16,7 @@ - [Constants](#constants) - [Configuration](#configuration) - [MetaData](#metadata) + - [Maximum message sizes](#maximum-message-sizes) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) @@ -28,6 +29,7 @@ - [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id) - [Attestations and Aggregation](#attestations-and-aggregation) - [Encodings](#encodings) + - [Gossipsub size limits](#gossipsub-size-limits) - [The Req/Resp domain](#the-reqresp-domain) - [Protocol identification](#protocol-identification) - [Req/Resp interaction](#reqresp-interaction) @@ -102,6 +104,8 @@ - [Why are we using Snappy for compression?](#why-are-we-using-snappy-for-compression) - [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes) - [What are SSZ type size bounds?](#what-are-ssz-type-size-bounds) + - [Why is the message size defined in terms of application payload?](#why-is-the-message-size-defined-in-terms-of-application-payload) + - [Why is there a limit on message sizes at all?](#why-is-there-a-limit-on-message-sizes-at-all) - [libp2p implementations matrix](#libp2p-implementations-matrix) @@ -522,7 +526,7 @@ Size limits are placed both on the [`RPCMsg`](https://github.com/libp2p/specs/bl Clients MUST reject and MUST NOT emit or propagate messages whose size exceed the following limits: -* the size of the encoded `RPCMsg`, including control messages and framing, must not exceed 
`max_message_size()` +* the size of the encoded `RPCMsg`, including control messages, framing, topics etc, must not exceed `max_message_size()` * the size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. * the size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. From 6c581ca42c7e98fbdee7beed5643a41b874100d2 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 17 Dec 2024 21:46:20 +0800 Subject: [PATCH 28/60] Fix a few typos (#4055) --- specs/_features/whisk/beacon-chain.md | 2 +- tests/core/pyspec/eth2spec/gen_helpers/README.md | 2 +- tests/formats/fork_choice/README.md | 4 ++-- tests/formats/light_client/sync.md | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/specs/_features/whisk/beacon-chain.md b/specs/_features/whisk/beacon-chain.md index de8051ffeb..3b527900e7 100644 --- a/specs/_features/whisk/beacon-chain.md +++ b/specs/_features/whisk/beacon-chain.md @@ -54,7 +54,7 @@ This document details the beacon chain additions and changes of to support the W | `WHISK_PROPOSER_TRACKERS_COUNT` | `uint64(2**13)` (= 8,192) | number of proposer trackers | | `WHISK_VALIDATORS_PER_SHUFFLE` | `uint64(2**7 - 4)` (= 124) | number of validators shuffled per shuffle step | | `WHISK_MAX_SHUFFLE_PROOF_SIZE` | `uint64(2**15)` | max size of a shuffle proof | -| `WHISK_MAX_OPENING_PROOF_SIZE` | `uint64(2**10)` | max size of a opening proof | +| `WHISK_MAX_OPENING_PROOF_SIZE` | `uint64(2**10)` | max size of an opening proof | ## Configuration diff --git a/tests/core/pyspec/eth2spec/gen_helpers/README.md b/tests/core/pyspec/eth2spec/gen_helpers/README.md index 8fda6b585e..595b411f70 100644 --- a/tests/core/pyspec/eth2spec/gen_helpers/README.md +++ b/tests/core/pyspec/eth2spec/gen_helpers/README.md @@ -26,7 +26,7 @@ Options: ## `gen_from_tests` -This is an util to derive tests from a tests source file. +This is a util to derive tests from a tests source file. This requires the tests to yield test-case-part outputs. These outputs are then written to the test case directory. Yielding data is illegal in normal pytests, so it is only done when in "generator mode". diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md index 58709b3fee..37d09f4787 100644 --- a/tests/formats/fork_choice/README.md +++ b/tests/formats/fork_choice/README.md @@ -156,10 +156,10 @@ value that Execution Layer client mock returns in responses to the following Eng The checks to verify the current status of `store`. ```yaml -checks: {: value} -- the assertions. +checks: {: value} -- the assertions. ``` -`` is the field member or property of [`Store`](../../../specs/phase0/fork-choice.md#store) object that maintained by client implementation. The fields include: +`` is the field member or property of [`Store`](../../../specs/phase0/fork-choice.md#store) object that maintained by client implementation. The fields include: ```yaml head: { diff --git a/tests/formats/light_client/sync.md b/tests/formats/light_client/sync.md index 1706b4c162..d4c8d3ae99 100644 --- a/tests/formats/light_client/sync.md +++ b/tests/formats/light_client/sync.md @@ -48,7 +48,7 @@ should be executed with the specified parameters: ```yaml { current_slot: int -- integer, decimal - checks: {: value} -- the assertions. + checks: {: value} -- the assertions. 
} ``` @@ -64,7 +64,7 @@ The function `process_light_client_update(store, update, current_slot, genesis_v update: string -- name of the `*.ssz_snappy` file to load as a `LightClientUpdate` object current_slot: int -- integer, decimal - checks: {: value} -- the assertions. + checks: {: value} -- the assertions. } ``` @@ -79,7 +79,7 @@ The `store` should be upgraded to reflect the new `store_fork_digest`: ```yaml { store_fork_digest: string -- Encoded `ForkDigest`-context of `store` - checks: {: value} -- the assertions. + checks: {: value} -- the assertions. } ``` From 305f30e89505615d924e42f81105b6de104c8e74 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:15:13 -0600 Subject: [PATCH 29/60] Bump circleci's cached venv key --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1de55179d4..37e094e1de 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,13 +35,13 @@ commands: description: "Restore the cache with pyspec keys" steps: - restore_cached_venv: - venv_name: v30-pyspec + venv_name: v31-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} save_pyspec_cached_venv: description: Save a venv into a cache with pyspec keys" steps: - save_cached_venv: - venv_name: v30-pyspec + venv_name: v31-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} venv_path: ./venv jobs: From 702722fe6995d2917aa9d6e1eb23085dde539b06 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:25:21 -0600 Subject: [PATCH 30/60] Bump circleci's cached repo key --- .circleci/config.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 37e094e1de..9be3106db1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -53,16 +53,16 @@ jobs: # Restore git repo at point close to target branch/revision, to speed up checkout - restore_cache: keys: - - v3-specs-repo-{{ .Branch }}-{{ .Revision }} - - v3-specs-repo-{{ .Branch }}- - - v3-specs-repo- + - v4-specs-repo-{{ .Branch }}-{{ .Revision }} + - v4-specs-repo-{{ .Branch }}- + - v4-specs-repo- - checkout - run: name: Clean up git repo to reduce cache size command: git gc # Save the git checkout as a cache, to make cloning next time faster. 
- save_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} paths: - ~/specs-repo install_pyspec_test: @@ -71,7 +71,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install pyspec requirements @@ -83,7 +83,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -96,7 +96,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -109,7 +109,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -122,7 +122,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -135,7 +135,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -148,7 +148,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -161,7 +161,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -174,7 +174,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -187,7 +187,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install doctoc From b1205ef967de705957df1f50e6c5453d8bde09de Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:42:46 -0600 Subject: [PATCH 31/60] Revert "Bump circleci's cached venv key" This reverts commit 305f30e89505615d924e42f81105b6de104c8e74. 
--- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9be3106db1..d142e4ac24 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,13 +35,13 @@ commands: description: "Restore the cache with pyspec keys" steps: - restore_cached_venv: - venv_name: v31-pyspec + venv_name: v30-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} save_pyspec_cached_venv: description: Save a venv into a cache with pyspec keys" steps: - save_cached_venv: - venv_name: v31-pyspec + venv_name: v30-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} venv_path: ./venv jobs: From 46f1dde2b7fd487b107a69b90aeb60366da762cf Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:42:56 -0600 Subject: [PATCH 32/60] Revert "Bump circleci's cached repo key" This reverts commit 702722fe6995d2917aa9d6e1eb23085dde539b06. --- .circleci/config.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d142e4ac24..1de55179d4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -53,16 +53,16 @@ jobs: # Restore git repo at point close to target branch/revision, to speed up checkout - restore_cache: keys: - - v4-specs-repo-{{ .Branch }}-{{ .Revision }} - - v4-specs-repo-{{ .Branch }}- - - v4-specs-repo- + - v3-specs-repo-{{ .Branch }}-{{ .Revision }} + - v3-specs-repo-{{ .Branch }}- + - v3-specs-repo- - checkout - run: name: Clean up git repo to reduce cache size command: git gc # Save the git checkout as a cache, to make cloning next time faster. - save_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} paths: - ~/specs-repo install_pyspec_test: @@ -71,7 +71,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install pyspec requirements @@ -83,7 +83,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -96,7 +96,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -109,7 +109,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -122,7 +122,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -135,7 +135,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -148,7 +148,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - 
restore_pyspec_cached_venv - run: name: Run py-tests @@ -161,7 +161,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -174,7 +174,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -187,7 +187,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install doctoc From cb4ed99f4e889c754dba3f2aadad3ed744c00e23 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:54:15 -0600 Subject: [PATCH 33/60] Fix linting errors for new functions --- specs/phase0/p2p-interface.md | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 8bc05844c1..e400dff58c 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -17,6 +17,8 @@ - [Configuration](#configuration) - [MetaData](#metadata) - [Maximum message sizes](#maximum-message-sizes) + - [`max_compressed_len`](#max_compressed_len) + - [`max_message_size`](#max_message_size) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) @@ -236,15 +238,21 @@ and will in most cases be out of sync with the ENR sequence number. Maximum message sizes are derived from the maximum payload size that the network can carry according to the following functions: +#### `max_compressed_len` + +```python +def max_compressed_len(n: uint64) -> uint64: + # Worst-case compressed length for a given payload of size n when using snappy + # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 + return uint64(32 + n + n / 6) +``` + +#### `max_message_size` + ```python -def max_compressed_len(n): - # Worst-case compressed length for a given payload of size n when using snappy - # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 - return int(32 + n + n / 6) - -def max_message_size(): - # Allow 1024 bytes for framing and encoding overhead but at least 1MB in case MAX_PAYLOAD_SIZE is small. - return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024*1024) +def max_message_size() -> uint64: + # Allow 1024 bytes for framing and encoding overhead but at least 1MiB in case MAX_PAYLOAD_SIZE is small. 
+ return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024) ``` ### The gossip domain: gossipsub From 2b710f337c701eaa925a8e88ee824e395379c92a Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 17 Dec 2024 10:33:56 -0700 Subject: [PATCH 34/60] clarify blob count validation on blob subnets --- specs/electra/p2p-interface.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index 0016976e93..8ebec6a8e6 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -14,6 +14,7 @@ - [Global topics](#global-topics) - [`beacon_block`](#beacon_block) - [`beacon_aggregate_and_proof`](#beacon_aggregate_and_proof) + - [`blob_sidecar_{subnet_id}`](#blob_sidecar_subnet_id) - [Attestation subnets](#attestation-subnets) - [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) @@ -77,6 +78,14 @@ The following validations are added: * [REJECT] `len(committee_indices) == 1`, where `committee_indices = get_committee_indices(aggregate)`. * [REJECT] `aggregate.data.index == 0` +###### `blob_sidecar_{subnet_id}` + +*[Modified in Electra:EIP7691]* + +The existing validations all apply as given from previous forks, with the following exceptions: + +* Uses of `MAX_BLOBS_PER_BLOCK` in existing validations are replaced with `MAX_BLOBS_PER_BLOCK_ELECTRA`. + ##### Attestation subnets ###### `beacon_attestation_{subnet_id}` From d41b7bddf5e77c8a7d49832b11485f53f7c5e83f Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 11:35:49 -0600 Subject: [PATCH 35/60] Bump venv cache key again --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1de55179d4..38bd6f422d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,13 +35,13 @@ commands: description: "Restore the cache with pyspec keys" steps: - restore_cached_venv: - venv_name: v30-pyspec + venv_name: v32-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} save_pyspec_cached_venv: description: Save a venv into a cache with pyspec keys" steps: - save_cached_venv: - venv_name: v30-pyspec + venv_name: v32-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} venv_path: ./venv jobs: From ea37fc5140b299249e3dd5adcac748d4303b6ccc Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 14:23:25 -0600 Subject: [PATCH 36/60] Fix a few nits dealing with updated makefile * Hide output from forced eth2spec rebuild * Call detect_errors after all generators are done * Allow output to stderr to show up in console when testing * Add note about printing to stderr * Make check_toc private, as one should only use make lint * Move _check_toc rule closer to lint rule * Force rebuild eth2spec when running generators * And do not rebuild pyspec now, no longer needed --- Makefile | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index a3a3e24288..09e914c3ca 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,6 @@ ALL_EXECUTABLE_SPEC_NAMES = \ # A list of fake targets. .PHONY: \ - check_toc \ clean \ coverage \ detect_errors \ @@ -39,7 +38,6 @@ NORM = $(shell tput sgr0) # Print target descriptions. 
help: - @echo "make $(BOLD)check_toc$(NORM) -- check table of contents" @echo "make $(BOLD)clean$(NORM) -- delete all untracked files" @echo "make $(BOLD)coverage$(NORM) -- run pyspec tests with coverage" @echo "make $(BOLD)detect_errors$(NORM) -- detect generator errors" @@ -85,7 +83,7 @@ $(ETH2SPEC): setup.py | $(VENV) # Force rebuild/install the eth2spec package. eth2spec: - $(MAKE) --always-make $(ETH2SPEC) + @$(MAKE) --always-make $(ETH2SPEC) # Create the pyspec for all phases. pyspec: $(VENV) setup.py @@ -99,6 +97,8 @@ pyspec: $(VENV) setup.py TEST_REPORT_DIR = $(PYSPEC_DIR)/test-reports # Run pyspec tests. +# Note: for debugging output to show, print to stderr. +# # To run a specific test, append k=, eg: # make test k=test_verify_kzg_proof # To run tests for a specific fork, append fork=, eg: @@ -117,6 +117,7 @@ test: $(ETH2SPEC) pyspec @mkdir -p $(TEST_REPORT_DIR) @$(PYTHON_VENV) -m pytest \ -n auto \ + --capture=no \ $(MAYBE_TEST) \ $(MAYBE_FORK) \ $(PRESET) \ @@ -193,10 +194,6 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \ $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \ $(wildcard $(SSZ_DIR)/*.md) -# Check all files and error if any ToC were modified. -check_toc: $(MARKDOWN_FILES:=.toc) - @[ "$$(find . -name '*.md.tmp' -print -quit)" ] && exit 1 || exit 0 - # Generate ToC sections & save copy of original if modified. %.toc: @cp $* $*.tmp; \ @@ -209,8 +206,12 @@ check_toc: $(MARKDOWN_FILES:=.toc) echo "\033[1;34m See $*.tmp\033[0m"; \ fi +# Check all files and error if any ToC were modified. +_check_toc: $(MARKDOWN_FILES:=.toc) + @[ "$$(find . -name '*.md.tmp' -print -quit)" ] && exit 1 || exit 0 + # Check for mistakes. -lint: $(ETH2SPEC) pyspec check_toc +lint: $(ETH2SPEC) pyspec _check_toc @$(CODESPELL_VENV) . --skip "./.git,$(VENV),$(PYSPEC_DIR)/.mypy_cache" -I .codespell-whitelist @$(PYTHON_VENV) -m flake8 --config $(FLAKE8_CONFIG) $(PYSPEC_DIR)/eth2spec @$(PYTHON_VENV) -m flake8 --config $(FLAKE8_CONFIG) $(TEST_GENERATORS_DIR) @@ -235,17 +236,19 @@ gen_list: done # Run one generator. +# This will forcibly rebuild eth2spec just in case. # To check modules for a generator, append modcheck=true, eg: # make gen_genesis modcheck=true gen_%: MAYBE_MODCHECK := $(if $(filter true,$(modcheck)),--modcheck) -gen_%: $(ETH2SPEC) pyspec +gen_%: eth2spec @mkdir -p $(TEST_VECTOR_DIR) @$(PYTHON_VENV) $(GENERATOR_DIR)/$*/main.py \ --output $(TEST_VECTOR_DIR) \ $(MAYBE_MODCHECK) # Run all generators then check for errors. -gen_all: $(GENERATOR_TARGETS) detect_errors +gen_all: $(GENERATOR_TARGETS) + @$(MAKE) detect_errors # Detect errors in generators. detect_errors: $(TEST_VECTOR_DIR) From 0964db294c718b0d3574945a963cd2502a1aef32 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 18 Dec 2024 17:28:51 +1100 Subject: [PATCH 37/60] Fix custody `sampling_size` logic. --- specs/fulu/das-core.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 25576bc1f4..6908144799 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -237,7 +237,7 @@ The particular columns/groups that a node custodies are selected pseudo-randomly ## Custody sampling -At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count)` total custody groups. 
The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. +At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * (NUMBER_OF_COLUMNS / NUMBER_OF_CUSTODY_GROUPS))` total columns. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. ## Extended data From 7be22acf6ac2b07f15f8b2af16b13e81ce57557f Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:26:16 +0800 Subject: [PATCH 38/60] Remove non commit-pinned blob links --- tests/README.md | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/tests/README.md b/tests/README.md index 798627577d..dc2e02439d 100644 --- a/tests/README.md +++ b/tests/README.md @@ -54,15 +54,14 @@ To learn how consensus spec tests are written, let's go over the code: This [decorator](https://book.pythontips.com/en/latest/decorators.html) specifies that this test is applicable to all the phases of consensus layer development. These phases are similar to forks (Istanbul, -Berlin, London, etc.) in the execution blockchain. If you are interested, [you can see the definition of -this decorator here](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py#L331-L335). +Berlin, London, etc.) in the execution blockchain. ```python @spec_state_test ``` -[This decorator](https://github.com/qbzzt/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py#L232-L234) specifies -that this test is a state transition test, and that it does not include a transition between different forks. +This decorator specifies that this test is a state transition test, and that it does not include a transition +between different forks. ```python def test_empty_block_transition(spec, state): @@ -162,8 +161,7 @@ find . -name '*.py' -exec grep 'def state_transition_and_sign_block' {} \; -prin ``` And you'll find that the function is defined in -[`eth2spec/test/helpers/state.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/state.py). Looking -in that file, we see that the second function is: +`eth2spec/test/helpers/state.py`. Looking in that file, we see that the second function is: ```python def next_slot(spec, state): @@ -199,8 +197,7 @@ verify this). It is important to make sure that the system rejects invalid input, so our next step is to deal with cases where the protocol is supposed to reject something. To see such a test, look at `test_prev_slot_block_transition` (in the same -file we used previously, -[`~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py)). 
+file we used previously, `~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`). ```python @with_all_phases @@ -230,8 +227,7 @@ Transition to the new slot, which naturally has a different proposer. ``` Specify that the function `transition_unsigned_block` will cause an assertion error. -You can see this function in -[`~/consensus-specs/tests/core/pyspec/eth2spec/test/helpers/block.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/block.py), +You can see this function in `~/consensus-specs/tests/core/pyspec/eth2spec/test/helpers/block.py`, and one of the tests is that the block must be for this slot: > ```python > assert state.slot == block.slot From 35603f5417f6fa9cdd723fb3a546a59c215384ae Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Wed, 18 Dec 2024 13:29:44 +0100 Subject: [PATCH 39/60] Metadata: Replace `csc` by `cgc`. --- specs/fulu/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index abebbffecc..e846cb59fc 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -152,14 +152,14 @@ The `MetaData` stored locally by clients is updated with an additional field to seq_number: uint64 attnets: Bitvector[ATTESTATION_SUBNET_COUNT] syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT] - custody_subnet_count: uint64 # csc + custody_group_count: uint64 # cgc ) ``` Where - `seq_number`, `attnets`, and `syncnets` have the same meaning defined in the Altair document. -- `custody_subnet_count` represents the node's custody subnet count. Clients MAY reject peers with a value less than `CUSTODY_REQUIREMENT`. +- `custody_group_count` represents the node's custody group count. Clients MAY reject peers with a value less than `CUSTODY_REQUIREMENT`. 
### The gossip domain: gossipsub From dde81194d7dc68c839d643cb255fbc6243a696ba Mon Sep 17 00:00:00 2001 From: Suphanat Chunhapanya Date: Wed, 18 Dec 2024 22:06:45 +0700 Subject: [PATCH 40/60] EIP-7594: Fix custody group spec tests --- .../test_compute_columns_for_custody_group.py | 62 ++++++++++ .../networking/test_get_custody_columns.py | 113 ------------------ .../networking/test_get_custody_groups.py | 106 ++++++++++++++++ tests/generators/networking/main.py | 3 +- 4 files changed, 170 insertions(+), 114 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py delete mode 100644 tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py create mode 100644 tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py new file mode 100644 index 0000000000..61752e919a --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py @@ -0,0 +1,62 @@ +import random + +from eth2spec.test.context import ( + single_phase, + spec_test, + with_fulu_and_later, +) + + +def _run_compute_columns_for_custody_group(spec, rng, custody_group=None): + if custody_group is None: + custody_group = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS - 1) + + result = spec.compute_columns_for_custody_group(custody_group) + yield 'custody_group', 'meta', custody_group + + assert len(result) == len(set(result)) + assert len(result) == spec.config.NUMBER_OF_COLUMNS // spec.config.NUMBER_OF_CUSTODY_GROUPS + assert all(i < spec.config.NUMBER_OF_COLUMNS for i in result) + python_list_result = [int(i) for i in result] + + yield 'result', 'meta', python_list_result + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__min_custody_group(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng, custody_group=0) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__max_custody_group(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng, custody_group=spec.config.NUMBER_OF_CUSTODY_GROUPS - 1) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__1(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__2(spec): + rng = random.Random(2222) + yield from _run_compute_columns_for_custody_group(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__3(spec): + rng = random.Random(3333) + yield from _run_compute_columns_for_custody_group(spec, rng) diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py deleted file mode 100644 index d3be42ce16..0000000000 --- a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py +++ /dev/null @@ -1,113 +0,0 @@ -import random - -from eth2spec.test.context import ( - single_phase, - spec_test, - with_fulu_and_later, -) - - -def _run_get_custody_columns(spec, rng, node_id=None, custody_group_count=None): - if node_id is None: - node_id 
= rng.randint(0, 2**256 - 1) - - if custody_group_count is None: - custody_group_count = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS) - - columns_per_group = spec.config.NUMBER_OF_COLUMNS // spec.config.NUMBER_OF_CUSTODY_GROUPS - groups = spec.get_custody_groups(node_id, custody_group_count) - yield 'node_id', 'meta', node_id - yield 'custody_group_count', 'meta', int(custody_group_count) - - result = [] - for group in groups: - group_columns = spec.compute_columns_for_custody_group(group) - assert len(group_columns) == columns_per_group - result.extend(group_columns) - - assert len(result) == len(set(result)) - assert len(result) == custody_group_count * columns_per_group - assert all(i < spec.config.NUMBER_OF_COLUMNS for i in result) - python_list_result = [int(i) for i in result] - - yield 'result', 'meta', python_list_result - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__min_node_id_min_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=0, custody_group_count=0) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__min_node_id_max_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns( - spec, rng, node_id=0, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_min_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=2**256 - 1, custody_group_count=0) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_max_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns( - spec, rng, node_id=2**256 - 1, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, - ) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_max_custody_group_count_minus_1(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns( - spec, rng, node_id=2**256 - 2, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, - ) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__short_node_id(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=1048576, custody_group_count=1) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__1(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__2(spec): - rng = random.Random(2222) - yield from _run_get_custody_columns(spec, rng) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__3(spec): - rng = random.Random(3333) - yield from _run_get_custody_columns(spec, rng) diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py new file mode 100644 index 0000000000..8d33a2b920 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py @@ -0,0 +1,106 @@ +import random + +from eth2spec.test.context import ( + single_phase, + spec_test, + with_fulu_and_later, +) + + +def _run_get_custody_groups(spec, rng, node_id=None, custody_group_count=None): + if node_id is None: + node_id = rng.randint(0, 2**256 - 1) + + if custody_group_count is None: + 
custody_group_count = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS) + + result = spec.get_custody_groups(node_id, custody_group_count) + yield 'node_id', 'meta', node_id + yield 'custody_group_count', 'meta', int(custody_group_count) + + assert len(result) == len(set(result)) + assert len(result) == custody_group_count + assert all(i < spec.config.NUMBER_OF_CUSTODY_GROUPS for i in result) + python_list_result = [int(i) for i in result] + + yield 'result', 'meta', python_list_result + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__min_node_id_min_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=0, custody_group_count=0) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__min_node_id_max_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=0, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_min_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=2**256 - 1, custody_group_count=0) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_max_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=2**256 - 1, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, + ) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_max_custody_group_count_minus_1(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=2**256 - 2, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, + ) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__short_node_id(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=1048576, custody_group_count=1) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__1(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__2(spec): + rng = random.Random(2222) + yield from _run_get_custody_groups(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__3(spec): + rng = random.Random(3333) + yield from _run_get_custody_groups(spec, rng) diff --git a/tests/generators/networking/main.py b/tests/generators/networking/main.py index 3217c2cce2..a670f7bd4d 100644 --- a/tests/generators/networking/main.py +++ b/tests/generators/networking/main.py @@ -5,7 +5,8 @@ if __name__ == "__main__": fulu_mods = {key: 'eth2spec.test.fulu.networking.test_' + key for key in [ - 'get_custody_columns', + 'compute_columns_for_custody_group', + 'get_custody_groups', ]} all_mods = { FULU: fulu_mods From c33124ebe40fe2092c138b2c90959330810f539a Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Wed, 18 Dec 2024 11:38:55 -0600 Subject: [PATCH 41/60] Use integer division --- specs/fulu/das-core.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 6908144799..923cb7db29 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -237,7 +237,7 @@ The particular columns/groups that a node custodies are selected 
pseudo-randomly ## Custody sampling -At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * (NUMBER_OF_COLUMNS / NUMBER_OF_CUSTODY_GROUPS))` total columns. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. +At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * (NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS))` total columns. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. ## Extended data From 8e0d0d48e81d6c7c5a8253ab61340f5ea5bac66a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 19 Dec 2024 09:29:58 +1100 Subject: [PATCH 42/60] Simplify inline code Co-authored-by: Justin Traglia <95511699+jtraglia@users.noreply.github.com> --- specs/fulu/das-core.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 923cb7db29..31c4af3c38 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -237,7 +237,7 @@ The particular columns/groups that a node custodies are selected pseudo-randomly ## Custody sampling -At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * (NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS))` total columns. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. +At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * columns_per_group)` total columns, where `columns_per_group = NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS`. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. 
## Extended data From f17663f89c25e0212279a05446065c17344beaa0 Mon Sep 17 00:00:00 2001 From: Nico Flaig Date: Fri, 20 Dec 2024 14:55:39 +0000 Subject: [PATCH 43/60] Update blob sidecar subnet computation for EIP-7691 --- specs/electra/validator.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/specs/electra/validator.md b/specs/electra/validator.md index 2e980d5345..3620c30790 100644 --- a/specs/electra/validator.md +++ b/specs/electra/validator.md @@ -24,6 +24,8 @@ - [Deposits](#deposits) - [Execution payload](#execution-payload) - [Execution Requests](#execution-requests) + - [Constructing the `BlobSidecar`s](#constructing-the-blobsidecars) + - [Sidecar](#sidecar) - [Attesting](#attesting) - [Construct attestation](#construct-attestation) - [Attestation aggregation](#attestation-aggregation) @@ -240,6 +242,17 @@ def get_execution_requests(execution_requests_list: Sequence[bytes]) -> Executio ) ``` +### Constructing the `BlobSidecar`s + +#### Sidecar + +*[Modified in Electra:EIP7691]* + +```python +def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: + return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) +``` + ## Attesting ### Construct attestation From 9e6d8a71f0f1335f38114b6160dce01d4dfaa6dd Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 23 Dec 2024 12:33:41 -0600 Subject: [PATCH 44/60] Fix garbled blob_kzg_commitments accesses --- specs/_features/eip7732/p2p-interface.md | 2 +- specs/deneb/p2p-interface.md | 2 +- specs/electra/p2p-interface.md | 2 +- specs/fulu/p2p-interface.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/_features/eip7732/p2p-interface.md b/specs/_features/eip7732/p2p-interface.md index 22b0ba7ede..a2716933cd 100644 --- a/specs/_features/eip7732/p2p-interface.md +++ b/specs/_features/eip7732/p2p-interface.md @@ -130,7 +130,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB There are no new validations for this topic. However, all validations with regards to the `ExecutionPayload` are removed: -- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK +- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` - _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot -- i.e. `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`. - If `execution_payload` verification of block's parent by an execution node is *not* complete: diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index b3edc9d5bf..e38a50ba2e 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -147,7 +147,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB New validation: - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` + i.e. 
validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` ###### `beacon_aggregate_and_proof` diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index 8ebec6a8e6..d0663943f1 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -67,7 +67,7 @@ The derivation of the `message-id` remains stable. *Updated validation* - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA` + i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA` ###### `beacon_aggregate_and_proof` diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index 0782d6ac0b..ef8a9b9c03 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -174,7 +174,7 @@ Some gossip meshes are upgraded in the Fulu fork to support upgraded types. *Updated validation* - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU` + i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU` ##### Blob subnets From 6590cd0398adae53fd2ef13dd06e80d3471386a1 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 23 Dec 2024 12:37:28 -0600 Subject: [PATCH 45/60] Fix two minor typos --- docker/README.md | 2 +- specs/_features/custody_game/beacon-chain.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/README.md b/docker/README.md index 4824fc283a..34bdd94c51 100644 --- a/docker/README.md +++ b/docker/README.md @@ -10,7 +10,7 @@ Handy commands: Ideally manual running of docker containers is for advanced users, we recommend the script based approach described below for most users. -The `scripts/build_run_docker_tests.sh` script will cover most usecases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies. +The `scripts/build_run_docker_tests.sh` script will cover most use cases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies. E.g: - `./build_run_docker_tests.sh --p mainnet` will run the mainnet preset tests diff --git a/specs/_features/custody_game/beacon-chain.md b/specs/_features/custody_game/beacon-chain.md index 092846a484..66aea773a7 100644 --- a/specs/_features/custody_game/beacon-chain.md +++ b/specs/_features/custody_game/beacon-chain.md @@ -619,7 +619,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed for attester_index in attesters: if attester_index != custody_slashing.malefactor_index: increase_balance(state, attester_index, whistleblower_reward) - # No special whisteblower reward: it is expected to be an attester. Others are free to slash too however. 
+ # No special whistleblower reward: it is expected to be an attester. Others are free to slash too however. else: # The claim was false, the custody bit was correct. Slash the whistleblower that induced this work. slash_validator(state, custody_slashing.whistleblower_index) From 92a2b20c0ee239de05fc5b76fc98d39d682b9bd1 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Sat, 28 Dec 2024 09:36:03 -0600 Subject: [PATCH 46/60] In get_custody_groups, don't skip 0 value --- specs/fulu/das-core.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 31c4af3c38..846f6b206e 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -105,19 +105,20 @@ class MatrixEntry(Container): def get_custody_groups(node_id: NodeID, custody_group_count: uint64) -> Sequence[CustodyIndex]: assert custody_group_count <= NUMBER_OF_CUSTODY_GROUPS - custody_groups: List[uint64] = [] current_id = uint256(node_id) + custody_groups: List[CustodyIndex] = [] while len(custody_groups) < custody_group_count: custody_group = CustodyIndex( - bytes_to_uint64(hash(uint_to_bytes(uint256(current_id)))[0:8]) + bytes_to_uint64(hash(uint_to_bytes(current_id))[0:8]) % NUMBER_OF_CUSTODY_GROUPS ) if custody_group not in custody_groups: custody_groups.append(custody_group) if current_id == UINT256_MAX: # Overflow prevention - current_id = NodeID(0) - current_id += 1 + current_id = uint256(0) + else: + current_id += 1 assert len(custody_groups) == len(set(custody_groups)) return sorted(custody_groups) From 6dd929fa8d621e48560412fbf38c3a19778fbe5f Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Sat, 4 Jan 2025 23:04:03 +0100 Subject: [PATCH 47/60] Deneb: Add BeaconState During the Deneb fork, some fields were added to the `ExecutionPayloadHeader`. The `ExecutionPayloadHeader` is part of the `BeaconState`. ==> This change should be reflected in the `BeaconState`. 
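
For illustration only (not part of the diff below), the dependency can be sketched with two toy containers. These stand-ins are invented for this note and are not spec definitions; the field names echo the real ones, e.g. `excess_blob_gas`, which is new in Deneb.

```python
from dataclasses import dataclass


# Toy sketch only: Deneb extends ExecutionPayloadHeader (e.g. excess_blob_gas),
# and BeaconState embeds that header, so the Deneb spec restates BeaconState
# even though BeaconState itself gains no new field of its own.
@dataclass
class ExecutionPayloadHeaderSketch:
    block_hash: bytes
    excess_blob_gas: int  # [New in Deneb:EIP4844]


@dataclass
class BeaconStateSketch:
    latest_execution_payload_header: ExecutionPayloadHeaderSketch  # [Modified in Deneb:EIP4844]
```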
--- specs/deneb/beacon-chain.md | 48 +++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index 43360f8b3e..966a7007d9 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -20,6 +20,7 @@ - [`BeaconBlockBody`](#beaconblockbody) - [`ExecutionPayload`](#executionpayload) - [`ExecutionPayloadHeader`](#executionpayloadheader) + - [`BeaconState`](#beaconstate) - [Helper functions](#helper-functions) - [Misc](#misc) - [`kzg_commitment_to_versioned_hash`](#kzg_commitment_to_versioned_hash) @@ -171,6 +172,53 @@ class ExecutionPayloadHeader(Container): excess_blob_gas: uint64 # [New in Deneb:EIP4844] ``` +#### `BeaconState` + +```python +class BeaconState(Container): + # Versioning + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + # History + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + # Eth1 + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + # Registry + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + # Randomness + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + # Slashings + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances + # Participation + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + # Finality + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + # Inactivity + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + # Sync + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # Execution + latest_execution_payload_header: ExecutionPayloadHeader # [Modified in Deneb:EIP4844] + # Withdrawals + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + # Deep history valid from Capella onwards + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] +``` + ## Helper functions ### Misc From 4f7fe8230d5758b44d70149ccdfc473dbe3fdfde Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Mon, 6 Jan 2025 11:58:04 +0100 Subject: [PATCH 48/60] Fulu: Remove V3 of blob sidecar by root/range RPC The Fulu fork introduces peerDAS, replacing blobs sidecars by data columns sidecars. After the Fulu fork epoch, clients still need to be able to request blob sidecars by root/range, at least for the blobs retention period after the Fulu fork epoch. Blob sidecars will be retrieved at most up to the Electra epoch, so the V2 version (Electra) for blob sidecars by range/root is enough. There is no need to retrieve blobs sidecars after the Fulu fork where data columns sidecars will be used instead, so there is no need to introduce the V3 version (Fulu) for blob sidecars by range/root. 
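
For illustration, the retention argument can be sketched as follows. This is not part of the change; the helper and its parameters are invented for this note, and the retention value is assumed to come from the existing `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` configuration.

```python
def should_serve_blob_sidecars(request_epoch: int,
                               current_epoch: int,
                               fulu_fork_epoch: int,
                               min_epochs_for_blob_sidecars_requests: int) -> bool:
    # Blob sidecars only exist for pre-Fulu (Deneb/Electra era) blocks; from the
    # Fulu fork onwards the equivalent data is served as data column sidecars.
    is_pre_fulu = request_epoch < fulu_fork_epoch
    # They also only need to be served while inside the blob retention window.
    oldest_served_epoch = max(current_epoch - min_epochs_for_blob_sidecars_requests, 0)
    return is_pre_fulu and request_epoch >= oldest_served_epoch
```

Under this reading, the existing v1/v2 methods already cover every blob sidecar request a client can legitimately make after the Fulu fork, so no v3 is needed.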
--- specs/fulu/p2p-interface.md | 72 ------------------------------------- 1 file changed, 72 deletions(-) diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index ef8a9b9c03..73d96192ff 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -29,8 +29,6 @@ - [`data_column_sidecar_{subnet_id}`](#data_column_sidecar_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) - [Messages](#messages) - - [BlobSidecarsByRoot v3](#blobsidecarsbyroot-v3) - - [BlobSidecarsByRange v3](#blobsidecarsbyrange-v3) - [DataColumnSidecarsByRoot v1](#datacolumnsidecarsbyroot-v1) - [DataColumnSidecarsByRange v1](#datacolumnsidecarsbyrange-v1) - [GetMetaData v3](#getmetadata-v3) @@ -64,7 +62,6 @@ The specification of these changes continues in the same format as the network s | `DATA_COLUMN_SIDECAR_SUBNET_COUNT` | `128` | The number of data column sidecar subnets used in the gossipsub protocol | | `MAX_REQUEST_DATA_COLUMN_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS` | Maximum number of data column sidecars in a single request | | `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve data column sidecars | -| `MAX_REQUEST_BLOB_SIDECARS_FULU` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_FULU` | Maximum number of blob sidecars in a single request | ### Containers @@ -211,75 +208,6 @@ The following validations MUST pass before forwarding the `sidecar: DataColumnSi #### Messages -##### BlobSidecarsByRoot v3 - -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/3/` - -*[Modified in Fulu:EIP7594]* - -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|---------------------|--------------------| -| `FULU_FORK_VERSION` | `fulu.BlobSidecar` | - -Request Content: - -``` -( - List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -Response Content: - -``` -( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -*Updated validation* - -No more than `MAX_REQUEST_BLOB_SIDECARS_FULU` may be requested at a time. - -##### BlobSidecarsByRange v3 - -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/3/` - -*[Modified in Fulu:EIP7594]* - -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|---------------------|--------------------| -| `FULU_FORK_VERSION` | `fulu.BlobSidecar` | - -Request Content: - -``` -( - start_slot: Slot - count: uint64 -) -``` - -Response Content: - -``` -( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -*Updated validation* - -Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS_FULU` sidecars. - ##### DataColumnSidecarsByRoot v1 **Protocol ID:** `/eth2/beacon_chain/req/data_column_sidecars_by_root/1/` From 7d511becf65946c26d654c17be452d595ce38522 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Mon, 6 Jan 2025 10:40:41 +0100 Subject: [PATCH 49/60] Emit correct block hash in random Electra tests New tests were added in #4032 with incorrect EL block hash, fix these. 
--- .../pyspec/eth2spec/test/utils/randomized_block_tests.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py index 0e4727b794..3dae15c694 100644 --- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py +++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py @@ -8,7 +8,7 @@ from typing import Callable from eth2spec.test.helpers.execution_payload import ( - compute_el_block_hash, + compute_el_block_hash_for_block, build_randomized_execution_payload, ) from eth2spec.test.helpers.multi_operations import ( @@ -255,7 +255,7 @@ def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(34 opaque_tx, _, blob_kzg_commitments, _ = get_sample_blob_tx( spec, blob_count=rng.randint(0, spec.config.MAX_BLOBS_PER_BLOCK), rng=rng) block.body.execution_payload.transactions.append(opaque_tx) - block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state) + block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) block.body.blob_kzg_commitments = blob_kzg_commitments return block @@ -264,6 +264,7 @@ def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(34 def random_block_electra(spec, state, signed_blocks, scenario_state, rng=Random(3456)): block = random_block_deneb(spec, state, signed_blocks, scenario_state, rng=rng) block.body.execution_requests = get_random_execution_requests(spec, state, rng=rng) + block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) return block From 777c023babdce7dffad295bcb602ba26b014bb90 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 6 Jan 2025 08:46:48 -0600 Subject: [PATCH 50/60] Update unit test --- .../eth2spec/test/fulu/unittests/test_config_invariants.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py index fcf98c7e75..a0b8d30ac3 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py @@ -32,7 +32,3 @@ def test_polynomical_commitments_sampling(spec): @single_phase def test_networking(spec): assert spec.config.MAX_BLOBS_PER_BLOCK_FULU <= spec.MAX_BLOB_COMMITMENTS_PER_BLOCK - assert ( - spec.config.MAX_REQUEST_BLOB_SIDECARS_FULU == - spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.config.MAX_BLOBS_PER_BLOCK_FULU - ) From d1d50cc539a11d04f78ea60ebae5b423353f17b9 Mon Sep 17 00:00:00 2001 From: NC <17676176+ensi321@users.noreply.github.com> Date: Mon, 6 Jan 2025 13:34:47 -0800 Subject: [PATCH 51/60] Remove electra.BlobSidecar --- specs/electra/p2p-interface.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index d0663943f1..bd9020c0fa 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -116,15 +116,6 @@ The following validations are removed: *[Modified in Electra:EIP7691]* -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|------------------------|-----------------------| -| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | -| `ELECTRA_FORK_VERSION` | 
`electra.BlobSidecar` | - Request Content: ``` @@ -151,15 +142,6 @@ No more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` may be requested at a time. *[Modified in Electra:EIP7691]* -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|------------------------|-----------------------| -| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | -| `ELECTRA_FORK_VERSION` | `electra.BlobSidecar` | - Request Content: ``` From 44cecd2caa0345bae46341641738608f1d8e58fe Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 7 Jan 2025 18:31:04 +0100 Subject: [PATCH 52/60] fix bellatrix constant too --- specs/bellatrix/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/bellatrix/p2p-interface.md b/specs/bellatrix/p2p-interface.md index 1f4c815660..5d8425e888 100644 --- a/specs/bellatrix/p2p-interface.md +++ b/specs/bellatrix/p2p-interface.md @@ -148,8 +148,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: #### Why was the max gossip message size increased at Bellatrix? With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic -field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in -place at Phase 0, so GOSSIP_MAX_SIZE has increased to 10 Mib on the network. +field -- `transactions` -- which can validly exceed the `MAX_PAYLOAD_SIZE` limit (1 MiB) put in +place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 Mib on the network. At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction filled entirely with data at a cost of 16 gas per byte can create a valid `ExecutionPayload` of ~2 MiB. Thus we need a size limit to at least account for From 3fced0903f211118b8c02e0467b925c41c4209da Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 7 Jan 2025 16:28:01 -0600 Subject: [PATCH 53/60] Add fork test with inactive, compounding validator with excess balance --- .../electra/fork/test_electra_fork_basic.py | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py index aade4a1605..4416063b39 100644 --- a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py @@ -151,6 +151,40 @@ def test_fork_has_compounding_withdrawal_credential(spec, phases, state): )] +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@spec_test +@with_state +@with_meta_tags(ELECTRA_FORK_TEST_META_TAGS) +def test_fork_inactive_compounding_validator_with_excess_balance(spec, phases, state): + index = 0 + post_spec = phases[ELECTRA] + validator = state.validators[index] + + # set validator balance greater than min_activation_balance + state.balances[index] = post_spec.MIN_ACTIVATION_BALANCE + 1 + # set validator as not active yet + validator.activation_epoch = spec.FAR_FUTURE_EPOCH + # set validator activation eligibility epoch to the latest finalized epoch + validator.activation_eligibility_epoch = state.finalized_checkpoint.epoch + # give the validator compounding withdrawal credentials + validator.withdrawal_credentials = post_spec.COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:] + + post_state = yield from run_fork_test(post_spec, state) + + # the validator cannot be activated again + assert 
post_state.validators[index].activation_eligibility_epoch == spec.FAR_FUTURE_EPOCH + # the validator should now have a zero balance + assert post_state.balances[index] == 0 + # there should be a single pending deposit for this validator + assert post_state.pending_deposits == [post_spec.PendingDeposit( + pubkey=validator.pubkey, + withdrawal_credentials=validator.withdrawal_credentials, + amount=state.balances[index], + signature=spec.bls.G2_POINT_AT_INFINITY, + slot=spec.GENESIS_SLOT, + )] + + @with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_state From 8e376dc03b8c9c16d041d3e4579d4e9ede51ba3c Mon Sep 17 00:00:00 2001 From: NC <17676176+ensi321@users.noreply.github.com> Date: Tue, 7 Jan 2025 15:53:27 -0800 Subject: [PATCH 54/60] Use SubnetID for sync committee --- specs/altair/validator.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 3602377acd..00dca30308 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -295,7 +295,7 @@ The `subnet_id` is derived from the position in the sync committee such that the *Note*: This function returns multiple deduplicated subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees. ```python -def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[uint64]: +def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[SubnetID]: next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch): sync_committee = state.current_sync_committee @@ -305,7 +305,7 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali target_pubkey = state.validators[validator_index].pubkey sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey] return set([ - uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)) + SubnetID(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)) for index in sync_committee_indices ]) ``` From f02275eb27332f80fb8d82652ee23216c063bd75 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 12:16:07 -0600 Subject: [PATCH 55/60] Revert BlobSidecarsByRoot/Range version bump --- specs/electra/p2p-interface.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index bd9020c0fa..5064676f18 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -19,8 +19,8 @@ - [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) - [Messages](#messages) - - [BlobSidecarsByRoot v2](#blobsidecarsbyroot-v2) - - [BlobSidecarsByRange v2](#blobsidecarsbyrange-v2) + - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) + - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) @@ -110,9 +110,9 @@ The following validations are removed: #### Messages -##### BlobSidecarsByRoot v2 +##### BlobSidecarsByRoot v1 -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/2/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` *[Modified in Electra:EIP7691]* @@ -136,9 +136,9 @@ Response Content: No more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` may be requested at a time. 
-##### BlobSidecarsByRange v2 +##### BlobSidecarsByRange v1 -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/2/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` *[Modified in Electra:EIP7691]* From db52011909dee88236aa28669479d5fab9d7b006 Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Wed, 8 Jan 2025 14:11:08 -0600 Subject: [PATCH 56/60] Bump version to 1.5.0-beta.0 --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index e7fd637b5a..ba25d3754e 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.5.0-alpha.10 +1.5.0-beta.0 From 454bd57cd0fd0ead7012b1ab81460f2fd7a5f49f Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 14:20:01 -0600 Subject: [PATCH 57/60] Update config files & fix some nits --- configs/mainnet.yaml | 4 +--- configs/minimal.yaml | 4 +--- specs/bellatrix/p2p-interface.md | 2 +- specs/phase0/p2p-interface.md | 2 +- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index deb3dcf5fe..e54db49661 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -115,15 +115,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 460474ebf7..a15314bb1f 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -116,15 +116,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/specs/bellatrix/p2p-interface.md b/specs/bellatrix/p2p-interface.md index 5d8425e888..b2d28cf1f4 100644 --- a/specs/bellatrix/p2p-interface.md +++ b/specs/bellatrix/p2p-interface.md @@ -149,7 +149,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic field -- `transactions` -- which can validly exceed the `MAX_PAYLOAD_SIZE` limit (1 MiB) put in -place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 Mib on the network. +place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 MiB on the network. At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction filled entirely with data at a cost of 16 gas per byte can create a valid `ExecutionPayload` of ~2 MiB. 
Thus we need a size limit to at least account for diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index e400dff58c..f3d9038abd 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -199,7 +199,7 @@ This section outlines configurations that are used in this spec. | Name | Value | Description | |---|---|---| -| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages / RPC chunks. | +| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages / RPC chunks | | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) | | `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks | From 5127929733ed14c5f06b0dc675f575daaac9a155 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 14:43:54 -0600 Subject: [PATCH 58/60] Try to polish new paragraphs a bit --- specs/phase0/p2p-interface.md | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index f3d9038abd..ea51d96dfd 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -242,7 +242,7 @@ Maximum message sizes are derived from the maximum payload size that the network ```python def max_compressed_len(n: uint64) -> uint64: - # Worst-case compressed length for a given payload of size n when using snappy + # Worst-case compressed length for a given payload of size n when using snappy: # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 return uint64(32 + n + n / 6) ``` @@ -534,9 +534,9 @@ Size limits are placed both on the [`RPCMsg`](https://github.com/libp2p/specs/bl Clients MUST reject and MUST NOT emit or propagate messages whose size exceed the following limits: -* the size of the encoded `RPCMsg`, including control messages, framing, topics etc, must not exceed `max_message_size()` -* the size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. -* the size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. +* The size of the encoded `RPCMsg` (including control messages, framing, topics, etc) must not exceed `max_message_size()`. +* The size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. +* The size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. ### The Req/Resp domain @@ -1715,19 +1715,17 @@ It is advisable to derive these lengths from the SSZ type definitions in use, to #### Why is the message size defined in terms of application payload? -When transmitting messages over gossipsub and / or req/resp, we want to ensure that the same payload sizes are supported no matter the underlying transport, decoupling the consensus layer from libp2p-induced overhead and the particular transmission strategy. 
+When transmitting messages over gossipsub and/or the req/resp domain, we want to ensure that the same payload sizes are supported regardless of the underlying transport, decoupling the consensus layer from libp2p-induced overhead and the particular transmission strategy. -To derive "encoded size limits" from desired application sizes we take into account snappy compression and framing overhead. +To derive "encoded size limits" from desired application sizes, we take into account snappy compression and framing overhead. -In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame - the limit is set such that at least one max-sized application-level message together with a small amount (1kb) of gossipsub overhead is allowed - implementations are free to pack multiple smaller application messages into a single gossipsub frame, and / or combine it with control messages as they see fit. - -The limit is set on the uncompressed payload size in particular to protect against decompression bombs - although +In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame. The limit is set such that at least one max-sized application-level message together with a small amount (1 KiB) of gossipsub overhead is allowed. Implementations are free to pack multiple smaller application messages into a single gossipsub frame, and/or combine it with control messages as they see fit. #### Why is there a limit on message sizes at all? -The message size limit protects against several forms of DoS and network-based amplification attacks and provide upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability. +The message size limit protects against several forms of DoS and network-based amplification attacks and provides upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability. -In particular, blocks which at the time of writing is the only message type without a practical SSZ-derived upper bound on size cannot be fully verified synchronously as part of gossipsub validity checks meaning that there exist cases where invalid messages signed by a validator may be amplified by the network. +In particular, blocks—-currently the only message type without a practical SSZ-derived upper bound on size—-cannot be fully verified synchronously as part of gossipsub validity checks. This means that there exist cases where invalid messages signed by a validator may be amplified by the network. 
## libp2p implementations matrix From e8eb367da26e908d0f0e7219bc9c5ad45e5b5e7e Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 14:46:16 -0600 Subject: [PATCH 59/60] Fix two more small nits --- specs/phase0/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index ea51d96dfd..ab3306d235 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -199,7 +199,7 @@ This section outlines configurations that are used in this spec. | Name | Value | Description | |---|---|---| -| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages / RPC chunks | +| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages and RPC chunks | | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) | | `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks | @@ -294,7 +294,7 @@ This defines both the type of data being sent on the topic and how the data fiel - `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encodings) section for further details. -Clients MUST reject messages with unknown topic. +Clients MUST reject messages with an unknown topic. *Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known. From d867b84f093fe5270da7e7a49a9c9ea1be7c538c Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 15:34:06 -0600 Subject: [PATCH 60/60] Add back remark about compression bombs --- specs/phase0/p2p-interface.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index ab3306d235..1196fca90d 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -1721,6 +1721,8 @@ To derive "encoded size limits" from desired application sizes, we take into acc In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame. The limit is set such that at least one max-sized application-level message together with a small amount (1 KiB) of gossipsub overhead is allowed. Implementations are free to pack multiple smaller application messages into a single gossipsub frame, and/or combine it with control messages as they see fit. +The limit is set on the uncompressed payload size in particular to protect against decompression bombs. + #### Why is there a limit on message sizes at all? The message size limit protects against several forms of DoS and network-based amplification attacks and provides upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability.