diff --git a/Cargo.lock b/Cargo.lock
index 18517f94788a..67ace673addb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -15833,6 +15833,7 @@ dependencies = [
  "polkadot-primitives-test-helpers",
  "polkadot-subsystem-bench",
  "rand_chacha",
+ "rstest",
  "sc-keystore",
  "sc-network",
  "sp-application-crypto 30.0.0",
diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml
index 08059353033d..de07937ffb0a 100644
--- a/polkadot/node/network/statement-distribution/Cargo.toml
+++ b/polkadot/node/network/statement-distribution/Cargo.toml
@@ -44,6 +44,7 @@ polkadot-primitives = { workspace = true, features = ["test"] }
 polkadot-primitives-test-helpers = { workspace = true }
 rand_chacha = { workspace = true, default-features = true }
 polkadot-subsystem-bench = { workspace = true }
+rstest = { workspace = true }
 
 [[bench]]
 name = "statement-distribution-regression-bench"
diff --git a/polkadot/node/network/statement-distribution/src/error.rs b/polkadot/node/network/statement-distribution/src/error.rs
index d7f52162fe23..8fe8f71308ac 100644
--- a/polkadot/node/network/statement-distribution/src/error.rs
+++ b/polkadot/node/network/statement-distribution/src/error.rs
@@ -82,7 +82,7 @@ pub enum Error {
 	FetchValidatorGroups(RuntimeApiError),
 
 	#[error("Fetching claim queue failed {0:?}")]
-	FetchClaimQueue(runtime::Error),
+	FetchClaimQueue(RuntimeApiError),
 
 	#[error("Attempted to share statement when not a validator or not assigned")]
 	InvalidShare,
diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs
index c79ae3953ad9..1ab25693a777 100644
--- a/polkadot/node/network/statement-distribution/src/v2/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs
@@ -46,12 +46,17 @@ use polkadot_node_subsystem_util::{
 	backing_implicit_view::View as ImplicitView,
 	reputation::ReputationAggregator,
 	runtime::{
-		fetch_claim_queue, request_min_backing_votes, ClaimQueueSnapshot, ProspectiveParachainsMode,
+		request_min_backing_votes, request_node_features, ClaimQueueSnapshot,
+		ProspectiveParachainsMode,
 	},
 };
 
 use polkadot_primitives::{
-	vstaging::CoreState, AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex,
-	GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo,
+	node_features::FeatureIndex,
+	vstaging::{
+		transpose_claim_queue, CandidateDescriptorVersion, CoreState, TransposedClaimQueue,
+	},
+	AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, GroupIndex,
+	GroupRotationInfo, Hash, Id as ParaId, IndexedVec, NodeFeatures, SessionIndex, SessionInfo,
 	SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex,
 };
@@ -69,7 +74,7 @@ use futures::{
 use std::{
 	collections::{
 		hash_map::{Entry, HashMap},
-		HashSet,
+		BTreeMap, HashSet,
 	},
 	time::{Duration, Instant},
 };
@@ -137,6 +142,12 @@ const COST_UNREQUESTED_RESPONSE_STATEMENT: Rep =
 	Rep::CostMajor("Un-requested Statement In Response");
 const COST_INACCURATE_ADVERTISEMENT: Rep =
 	Rep::CostMajor("Peer advertised a candidate inaccurately");
+const COST_INVALID_DESCRIPTOR_VERSION: Rep =
+	Rep::CostMajor("Candidate Descriptor version is invalid");
+const COST_INVALID_CORE_INDEX: Rep =
+	Rep::CostMajor("Candidate Descriptor contains an invalid core index");
+const COST_INVALID_SESSION_INDEX: Rep =
+	Rep::CostMajor("Candidate Descriptor contains an invalid session index");
 
 const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request");
 const COST_INVALID_REQUEST_BITFIELD_SIZE: Rep =
@@ -156,6 +167,7 @@ struct PerRelayParentState {
 	statement_store: StatementStore,
 	seconding_limit: usize,
 	session: SessionIndex,
+	transposed_cq: TransposedClaimQueue,
 	groups_per_para: HashMap<ParaId, Vec<GroupIndex>>,
 	disabled_validators: HashSet<ValidatorIndex>,
 }
@@ -219,10 +231,17 @@ struct PerSessionState {
 	// getting the topology from the gossip-support subsystem
 	grid_view: Option<grid::SessionTopologyView>,
 	local_validator: Option<LocalValidatorIndex>,
+	// `true` if v2 candidate receipts are allowed by the runtime
+	v2_receipts: bool,
 }
 
 impl PerSessionState {
-	fn new(session_info: SessionInfo, keystore: &KeystorePtr, backing_threshold: u32) -> Self {
+	fn new(
+		session_info: SessionInfo,
+		keystore: &KeystorePtr,
+		backing_threshold: u32,
+		v2_receipts: bool,
+	) -> Self {
 		let groups = Groups::new(session_info.validator_groups.clone(), backing_threshold);
 		let mut authority_lookup = HashMap::new();
 		for (i, ad) in session_info.discovery_keys.iter().cloned().enumerate() {
@@ -235,7 +254,14 @@ impl PerSessionState {
 		)
 		.map(|(_, index)| LocalValidatorIndex::Active(index));
 
-		PerSessionState { session_info, groups, authority_lookup, grid_view: None, local_validator }
+		PerSessionState {
+			session_info,
+			groups,
+			authority_lookup,
+			grid_view: None,
+			local_validator,
+			v2_receipts,
+		}
 	}
 
 	fn supply_topology(
@@ -271,6 +297,11 @@ impl PerSessionState {
 	fn is_not_validator(&self) -> bool {
 		self.grid_view.is_some() && self.local_validator.is_none()
 	}
+
+	/// Returns `true` if v2 candidate receipts are enabled
+	fn candidate_receipt_v2_enabled(&self) -> bool {
+		self.v2_receipts
+	}
 }
 
 pub(crate) struct State {
@@ -280,7 +311,7 @@ pub(crate) struct State {
 	implicit_view: ImplicitView,
 	candidates: Candidates,
 	per_relay_parent: HashMap<Hash, PerRelayParentState>,
-	per_session: HashMap<SessionIndex, PerSessionState>,
+	per_session: BTreeMap<SessionIndex, PerSessionState>,
 	// Topology might be received before first leaf update, where we
 	// initialize the per_session_state, so cache it here until we
 	// are able to use it.
@@ -299,7 +330,7 @@ impl State {
 			implicit_view: Default::default(),
 			candidates: Default::default(),
 			per_relay_parent: HashMap::new(),
-			per_session: HashMap::new(),
+			per_session: BTreeMap::new(),
 			peers: HashMap::new(),
 			keystore,
 			authorities: HashMap::new(),
@@ -615,8 +646,18 @@ pub(crate) async fn handle_active_leaves_update(
 			let minimum_backing_votes =
 				request_min_backing_votes(new_relay_parent, session_index, ctx.sender()).await?;
-			let mut per_session_state =
-				PerSessionState::new(session_info, &state.keystore, minimum_backing_votes);
+			let node_features =
+				request_node_features(new_relay_parent, session_index, ctx.sender()).await?;
+			let mut per_session_state = PerSessionState::new(
+				session_info,
+				&state.keystore,
+				minimum_backing_votes,
+				node_features
+					.unwrap_or(NodeFeatures::EMPTY)
+					.get(FeatureIndex::CandidateReceiptV2 as usize)
+					.map(|b| *b)
+					.unwrap_or(false),
+			);
 			if let Some(topology) = state.unused_topologies.remove(&session_index) {
 				per_session_state.supply_topology(&topology.topology, topology.local_index);
 			}
@@ -662,12 +703,11 @@
 			.map_err(JfyiError::FetchValidatorGroups)?
 			.1;
 
-		let maybe_claim_queue = fetch_claim_queue(ctx.sender(), new_relay_parent)
-			.await
-			.unwrap_or_else(|err| {
-				gum::debug!(target: LOG_TARGET, ?new_relay_parent, ?err, "handle_active_leaves_update: `claim_queue` API not available");
-				None
-			});
+		let claim_queue = ClaimQueueSnapshot(polkadot_node_subsystem_util::request_claim_queue(new_relay_parent, ctx.sender())
+			.await
+			.await
+			.map_err(JfyiError::RuntimeApiUnavailable)?
+			.map_err(JfyiError::FetchClaimQueue)?);
 
 		let local_validator = per_session.local_validator.and_then(|v| {
 			if let LocalValidatorIndex::Active(idx) = v {
@@ -676,9 +716,8 @@
 					&per_session.groups,
 					&availability_cores,
 					&group_rotation_info,
-					&maybe_claim_queue,
+					&claim_queue,
 					seconding_limit,
-					max_candidate_depth,
 				)
 			} else {
 				Some(LocalValidatorState { grid_tracker: GridTracker::default(), active: None })
@@ -688,11 +727,12 @@
 		let groups_per_para = determine_groups_per_para(
 			availability_cores,
 			group_rotation_info,
-			&maybe_claim_queue,
-			max_candidate_depth,
+			&claim_queue,
 		)
 		.await;
 
+		let transposed_cq = transpose_claim_queue(claim_queue.0);
+
 		state.per_relay_parent.insert(
 			new_relay_parent,
 			PerRelayParentState {
@@ -702,6 +742,7 @@
 				session: session_index,
 				groups_per_para,
 				disabled_validators,
+				transposed_cq,
 			},
 		);
 	}
@@ -743,9 +784,8 @@ fn find_active_validator_state(
 	groups: &Groups,
 	availability_cores: &[CoreState],
 	group_rotation_info: &GroupRotationInfo,
-	maybe_claim_queue: &Option<ClaimQueueSnapshot>,
+	claim_queue: &ClaimQueueSnapshot,
 	seconding_limit: usize,
-	max_candidate_depth: usize,
 ) -> Option<LocalValidatorState> {
 	if groups.all().is_empty() {
 		return None
 	}
@@ -754,22 +794,7 @@ fn find_active_validator_state(
 	let our_group = groups.by_validator_index(validator_index)?;
 
 	let core_index = group_rotation_info.core_for_group(our_group, availability_cores.len());
-	let paras_assigned_to_core = if let Some(claim_queue) = maybe_claim_queue {
-		claim_queue.iter_claims_for_core(&core_index).copied().collect()
-	} else {
-		availability_cores
-			.get(core_index.0 as usize)
-			.and_then(|core_state| match core_state {
-				CoreState::Scheduled(scheduled_core) => Some(scheduled_core.para_id),
-				CoreState::Occupied(occupied_core) if max_candidate_depth >= 1 => occupied_core
-					.next_up_on_available
-					.as_ref()
-					.map(|scheduled_core| scheduled_core.para_id),
-				CoreState::Free | CoreState::Occupied(_) => None,
-			})
-			.into_iter()
-			.collect()
-	};
+	let paras_assigned_to_core = claim_queue.iter_claims_for_core(&core_index).copied().collect();
 
 	let group_validators = groups.get(our_group)?.to_owned();
 	Some(LocalValidatorState {
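The claim queue snapshot fetched above is keyed by core, while the descriptor check added to `handle_response` needs the opposite orientation (para to cores); that is what `transpose_claim_queue` produces and what gets cached per relay parent as `transposed_cq`. A rough sketch of the transposition with plain `std` collections — the real `ClaimQueueSnapshot`/`TransposedClaimQueue` types differ (the transposed form also tracks claim depth), so the type aliases here are simplified stand-ins:

```rust
use std::collections::{BTreeMap, BTreeSet, VecDeque};

// Simplified stand-ins: core index -> ordered para claims, and its transposition.
type CoreIndex = u32;
type ParaId = u32;
type ClaimQueue = BTreeMap<CoreIndex, VecDeque<ParaId>>;
type TransposedClaimQueue = BTreeMap<ParaId, BTreeSet<CoreIndex>>;

// Flip the orientation: for each para, record every core it has a claim on.
fn transpose(cq: &ClaimQueue) -> TransposedClaimQueue {
	let mut out = TransposedClaimQueue::new();
	for (core, paras) in cq {
		for para in paras {
			out.entry(*para).or_default().insert(*core);
		}
	}
	out
}

fn main() {
	let mut cq = ClaimQueue::new();
	cq.insert(0, VecDeque::from([1000, 1000]));
	cq.insert(1, VecDeque::from([1001, 1002]));
	let transposed = transpose(&cq);
	// Para 1000 only has claims on core 0, so a descriptor committing to core 1 is invalid.
	assert!(transposed.get(&1000).map_or(false, |cores| cores.contains(&0)));
	assert!(!transposed.get(&1000).map_or(false, |cores| cores.contains(&1)));
}
```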
@@ -2176,37 +2201,18 @@ async fn provide_candidate_to_grid(
 async fn determine_groups_per_para(
 	availability_cores: Vec<CoreState>,
 	group_rotation_info: GroupRotationInfo,
-	maybe_claim_queue: &Option<ClaimQueueSnapshot>,
-	max_candidate_depth: usize,
+	claim_queue: &ClaimQueueSnapshot,
 ) -> HashMap<ParaId, Vec<GroupIndex>> {
 	let n_cores = availability_cores.len();
 
 	// Determine the core indices occupied by each para at the current relay parent. To support
 	// on-demand parachains we also consider the core indices at next blocks.
-	let schedule: HashMap<CoreIndex, Vec<ParaId>> = if let Some(claim_queue) = maybe_claim_queue {
+	let schedule: HashMap<CoreIndex, Vec<ParaId>> = claim_queue
 		.iter_all_claims()
 		.map(|(core_index, paras)| (*core_index, paras.iter().copied().collect()))
-		.collect()
-	} else {
-		availability_cores
-			.into_iter()
-			.enumerate()
-			.filter_map(|(index, core)| match core {
-				CoreState::Scheduled(scheduled_core) =>
-					Some((CoreIndex(index as u32), vec![scheduled_core.para_id])),
-				CoreState::Occupied(occupied_core) =>
-					if max_candidate_depth >= 1 {
-						occupied_core.next_up_on_available.map(|scheduled_core| {
-							(CoreIndex(index as u32), vec![scheduled_core.para_id])
-						})
-					} else {
-						None
-					},
-				CoreState::Free => None,
-			})
-			.collect()
-	};
+		.collect();
+
 	let mut groups_per_para = HashMap::new();
 	// Map from `CoreIndex` to `GroupIndex` and collect as `HashMap`.
@@ -2353,7 +2359,7 @@ async fn handle_incoming_manifest_common<'a, Context>(
 	peer: PeerId,
 	peers: &HashMap<PeerId, PeerState>,
 	per_relay_parent: &'a mut HashMap<Hash, PerRelayParentState>,
-	per_session: &'a HashMap<SessionIndex, PerSessionState>,
+	per_session: &'a BTreeMap<SessionIndex, PerSessionState>,
 	candidates: &mut Candidates,
 	candidate_hash: CandidateHash,
 	relay_parent: Hash,
@@ -3106,11 +3112,12 @@ pub(crate) async fn handle_response(
 ) {
 	let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } =
 		response.candidate_identifier();
+	let peer = response.requested_peer().clone();
 
 	gum::trace!(
 		target: LOG_TARGET,
 		?candidate_hash,
-		peer = ?response.requested_peer(),
+		?peer,
 		"Received response",
 	);
@@ -3174,6 +3181,62 @@
 			"Successfully received candidate"
 		);
 
+			if !per_session.candidate_receipt_v2_enabled() &&
+				candidate.descriptor.version() == CandidateDescriptorVersion::V2
+			{
+				gum::debug!(
+					target: LOG_TARGET,
+					?candidate_hash,
+					?peer,
+					"Version 2 candidate receipts are not enabled by the runtime"
+				);
+
+				// Punish peer.
+				modify_reputation(
+					reputation,
+					ctx.sender(),
+					peer,
+					COST_INVALID_DESCRIPTOR_VERSION,
+				)
+				.await;
+				return
+			}
+
+			// Get the latest session index & check candidate descriptor session index.
+			match (candidate.descriptor.session_index(), state.per_session.last_key_value()) {
+				(Some(session_index), Some((latest_session_index, _))) => {
+					if &session_index != latest_session_index {
+						// Punish peer.
+						modify_reputation(
+							reputation,
+							ctx.sender(),
+							peer,
+							COST_INVALID_SESSION_INDEX,
+						)
+						.await;
+						return
+					}
+					// TODO: determine if we need to buffer candidates at session boundaries.
+				},
+				_ => {},
+			}
+
+			// Validate the core index.
+			if let Err(err) = candidate.check_core_index(&relay_parent_state.transposed_cq) {
+				gum::debug!(
+					target: LOG_TARGET,
+					?candidate_hash,
+					?err,
+					?peer,
+					"Received candidate has invalid core index"
+				);
+
+				// Punish peer.
+				modify_reputation(reputation, ctx.sender(), peer, COST_INVALID_CORE_INDEX)
+					.await;
+				return
+			}
+
 			(candidate, persisted_validation_data, statements)
 		},
 	};
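To summarize the acceptance path added above: a fetched candidate is only kept when its descriptor version is permitted by the runtime, its session index (when present) matches the latest known session, and its committed core index is consistent with the transposed claim queue; each failure maps to a dedicated reputation cost. A condensed illustration of that ordering using simplified stand-in types rather than the subsystem's real API:

```rust
// Simplified stand-ins for the checks performed in `handle_response`.
#[derive(Debug, PartialEq)]
enum Rejection {
	InvalidDescriptorVersion,
	InvalidSessionIndex,
	InvalidCoreIndex,
}

struct Candidate {
	is_v2_descriptor: bool,
	session_index: Option<u32>,
	core_index_ok: bool, // stands in for `check_core_index(&transposed_cq)`
}

fn accept(c: &Candidate, v2_receipts_enabled: bool, latest_session: u32) -> Result<(), Rejection> {
	if c.is_v2_descriptor && !v2_receipts_enabled {
		return Err(Rejection::InvalidDescriptorVersion)
	}
	if let Some(session) = c.session_index {
		if session != latest_session {
			return Err(Rejection::InvalidSessionIndex)
		}
	}
	if !c.core_index_ok {
		return Err(Rejection::InvalidCoreIndex)
	}
	Ok(())
}

fn main() {
	let c = Candidate { is_v2_descriptor: true, session_index: Some(6), core_index_ok: true };
	assert_eq!(accept(&c, false, 6), Err(Rejection::InvalidDescriptorVersion));
	assert_eq!(accept(&c, true, 7), Err(Rejection::InvalidSessionIndex));
	assert_eq!(accept(&c, true, 6), Ok(()));
}
```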
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
index 9f2c36ad1018..abc267a8d3fc 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs
@@ -33,9 +33,7 @@ use polkadot_node_subsystem::messages::{
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_node_subsystem_util::TimeoutExt;
 use polkadot_primitives::{
-	vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState},
-	AssignmentPair, AsyncBackingParams, Block, BlockNumber, GroupRotationInfo, HeadData, Header,
-	IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, SessionInfo, ValidatorPair,
+	vstaging::{CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreState}, AssignmentPair, AsyncBackingParams, Block, BlockNumber, GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, SessionInfo, ValidatorPair
 };
 use sc_keystore::LocalKeystore;
 use sc_network::ProtocolName;
@@ -96,6 +94,7 @@ struct TestState {
 	validators: Vec<ValidatorPair>,
 	session_info: SessionInfo,
 	req_sender: async_channel::Sender<sc_network::config::IncomingRequest>,
+	node_features: NodeFeatures,
 }
 
 impl TestState {
@@ -174,7 +173,11 @@ impl TestState {
 			random_seed: [0u8; 32],
 		};
 
-		TestState { config, local, validators, session_info, req_sender }
+		let mut node_features = NodeFeatures::new();
+		node_features.resize(4, false);
+		node_features.set(FeatureIndex::CandidateReceiptV2 as usize, true);
+
+		TestState { config, local, validators, session_info, req_sender, node_features }
 	}
 
 	fn make_dummy_leaf(&self, relay_parent: Hash) -> TestLeaf {
@@ -186,6 +189,7 @@ impl TestState {
 		relay_parent: Hash,
 		groups_for_first_para: usize,
 	) -> TestLeaf {
+		let mut cq = BTreeMap::new();
 		TestLeaf {
 			number: 1,
 			hash: relay_parent,
@@ -193,8 +197,10 @@ impl TestState {
 			session: 1,
 			availability_cores: self.make_availability_cores(|i| {
 				let para_id = if i < groups_for_first_para {
+					cq.entry(CoreIndex(i as u32)).or_insert_with(|| vec![ParaId::from(0u32), ParaId::from(0u32)].into());
 					ParaId::from(0u32)
 				} else {
+					cq.entry(CoreIndex(i as u32)).or_insert_with(|| vec![ParaId::from(i), ParaId::from(i)].into());
 					ParaId::from(i as u32)
 				};
@@ -213,6 +219,7 @@ impl TestState {
 			})
 			.collect(),
 			minimum_backing_votes: 2,
+			claim_queue: ClaimQueueSnapshot(cq),
 		}
 	}
@@ -232,7 +239,7 @@ impl TestState {
 		TestLeaf { minimum_backing_votes, ..self.make_dummy_leaf(relay_parent) }
 	}
 
-	fn make_availability_cores(&self, f: impl Fn(usize) -> CoreState) -> Vec<CoreState> {
+	fn make_availability_cores(&self, f: impl FnMut(usize) -> CoreState) -> Vec<CoreState> {
 		(0..self.session_info.validator_groups.len()).map(f).collect()
 	}
@@ -427,6 +434,7 @@ struct TestLeaf {
 	pub disabled_validators: Vec<ValidatorIndex>,
 	para_data: Vec<(ParaId, PerParaData)>,
 	minimum_backing_votes: u32,
+	claim_queue: ClaimQueueSnapshot,
 }
 
 impl TestLeaf {
@@ -577,6 +585,7 @@ async fn handle_leaf_activation(
 		availability_cores,
 		disabled_validators,
 		minimum_backing_votes,
+		claim_queue,
 	} = leaf;
 
 	assert_matches!(
@@ -623,7 +632,7 @@ async fn handle_leaf_activation(
 			_parent,
 			RuntimeApiRequest::Version(tx),
 		)) => {
-			tx.send(Ok(RuntimeApiRequest::DISABLED_VALIDATORS_RUNTIME_REQUIREMENT)).unwrap();
+			tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap();
 		},
 		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
 			parent,
@@ -675,6 +684,18 @@ async fn handle_leaf_activation(
 			};
 			tx.send(Ok((validator_groups, group_rotation_info))).unwrap();
 		},
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+			parent,
+			RuntimeApiRequest::NodeFeatures(_session_index, tx),
+		)) if parent == *hash => {
+			tx.send(Ok(test_state.node_features.clone())).unwrap();
+		},
+		AllMessages::RuntimeApi(RuntimeApiMessage::Request(
+			parent,
+			RuntimeApiRequest::ClaimQueue(tx),
+		)) if parent == *hash => {
+			tx.send(Ok(claim_queue.0.clone())).unwrap();
+		},
 		AllMessages::ProspectiveParachains(
 			ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx),
 		) => {
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
index dcb90bacdcde..bd8b429fc6ea 100644
--- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
+++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs
@@ -21,13 +21,18 @@ use codec::{Decode, Encode};
 use polkadot_node_network_protocol::{
 	request_response::v2 as request_v2, v2::BackedCandidateManifest,
 };
-use polkadot_primitives_test_helpers::make_candidate;
+use polkadot_primitives_test_helpers::{make_candidate, make_candidate_v2};
 use sc_network::config::{
 	IncomingRequest as RawIncomingRequest, OutgoingResponse as RawOutgoingResponse,
 };
 
-#[test]
-fn cluster_peer_allowed_to_send_incomplete_statements() {
+use polkadot_primitives::vstaging::MutateDescriptorV2;
+use rstest::rstest;
+
+#[rstest]
+#[case(false)]
+#[case(true)]
+fn cluster_peer_allowed_to_send_incomplete_statements(#[case] v2_descriptor: bool) {
 	let group_size = 3;
 	let config = TestConfig {
 		validator_count: 20,
@@ -48,14 +53,29 @@
 
 		let test_leaf = state.make_dummy_leaf(relay_parent);
 
-		let (candidate, pvd) = make_candidate(
-			relay_parent,
-			1,
-			local_para,
-			test_leaf.para_data(local_para).head_data.clone(),
-			vec![4, 5, 6].into(),
-			Hash::repeat_byte(42).into(),
-		);
+		let (candidate, pvd) =
+			if v2_descriptor {
+				let (mut candidate, pvd) = make_candidate_v2(
+					relay_parent,
+					1,
+					local_para,
+					test_leaf.para_data(local_para).head_data.clone(),
+					vec![4, 5, 6].into(),
+					Hash::repeat_byte(42).into(),
+				);
+				candidate.descriptor.set_core_index(CoreIndex(local_group_index.0));
+				(candidate, pvd)
+			} else {
+				make_candidate(
+					relay_parent,
+					1,
+					local_para,
+					test_leaf.para_data(local_para).head_data.clone(),
+					vec![4, 5, 6].into(),
+					Hash::repeat_byte(42).into(),
+				)
+			};
+
 		let candidate_hash = candidate.hash();
 
 		let other_group_validators = state.group_validators(local_group_index, true);
@@ -925,6 +945,147 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() {
 	});
 }
 
+#[test]
+fn peer_reported_for_statements_with_invalid_core_index() {
+	let group_size = 3;
+	let config = TestConfig {
+		validator_count: 20,
+		group_size,
+		local_validator: LocalRole::Validator,
+		async_backing_params: None,
+	};
+
+	let relay_parent = Hash::repeat_byte(1);
+	let peer_a = PeerId::random();
+	let peer_b = PeerId::random();
+	let peer_c = PeerId::random();
+
+	test_harness(config, |state, mut overseer| async move {
+		let local_validator = state.local.clone().unwrap();
+		let local_group_index = local_validator.group_index.unwrap();
+		let local_para = ParaId::from(local_group_index.0);
+
+		let test_leaf = state.make_dummy_leaf(relay_parent);
+
+		let (mut candidate, pvd) = make_candidate_v2(
+			relay_parent,
+			1,
+			local_para,
+			test_leaf.para_data(local_para).head_data.clone(),
+			vec![4, 5, 6].into(),
+			Hash::repeat_byte(42).into(),
+		);
+
+		candidate.descriptor.set_core_index(CoreIndex(100));
+
+		let candidate_hash = candidate.hash();
+
+		let other_group_validators = state.group_validators(local_group_index, true);
+		let v_a = other_group_validators[0];
+		let v_b = other_group_validators[1];
+
+		// peer A is in group, has relay parent in view.
+		// peer B is in group, has no relay parent in view.
+		// peer C is not in group, has relay parent in view.
+		{
+			connect_peer(
+				&mut overseer,
+				peer_a.clone(),
+				Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()),
+			)
+			.await;
+
+			connect_peer(
+				&mut overseer,
+				peer_b.clone(),
+				Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()),
+			)
+			.await;
+
+			connect_peer(&mut overseer, peer_c.clone(), None).await;
+
+			send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await;
+			send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await;
+		}
+
+		activate_leaf(&mut overseer, &test_leaf, &state, true, vec![]).await;
+
+		// Peer in cluster sends a statement, triggering a request.
+		{
+			let a_seconded = state
+				.sign_statement(
+					v_a,
+					CompactStatement::Seconded(candidate_hash),
+					&SigningContext { parent_hash: relay_parent, session_index: 1 },
+				)
+				.as_unchecked()
+				.clone();
+
+			send_peer_message(
+				&mut overseer,
+				peer_a.clone(),
+				protocol_v2::StatementDistributionMessage::Statement(relay_parent, a_seconded),
+			)
+			.await;
+
+			assert_matches!(
+				overseer.recv().await,
+				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r)))
+					if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST.into() => { }
+			);
+		}
+
+		// Send a request to the peer and mock its response with a candidate that commits to
+		// an invalid core index.
+		{
+			// The statement is signed with the correct signing context; only the candidate's
+			// core index is bogus.
+			let b_seconded_invalid = state
+				.sign_statement(
+					v_b,
+					CompactStatement::Seconded(candidate_hash),
+					&SigningContext { parent_hash: relay_parent, session_index: 1 },
+				)
+				.as_unchecked()
+				.clone();
+			let statements = vec![b_seconded_invalid.clone()];
+
+			handle_sent_request(
+				&mut overseer,
+				peer_a,
+				candidate_hash,
+				StatementFilter::blank(group_size),
+				candidate.clone(),
+				pvd.clone(),
+				statements,
+			)
+			.await;
+
+			assert_matches!(
+				overseer.recv().await,
+				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r)))
+					if p == peer_a && r == BENEFIT_VALID_STATEMENT.into() => { }
+			);
+
+			assert_matches!(
+				overseer.recv().await,
+				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r)))
+					if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { }
+			);
+
+			assert_matches!(
+				overseer.recv().await,
+				AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r)))
+					if p == peer_a && r == COST_INVALID_CORE_INDEX.into() => { }
+			);
+		}
+
+		overseer
+	});
+}
+
 #[test]
 fn peer_reported_for_providing_statements_with_wrong_validator_id() {
 	let group_size = 3;
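Rather than duplicating the cluster test for both descriptor flavours, it is parameterised with `rstest`: each `#[case]` value expands into its own test and is injected as the `v2_descriptor` argument. A minimal, self-contained illustration of the pattern (the test body here is a dummy, not part of the suite):

```rust
use rstest::rstest;

// Each `#[case(...)]` attribute produces a separate #[test] that runs the same body
// with the given argument, so both descriptor versions are exercised.
#[rstest]
#[case(false)]
#[case(true)]
fn runs_for_both_descriptor_versions(#[case] v2_descriptor: bool) {
	let expected_version = if v2_descriptor { 2 } else { 1 };
	assert!(expected_version >= 1);
}
```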
diff --git a/polkadot/primitives/src/vstaging/mod.rs b/polkadot/primitives/src/vstaging/mod.rs
index 01ed8fcd5e1a..f1612382d8e8 100644
--- a/polkadot/primitives/src/vstaging/mod.rs
+++ b/polkadot/primitives/src/vstaging/mod.rs
@@ -208,6 +208,10 @@ pub trait MutateDescriptorV2 {
 	fn set_erasure_root(&mut self, erasure_root: Hash);
 	/// Set the para head of the descriptor.
 	fn set_para_head(&mut self, para_head: Hash);
+	/// Set the core index of the descriptor.
+	fn set_core_index(&mut self, core_index: CoreIndex);
+	/// Set the session index of the descriptor.
+	fn set_session_index(&mut self, session_index: SessionIndex);
 }
 
 #[cfg(feature = "test")]
@@ -228,6 +232,14 @@ impl MutateDescriptorV2 for CandidateDescriptorV2 {
 		self.version = version;
 	}
 
+	fn set_core_index(&mut self, core_index: CoreIndex) {
+		self.core_index = core_index.0 as u16;
+	}
+
+	fn set_session_index(&mut self, session_index: SessionIndex) {
+		self.session_index = session_index;
+	}
+
 	fn set_persisted_validation_data_hash(&mut self, persisted_validation_data_hash: Hash) {
 		self.persisted_validation_data_hash = persisted_validation_data_hash;
 	}
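The new `MutateDescriptorV2` setters above combine with the `make_candidate_v2` helper added below to let tests forge descriptors that the new checks must reject. A sketch of that usage, assuming the `test` feature of `polkadot-primitives` is enabled; the concrete parameter values are illustrative:

```rust
// Sketch only: exercises the mutators introduced by this change.
use polkadot_primitives::{
	vstaging::{CommittedCandidateReceiptV2, MutateDescriptorV2},
	CoreIndex, Hash, HeadData, Id as ParaId,
};
use polkadot_primitives_test_helpers::make_candidate_v2;

fn forged_candidate() -> CommittedCandidateReceiptV2 {
	let relay_parent = Hash::repeat_byte(1);
	let parent_head: HeadData = vec![1, 2, 3].into();
	let (mut candidate, _pvd) = make_candidate_v2(
		relay_parent,
		1,
		ParaId::from(1u32),
		parent_head,
		vec![4, 5, 6].into(),
		Hash::repeat_byte(42).into(),
	);
	// Point the descriptor at a core the para has no claim on; statement-distribution
	// answers the fetched response with `COST_INVALID_CORE_INDEX` in that case.
	candidate.descriptor.set_core_index(CoreIndex(100));
	// A session index other than the latest known one is punished with
	// `COST_INVALID_SESSION_INDEX` instead.
	candidate.descriptor.set_session_index(2);
	candidate
}
```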
diff --git a/polkadot/primitives/test-helpers/src/lib.rs b/polkadot/primitives/test-helpers/src/lib.rs
index c2eccafef788..f8fac318f47c 100644
--- a/polkadot/primitives/test-helpers/src/lib.rs
+++ b/polkadot/primitives/test-helpers/src/lib.rs
@@ -23,13 +23,13 @@
 //! Note that `dummy_` prefixed values are meant to be fillers, that should not matter, and will
 //! contain randomness based data.
 use polkadot_primitives::{
-	vstaging::{CandidateDescriptorV2, CandidateReceiptV2, CommittedCandidateReceiptV2},
+	vstaging::{CandidateDescriptorV2, CandidateReceiptV2, CommittedCandidateReceiptV2, MutateDescriptorV2},
 	CandidateCommitments, CandidateDescriptor, CandidateReceipt, CollatorId, CollatorSignature,
 	CommittedCandidateReceipt, CoreIndex, Hash, HeadData, Id as ParaId, PersistedValidationData,
 	SessionIndex, ValidationCode, ValidationCodeHash, ValidatorId,
 };
 pub use rand;
-use sp_application_crypto::sr25519;
+use sp_application_crypto::{sr25519, ByteArray};
 use sp_keyring::Sr25519Keyring;
 use sp_runtime::generic::Digest;
@@ -199,9 +199,11 @@ pub fn dummy_collator() -> CollatorId {
 	CollatorId::from(sr25519::Public::default())
 }
 
-/// Create a meaningless collator signature.
+/// Create a meaningless collator signature. It is important that it is not all zeroes, as we'd
+/// confuse v1 and v2 descriptors.
 pub fn dummy_collator_signature() -> CollatorSignature {
-	CollatorSignature::from(sr25519::Signature::default())
+	CollatorSignature::from_slice(&mut (0..64).into_iter().collect::<Vec<u8>>().as_slice())
+		.expect("64 bytes; qed")
 }
 
 /// Create a meaningless persisted validation data.
@@ -250,6 +252,37 @@ pub fn make_candidate(
 	(candidate, pvd)
 }
 
+/// Create a meaningless v2 candidate, returning its receipt and PVD.
+pub fn make_candidate_v2(
+	relay_parent_hash: Hash,
+	relay_parent_number: u32,
+	para_id: ParaId,
+	parent_head: HeadData,
+	head_data: HeadData,
+	_validation_code_hash: ValidationCodeHash,
+) -> (CommittedCandidateReceiptV2, PersistedValidationData) {
+	let pvd = dummy_pvd(parent_head, relay_parent_number);
+	let commitments = CandidateCommitments {
+		head_data,
+		horizontal_messages: Default::default(),
+		upward_messages: Default::default(),
+		new_validation_code: None,
+		processed_downward_messages: 0,
+		hrmp_watermark: relay_parent_number,
+	};
+
+	let mut descriptor =
+		dummy_candidate_descriptor_v2(relay_parent_hash);
+	descriptor.set_para_id(para_id);
+	descriptor.set_persisted_validation_data_hash(pvd.hash());
+	// descriptor.set_validation_code_hash(validation_code_hash);
+	let candidate =
+		CommittedCandidateReceiptV2 { descriptor, commitments };
+
+	(candidate, pvd)
+}
+
 /// Create a new candidate descriptor, and apply a valid signature
 /// using the provided `collator` key.
 pub fn make_valid_candidate_descriptor<H: AsRef<[u8]>>(