From bacb878332961ee12d97b805d1879ee028a0ae3a Mon Sep 17 00:00:00 2001 From: qima Date: Fri, 29 Mar 2024 00:09:00 +0800 Subject: [PATCH] feat(node): bad quoting detection --- sn_networking/src/cmd.rs | 154 ++++++++++++++--------- sn_networking/src/driver.rs | 3 + sn_networking/src/event.rs | 42 ++++++- sn_networking/src/lib.rs | 21 +++- sn_networking/src/record_store.rs | 2 +- sn_node/src/node.rs | 9 ++ sn_node/src/put_validation.rs | 4 +- sn_node/src/quote.rs | 119 +++++++++++++----- sn_protocol/src/messages/cmd.rs | 20 ++- sn_protocol/src/messages/response.rs | 5 + sn_transfers/src/wallet/data_payments.rs | 63 ++++++++++ 11 files changed, 347 insertions(+), 95 deletions(-) diff --git a/sn_networking/src/cmd.rs b/sn_networking/src/cmd.rs index 651c4bdff3..a09a7fafc4 100644 --- a/sn_networking/src/cmd.rs +++ b/sn_networking/src/cmd.rs @@ -22,7 +22,7 @@ use sn_protocol::{ storage::{RecordHeader, RecordKind, RecordType}, NetworkAddress, PrettyPrintRecordKey, }; -use sn_transfers::{NanoTokens, QuotingMetrics}; +use sn_transfers::{NanoTokens, PaymentQuote, QuotingMetrics}; use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, @@ -42,6 +42,8 @@ pub enum NodeIssue { ReplicationFailure, /// Close nodes have reported this peer as bad CloseNodesShunning, + /// Provided a bad quote + BadQuoting, } /// Commands to send to the Swarm @@ -169,6 +171,10 @@ pub enum SwarmCmd { target: NetworkAddress, sender: oneshot::Sender, }, + // Quote verification agaisnt historical collected quotes + QuoteVerification { + quotes: Vec<(PeerId, PaymentQuote)>, + }, } /// Debug impl for SwarmCmd to avoid printing full Record, instead only RecodKey @@ -286,6 +292,9 @@ impl Debug for SwarmCmd { SwarmCmd::IsPeerShunned { target, .. 
} => { write!(f, "SwarmCmd::IsPeerInTrouble target: {target:?}") } + SwarmCmd::QuoteVerification { quotes } => { + write!(f, "SwarmCmd::QuoteVerification of {} quotes", quotes.len()) + } } } } @@ -685,63 +694,7 @@ impl SwarmDriver { SwarmCmd::RecordNodeIssue { peer_id, issue } => { cmd_string = "RecordNodeIssues"; let _ = self.bad_nodes_ongoing_verifications.remove(&peer_id); - - info!("Peer {peer_id:?} is reported as having issue {issue:?}"); - let (issue_vec, is_bad) = self.bad_nodes.entry(peer_id).or_default(); - - // If being considered as bad already, skip certain operations - if !(*is_bad) { - // Remove outdated entries - issue_vec.retain(|(_, timestamp)| timestamp.elapsed().as_secs() < 300); - - // check if vec is already 10 long, if so, remove the oldest issue - // we only track 10 issues to avoid mem leaks - if issue_vec.len() == 10 { - issue_vec.remove(0); - } - - // To avoid being too sensitive, only consider as a new issue - // when after certain while since the last one - let is_new_issue = if let Some((_issue, timestamp)) = issue_vec.last() { - timestamp.elapsed().as_secs() > 10 - } else { - true - }; - - if is_new_issue { - issue_vec.push((issue, Instant::now())); - } - - // Only consider candidate as a bad node when: - // accumulated THREE same kind issues within certain period - for (issue, _timestamp) in issue_vec.iter() { - let issue_counts = issue_vec - .iter() - .filter(|(i, _timestamp)| *issue == *i) - .count(); - if issue_counts >= 3 { - *is_bad = true; - info!("Peer {peer_id:?} accumulated {issue_counts} times of issue {issue:?}. 
Consider it as a bad node now."); - // Once a bad behaviour detected, no point to continue - break; - } - } - } - - if *is_bad { - warn!("Cleaning out bad_peer {peer_id:?}"); - if let Some(dead_peer) = - self.swarm.behaviour_mut().kademlia.remove_peer(&peer_id) - { - self.connected_peers = self.connected_peers.saturating_sub(1); - self.send_event(NetworkEvent::PeerRemoved( - *dead_peer.node.key.preimage(), - self.connected_peers, - )); - self.log_kbuckets(&peer_id); - let _ = self.check_for_change_in_our_close_group(); - } - } + self.record_node_issue(peer_id, issue); } SwarmCmd::IsPeerShunned { target, sender } => { cmd_string = "IsPeerInTrouble"; @@ -756,6 +709,18 @@ impl SwarmDriver { }; let _ = sender.send(is_bad); } + SwarmCmd::QuoteVerification { quotes } => { + cmd_string = "QuoteVerification"; + for (peer_id, quote) in quotes { + // Do nothing if already being bad + if let Some((_issues, is_bad)) = self.bad_nodes.get(&peer_id) { + if *is_bad { + continue; + } + } + self.verify_peer_quote(peer_id, quote); + } + } } self.log_handling(cmd_string.to_string(), start.elapsed()); @@ -763,6 +728,79 @@ impl SwarmDriver { Ok(()) } + fn record_node_issue(&mut self, peer_id: PeerId, issue: NodeIssue) { + info!("Peer {peer_id:?} is reported as having issue {issue:?}"); + let (issue_vec, is_bad) = self.bad_nodes.entry(peer_id).or_default(); + + // If being considered as bad already, skip certain operations + if !(*is_bad) { + // Remove outdated entries + issue_vec.retain(|(_, timestamp)| timestamp.elapsed().as_secs() < 300); + + // check if vec is already 10 long, if so, remove the oldest issue + // we only track 10 issues to avoid mem leaks + if issue_vec.len() == 10 { + issue_vec.remove(0); + } + + // To avoid being too sensitive, only consider as a new issue + // when after certain while since the last one + let is_new_issue = if let Some((_issue, timestamp)) = issue_vec.last() { + timestamp.elapsed().as_secs() > 10 + } else { + true + }; + + if is_new_issue { + 
issue_vec.push((issue, Instant::now())); + } + + // Only consider candidate as a bad node when: + // accumulated THREE same kind issues within certain period + for (issue, _timestamp) in issue_vec.iter() { + let issue_counts = issue_vec + .iter() + .filter(|(i, _timestamp)| *issue == *i) + .count(); + if issue_counts >= 3 { + *is_bad = true; + info!("Peer {peer_id:?} accumulated {issue_counts} times of issue {issue:?}. Consider it as a bad node now."); + // Once a bad behaviour detected, no point to continue + break; + } + } + } + + if *is_bad { + warn!("Cleaning out bad_peer {peer_id:?}"); + if let Some(dead_peer) = self.swarm.behaviour_mut().kademlia.remove_peer(&peer_id) { + self.connected_peers = self.connected_peers.saturating_sub(1); + self.send_event(NetworkEvent::PeerRemoved( + *dead_peer.node.key.preimage(), + self.connected_peers, + )); + self.log_kbuckets(&peer_id); + let _ = self.check_for_change_in_our_close_group(); + } + } + } + + fn verify_peer_quote(&mut self, peer_id: PeerId, quote: PaymentQuote) { + if let Some(history_quote) = self.quotes_history.get(&peer_id) { + if !history_quote.historical_verify("e) { + info!("From {peer_id:?}, detected a bad quote {quote:?} against history_quote {history_quote:?}"); + self.record_node_issue(peer_id, NodeIssue::BadQuoting); + return; + } + + if history_quote.is_newer_than("e) { + return; + } + } + + let _ = self.quotes_history.insert(peer_id, quote); + } + fn try_interval_replication(&mut self) -> Result<()> { // get closest peers from buckets, sorted by increasing distance to us let our_peer_id = self.self_peer_id.into(); diff --git a/sn_networking/src/driver.rs b/sn_networking/src/driver.rs index 3b155bf88d..0bfc1fc22e 100644 --- a/sn_networking/src/driver.rs +++ b/sn_networking/src/driver.rs @@ -50,6 +50,7 @@ use sn_protocol::{ storage::RetryStrategy, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, }; +use sn_transfers::PaymentQuote; use std::{ collections::{btree_map::Entry, BTreeMap, 
BTreeSet, HashMap, HashSet}, fmt::Debug, @@ -513,6 +514,7 @@ impl NetworkBuilder { hard_disk_write_error: 0, bad_nodes: Default::default(), bad_nodes_ongoing_verifications: Default::default(), + quotes_history: Default::default(), }; Ok(( @@ -567,6 +569,7 @@ pub struct SwarmDriver { // the boolean flag to indicate whether the node is considered as bad or not pub(crate) bad_nodes: BTreeMap, bool)>, pub(crate) bad_nodes_ongoing_verifications: BTreeSet, + pub(crate) quotes_history: BTreeMap, } impl SwarmDriver { diff --git a/sn_networking/src/event.rs b/sn_networking/src/event.rs index 5ab23bbc79..4de72da38b 100644 --- a/sn_networking/src/event.rs +++ b/sn_networking/src/event.rs @@ -35,6 +35,7 @@ use sn_protocol::{ storage::RecordType, NetworkAddress, PrettyPrintRecordKey, }; +use sn_transfers::PaymentQuote; use std::{ collections::{hash_map::Entry, BTreeSet, HashSet}, fmt::{Debug, Formatter}, @@ -123,6 +124,10 @@ pub enum NetworkEvent { BadNodeVerification { peer_id: PeerId, }, + /// Quotes to be verified + QuoteVerification { + quotes: Vec<(PeerId, PaymentQuote)>, + }, } // Manually implement Debug as `#[debug(with = "unverified_record_fmt")]` not working as expected. @@ -164,6 +169,13 @@ impl Debug for NetworkEvent { NetworkEvent::BadNodeVerification { peer_id } => { write!(f, "NetworkEvent::BadNodeVerification({peer_id:?})") } + NetworkEvent::QuoteVerification { quotes } => { + write!( + f, + "NetworkEvent::QuoteVerification({} quotes)", + quotes.len() + ) + } } } } @@ -566,8 +578,9 @@ impl SwarmDriver { .. } => { trace!("Received request {request_id:?} from peer {peer:?}, req: {request:?}"); - // if the request is replication, we can handle it and send the OK response here, - // as we send that regardless of how we handle the request as its unimportant to the sender. + // If the request is replication or quote verification, + // we can handle it and send the OK response here. + // As the handle result is unimportant to the sender. 
match request { Request::Cmd(sn_protocol::messages::Cmd::Replicate { holder, keys }) => { let response = Response::Cmd( @@ -581,6 +594,31 @@ impl SwarmDriver { self.add_keys_to_replication_fetcher(holder, keys); } + Request::Cmd(sn_protocol::messages::Cmd::QuoteVerification { + quotes, + .. + }) => { + let response = Response::Cmd( + sn_protocol::messages::CmdResponse::QuoteVerification(Ok(())), + ); + self.swarm + .behaviour_mut() + .request_response + .send_response(channel, response) + .map_err(|_| NetworkError::InternalMsgChannelDropped)?; + + // The keypair is required to verify the quotes, + // hence throw it up to Network layer for further actions. + let quotes = quotes + .iter() + .filter_map(|(peer_address, quote)| { + peer_address + .as_peer_id() + .map(|peer_id| (peer_id, quote.clone())) + }) + .collect(); + self.send_event(NetworkEvent::QuoteVerification { quotes }) + } Request::Query(query) => { self.send_event(NetworkEvent::QueryRequestReceived { query, diff --git a/sn_networking/src/lib.rs b/sn_networking/src/lib.rs index 94bcbee0fa..e8afe65c9e 100644 --- a/sn_networking/src/lib.rs +++ b/sn_networking/src/lib.rs @@ -37,7 +37,7 @@ pub use self::{ driver::{GetRecordCfg, NetworkBuilder, PutRecordCfg, SwarmDriver, VerificationKind}, error::{GetRecordError, NetworkError}, event::{MsgResponder, NetworkEvent}, - record_store::NodeRecordStore, + record_store::{calculate_cost_for_records, NodeRecordStore}, transfers::{get_raw_signed_spends_from_record, get_signed_spend_from_record}, }; @@ -53,7 +53,7 @@ use libp2p::{ use rand::Rng; use sn_protocol::{ error::Error as ProtocolError, - messages::{ChunkProof, Nonce, Query, QueryResponse, Request, Response}, + messages::{ChunkProof, Cmd, Nonce, Query, QueryResponse, Request, Response}, storage::{RecordType, RetryStrategy}, NetworkAddress, PrettyPrintKBucketKey, PrettyPrintRecordKey, }; @@ -362,6 +362,7 @@ impl Network { // loop over responses, generating an average fee and storing all responses along side let mut 
all_costs = vec![]; + let mut all_quotes = vec![]; for response in responses.into_values().flatten() { debug!( "StoreCostReq for {record_address:?} received response: {:?}", @@ -373,7 +374,8 @@ impl Network { payment_address, peer_address, }) => { - all_costs.push((peer_address, payment_address, quote)); + all_costs.push((peer_address.clone(), payment_address, quote.clone())); + all_quotes.push((peer_address, quote)); } Response::Query(QueryResponse::GetStoreCost { quote: Err(ProtocolError::RecordExists(_)), @@ -388,6 +390,15 @@ impl Network { } } + for peer_id in close_nodes.iter() { + let request = Request::Cmd(Cmd::QuoteVerification { + target: NetworkAddress::from_peer(*peer_id), + quotes: all_quotes.clone(), + }); + + self.send_req_ignore_reply(request, *peer_id); + } + // Sort all_costs by the NetworkAddress proximity to record_address all_costs.sort_by(|(peer_address_a, _, _), (peer_address_b, _, _)| { record_address @@ -746,6 +757,10 @@ impl Network { self.send_swarm_cmd(SwarmCmd::RecordNodeIssue { peer_id, issue }); } + pub fn historical_verify_quotes(&self, quotes: Vec<(PeerId, PaymentQuote)>) { + self.send_swarm_cmd(SwarmCmd::QuoteVerification { quotes }); + } + // Helper to send SwarmCmd fn send_swarm_cmd(&self, cmd: SwarmCmd) { send_swarm_cmd(self.swarm_cmd_sender.clone(), cmd); diff --git a/sn_networking/src/record_store.rs b/sn_networking/src/record_store.rs index ea092d4dfe..66a705d7f3 100644 --- a/sn_networking/src/record_store.rs +++ b/sn_networking/src/record_store.rs @@ -680,7 +680,7 @@ impl RecordStore for ClientRecordStore { // to allow nodes receiving too many replication copies can still got paid, // and gives an exponential pricing curve when storage reaches high. // and give extra reward (lower the quoting price to gain a better chance) to long lived nodes. 
-fn calculate_cost_for_records( +pub fn calculate_cost_for_records( records_stored: usize, received_payment_count: usize, max_records: usize, diff --git a/sn_node/src/node.rs b/sn_node/src/node.rs index 2300086f04..8d3ba4e689 100644 --- a/sn_node/src/node.rs +++ b/sn_node/src/node.rs @@ -9,6 +9,7 @@ use super::{ error::{Error, Result}, event::NodeEventsChannel, + quote::quotes_verification, Marker, NodeEvent, }; #[cfg(feature = "open-metrics")] @@ -401,6 +402,14 @@ impl Node { } }); } + NetworkEvent::QuoteVerification { quotes } => { + event_header = "QuoteVerification"; + let network = self.network.clone(); + + let _handle = spawn(async move { + quotes_verification(&network, quotes).await; + }); + } } trace!( diff --git a/sn_node/src/put_validation.rs b/sn_node/src/put_validation.rs index e66c433b55..d0c07a82b3 100644 --- a/sn_node/src/put_validation.rs +++ b/sn_node/src/put_validation.rs @@ -6,7 +6,7 @@ // KIND, either express or implied. Please review the Licences for the specific language governing // permissions and limitations relating to use of the SAFE Network Software. -use crate::{node::Node, Error, Marker, Result}; +use crate::{node::Node, quote::verify_quote_for_storecost, Error, Marker, Result}; use libp2p::kad::{Record, RecordKey}; use sn_networking::{get_raw_signed_spends_from_record, GetRecordError, NetworkError}; use sn_protocol::{ @@ -514,7 +514,7 @@ impl Node { // check if the quote is valid let storecost = payment.quote.cost; - self.verify_quote_for_storecost(payment.quote, address)?; + verify_quote_for_storecost(&self.network, payment.quote, address)?; trace!("Payment quote valid for record {pretty_key}"); // Let's check payment is sufficient both for our store cost and for network royalties diff --git a/sn_node/src/quote.rs b/sn_node/src/quote.rs index 0e241af663..df747016a0 100644 --- a/sn_node/src/quote.rs +++ b/sn_node/src/quote.rs @@ -7,9 +7,11 @@ // permissions and limitations relating to use of the SAFE Network Software. 
use crate::{node::Node, Error, Result}; -use sn_networking::Network; -use sn_protocol::{error::Error as ProtocolError, NetworkAddress}; +use libp2p::PeerId; +use sn_networking::{calculate_cost_for_records, Network, NodeIssue}; +use sn_protocol::{error::Error as ProtocolError, storage::ChunkAddress, NetworkAddress}; use sn_transfers::{NanoTokens, PaymentQuote, QuotingMetrics}; +use std::time::Duration; impl Node { pub(crate) fn create_quote_for_storecost( @@ -37,36 +39,97 @@ impl Node { debug!("Created payment quote for {address:?}: {quote:?}"); Ok(quote) } +} - pub(crate) fn verify_quote_for_storecost( - &self, - quote: PaymentQuote, - address: &NetworkAddress, - ) -> Result<()> { - debug!("Verifying payment quote for {address:?}: {quote:?}"); +pub(crate) fn verify_quote_for_storecost( + network: &Network, + quote: PaymentQuote, + address: &NetworkAddress, +) -> Result<()> { + debug!("Verifying payment quote for {address:?}: {quote:?}"); - // check address - if address.as_xorname().unwrap_or_default() != quote.content { - return Err(Error::InvalidQuoteContent); - } + // check address + if address.as_xorname().unwrap_or_default() != quote.content { + return Err(Error::InvalidQuoteContent); + } - // check if the quote has expired - if quote.has_expired() { - return Err(Error::QuoteExpired(address.clone())); - } + // check if the quote has expired + if quote.has_expired() { + return Err(Error::QuoteExpired(address.clone())); + } - // check sig - let bytes = PaymentQuote::bytes_for_signing( - quote.content, - quote.cost, - quote.timestamp, - "e.quoting_metrics, - ); - let signature = quote.signature; - if !self.network.verify(&bytes, &signature) { - return Err(Error::InvalidQuoteSignature); - } + // check sig + let bytes = PaymentQuote::bytes_for_signing( + quote.content, + quote.cost, + quote.timestamp, + "e.quoting_metrics, + ); + let signature = quote.signature; + if !network.verify(&bytes, &signature) { + return Err(Error::InvalidQuoteSignature); + } + + Ok(()) +} 
+ +// Following metrics will be considered as client issue instead of node's bad quote. +// 1, quote is not regarding the same chunk as ours +// 2, quote is not around the same time as ours +// 3, quote is no longer valid +// +// Following metrics will be considered as node's bad quote. +// 1, Price calculation is incorrect +// 2, QuoteMetrics doesn't match the historical quotes collected by self +pub(crate) async fn quotes_verification(network: &Network, quotes: Vec<(PeerId, PaymentQuote)>) { + // Do nothing if self is not one of the quoters. + if let Some((_, self_quote)) = quotes + .iter() + .find(|(peer_id, _quote)| *peer_id == *network.peer_id) + { + let target_address = + NetworkAddress::from_chunk_address(ChunkAddress::new(self_quote.content)); + if verify_quote_for_storecost(network, self_quote.clone(), &target_address).is_ok() { + let mut quotes_for_nodes_duty: Vec<_> = quotes + .iter() + .filter(|(peer_id, quote)| { + let is_same_target = quote.content == self_quote.content; + let is_not_self = *peer_id != *network.peer_id; + let is_not_zero_quote = quote.cost != NanoTokens::zero(); + + let time_gap = Duration::from_secs(10); + let is_around_same_time = if quote.timestamp > self_quote.timestamp { + self_quote.timestamp + time_gap > quote.timestamp + } else { + quote.timestamp + time_gap > self_quote.timestamp + }; + + is_same_target && is_not_self && is_not_zero_quote && is_around_same_time + }) + .cloned() + .collect(); - Ok(()) + quotes_for_nodes_duty.retain(|(peer_id, quote)| { + let cost = calculate_cost_for_records( + quote.quoting_metrics.records_stored, + quote.quoting_metrics.received_payment_count, + quote.quoting_metrics.max_records, + quote.quoting_metrics.live_time, + ); + let is_same_as_expected = quote.cost == NanoTokens::from(cost); + + // TODO: need to confirm the quote_metrics was signed by the peer. + if !is_same_as_expected { + info!("Quote from {peer_id:?} using a different quoting_metrics to achieve the claimed cost. 
Quote {quote:?} can only result in cost {cost:?}"); + network.record_node_issues(*peer_id, NodeIssue::BadQuoting); + } + + is_same_as_expected + }); + + // Pass down to swarm_driver level for further bad quote detection + // against historical collected quotes. + network.historical_verify_quotes(quotes_for_nodes_duty); + } } } diff --git a/sn_protocol/src/messages/cmd.rs b/sn_protocol/src/messages/cmd.rs index 7b4b585c02..31222399e7 100644 --- a/sn_protocol/src/messages/cmd.rs +++ b/sn_protocol/src/messages/cmd.rs @@ -10,7 +10,7 @@ use crate::{storage::RecordType, NetworkAddress}; use serde::{Deserialize, Serialize}; // TODO: remove this dependency and define these types herein. -pub use sn_transfers::Hash; +pub use sn_transfers::{Hash, PaymentQuote}; /// Data and CashNote cmds - recording spends or creating, updating, and removing data. /// @@ -30,6 +30,11 @@ pub enum Cmd { /// Keys of copy that shall be replicated. keys: Vec<(NetworkAddress, RecordType)>, }, + /// Write operation to notify nodes a list of PaymentQuote collected. + QuoteVerification { + target: NetworkAddress, + quotes: Vec<(NetworkAddress, PaymentQuote)>, + }, } impl std::fmt::Debug for Cmd { @@ -43,6 +48,11 @@ impl std::fmt::Debug for Cmd { .field("first_ten_keys", &first_ten_keys) .finish() } + Cmd::QuoteVerification { target, quotes } => f + .debug_struct("Cmd::QuoteVerification") + .field("target", target) + .field("quotes_len", "es.len()) + .finish(), } } } @@ -52,6 +62,7 @@ impl Cmd { pub fn dst(&self) -> NetworkAddress { match self { Cmd::Replicate { holder, .. } => holder.clone(), + Cmd::QuoteVerification { target, .. 
} => target.clone(), } } } @@ -67,6 +78,13 @@ impl std::fmt::Display for Cmd { keys.len() ) } + Cmd::QuoteVerification { target, quotes } => { + write!( + f, + "Cmd::QuoteVerification(sent to {target:?} has {} quotes)", + quotes.len() + ) + } } } } diff --git a/sn_protocol/src/messages/response.rs b/sn_protocol/src/messages/response.rs index d6e3af278b..46bef9937c 100644 --- a/sn_protocol/src/messages/response.rs +++ b/sn_protocol/src/messages/response.rs @@ -106,6 +106,11 @@ pub enum CmdResponse { // /// Response to replication cmd Replicate(Result<()>), + // + // ===== QuoteVerification ===== + // + /// Response to quote verification cmd + QuoteVerification(Result<()>), } /// The Ok variant of a CmdResponse diff --git a/sn_transfers/src/wallet/data_payments.rs b/sn_transfers/src/wallet/data_payments.rs index 024fc1c9cb..19e25021e9 100644 --- a/sn_transfers/src/wallet/data_payments.rs +++ b/sn_transfers/src/wallet/data_payments.rs @@ -163,4 +163,67 @@ impl PaymentQuote { signature: vec![], } } + + /// Check whether self is newer than the target quote. + pub fn is_newer_than(&self, other: &Self) -> bool { + self.timestamp > other.timestamp + } + + /// Check against a new quote, verify whether it is a valid one from self perspective. + /// Returns `true` to flag the `other` quote is valid, from self perspective. 
+ pub fn historical_verify(&self, other: &Self) -> bool { + // There is a chance that an old quote got used later than a new quote + let self_is_newer = self.is_newer_than(other); + let (old_quote, new_quote) = if self_is_newer { + (other, self) + } else { + (self, other) + }; + + if new_quote.quoting_metrics.live_time < old_quote.quoting_metrics.live_time { + info!("Claimed live_time out of sequence"); + return false; + } + + let old_elapsed = if let Ok(elapsed) = old_quote.timestamp.elapsed() { + elapsed + } else { + info!("timestamp failure"); + return false; + }; + let new_elapsed = if let Ok(elapsed) = new_quote.timestamp.elapsed() { + elapsed + } else { + info!("timestamp failure"); + return false; + }; + + let time_diff = old_elapsed.as_secs().saturating_sub(new_elapsed.as_secs()); + let live_time_diff = + new_quote.quoting_metrics.live_time - old_quote.quoting_metrics.live_time; + // In theory, these two shall match, give it a margin of 10 to avoid system glitch + if live_time_diff > time_diff + 10 { + info!("claimed live_time out of sync with the timestamp"); + return false; + } + + // There could be pruning to be undertaken, + // hence the `increase` check is only valid when not being too full. + if new_quote.quoting_metrics.records_stored + 20 < new_quote.quoting_metrics.max_records + && new_quote.quoting_metrics.records_stored < old_quote.quoting_metrics.records_stored + { + info!("claimed records_stored out of sequence"); + return false; + } + + // TODO: Double check if this applies, as this will prevent a node restart with same ID + if new_quote.quoting_metrics.received_payment_count + < old_quote.quoting_metrics.received_payment_count + { + info!("claimed received_payment_count out of sequence"); + return false; + } + + true + } }