backport: Merge bitcoin#19370, 19562, 19538, 19272, 19214, 19429, 18990, 20927, 20507 #5770

Merged: 8 commits, Feb 7, 2024
1 change: 1 addition & 0 deletions src/bench/bench_bitcoin.cpp
@@ -41,6 +41,7 @@ int main(int argc, char** argv)
{
ArgsManager argsman;
SetupBenchArgs(argsman);
SHA256AutoDetect();
std::string error;
if (!argsman.ParseParameters(argc, argv, error)) {
tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error);
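Note on the bench change above: calling SHA256AutoDetect() once at startup selects the fastest SHA256 implementation available on the host, so benchmark numbers reflect the optimized code path rather than the generic fallback. A minimal sketch of the same pattern, assuming (as in Bitcoin Core) that crypto/sha256.h declares SHA256AutoDetect() returning a string describing the chosen implementation:

```cpp
// Sketch only, not the full bench_bitcoin.cpp: pick the optimized SHA256
// backend before doing any timed work.
#include <crypto/sha256.h>  // assumed to declare std::string SHA256AutoDetect();

#include <cstdio>
#include <string>

int main()
{
    const std::string impl = SHA256AutoDetect();  // e.g. selects SSE4/AVX2 variants when available
    std::printf("Using SHA256 implementation: %s\n", impl.c_str());
    // ... set up ArgsManager and run benchmarks here; without the call above
    // they would measure the portable C implementation instead.
    return 0;
}
```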
70 changes: 40 additions & 30 deletions src/net.cpp
@@ -1487,37 +1487,47 @@ void CConnman::CalculateNumConnectionsChangedStats()
statsClient.gauge("peers.torConnections", torNodes, 1.0f);
}

void CConnman::InactivityCheck(CNode *pnode) const
bool CConnman::InactivityCheck(const CNode& node) const
{
int64_t nTime = GetSystemTimeInSeconds();
if (nTime - pnode->nTimeConnected > m_peer_connect_timeout)
{
if (pnode->nLastRecv == 0 || pnode->nLastSend == 0)
{
LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d from %d\n", m_peer_connect_timeout, pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->GetId());
pnode->fDisconnect = true;
}
else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL)
{
LogPrintf("socket sending timeout: %is\n", nTime - pnode->nLastSend);
pnode->fDisconnect = true;
}
else if (nTime - pnode->nLastRecv > TIMEOUT_INTERVAL)
{
LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv);
pnode->fDisconnect = true;
}
else if (pnode->nPingNonceSent && pnode->nPingUsecStart + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros())
{
LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - pnode->nPingUsecStart));
pnode->fDisconnect = true;
}
else if (!pnode->fSuccessfullyConnected)
{
LogPrint(BCLog::NET, "version handshake timeout from %d\n", pnode->GetId());
pnode->fDisconnect = true;
}
// Use non-mockable system time (otherwise these timers will pop when we
// use setmocktime in the tests).
int64_t now = GetSystemTimeInSeconds();

if (now <= node.nTimeConnected + m_peer_connect_timeout) {
// Only run inactivity checks if the peer has been connected longer
// than m_peer_connect_timeout.
return false;
}

if (node.nLastRecv == 0 || node.nLastSend == 0) {
LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d from %d\n", m_peer_connect_timeout, node.nLastRecv != 0, node.nLastSend != 0, node.GetId());
return true;
}

if (now > node.nLastSend + TIMEOUT_INTERVAL) {
LogPrintf("socket sending timeout: %is\n", now - node.nLastSend);
return true;
}

if (now > node.nLastRecv + TIMEOUT_INTERVAL) {
LogPrintf("socket receive timeout: %is\n", now - node.nLastRecv);
return true;
}

if (node.nPingNonceSent && node.nPingUsecStart.load() + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros()) {
// We use mockable time for ping timeouts. This means that setmocktime
// may cause pings to time out for peers that have been connected for
// longer than m_peer_connect_timeout.
LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - node.nPingUsecStart));
return true;
}

if (!node.fSuccessfullyConnected) {
LogPrint(BCLog::NET, "version handshake timeout from %d\n", node.GetId());
return true;
}

return false;
}

bool CConnman::GenerateSelectSet(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set)
@@ -2036,7 +2046,7 @@ void CConnman::ThreadSocketHandler()
SocketHandler();
if (GetTimeMillis() - nLastCleanupNodes > 1000) {
ForEachNode(AllNodes, [&](CNode* pnode) {
InactivityCheck(pnode);
if (InactivityCheck(*pnode)) pnode->fDisconnect = true;
});
nLastCleanupNodes = GetTimeMillis();
}
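The shape of this refactor is worth spelling out: InactivityCheck() becomes a const predicate on a const CNode&, and the only side effect (setting fDisconnect) moves to the caller in ThreadSocketHandler(). A simplified, self-contained sketch of the pattern (hypothetical types and timeout values, not the real CConnman code):

```cpp
// Simplified sketch; Peer/IsInactive are hypothetical stand-ins for
// CNode/CConnman::InactivityCheck.
#include <cstdint>

struct Peer {
    int64_t connected_at{0};  // time the connection was established
    int64_t last_send{0};     // 0 means nothing sent yet
    int64_t last_recv{0};     // 0 means nothing received yet
    bool disconnect{false};
};

constexpr int64_t CONNECT_GRACE = 60;          // only check peers connected longer than this
constexpr int64_t TIMEOUT_INTERVAL = 20 * 60;  // send/receive inactivity timeout

// Pure predicate: no side effects, trivially unit-testable with a fake clock.
bool IsInactive(const Peer& peer, int64_t now)
{
    if (now <= peer.connected_at + CONNECT_GRACE) return false;
    if (peer.last_recv == 0 || peer.last_send == 0) return true;  // no traffic at all
    if (now > peer.last_send + TIMEOUT_INTERVAL) return true;     // send timeout
    if (now > peer.last_recv + TIMEOUT_INTERVAL) return true;     // receive timeout
    return false;
}

// The caller applies the decision, mirroring ThreadSocketHandler():
void CheckPeer(Peer& peer, int64_t now)
{
    if (IsInactive(peer, now)) peer.disconnect = true;
}
```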
3 changes: 2 additions & 1 deletion src/net.h
@@ -600,7 +600,8 @@ friend class CNode;
void DisconnectNodes();
void NotifyNumConnectionsChanged();
void CalculateNumConnectionsChangedStats();
void InactivityCheck(CNode *pnode) const;
/** Return true if the peer is inactive and should be disconnected. */
bool InactivityCheck(const CNode& node) const;
bool GenerateSelectSet(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set);
#ifdef USE_KQUEUE
void SocketEventsKqueue(std::set<SOCKET> &recv_set, std::set<SOCKET> &send_set, std::set<SOCKET> &error_set, bool fOnlyPoll);
13 changes: 8 additions & 5 deletions src/net_processing.cpp
@@ -2582,7 +2582,10 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
break;
} else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
if (state.IsInvalid()) {
LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString());
LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s from peer=%d. %s\n",
orphanHash.ToString(),
orphan_it->second.fromPeer,
state.ToString());
// Maybe punish peer that gave us an invalid orphan tx
MaybePunishNodeForTx(orphan_it->second.fromPeer, state);
}
@@ -3279,7 +3282,7 @@ void PeerManagerImpl::ProcessMessage(
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
Misbehaving(pfrom.GetId(), 20, strprintf("message inv size() = %u", vInv.size()));
Misbehaving(pfrom.GetId(), 20, strprintf("inv message size = %u", vInv.size()));
return;
}

@@ -3374,7 +3377,8 @@
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
Misbehaving(pfrom.GetId(), 20, strprintf("message getdata size() = %u", vInv.size()));

Misbehaving(pfrom.GetId(), 20, strprintf("getdata message size = %u", vInv.size()));
return;
}

@@ -3741,8 +3745,7 @@ void PeerManagerImpl::ProcessMessage(
// peer simply for relaying a tx that our m_recent_rejects has caught,
// regardless of false positives.

if (state.IsInvalid())
{
if (state.IsInvalid()) {
LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
pfrom.GetId(),
state.ToString());
6 changes: 4 additions & 2 deletions src/sync.cpp
@@ -140,7 +140,7 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac
throw std::logic_error(strprintf("potential deadlock detected: %s -> %s -> %s", mutex_b, mutex_a, mutex_b));
}

static void double_lock_detected(const void* mutex, LockStack& lock_stack)
static void double_lock_detected(const void* mutex, const LockStack& lock_stack)
{
LogPrintf("DOUBLE LOCK DETECTED\n");
LogPrintf("Lock order:\n");
@@ -151,7 +151,9 @@ static void double_lock_detected(const void* mutex, LockStack& lock_stack)
LogPrintf(" %s\n", i.second.ToString());
}
if (g_debug_lockorder_abort) {
tfm::format(std::cerr, "Assertion failed: detected double lock at %s:%i, details in debug log.\n", __FILE__, __LINE__);
tfm::format(std::cerr,
"Assertion failed: detected double lock for %s, details in debug log.\n",
lock_stack.back().second.ToString());
abort();
}
throw std::logic_error("double lock detected");
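For context, the condition this diagnostic reports is the same thread acquiring a non-recursive mutex it already holds. A hypothetical illustration of that bug class (plain std::mutex and made-up names, not the sync.h Mutex/LOCK machinery):

```cpp
// Hypothetical illustration only: re-entering a non-recursive mutex on one
// thread is the situation DEBUG_LOCKORDER builds report via double_lock_detected().
#include <mutex>

std::mutex g_cache_mutex;  // made-up name; in Dash/Bitcoin this would be a Mutex

void RebuildCache()
{
    std::lock_guard<std::mutex> lock(g_cache_mutex);  // second acquisition: double lock
}

void FlushCache()
{
    std::lock_guard<std::mutex> lock(g_cache_mutex);  // first acquisition
    RebuildCache();  // deadlocks (or is undefined) here; the instrumented build aborts with a log instead
}
```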
4 changes: 2 additions & 2 deletions src/test/fuzz/util.h
@@ -409,7 +409,7 @@ class FuzzedFileProvider
return 0;
}
std::memcpy(buf, random_bytes.data(), random_bytes.size());
if (AdditionOverflow(static_cast<size_t>(fuzzed_file->m_offset), random_bytes.size())) {
if (AdditionOverflow(fuzzed_file->m_offset, (int64_t)random_bytes.size())) {
return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
}
fuzzed_file->m_offset += random_bytes.size();
@@ -421,7 +421,7 @@ class FuzzedFileProvider
FuzzedFileProvider* fuzzed_file = (FuzzedFileProvider*)cookie;
SetFuzzedErrNo(fuzzed_file->m_fuzzed_data_provider);
const ssize_t n = fuzzed_file->m_fuzzed_data_provider.ConsumeIntegralInRange<ssize_t>(0, size);
if (AdditionOverflow(static_cast<ssize_t>(fuzzed_file->m_offset), n)) {
if (AdditionOverflow(fuzzed_file->m_offset, (int64_t)n)) {
return fuzzed_file->m_fuzzed_data_provider.ConsumeBool() ? 0 : -1;
}
fuzzed_file->m_offset += n;
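The two util.h call sites now pass both operands as int64_t, matching m_offset, because an AdditionOverflow-style helper is typically a single-type template. A sketch of such a helper, written as an assumption about its contract rather than a copy of the fuzz framework's implementation:

```cpp
// Sketch of a single-type AdditionOverflow helper: both arguments must have
// the same integral type T, which is why the call sites cast the byte counts
// to int64_t to match m_offset rather than casting m_offset away from signed.
#include <limits>
#include <type_traits>

template <typename T>
bool AdditionOverflow(const T i, const T j) noexcept
{
    static_assert(std::is_integral<T>::value, "Integral required.");
    if (std::numeric_limits<T>::is_signed) {
        return (i > 0 && j > std::numeric_limits<T>::max() - i) ||
               (i < 0 && j < std::numeric_limits<T>::min() - i);
    }
    return std::numeric_limits<T>::max() - i < j;
}

// Usage matching the diff: AdditionOverflow(offset, (int64_t)n) checks the
// addition before doing offset += n.
```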
6 changes: 2 additions & 4 deletions src/test/sync_tests.cpp
@@ -62,10 +62,8 @@ void TestDoubleLock(bool should_throw)
MutexType m;
ENTER_CRITICAL_SECTION(m);
if (should_throw) {
BOOST_CHECK_EXCEPTION(
TestDoubleLock2(m), std::logic_error, [](const std::logic_error& e) {
return strcmp(e.what(), "double lock detected") == 0;
});
BOOST_CHECK_EXCEPTION(TestDoubleLock2(m), std::logic_error,
HasReason("double lock detected"));
} else {
BOOST_CHECK_NO_THROW(TestDoubleLock2(m));
}
5 changes: 3 additions & 2 deletions src/validation.cpp
@@ -648,8 +648,9 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
CAmount& nModifiedFees = ws.m_modified_fees;

if (!CheckTransaction(tx, state))
if (!CheckTransaction(tx, state)) {
return false; // state filled in by CheckTransaction
}

assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate));
if (!ContextualCheckTransaction(tx, state, chainparams.GetConsensus(), m_active_chainstate.m_chain.Tip()))
@@ -761,7 +762,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)

assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_active_chainstate.m_blockman));
if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) {
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), state.ToString());
return false; // state filled in by CheckTxInputs
}
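Both validation.cpp hunks apply the same convention: the callee records the failure reason in the validation state and PreChecks() simply returns false, leaving logging to the caller and avoiding a duplicate error() line. A simplified sketch of the convention (hypothetical names, only the shape matches the real MemPoolAccept code):

```cpp
// Hypothetical sketch of the "state carries the reason" convention.
#include <string>

struct TxValidationStateSketch {
    bool valid{true};
    std::string reject_reason;
    void Invalid(std::string reason) { valid = false; reject_reason = std::move(reason); }
    std::string ToString() const { return reject_reason; }
};

bool CheckTxInputsSketch(bool inputs_available, TxValidationStateSketch& state)
{
    if (!inputs_available) {
        state.Invalid("bad-txns-inputs-missingorspent");
        return false;
    }
    return true;
}

bool PreChecksSketch(bool inputs_available, TxValidationStateSketch& state)
{
    if (!CheckTxInputsSketch(inputs_available, state)) {
        return false;  // state filled in by the callee; the caller logs state.ToString() once
    }
    return true;
}
```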

// Check for non-standard pay-to-script-hash in inputs
5 changes: 5 additions & 0 deletions src/wallet/test/wallet_tests.cpp
@@ -39,6 +39,11 @@ extern UniValue addmultisigaddress(const JSONRPCRequest& request);

extern RecursiveMutex cs_wallets;

// Ensure that fee levels defined in the wallet are at least as high
// as the default levels for node policy.
static_assert(DEFAULT_TRANSACTION_MINFEE >= DEFAULT_MIN_RELAY_TX_FEE, "wallet minimum fee is smaller than default relay fee");
static_assert(WALLET_INCREMENTAL_RELAY_FEE >= DEFAULT_INCREMENTAL_RELAY_FEE, "wallet incremental fee is smaller than default incremental relay fee");

BOOST_FIXTURE_TEST_SUITE(wallet_tests, WalletTestingSetup)

namespace {
2 changes: 1 addition & 1 deletion test/functional/p2p_addr_relay.py
@@ -45,7 +45,7 @@ def run_test(self):
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = msg_addr()

self.log.info('Send too large addr message')
self.log.info('Send too-large addr message')
msg.addrs = ADDRS * 101
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
addr_source.send_and_ping(msg)
68 changes: 42 additions & 26 deletions test/functional/p2p_invalid_messages.py
@@ -10,6 +10,9 @@
CInv,
msg_ping,
ser_string,
MAX_HEADERS_RESULTS,
MAX_INV_SIZE,
MAX_PROTOCOL_MESSAGE_LENGTH,
msg_getdata,
msg_headers,
msg_inv,
Expand All @@ -23,8 +26,7 @@
assert_equal,
)

MSG_LIMIT = 3 * 1024 * 1024 # 3MB, per MAX_PROTOCOL_MESSAGE_LENGTH
VALID_DATA_LIMIT = MSG_LIMIT - 5 # Account for the 5-byte length prefix
VALID_DATA_LIMIT = MAX_PROTOCOL_MESSAGE_LENGTH - 5 # Account for the 5-byte length prefix

class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
@@ -52,11 +54,13 @@ def run_test(self):
self.test_checksum()
self.test_size()
self.test_msgtype()
self.test_large_inv()
self.test_oversized_inv_msg()
self.test_oversized_getdata_msg()
self.test_oversized_headers_msg()
self.test_resource_exhaustion()

def test_buffer(self):
self.log.info("Test message with header split across two buffers, should be received")
self.log.info("Test message with header split across two buffers is received")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
# Create valid message
msg = conn.build_message(msg_ping(nonce=12345))
@@ -75,16 +79,18 @@ def test_buffer(self):
self.nodes[0].disconnect_p2ps()

def test_magic_bytes(self):
self.log.info("Test message with invalid magic bytes disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['HEADER ERROR - MESSAGESTART (badmsg, 2 bytes), received ffffffff']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
# modify magic bytes
msg = b'\xff' * 4 + msg[4:]
conn.send_raw_message(msg)
conn.wait_for_disconnect(timeout=5)
self.nodes[0].disconnect_p2ps()
self.nodes[0].disconnect_p2ps()

def test_checksum(self):
self.log.info("Test message with invalid checksum logs an error")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
@@ -94,19 +100,20 @@ def test_checksum(self):
msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
conn.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
self.nodes[0].disconnect_p2ps()

def test_size(self):
self.log.info("Test message with oversized payload disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['']):
# Create a message with oversized payload
msg = msg_unrecognized(str_data="d"*(VALID_DATA_LIMIT + 1))
msg = conn.build_message(msg)
conn.send_raw_message(msg)
conn.wait_for_disconnect(timeout=5)
self.nodes[0].disconnect_p2ps()
self.nodes[0].disconnect_p2ps()

def test_msgtype(self):
self.log.info("Test message with invalid message type logs an error")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['HEADER ERROR - COMMAND']):
msg = msg_unrecognized(str_data="d")
@@ -115,42 +122,51 @@ def test_msgtype(self):
msg = msg[:7] + b'\x00' + msg[7 + 1:]
conn.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()

def test_large_inv(self):
conn = self.nodes[0].add_p2p_connection(P2PInterface())
with self.nodes[0].assert_debug_log(['Misbehaving', '(0 -> 20): message inv size() = 50001']):
msg = msg_inv([CInv(MSG_TX, 1)] * 50001)
conn.send_and_ping(msg)
with self.nodes[0].assert_debug_log(['Misbehaving', '(20 -> 40): message getdata size() = 50001']):
msg = msg_getdata([CInv(MSG_TX, 1)] * 50001)
conn.send_and_ping(msg)
with self.nodes[0].assert_debug_log(['Misbehaving', '(40 -> 60): headers message size = 2001']):
msg = msg_headers([CBlockHeader()] * 2001)
conn.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()

def test_oversized_msg(self, msg, size):
msg_type = msg.msgtype.decode('ascii')
self.log.info("Test {} message of size {} is logged as misbehaving".format(msg_type, size))
with self.nodes[0].assert_debug_log(['Misbehaving', '{} message size = {}'.format(msg_type, size)]):
self.nodes[0].add_p2p_connection(P2PInterface()).send_and_ping(msg)
self.nodes[0].disconnect_p2ps()

def test_oversized_inv_msg(self):
size = MAX_INV_SIZE + 1
self.test_oversized_msg(msg_inv([CInv(MSG_TX, 1)] * size), size)

def test_oversized_getdata_msg(self):
size = MAX_INV_SIZE + 1
self.test_oversized_msg(msg_getdata([CInv(MSG_TX, 1)] * size), size)

def test_oversized_headers_msg(self):
size = MAX_HEADERS_RESULTS + 1
self.test_oversized_msg(msg_headers([CBlockHeader()] * size), size)

def test_resource_exhaustion(self):
self.log.info("Test node stays up despite many large junk messages")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
conn2 = self.nodes[0].add_p2p_connection(P2PDataStore())
msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT)
assert len(msg_at_size.serialize()) == MSG_LIMIT

self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")
assert len(msg_at_size.serialize()) == MAX_PROTOCOL_MESSAGE_LENGTH

# Run a bunch of times to test for memory exhaustion.
self.log.info("(a) Send 80 messages, each of maximum valid data size (4MB)")
for _ in range(80):
conn.send_message(msg_at_size)

# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
self.log.info("(b) Check node still services peers in a timely way")
for _ in range(20):
conn2.sync_with_ping(timeout=2)

# Peer 1, despite being served up a bunch of nonsense, should still be connected.
self.log.info("Waiting for node to drop junk messages.")
self.log.info("(c) Wait for node to drop junk messages, while remaining connected")
conn.sync_with_ping(timeout=400)

# Despite being served up a bunch of nonsense, the peers should still be connected.
assert conn.is_connected
assert conn2.is_connected
self.nodes[0].disconnect_p2ps()


4 changes: 4 additions & 0 deletions test/functional/test_framework/messages.py
@@ -46,6 +46,10 @@

BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out

MAX_PROTOCOL_MESSAGE_LENGTH = 3 * 1024 * 1024 # Maximum length of incoming protocol messages
MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result
MAX_INV_SIZE = 50000 # Maximum number of entries in an 'inv' protocol message

NODE_NETWORK = (1 << 0)
NODE_BLOOM = (1 << 2)
NODE_COMPACT_FILTERS = (1 << 6)