Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Limited number of connections per IP #2046

Draft
wants to merge 3 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).

### Added
- [1983](https://github.com/FuelLabs/fuel-core/pull/1983): Add adapters for gas price service for accessing database values
- [2046](https://github.com/FuelLabs/fuel-core/pull/2046): Added a limit on the number of connections from the same remote IP.

### Breaking
- [2025](https://github.com/FuelLabs/fuel-core/pull/2025): Add new V0 algorithm for gas price to services.
Expand Down
5 changes: 5 additions & 0 deletions bin/fuel-core/src/cli/run/p2p.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,10 @@ pub struct P2PArgs {
#[clap(long = "max-peers-connected", default_value = "50", env)]
pub max_peers_connected: u32,

/// Max number of unique peers connected with the same remote IP address.
#[clap(long = "max-peers-per-remote-ip", default_value = "5", env)]
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
#[clap(long = "max-peers-per-remote-ip", default_value = "5", env)]
#[clap(long = "max-peers-per-remote-ip", default_value = "50", env)]

right?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, 5=)

pub max_peers_per_remote_ip: usize,

/// Max number of connections per single peer
/// The total number of connections will be `(max_peers_connected + reserved_nodes.len()) * max_connections_per_peer`
#[clap(long = "max-connections-per-peer", default_value = "3", env)]
Expand Down Expand Up @@ -309,6 +313,7 @@ impl P2PArgs {
reserved_nodes_only_mode: self.reserved_nodes_only_mode,
enable_mdns: self.enable_mdns,
max_peers_connected: self.max_peers_connected,
max_peers_per_remote_ip: self.max_peers_per_remote_ip,
max_connections_per_peer: self.max_connections_per_peer,
allow_private_addresses: self.allow_private_addresses,
random_walk,
Expand Down
4 changes: 4 additions & 0 deletions crates/services/p2p/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,8 @@ pub struct Config<State = Initialized> {
/// This number should be at least number of `mesh_n` from `Gossipsub` configuration.
/// The total number of connections will be `(max_peers_connected + reserved_nodes.len()) * max_connections_per_peer`
pub max_peers_connected: u32,
/// Max number of unique peers connected with the same remote IP address.
pub max_peers_per_remote_ip: usize,
/// Max number of connections per single peer
/// The total number of connections will be `(max_peers_connected + reserved_nodes.len()) * max_connections_per_peer`
pub max_connections_per_peer: u32,
Expand Down Expand Up @@ -154,6 +156,7 @@ impl Config<NotInitialized> {
bootstrap_nodes: self.bootstrap_nodes,
enable_mdns: self.enable_mdns,
max_peers_connected: self.max_peers_connected,
max_peers_per_remote_ip: self.max_peers_per_remote_ip,
max_connections_per_peer: self.max_connections_per_peer,
allow_private_addresses: self.allow_private_addresses,
random_walk: self.random_walk,
Expand Down Expand Up @@ -203,6 +206,7 @@ impl Config<NotInitialized> {
bootstrap_nodes: vec![],
enable_mdns: false,
max_peers_connected: 50,
max_peers_per_remote_ip: 50,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
max_peers_per_remote_ip: 50,
max_peers_per_remote_ip: 5,

The config default here doesn't match the default on clap

max_connections_per_peer: 3,
allow_private_addresses: true,
random_walk: Some(Duration::from_millis(500)),
Expand Down
132 changes: 132 additions & 0 deletions crates/services/p2p/src/p2p_service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -544,6 +544,9 @@ impl FuelP2PService {
self.peer_manager.handle_peer_disconnect(peer_id);
return Some(FuelP2PEvent::PeerDisconnected(peer_id));
}
PeerReportEvent::AskForDisconnection { peer_id } => {
let _ = self.swarm.disconnect_peer_id(peer_id);
}
}
None
}
Expand Down Expand Up @@ -748,6 +751,11 @@ mod tests {
};
use futures::{
future::join_all,
stream::{
select_all,
BoxStream,
},
FutureExt,
StreamExt,
};
use libp2p::{
Expand All @@ -772,6 +780,8 @@ mod tests {
mpsc,
oneshot,
watch,
Mutex,
OwnedMutexGuard,
};
use tracing_attributes::instrument;

Expand Down Expand Up @@ -1186,6 +1196,128 @@ mod tests {
}
}

/// Merges the event loops of several [`P2PService`]s into a single stream.
///
/// Each service is wrapped in an `Arc<Mutex<_>>` and polled via
/// `stream::unfold`; every yielded item carries the owned lock guard for the
/// service that produced the event, so the caller can inspect (or mutate) that
/// node while holding exclusive access to it.
fn any_event_from_node<'a>(
    nodes: Vec<P2PService>,
) -> BoxStream<'a, (OwnedMutexGuard<P2PService>, FuelP2PEvent)> {
    use futures::stream;

    let per_node_streams: Vec<_> = nodes
        .into_iter()
        .map(|service| {
            let shared = Arc::new(Mutex::new(service));

            stream::unfold(shared, |shared| {
                async move {
                    // Keep a handle for the next unfold step before locking.
                    let next_state = shared.clone();
                    let mut guard = shared.lock_owned().await;
                    // `next_event` may yield `None`; keep polling until a
                    // real event arrives, then hand it out with the guard.
                    loop {
                        if let Some(event) = guard.next_event().await {
                            break Some(((guard, event), next_state));
                        }
                    }
                }
                .boxed()
            })
        })
        .collect();

    select_all(per_node_streams).boxed()
}

#[tokio::test]
#[instrument]
async fn limited_number_of_connections_per_remote_ip() {
    // Maximum number of unique peers the target accepts from one remote IP.
    // All nodes in this test run on the local machine, so from the target's
    // point of view they share the same remote IP address.
    const LIMIT: usize = 10;

    // Node A
    let mut p2p_config =
        Config::default_initialized("limited_number_of_connections_per_remote_ip");
    p2p_config.max_peers_per_remote_ip = LIMIT;

    let mut target_node = build_service_from_config(p2p_config.clone()).await;

    // Every node built below bootstraps from — and therefore dials — the target.
    p2p_config.bootstrap_nodes = target_node.multiaddrs();

    // Phase 1: spawn exactly LIMIT nodes; all of them should be accepted.
    let mut nodes_that_fit_into_limit = vec![];
    for _ in 0..LIMIT {
        let node = build_service_from_config(p2p_config.clone()).await;
        nodes_that_fit_into_limit.push(node);
    }

    let mut good_nodes = any_event_from_node(nodes_that_fit_into_limit);
    let mut connected_nodes: usize = 0;

    // Drive the target and the good nodes concurrently until the target has
    // observed LIMIT successful connections. A disconnect of any good node
    // in this phase would mean the limit fired too early.
    loop {
        tokio::select! {
            target_node_event = target_node.next_event() => {
                if let Some(event) = target_node_event {
                    tracing::info!("Target node Event: {:?}", event);
                    match event {
                        FuelP2PEvent::PeerConnected(_) => {
                            connected_nodes = connected_nodes.saturating_add(1);
                        }
                        _ => {
                            // Do nothing
                        }
                    }

                    if connected_nodes >= LIMIT {
                        break
                    }
                }
            },
            event = good_nodes.next() => {
                if let Some((good_node, event)) = event {
                    tracing::info!("Good node {:?}: {:?}", good_node.local_peer_id, event);

                    match event {
                        FuelP2PEvent::PeerDisconnected(_) => {
                            panic!("Good node should not disconnect");
                        }
                        _ => {
                            // Do nothing
                        }
                    }
                }
            },
        }
    }

    // Phase 2: spawn LIMIT more nodes from the same IP; the target is already
    // at its per-IP capacity, so each of these should end up disconnected.
    let mut nodes_that_not_fit_into_limit = vec![];
    for _ in 0..LIMIT {
        let node = build_service_from_config(p2p_config.clone()).await;
        nodes_that_not_fit_into_limit.push(node);
    }
    // Keep polling the good nodes too so their connections stay alive while
    // the new nodes are being rejected.
    let new_nodes = any_event_from_node(nodes_that_not_fit_into_limit);
    let mut nodes = select_all(vec![good_nodes, new_nodes]);
    let mut rejected_nodes = HashSet::new();

    loop {
        tokio::select! {
            target_node_event = target_node.next_event() => {
                tracing::info!("Target node Event: {:?}", target_node_event);
                // Invariant: the target never tracks more than LIMIT peers.
                assert!(target_node.peer_manager.get_peers_ids().count() <= LIMIT);
            },
            node_event = nodes.next() => {
                if let Some((node, event)) = node_event {
                    tracing::info!("Node {:?}: {:?}", node.local_peer_id, event);

                    match event {
                        FuelP2PEvent::PeerDisconnected(_) => {
                            // NOTE(review): good nodes share this stream; this
                            // assumes only over-limit nodes get disconnected,
                            // matching the phase-1 panic guarantee.
                            rejected_nodes.insert(node.local_peer_id);
                        }
                        _ => {
                            // Do nothing
                        }
                    }

                    // All LIMIT over-capacity nodes were rejected — done.
                    if rejected_nodes.len() == LIMIT {
                        break
                    }
                }
            },
        }
    }
}

// Simulates 2 p2p nodes that connect to each other and consequently exchange Peer Info
// On successful connection, node B updates its latest BlockHeight
// and shares it with Peer A via Heartbeat protocol
Expand Down
Loading
Loading