Skip to content
This repository has been archived by the owner on Oct 23, 2022. It is now read-only.

Update libp2p to v0.43.0 #499

Merged
merged 27 commits into from
Apr 1, 2022
Merged
Changes from 1 commit
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
f601b8d
fix: update libp2p and renamed the changed types
rand0m-cloud Mar 18, 2022
c3a48c9
fix: updated libp2p in the bitswap crate
rand0m-cloud Mar 18, 2022
4e5ff4d
more libp2p updating
rand0m-cloud Mar 18, 2022
918d4d8
more updating of types
rand0m-cloud Mar 18, 2022
c1a5bba
some updates to pubsub
rand0m-cloud Mar 18, 2022
7e9da72
fix the pubsub network behaviour action type
rand0m-cloud Mar 18, 2022
085be77
replaced todo placeholders
rand0m-cloud Mar 18, 2022
e4002d6
re-add connection closed and established
rand0m-cloud Mar 18, 2022
a996922
added change to changelog
rand0m-cloud Mar 18, 2022
bdf977c
enable event_process for BehaviourEvent
rand0m-cloud Mar 18, 2022
93b31b3
chore: clean up type signature
rand0m-cloud Mar 18, 2022
25c8d58
fix: removed unneeded BehaviourEvent struct
rand0m-cloud Mar 18, 2022
3b59193
temp fix: changed field order to workaround bug in libp2p
rand0m-cloud Mar 18, 2022
31262b5
chore: more updating to libp2p
rand0m-cloud Mar 18, 2022
6c6fc3d
fix: update libp2p and renamed the changed types
rand0m-cloud Mar 18, 2022
77291ee
fix(swarm-test): add biased to tokio::select for non-random behavior
rand0m-cloud Mar 18, 2022
888e6f1
wip: re-add code fragment to handle dial failure
rand0m-cloud Mar 18, 2022
72ff95d
fix(swarm): corrected dial failure logic
rand0m-cloud Mar 21, 2022
1cee67d
fix: corrected faulty Vec::retain logic and updated WrongPeerId test
rand0m-cloud Mar 21, 2022
897c16f
fix: apply review suggestions and fix clippy lints
rand0m-cloud Mar 24, 2022
d4d3def
fix(pubsub): tell Floodsub about the peers we want to hear from
rand0m-cloud Mar 25, 2022
87a4114
ci(win): use windows-2019 image
koivunej Mar 30, 2022
82453e5
fix(build): stop building while writing an error
koivunej Mar 30, 2022
277954b
test(pubsub): disjoint topics as new test case
koivunej Apr 1, 2022
50ad10f
test(pubsub): simplify, comment
koivunej Apr 1, 2022
081a598
test(conf): ignore pubsub tests on windows for now
koivunej Apr 1, 2022
bf7a807
doc(p2p): add fixme for possible issue
koivunej Apr 1, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
test(pubsub): simplify, comment
simplify away the use of hashsets for messages along with any
filtering; instead simply assert who witnessed which message, and
include the sent message in the assertion as well.

comments: use less broad technical names and more context-specific
names.

also removes some of the duplicate comments.
  • Loading branch information
koivunej committed Apr 1, 2022
commit 50ad10fc4c54d54cd069b0694b706c7dc5466d6b
72 changes: 46 additions & 26 deletions tests/pubsub.rs
Original file line number Diff line number Diff line change
Expand Up @@ -48,10 +48,8 @@ async fn can_publish_without_subscribing() {
}

#[tokio::test]
#[allow(clippy::mutable_key_type)] // clippy doesn't like Vec inside HashSet
async fn publish_between_two_nodes() {
async fn publish_between_two_nodes_single_topic() {
use futures::stream::StreamExt;
use std::collections::HashSet;

let nodes = spawn_nodes(2, Topology::Line).await;

Expand Down Expand Up @@ -98,29 +96,50 @@ async fn publish_between_two_nodes() {

// the order is not defined, but both should see the other's message and the message they sent
let expected = [
(&[topic.clone()], &nodes[0].id, b"foobar"),
(&[topic.clone()], &nodes[1].id, b"barfoo"),
// first node should witness the message it sent itself
(&[topic.clone()], nodes[0].id, b"foobar", nodes[0].id),
// second node should witness the first node's message, and so on.
(&[topic.clone()], nodes[0].id, b"foobar", nodes[1].id),
(&[topic.clone()], nodes[1].id, b"barfoo", nodes[0].id),
(&[topic.clone()], nodes[1].id, b"barfoo", nodes[1].id),
]
.iter()
.cloned()
.map(|(topics, id, data)| (topics.to_vec(), *id, data.to_vec()))
.collect::<HashSet<_>>();
.map(|(topics, sender, data, witness)| (topics.to_vec(), sender, data.to_vec(), witness))
.collect::<Vec<_>>();

for st in &mut [b_msgs.by_ref(), a_msgs.by_ref()] {
let actual = timeout(
let mut actual = Vec::new();

for (st, own_peer_id) in &mut [
(b_msgs.by_ref(), nodes[1].id),
(a_msgs.by_ref(), nodes[0].id),
] {
let received = timeout(
Duration::from_secs(2),
st.take(2)
// Arc::try_unwrap will fail sometimes here as the sender side in src/p2p/pubsub.rs:305
// can still be looping
.map(|msg| (*msg).clone())
.map(|msg| (msg.topics, msg.source, msg.data))
.collect::<HashSet<_>>(),
.map(|msg| (msg.topics, msg.source, msg.data, *own_peer_id))
.collect::<Vec<_>>(),
)
.await
.unwrap();
assert_eq!(expected, actual);

actual.extend(received);
}

// sort the received messages both in expected and actual to make sure they are comparable;
// order of receiving is not part of the tuple and shouldn't matter.
let mut expected = expected;
expected.sort_unstable();
actual.sort_unstable();

assert_eq!(
actual, expected,
"sent and received messages must be present on both nodes' streams"
);

drop(b_msgs);

let mut disappeared = false;
Expand All @@ -143,10 +162,8 @@ async fn publish_between_two_nodes() {
}

#[tokio::test]
#[allow(clippy::mutable_key_type)] // clippy doesn't like Vec inside HashSet
async fn publish_between_two_nodes_different_topics() {
use futures::stream::StreamExt;
use std::collections::HashSet;

let nodes = spawn_nodes(2, Topology::Line).await;
let node_a = &nodes[0];
Expand Down Expand Up @@ -197,34 +214,37 @@ async fn publish_between_two_nodes_different_topics() {
.await
.unwrap();

// the order is not defined, but both should see the other's message
// the order between messages is not defined, but both should see the other's message. since we
// receive messages first from node_b's stream we expect this order.
//
// in this test case the nodes are not expected to see their own message because nodes are not
// subscribing to the streams they are sending to.
let expected = [
(&[topic_a.clone()], &node_a.id, b"foobar"),
(&[topic_b.clone()], &node_b.id, b"barfoo"),
(&[topic_a.clone()], node_a.id, b"foobar", node_b.id),
(&[topic_b.clone()], node_b.id, b"barfoo", node_a.id),
]
.iter()
.cloned()
.map(|(topics, id, data)| (topics.to_vec(), *id, data.to_vec()))
.collect::<HashSet<_>>();
.map(|(topics, sender, data, witness)| (topics.to_vec(), sender, data.to_vec(), witness))
.collect::<Vec<_>>();

let mut actual = HashSet::new();
let mut actual = Vec::new();
for (st, own_peer_id) in &mut [(b_msgs.by_ref(), node_b.id), (a_msgs.by_ref(), node_a.id)] {
let actual_msg = timeout(
let received = timeout(
Duration::from_secs(2),
st.take(1)
// Arc::try_unwrap will fail sometimes here as the sender side in src/p2p/pubsub.rs:305
// can still be looping
.map(|msg| (*msg).clone())
.map(|msg| (msg.topics, msg.source, msg.data))
.filter(|(_, source_peer_id, _)| future::ready(source_peer_id != own_peer_id))
.map(|msg| (msg.topics, msg.source, msg.data, *own_peer_id))
.next(),
)
.await
.unwrap()
.unwrap();
actual.insert(actual_msg);
actual.push(received);
}

// ordering is defined for expected and actual by the order of the looping above and the
// initial expected creation.
assert_eq!(expected, actual);

drop(b_msgs);
Expand Down