Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions waku_scalability/assumptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,19 @@
message_size = 0.002  # average message size in MB (Mega Bytes)
messages_sent_per_hour = 5  # on a single pubsub topic / shard

# Here we've chosen assumptions that keep the average message size the same:
# (big_message_size * ratio_of_big_messages) + (small_message_size * (1-ratio_of_big_messages)) == message_size
# i.e. 0.006 * 0.2 + 0.001 * 0.8 == 0.002.
small_message_size = 0.001  # in MB (Mega Bytes)
big_message_size = 0.006  # in MB (Mega Bytes); big enough to trigger an IDONTWANT
ratio_of_big_messages = .2  # fraction of all messages that are big (see the equation above)

# Fraction of big-message copies that reach a node only *after* that node has
# already received the message — i.e. too late for its IDONTWANT to have
# suppressed the duplicate send.
idontwant_too_late = 0.6

# gossip
gossip_message_size = (
    0.00005  # 50 Bytes in MB (see https://github.com/libp2p/specs/pull/413#discussion_r1018821589 )
)
idontwant_message_size = 0.00005  # assumed same 50-byte control-message size as IHAVE/IWANT
d_lazy = 6  # gossip out degree
mcache_gossip = 3  # Number of history windows to use when emitting gossip (see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md)
avg_ratio_gossip_replys = 0.01  # -> this is a wild guess! (todo: investigate)
Expand All @@ -34,6 +43,8 @@
# Human-readable assumption strings (topology and relay behaviour), printed
# next to each case's results. Runtime string values are unchanged.
a1 = f"- A01. Message size (static): {sizeof_fmt_kb(message_size)}"
a2 = f"- A02. Messages sent per node per hour (static) (assuming no spam; but also no rate limiting.): {messages_sent_per_hour}"
a3 = f"- A03. The network topology is a d-regular graph of degree (static): {average_node_degree}"
a16 = "- A16. There exists at most one peer edge between any two nodes."
a17 = "- A17. The peer network is connected."
a4 = "- A04. Messages outside of Waku Relay are not considered, e.g. store messages."
a5 = "- A05. Messages are only sent once along an edge. (requires delays before sending)"
a6 = "- A06. Messages are sent to all d-1 neighbours as soon as receiving a message (current operation)"  # Thanks @Mmenduist
Expand All @@ -60,6 +71,10 @@
# Assumption strings (gossip / IDONTWANT).
# NOTE(review): a31's text labelled itself "A21"; fixed to "A31" so it matches
# the variable name and the surrounding numbering.
a31 = "- A31. Gossip is not considered."
a32 = "- A32. Gossip message size (IHAVE/IWANT) (static):" + sizeof_fmt_kb(gossip_message_size)
a33 = "- A33. Ratio of IHAVEs followed-up by an IWANT (incl. the actual requested message):" + str(avg_ratio_gossip_replys)
a34 = "- A34. Gossip message size for IDONTWANT (static): " + sizeof_fmt_kb(idontwant_message_size)
a35 = "- A35. Ratio of messages that are big enough to trigger a IDONTWANT response: " + str(ratio_of_big_messages)
# idontwant_too_late is the fraction of big-message copies arriving *after* the
# receiver already has the message (too late for IDONTWANT to help). The
# avoided fraction is therefore the complement (1 - idontwant_too_late), not
# the reciprocal: 1 / 0.6 ≈ 1.67 is not a valid ratio.
a36 = "- A36. Ratio of big messages that are avoided due to IDONTWANT: " + str(1 - idontwant_too_late)
a37 = "- A37. Size of messages large enough to trigger IDONTWANT (static): " + sizeof_fmt_kb(big_message_size)

# Assumption strings (delay)
a41 = "- A41. Delay is calculated based on an upper bound of the expected distance."
Expand Down
69 changes: 69 additions & 0 deletions waku_scalability/cases.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,18 @@
a13,
a14,
a15,
a16,
a17,
a21,
a31,
a32,
a33,
a41,
a42,
a34,
a35,
a36,
a37,
)

from assumptions import (
Expand All @@ -40,6 +46,11 @@
avg_nodes_per_shard,
avg_shards_per_node,
average_delay_per_hop,
ratio_of_big_messages,
idontwant_message_size,
big_message_size,
small_message_size,
idontwant_too_late,
)

from utils import load_color_fmt, magnitude_fmt, get_header, sizeof_fmt
Expand Down Expand Up @@ -239,6 +250,64 @@ def assumptions(self):
return [a1, a2, a3, a4, a6, a7, a32, a33]


# Case 5: single shard, n*(d-1) messages, IDONTWANT
class Case5(Case):
    label: str = "case 5"
    legend: str = (
        "Case 5. top: 6-regular; receive load per node, incl. IDONTWANT without IHAVE/IWANT"
    )

    def load(self, n_users, **kwargs):
        """Return the network-wide hourly receive load (MB) for ``n_users``
        nodes, modelling IDONTWANT suppression of duplicate big messages."""
        d = average_node_degree

        # Share of the messages a node handles that were relayed to it rather
        # than originated by it. Derived from "per-node messages" = x*d - x,
        # where x = messages_sent_per_hour.
        relayed_share = (d - 1) / d

        # Of the d copies of a message arriving at a node, exactly one is the
        # first copy; the remaining d - 1 are duplicates.
        first_copy_share = 1 / d

        # ---- per-node message counts, split by size class ----
        small_count = (
            messages_sent_per_hour
            * d
            * (1 - ratio_of_big_messages)
            * relayed_share
        )
        big_count = (
            messages_sent_per_hour
            * d
            * ratio_of_big_messages
            * relayed_share
        )
        big_first = big_count * first_copy_share
        # Duplicate copies of big messages: the 2nd, 3rd, ... arrival of a
        # message the node has already seen.
        big_duplicates = big_count * (1 - first_copy_share)
        # Only the duplicates whose send happened before the IDONTWANT could
        # take effect (see `idontwant_too_late`) are actually received.
        big_late = big_duplicates * idontwant_too_late

        # ---- convert counts to bandwidth ----
        small_load = small_message_size * small_count
        big_load = big_message_size * (big_first + big_late)

        # ---- scale the per-node figures up to the whole network ----
        idontwant_overhead = n_users * big_first * idontwant_message_size
        payload_load = n_users * (big_load + small_load)

        return payload_load + idontwant_overhead

    @property
    def header(self) -> str:
        # Caption printed above this case's results.
        return "Load case 5 (received load per node with IDONTWANT messages)"

    @property
    def assumptions(self):
        # Assumption strings displayed alongside this case's results.
        return [a1, a2, a3, a4, a6, a7, a16, a17, a34, a35, a36, a37]


# sharding case 1: multi shard, n*(d-1) messages, gossip
class ShardingCase1(Case):
label: str = "case 1"
Expand Down
4 changes: 3 additions & 1 deletion waku_scalability/waku_scaling.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
Case2,
Case3,
Case4,
Case5,
LatencyCase1,
ShardingCase1,
ShardingCase2,
Expand All @@ -36,6 +37,7 @@
Case2(),
Case3(),
Case4(),
Case5(),
ShardingCase1(),
ShardingCase2(),
ShardingCase3(),
Expand Down Expand Up @@ -86,7 +88,7 @@ def plot_load(caption: str, cases: List[Case], file_path: str):

# Render the single-shard comparison plot; Case5 adds the IDONTWANT modelling.
plot_load(
    caption="Plot 1: single shard.",
    cases=[Case1(), Case2(), Case3(), Case4(), Case5()],
    file_path="waku_scaling_single_shard_plot.png",
)
)

Expand Down