Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: Simplify configuration for the waku network #2404

Merged
merged 2 commits into from
Feb 7, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions apps/wakunode2/external_config.nim
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ type ProtectedTopic* = object
topic*: string
key*: secp256k1.SkPublicKey

type ShardIdx = distinct uint16 ## Shard index CLI/config value; distinct from uint16 so confutils dispatches to the dedicated `parseCmdArg`/`readValue` overloads below.

type StartUpCommand* = enum
noCommand # default, runs waku
generateRlnKeystore # generates a new RLN keystore
Expand Down Expand Up @@ -256,6 +258,11 @@ type
desc: "Default pubsub topic to subscribe to. Argument may be repeated."
name: "pubsub-topic" .}: seq[string]

shards* {.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thinking that in the future the ability to configure ranges may be needed for large subscriptions (e.g. --shards:0..7,15,21..29). For now a repeated arg makes sense.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Mm, I would say node operators don't necessarily need to specify the exact shards, but rather the number of shards they want to run — unless they are interested in a specific shard because their content topic falls in there; but in that case I expect them to provide the contentTopic and not the shard.

(but yeap, current behaviour is --shard=5 means shard 5 and not 5 shards)

desc: "Shards index to subscribe to [0..MAX_SHARDS-1]. Argument may be repeated."
defaultValue: @[]
name: "shard" .}: seq[ShardIdx]

contentTopics* {.
desc: "Default content topic to subscribe to. Argument may be repeated."
name: "content-topic" .}: seq[string]
Expand Down Expand Up @@ -576,6 +583,15 @@ proc parseCmdArg*(T: type Option[int], p: string): T =
except CatchableError:
raise newException(ValueError, "Invalid number")

proc completeCmdArg*(T: type ShardIdx, val: string): seq[ShardIdx] =
  ## confutils shell-completion hook for `ShardIdx` options: no
  ## completions are suggested, so an empty sequence is returned.
  newSeq[ShardIdx]()

proc parseCmdArg*(T: type ShardIdx, p: string): T =
  ## Parses a `--shard` CLI argument into a `ShardIdx`.
  ## Raises `ValueError` on non-numeric input or on values outside the
  ## uint16 backing range. Cluster-specific bounds (MAX_SHARDS per
  ## cluster-id) are intentionally not enforced here — see PR discussion.
  try:
    let index = parseInt(p)
    # Reject out-of-range values before the distinct conversion: converting
    # an out-of-range int to uint16 raises RangeDefect, which is NOT a
    # CatchableError and would crash instead of reporting a usage error.
    if index < 0 or index > int(uint16.high):
      raise newException(ValueError, "Invalid shard index")
    ShardIdx(index)
  except CatchableError:
    raise newException(ValueError, "Invalid shard index")

proc parseCmdArg*(T: type Option[uint], p: string): T =
try:
some(parseUint(p))
Expand Down Expand Up @@ -608,6 +624,18 @@ proc readValue*(r: var EnvvarReader, value: var ProtectedTopic) {.raises: [Seria
except CatchableError:
raise newException(SerializationError, getCurrentExceptionMsg())

proc readValue*(r: var TomlReader, value: var ShardIdx) {.raises: [SerializationError].} =
  ## Deserializes a shard index from a TOML config file by delegating to
  ## the CLI parser, so both input paths share the same validation.
  try:
    let raw = r.readValue(string)
    value = parseCmdArg(ShardIdx, raw)
  except CatchableError:
    raise newException(SerializationError, getCurrentExceptionMsg())

proc readValue*(r: var EnvvarReader, value: var ShardIdx) {.raises: [SerializationError].} =
  ## Deserializes a shard index from an environment variable by delegating
  ## to the CLI parser, so both input paths share the same validation.
  try:
    let raw = r.readValue(string)
    value = parseCmdArg(ShardIdx, raw)
  except CatchableError:
    raise newException(SerializationError, getCurrentExceptionMsg())

{.push warning[ProveInit]: off.}

proc load*(T: type WakuNodeConf, version=""): ConfResult[T] =
Expand Down
40 changes: 40 additions & 0 deletions apps/wakunode2/networks_config.nim
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

type ClusterConf* = object
  ## A preset bundle of node settings describing one Waku cluster;
  ## used to override individual CLI options with known-good values.
  maxMessageSize*: string             # human-readable size, e.g. "150KiB"
  clusterId*: uint32
  rlnRelay*: bool
  rlnRelayEthContractAddress*: string # RLN membership contract address
  rlnRelayDynamic*: bool
  rlnRelayBandwidthThreshold*: int
  pubsubTopics*: seq[string]          # static-sharding topics, "/waku/2/rs/<cluster>/<shard>"
  discv5Discovery*: bool
  discv5BootstrapNodes*: seq[string]  # ENR records to bootstrap discv5

proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
  ## Cluster configuration corresponding to The Waku Network
  ## (cluster-id=1). Note that it overrides existing cli configuration.
  ## Subscribes to all eight shards of cluster 1, enables dynamic RLN
  ## relay against the membership contract, and supplies the fleet's
  ## discv5 bootstrap ENRs.
  return ClusterConf(
    maxMessageSize: "150KiB",
    clusterId: 1.uint32,
    rlnRelay: true,
    rlnRelayEthContractAddress: "0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4",
    rlnRelayDynamic: true,
    rlnRelayBandwidthThreshold: 0,
    pubsubTopics:
      @[
        "/waku/2/rs/1/0", "/waku/2/rs/1/1", "/waku/2/rs/1/2", "/waku/2/rs/1/3",
        "/waku/2/rs/1/4", "/waku/2/rs/1/5", "/waku/2/rs/1/6", "/waku/2/rs/1/7"
      ],
    discv5Discovery: true,
    discv5BootstrapNodes:
      @[
        "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Ugl_r25UHQJ3f1rIRrpzxJXSMaJe4yk1XFSAYJpZIJ2NIJpcISygI2rim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJATXRSRSUyTw_QLB6H_U3oziVQgNRgrXpK7wp2AMyNxYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
        "enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9uAYJpZIJ2NIJpcIQiEAFDim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQMIJwesBVgUiBCi8yiXGx7RWylBQkYm1U9dvEy-neLG2YN0Y3CCdl-DdWRwgiMohXdha3UyDw",
        "enr:-QEkuEDzQyIAhs-CgBHIrJqtBv3EY1uP1Psrc-y8yJKsmxW7dh3DNcq2ergMUWSFVcJNlfcgBeVsFPkgd_QopRIiCV2pAYJpZIJ2NIJpcIQI2ttrim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJIN4qwz3v4r2Q8Bv8zZD0eqBcKw6bdLvdkV7-JLjqIj4N0Y3CCdl-DdWRwgiMohXdha3UyDw"
      ],
  )
80 changes: 65 additions & 15 deletions apps/wakunode2/wakunode2.nim
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ else:
{.push raises: [].}

import
std/[options, strutils, os],
std/[options, strutils, os, sequtils],
stew/shims/net as stewNet,
chronicles,
chronos,
Expand All @@ -16,12 +16,37 @@ import
../../tools/rln_keystore_generator/rln_keystore_generator,
../../waku/common/logging,
./external_config,
./networks_config,
./app

logScope:
topics = "wakunode main"

{.pop.} # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
proc logConfig(conf: WakuNodeConf) =
  ## Logs a startup summary of the effective node configuration: enabled
  ## protocols, network parameters, discv5 bootstrap nodes and — when
  ## dynamic RLN relay is active — the onchain validation settings.
  info "Configuration: Enabled protocols",
    relay = conf.relay,
    rlnRelay = conf.rlnRelay,
    store = conf.store,
    filter = conf.filter,
    lightpush = conf.lightpush,
    peerExchange = conf.peerExchange

  info "Configuration. Network",
    cluster = conf.clusterId,
    pubsubTopics = conf.pubsubTopics,
    maxPeers = conf.maxRelayPeers

  # One line per bootstrap node keeps each ENR readable in the logs.
  for bootstrapNode in conf.discv5BootstrapNodes:
    info "Configuration. Bootstrap nodes", node = bootstrapNode

  let onchainRlnActive = conf.rlnRelay and conf.rlnRelayDynamic
  if onchainRlnActive:
    info "Configuration. Validation",
      mechanism = "onchain rln",
      contract = conf.rlnRelayEthContractAddress,
      maxMessageSize = conf.maxMessageSize

{.pop.}
# @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
when isMainModule:
## Node setup happens in 6 phases:
## 1. Set up storage
Expand All @@ -34,23 +59,48 @@ when isMainModule:
const versionString = "version / git commit hash: " & app.git_version
let rng = crypto.newRng()

let confRes = WakuNodeConf.load(version=versionString)
let confRes = WakuNodeConf.load(version = versionString)
if confRes.isErr():
error "failure while loading the configuration", error=confRes.error
error "failure while loading the configuration", error = confRes.error
quit(QuitFailure)

let conf = confRes.get()
var conf = confRes.get()

# The Waku Network config (cluster-id=1)
if conf.clusterId == 1:
let twnClusterConf = ClusterConf.TheWakuNetworkConf()
if len(conf.shards) != 0:
conf.pubsubTopics = conf.shards.mapIt(twnClusterConf.pubsubTopics[it.uint16])
else:
conf.pubsubTopics = twnClusterConf.pubsubTopics

# Override configuration
conf.maxMessageSize = twnClusterConf.maxMessageSize
conf.clusterId = twnClusterConf.clusterId
conf.rlnRelay = twnClusterConf.rlnRelay
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
conf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold
conf.discv5Discovery = twnClusterConf.discv5Discovery
conf.discv5BootstrapNodes =
conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
Comment on lines +85 to +86
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this overriding the configuration or just appending made on purpose?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

appending, just in case someone wants to add more bootstrap nodes on top of the existing ones.


## Logging setup

# Adhere to NO_COLOR initiative: https://no-color.org/
let color = try: not parseBool(os.getEnv("NO_COLOR", "false"))
except CatchableError: true
let color =
try:
not parseBool(os.getEnv("NO_COLOR", "false"))
except CatchableError:
true

logging.setupLogLevel(conf.logLevel)
logging.setupLogFormat(conf.logFormat, color)

case conf.cmd:
info "Running nwaku node", version = app.git_version
logConfig(conf)

case conf.cmd
of generateRlnKeystore:
doRlnKeystoreGenerator(conf)
of noCommand:
Expand All @@ -65,42 +115,42 @@ when isMainModule:
## Peer persistence
let res1 = wakunode2.setupPeerPersistence()
if res1.isErr():
error "1/7 Setting up storage failed", error=res1.error
error "1/7 Setting up storage failed", error = res1.error
quit(QuitFailure)

debug "2/7 Retrieve dynamic bootstrap nodes"

let res3 = wakunode2.setupDyamicBootstrapNodes()
if res3.isErr():
error "2/7 Retrieving dynamic bootstrap nodes failed", error=res3.error
error "2/7 Retrieving dynamic bootstrap nodes failed", error = res3.error
quit(QuitFailure)

debug "3/7 Initializing node"

let res4 = wakunode2.setupWakuApp()
if res4.isErr():
error "3/7 Initializing node failed", error=res4.error
error "3/7 Initializing node failed", error = res4.error
quit(QuitFailure)

debug "4/7 Mounting protocols"

let res5 = waitFor wakunode2.setupAndMountProtocols()
if res5.isErr():
error "4/7 Mounting protocols failed", error=res5.error
error "4/7 Mounting protocols failed", error = res5.error
quit(QuitFailure)

debug "5/7 Starting node and mounted protocols"

let res6 = wakunode2.startApp()
if res6.isErr():
error "5/7 Starting node and protocols failed", error=res6.error
error "5/7 Starting node and protocols failed", error = res6.error
quit(QuitFailure)

debug "6/7 Starting monitoring and external interfaces"

let res7 = wakunode2.setupMonitoringAndExternalInterfaces()
if res7.isErr():
error "6/7 Starting monitoring and external interfaces failed", error=res7.error
error "6/7 Starting monitoring and external interfaces failed", error = res7.error
quit(QuitFailure)

debug "7/7 Setting up shutdown hooks"
Expand Down Expand Up @@ -133,7 +183,7 @@ when isMainModule:
when defined(posix):
proc handleSigsegv(signal: cint) {.noconv.} =
# Require --debugger:native
fatal "Shutting down after receiving SIGSEGV", stacktrace=getBacktrace()
fatal "Shutting down after receiving SIGSEGV", stacktrace = getBacktrace()

# Not available in -d:release mode
writeStackTrace()
Expand Down
Loading