diff --git a/apps/chat2/chat2.nim b/apps/chat2/chat2.nim index 1edfe24cec..7aee403ae7 100644 --- a/apps/chat2/chat2.nim +++ b/apps/chat2/chat2.nim @@ -38,8 +38,8 @@ import ../../waku/waku_lightpush/common, ../../waku/waku_lightpush/rpc, ../../waku/waku_enr, - ../../waku/waku_store, ../../waku/discovery/waku_dnsdisc, + ../../waku/waku_store_legacy, ../../waku/waku_node, ../../waku/node/waku_metrics, ../../waku/node/peer_manager, @@ -469,7 +469,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = # We have a viable storenode. Let's query it for historical messages. echo "Connecting to storenode: " & $(storenode.get()) - node.mountStoreClient() + node.mountLegacyStoreClient() node.peerManager.addServicePeer(storenode.get(), WakuStoreCodec) proc storeHandler(response: HistoryResponse) {.gcsafe.} = diff --git a/tests/all_tests_waku.nim b/tests/all_tests_waku.nim index cae32cc796..15a544f899 100644 --- a/tests/all_tests_waku.nim +++ b/tests/all_tests_waku.nim @@ -33,9 +33,16 @@ import ./waku_store/test_waku_store, ./waku_store/test_wakunode_store +# Waku legacy store test suite +import + ./waku_store_legacy/test_client, + ./waku_store_legacy/test_rpc_codec, + ./waku_store_legacy/test_waku_store, + ./waku_store_legacy/test_wakunode_store + when defined(waku_exp_store_resume): # TODO: Review store resume test cases (#1282) - import ./waku_store/test_resume + import ./waku_store_legacy/test_resume import ./node/test_all, diff --git a/tests/node/test_all.nim b/tests/node/test_all.nim index 6ef93f57e0..308550c88f 100644 --- a/tests/node/test_all.nim +++ b/tests/node/test_all.nim @@ -2,4 +2,5 @@ import ./test_wakunode_filter, ./test_wakunode_lightpush, ./test_wakunode_peer_exchange, - ./test_wakunode_store + ./test_wakunode_store, + ./test_wakunode_legacy_store diff --git a/tests/node/test_wakunode_legacy_store.nim b/tests/node/test_wakunode_legacy_store.nim new file mode 100644 index 0000000000..ce6657a6a8 --- /dev/null +++ b/tests/node/test_wakunode_legacy_store.nim @@ -0,0 +1,1077 @@ +{.used.} + +import + std/options, + stew/shims/net as stewNet, + testutils/unittests, + chronos, + libp2p/crypto/crypto + +import + ../../../waku/[ + common/paging, + node/waku_node, + node/peer_manager, + waku_core, + waku_store_legacy, + waku_store_legacy/client, + waku_archive, + waku_archive/driver/sqlite_driver, + common/databases/db_sqlite, + ], + ../waku_store_legacy/store_utils, + ../waku_archive/archive_utils, + ../testlib/[common, wakucore, wakunode, testasync, futures, testutils] + +suite "Waku Store - End to End - Sorted Archive": + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var archiveMessages {.threadvar.}: seq[WakuMessage] + var historyQuery {.threadvar.}: HistoryQuery + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var archiveDriver {.threadvar.}: ArchiveDriver + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + var clientPeerId {.threadvar.}: PeerId + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + let timeOrigin = now() + archiveMessages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, 
timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] + + historyQuery = HistoryQuery( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.Forward, + pageSize: 5, + ) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + + archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages) + let mountArchiveResult = server.mountArchive(archiveDriver) + assert mountArchiveResult.isOk() + + waitFor server.mountLegacyStore() + client.mountLegacyStoreClient() + + waitFor allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + clientPeerId = client.peerInfo.toRemotePeerInfo().peerId + + asyncTeardown: + waitFor allFutures(client.stop(), server.stop()) + + suite "Message Pagination": + asyncTest "Forward Pagination": + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next query + var otherHistoryQuery = HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + # When making the next history query + let otherQueryResponse = + await client.query(otherHistoryQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + otherQueryResponse.get().messages == archiveMessages[5 ..< 10] + + asyncTest "Backward Pagination": + # Given the history query is backward + historyQuery.direction = PagingDirection.BACKWARD + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[5 ..< 10] + + # Given the next query + var nextHistoryQuery = HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.BACKWARD, + pageSize: 5, + ) + + # When making the next history query + let otherQueryResponse = + await client.query(nextHistoryQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + otherQueryResponse.get().messages == archiveMessages[0 ..< 5] + + suite "Pagination with Differente Page Sizes": + asyncTest "Pagination with Small Page Size": + # Given the first query (1/5) + historyQuery.pageSize = 2 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 2] + + # Given the next query (2/5) + let historyQuery2 = HistoryQuery( + cursor: queryResponse1.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 2, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + 
queryResponse2.get().messages == archiveMessages[2 ..< 4] + + # Given the next query (3/5) + let historyQuery3 = HistoryQuery( + cursor: queryResponse2.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 2, + ) + + # When making the next history query + let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse3.get().messages == archiveMessages[4 ..< 6] + + # Given the next query (4/5) + let historyQuery4 = HistoryQuery( + cursor: queryResponse3.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 2, + ) + + # When making the next history query + let queryResponse4 = await client.query(historyQuery4, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse4.get().messages == archiveMessages[6 ..< 8] + + # Given the next query (5/5) + let historyQuery5 = HistoryQuery( + cursor: queryResponse4.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 2, + ) + + # When making the next history query + let queryResponse5 = await client.query(historyQuery5, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse5.get().messages == archiveMessages[8 ..< 10] + + asyncTest "Pagination with Large Page Size": + # Given the first query (1/2) + historyQuery.pageSize = 8 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 8] + + # Given the next query (2/2) + let historyQuery2 = HistoryQuery( + cursor: queryResponse1.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 8, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[8 ..< 10] + + asyncTest "Pagination with Excessive Page Size": + # Given the first query (1/1) + historyQuery.pageSize = 100 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 10] + + asyncTest "Pagination with Mixed Page Size": + # Given the first query (1/3) + historyQuery.pageSize = 2 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 2] + + # Given the next query (2/3) + let historyQuery2 = HistoryQuery( + cursor: queryResponse1.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 4, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[2 ..< 6] + + # Given the next query (3/3) + let historyQuery3 = HistoryQuery( + cursor: queryResponse2.get().cursor, + pubsubTopic: 
some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 6, + ) + + # When making the next history query + let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse3.get().messages == archiveMessages[6 ..< 10] + + asyncTest "Pagination with Zero Page Size (Behaves as DefaultPageSize)": + # Given a message list of size higher than the default page size + let currentStoreLen = uint((await archiveDriver.getMessagesCount()).get()) + assert archive.DefaultPageSize > currentStoreLen, + "This test requires a store with more than (DefaultPageSize) messages" + let missingMessagesAmount = archive.DefaultPageSize - currentStoreLen + 5 + + let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp + var extraMessages: seq[WakuMessage] = @[] + for i in 0 ..< missingMessagesAmount: + let + timestampOffset = 10 * int(i + 1) + # + 1 to avoid collision with existing messages + message: WakuMessage = + fakeWakuMessage(@[byte i], ts = ts(timestampOffset, lastMessageTimestamp)) + extraMessages.add(message) + discard archiveDriver.put(pubsubTopic, extraMessages) + + let totalMessages = archiveMessages & extraMessages + + # Given the a query with zero page size (1/2) + historyQuery.pageSize = 0 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the archive.DefaultPageSize messages + check: + queryResponse1.get().messages == totalMessages[0 ..< archive.DefaultPageSize] + + # Given the next query (2/2) + let historyQuery2 = HistoryQuery( + cursor: queryResponse1.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 0, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the remaining messages + check: + queryResponse2.get().messages == + totalMessages[archive.DefaultPageSize ..< archive.DefaultPageSize + 5] + + asyncTest "Pagination with Default Page Size": + # Given a message list of size higher than the default page size + let currentStoreLen = uint((await archiveDriver.getMessagesCount()).get()) + assert archive.DefaultPageSize > currentStoreLen, + "This test requires a store with more than (DefaultPageSize) messages" + let missingMessagesAmount = archive.DefaultPageSize - currentStoreLen + 5 + + let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp + var extraMessages: seq[WakuMessage] = @[] + for i in 0 ..< missingMessagesAmount: + let + timestampOffset = 10 * int(i + 1) + # + 1 to avoid collision with existing messages + message: WakuMessage = + fakeWakuMessage(@[byte i], ts = ts(timestampOffset, lastMessageTimestamp)) + extraMessages.add(message) + discard archiveDriver.put(pubsubTopic, extraMessages) + + let totalMessages = archiveMessages & extraMessages + + # Given a query with default page size (1/2) + historyQuery = HistoryQuery( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + ) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == totalMessages[0 ..< archive.DefaultPageSize] + + # Given the next query (2/2) + let historyQuery2 = HistoryQuery( + cursor: 
queryResponse.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == + totalMessages[archive.DefaultPageSize ..< archive.DefaultPageSize + 5] + + suite "Pagination with Different Cursors": + asyncTest "Starting Cursor": + # Given a cursor pointing to the first message + let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[0]) + historyQuery.cursor = some(cursor) + historyQuery.pageSize = 1 + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the message + check: + queryResponse.get().messages == archiveMessages[1 ..< 2] + + asyncTest "Middle Cursor": + # Given a cursor pointing to the middle message1 + let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[5]) + historyQuery.cursor = some(cursor) + historyQuery.pageSize = 1 + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the message + check: + queryResponse.get().messages == archiveMessages[6 ..< 7] + + asyncTest "Ending Cursor": + # Given a cursor pointing to the last message + let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[9]) + historyQuery.cursor = some(cursor) + historyQuery.pageSize = 1 + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + suite "Message Sorting": + asyncTest "Cursor Reusability Across Nodes": + # Given a different server node with the same archive + let + otherArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, archiveMessages) + otherServerKey = generateSecp256k1Key() + otherServer = + newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + mountOtherArchiveResult = + otherServer.mountArchive(otherArchiveDriverWithMessages) + assert mountOtherArchiveResult.isOk() + + waitFor otherServer.mountLegacyStore() + + waitFor otherServer.start() + let otherServerRemotePeerInfo = otherServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the first server node + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the cursor from the first query + let cursor = queryResponse.get().cursor + + # When making a history query to the second server node + let otherHistoryQuery = HistoryQuery( + cursor: cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + let otherQueryResponse = + await client.query(otherHistoryQuery, otherServerRemotePeerInfo) + + # Then the response contains the remaining messages + check: + otherQueryResponse.get().messages == archiveMessages[5 ..< 10] + + # Cleanup + waitFor otherServer.stop() + +suite "Waku Store - End to End - Unsorted Archive": + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var historyQuery {.threadvar.}: HistoryQuery + var unsortedArchiveMessages {.threadvar.}: 
seq[WakuMessage] + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + historyQuery = HistoryQuery( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + let timeOrigin = now() + unsortedArchiveMessages = + @[ # SortIndex (by timestamp and digest) + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), # 1 + fakeWakuMessage(@[byte 03], ts = ts(00, timeOrigin)), # 2 + fakeWakuMessage(@[byte 08], ts = ts(00, timeOrigin)), # 0 + fakeWakuMessage(@[byte 07], ts = ts(10, timeOrigin)), # 4 + fakeWakuMessage(@[byte 02], ts = ts(10, timeOrigin)), # 3 + fakeWakuMessage(@[byte 09], ts = ts(10, timeOrigin)), # 5 + fakeWakuMessage(@[byte 06], ts = ts(20, timeOrigin)), # 6 + fakeWakuMessage(@[byte 01], ts = ts(20, timeOrigin)), # 9 + fakeWakuMessage(@[byte 04], ts = ts(20, timeOrigin)), # 7 + fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)), # 8 + ] + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + + let + unsortedArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, unsortedArchiveMessages) + mountUnsortedArchiveResult = + server.mountArchive(unsortedArchiveDriverWithMessages) + + assert mountUnsortedArchiveResult.isOk() + + waitFor server.mountLegacyStore() + client.mountLegacyStoreClient() + + waitFor allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + asyncTeardown: + waitFor allFutures(client.stop(), server.stop()) + + asyncTest "Basic (Timestamp and Digest) Sorting Validation": + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + unsortedArchiveMessages[2], + unsortedArchiveMessages[0], + unsortedArchiveMessages[1], + unsortedArchiveMessages[4], + unsortedArchiveMessages[3], + ] + + # Given the next query + var historyQuery2 = HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == + @[ + unsortedArchiveMessages[5], + unsortedArchiveMessages[6], + unsortedArchiveMessages[8], + unsortedArchiveMessages[9], + unsortedArchiveMessages[7], + ] + + asyncTest "Backward pagination with Ascending Sorting": + # Given a history query with backward pagination + let cursor = computeHistoryCursor(pubsubTopic, unsortedArchiveMessages[4]) + historyQuery.direction = PagingDirection.BACKWARD + historyQuery.cursor = some(cursor) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + unsortedArchiveMessages[2], + unsortedArchiveMessages[0], + unsortedArchiveMessages[1], + ] + + asyncTest "Forward Pagination with Ascending Sorting": + # Given a 
history query with forward pagination + let cursor = computeHistoryCursor(pubsubTopic, unsortedArchiveMessages[4]) + historyQuery.direction = PagingDirection.FORWARD + historyQuery.cursor = some(cursor) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + unsortedArchiveMessages[3], + unsortedArchiveMessages[5], + unsortedArchiveMessages[6], + unsortedArchiveMessages[8], + unsortedArchiveMessages[9], + ] + +suite "Waku Store - End to End - Archive with Multiple Topics": + var pubsubTopic {.threadvar.}: PubsubTopic + var pubsubTopicB {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicB {.threadvar.}: ContentTopic + var contentTopicC {.threadvar.}: ContentTopic + var contentTopicSpecials {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var historyQuery {.threadvar.}: HistoryQuery + var originTs {.threadvar.}: proc(offset: int): Timestamp {.gcsafe, raises: [].} + var archiveMessages {.threadvar.}: seq[WakuMessage] + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + pubsubTopicB = "topicB" + contentTopic = DefaultContentTopic + contentTopicB = "topicB" + contentTopicC = "topicC" + contentTopicSpecials = "!@#$%^&*()_+" + contentTopicSeq = + @[contentTopic, contentTopicB, contentTopicC, contentTopicSpecials] + + historyQuery = HistoryQuery( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + let timeOrigin = now() + originTs = proc(offset = 0): Timestamp {.gcsafe, raises: [].} = + ts(offset, timeOrigin) + + archiveMessages = + @[ + fakeWakuMessage(@[byte 00], ts = originTs(00), contentTopic = contentTopic), + fakeWakuMessage(@[byte 01], ts = originTs(10), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 02], ts = originTs(20), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 03], ts = originTs(30), contentTopic = contentTopic), + fakeWakuMessage(@[byte 04], ts = originTs(40), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 05], ts = originTs(50), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 06], ts = originTs(60), contentTopic = contentTopic), + fakeWakuMessage(@[byte 07], ts = originTs(70), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 08], ts = originTs(80), contentTopic = contentTopicC), + fakeWakuMessage( + @[byte 09], ts = originTs(90), contentTopic = contentTopicSpecials + ), + ] + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + + let archiveDriver = newSqliteArchiveDriver() + .put(pubsubTopic, archiveMessages[0 ..< 6]) + .put(pubsubTopicB, archiveMessages[6 ..< 10]) + let mountSortedArchiveResult = server.mountArchive(archiveDriver) + + assert mountSortedArchiveResult.isOk() + + waitFor server.mountLegacyStore() + client.mountLegacyStoreClient() + + waitFor allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + asyncTeardown: + waitFor allFutures(client.stop(), server.stop()) + + suite "Validation of Content Filtering": + asyncTest "Basic Content Filtering": + # 
Given a history query with content filtering + historyQuery.contentTopics = @[contentTopic] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == @[archiveMessages[0], archiveMessages[3]] + + asyncTest "Multiple Content Filters": + # Given a history query with multiple content filtering + historyQuery.contentTopics = @[contentTopic, contentTopicB] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[0], + archiveMessages[1], + archiveMessages[3], + archiveMessages[4], + ] + + asyncTest "Empty Content Filtering": + # Given a history query with empty content filtering + historyQuery.contentTopics = @[] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next query + let historyQuery2 = HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: none(PubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[5 ..< 10] + + asyncTest "Non-Existent Content Topic": + # Given a history query with non-existent content filtering + historyQuery.contentTopics = @["non-existent-topic"] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + asyncTest "Special Characters in Content Filtering": + # Given a history query with special characters in content filtering + historyQuery.pubsubTopic = some(pubsubTopicB) + historyQuery.contentTopics = @["!@#$%^&*()_+"] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the message + check: + queryResponse.get().messages == @[archiveMessages[9]] + + asyncTest "PubsubTopic Specified": + # Given a history query with pubsub topic specified + historyQuery.pubsubTopic = some(pubsubTopicB) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[6], + archiveMessages[7], + archiveMessages[8], + archiveMessages[9], + ] + + asyncTest "PubsubTopic Left Empty": + # Given a history query with pubsub topic left empty + historyQuery.pubsubTopic = none(PubsubTopic) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next query + let historyQuery2 = HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: none(PubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, 
serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[5 ..< 10] + + suite "Validation of Time-based Filtering": + asyncTest "Basic Time Filtering": + # Given a history query with start and end time + historyQuery.startTime = some(originTs(20)) + historyQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[archiveMessages[2], archiveMessages[3], archiveMessages[4]] + + asyncTest "Only Start Time Specified": + # Given a history query with only start time + historyQuery.startTime = some(originTs(20)) + historyQuery.endTime = none(Timestamp) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[2], + archiveMessages[3], + archiveMessages[4], + archiveMessages[5], + ] + + asyncTest "Only End Time Specified": + # Given a history query with only end time + historyQuery.startTime = none(Timestamp) + historyQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[0], + archiveMessages[1], + archiveMessages[2], + archiveMessages[3], + archiveMessages[4], + ] + + asyncTest "Invalid Time Range": + # Given a history query with invalid time range + historyQuery.startTime = some(originTs(60)) + historyQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + asyncTest "Time Filtering with Content Filtering": + # Given a history query with time and content filtering + historyQuery.startTime = some(originTs(20)) + historyQuery.endTime = some(originTs(60)) + historyQuery.contentTopics = @[contentTopicC] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == @[archiveMessages[2], archiveMessages[5]] + + asyncTest "Messages Outside of Time Range": + # Given a history query with a valid time range which does not contain any messages + historyQuery.startTime = some(originTs(100)) + historyQuery.endTime = some(originTs(200)) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + suite "Ephemeral": + # TODO: Ephemeral value is not properly set for Sqlite + xasyncTest "Only ephemeral Messages:": + # Given an archive with only ephemeral messages + let + ephemeralMessages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true), + fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true), + fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true), + ] + ephemeralArchiveDriver = + newSqliteArchiveDriver().put(pubsubTopic, ephemeralMessages) + + # And a server node with the ephemeral archive + let + ephemeralServerKey = generateSecp256k1Key() + ephemeralServer = + newTestWakuNode(ephemeralServerKey, 
ValidIpAddress.init("0.0.0.0"), Port(0)) + mountEphemeralArchiveResult = + ephemeralServer.mountArchive(ephemeralArchiveDriver) + assert mountEphemeralArchiveResult.isOk() + + waitFor ephemeralServer.mountLegacyStore() + waitFor ephemeralServer.start() + let ephemeralServerRemotePeerInfo = ephemeralServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with only ephemeral messages + let queryResponse = + await client.query(historyQuery, ephemeralServerRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + # Cleanup + waitFor ephemeralServer.stop() + + xasyncTest "Mixed messages": + # Given an archive with both ephemeral and non-ephemeral messages + let + ephemeralMessages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true), + fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true), + fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true), + ] + nonEphemeralMessages = + @[ + fakeWakuMessage(@[byte 03], ts = ts(30), ephemeral = false), + fakeWakuMessage(@[byte 04], ts = ts(40), ephemeral = false), + fakeWakuMessage(@[byte 05], ts = ts(50), ephemeral = false), + ] + mixedArchiveDriver = newSqliteArchiveDriver() + .put(pubsubTopic, ephemeralMessages) + .put(pubsubTopic, nonEphemeralMessages) + + # And a server node with the mixed archive + let + mixedServerKey = generateSecp256k1Key() + mixedServer = + newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + mountMixedArchiveResult = mixedServer.mountArchive(mixedArchiveDriver) + assert mountMixedArchiveResult.isOk() + + waitFor mixedServer.mountLegacyStore() + waitFor mixedServer.start() + let mixedServerRemotePeerInfo = mixedServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with mixed messages + let queryResponse = await client.query(historyQuery, mixedServerRemotePeerInfo) + + # Then the response contains the non-ephemeral messages + check: + queryResponse.get().messages == nonEphemeralMessages + + # Cleanup + waitFor mixedServer.stop() + + suite "Edge Case Scenarios": + asyncTest "Empty Message Store": + # Given an empty archive + let emptyArchiveDriver = newSqliteArchiveDriver() + + # And a server node with the empty archive + let + emptyServerKey = generateSecp256k1Key() + emptyServer = + newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + mountEmptyArchiveResult = emptyServer.mountArchive(emptyArchiveDriver) + assert mountEmptyArchiveResult.isOk() + + waitFor emptyServer.mountLegacyStore() + waitFor emptyServer.start() + let emptyServerRemotePeerInfo = emptyServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with an empty archive + let queryResponse = await client.query(historyQuery, emptyServerRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + # Cleanup + waitFor emptyServer.stop() + + asyncTest "Voluminous Message Store": + # Given a voluminous archive (1M+ messages) + var voluminousArchiveMessages: seq[WakuMessage] = @[] + for i in 0 ..< 100000: + let topic = "topic" & $i + voluminousArchiveMessages.add(fakeWakuMessage(@[byte i], contentTopic = topic)) + let voluminousArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, voluminousArchiveMessages) + + # And a server node with the voluminous archive + let + voluminousServerKey = generateSecp256k1Key() + voluminousServer = + newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), 
Port(0)) + mountVoluminousArchiveResult = + voluminousServer.mountArchive(voluminousArchiveDriverWithMessages) + assert mountVoluminousArchiveResult.isOk() + + waitFor voluminousServer.mountLegacyStore() + waitFor voluminousServer.start() + let voluminousServerRemotePeerInfo = voluminousServer.peerInfo.toRemotePeerInfo() + + # Given the following history query + historyQuery.contentTopics = + @["topic10000", "topic30000", "topic50000", "topic70000", "topic90000"] + + # When making a history query to the server with a voluminous archive + let queryResponse = + await client.query(historyQuery, voluminousServerRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + voluminousArchiveMessages[10000], + voluminousArchiveMessages[30000], + voluminousArchiveMessages[50000], + voluminousArchiveMessages[70000], + voluminousArchiveMessages[90000], + ] + + # Cleanup + waitFor voluminousServer.stop() + + asyncTest "Large contentFilters Array": + # Given a history query with the max contentFilters len, 10 + historyQuery.contentTopics = @[contentTopic] + for i in 0 ..< 9: + let topic = "topic" & $i + historyQuery.contentTopics.add(topic) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response should trigger no errors + check: + queryResponse.get().messages == @[archiveMessages[0], archiveMessages[3]] diff --git a/tests/node/test_wakunode_store.nim b/tests/node/test_wakunode_store.nim index e6cea171aa..47f5e63a68 100644 --- a/tests/node/test_wakunode_store.nim +++ b/tests/node/test_wakunode_store.nim @@ -1,7 +1,7 @@ {.used.} import - std/options, + std/[options, sequtils, algorithm, sets], stew/shims/net as stewNet, testutils/unittests, chronos, @@ -13,6 +13,7 @@ import node/waku_node, node/peer_manager, waku_core, + waku_core/message/digest, waku_store, waku_store/client, waku_archive, @@ -28,8 +29,8 @@ suite "Waku Store - End to End - Sorted Archive": var contentTopic {.threadvar.}: ContentTopic var contentTopicSeq {.threadvar.}: seq[ContentTopic] - var archiveMessages {.threadvar.}: seq[WakuMessage] - var historyQuery {.threadvar.}: HistoryQuery + var archiveMessages {.threadvar.}: seq[WakuMessageKeyValue] + var storeQuery {.threadvar.}: StoreQueryRequest var server {.threadvar.}: WakuNode var client {.threadvar.}: WakuNode @@ -44,7 +45,7 @@ suite "Waku Store - End to End - Sorted Archive": contentTopicSeq = @[contentTopic] let timeOrigin = now() - archiveMessages = + let messages = @[ fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), @@ -57,12 +58,15 @@ suite "Waku Store - End to End - Sorted Archive": fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), ] + archiveMessages = messages.mapIt( + WakuMessageKeyValue(messageHash: computeMessageHash(pubsubTopic, it), message: it) + ) - historyQuery = HistoryQuery( + storeQuery = StoreQueryRequest( pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.Forward, - pageSize: 5, + paginationForward: PagingDirection.Forward, + paginationLimit: some(uint64(5)), ) let @@ -72,7 +76,7 @@ suite "Waku Store - End to End - Sorted Archive": server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages) + archiveDriver = 
newArchiveDriverWithMessages(pubsubTopic, messages) let mountArchiveResult = server.mountArchive(archiveDriver) assert mountArchiveResult.isOk() @@ -90,19 +94,19 @@ suite "Waku Store - End to End - Sorted Archive": suite "Message Pagination": asyncTest "Forward Pagination": # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: queryResponse.get().messages == archiveMessages[0 ..< 5] # Given the next query - var otherHistoryQuery = HistoryQuery( - cursor: queryResponse.get().cursor, + var otherHistoryQuery = StoreQueryRequest( pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 5, + paginationCursor: queryResponse.get().paginationCursor, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), ) # When making the next history query @@ -115,22 +119,22 @@ suite "Waku Store - End to End - Sorted Archive": asyncTest "Backward Pagination": # Given the history query is backward - historyQuery.direction = PagingDirection.BACKWARD + storeQuery.paginationForward = PagingDirection.BACKWARD # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: queryResponse.get().messages == archiveMessages[5 ..< 10] # Given the next query - var nextHistoryQuery = HistoryQuery( - cursor: queryResponse.get().cursor, + var nextHistoryQuery = StoreQueryRequest( + paginationCursor: queryResponse.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.BACKWARD, - pageSize: 5, + paginationForward: PagingDirection.BACKWARD, + paginationLimit: some(uint64(5)), ) # When making the next history query @@ -144,22 +148,22 @@ suite "Waku Store - End to End - Sorted Archive": suite "Pagination with Differente Page Sizes": asyncTest "Pagination with Small Page Size": # Given the first query (1/5) - historyQuery.pageSize = 2 + storeQuery.paginationLimit = some(uint64(2)) # When making a history query - let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: queryResponse1.get().messages == archiveMessages[0 ..< 2] # Given the next query (2/5) - let historyQuery2 = HistoryQuery( - cursor: queryResponse1.get().cursor, + let historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse1.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 2, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(2)), ) # When making the next history query @@ -170,12 +174,12 @@ suite "Waku Store - End to End - Sorted Archive": queryResponse2.get().messages == archiveMessages[2 ..< 4] # Given the next query (3/5) - let historyQuery3 = HistoryQuery( - cursor: queryResponse2.get().cursor, + let historyQuery3 = StoreQueryRequest( + paginationCursor: queryResponse2.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 2, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(2)), ) # When making the next 
history query @@ -186,12 +190,12 @@ suite "Waku Store - End to End - Sorted Archive": queryResponse3.get().messages == archiveMessages[4 ..< 6] # Given the next query (4/5) - let historyQuery4 = HistoryQuery( - cursor: queryResponse3.get().cursor, + let historyQuery4 = StoreQueryRequest( + paginationCursor: queryResponse3.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 2, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(2)), ) # When making the next history query @@ -202,12 +206,12 @@ suite "Waku Store - End to End - Sorted Archive": queryResponse4.get().messages == archiveMessages[6 ..< 8] # Given the next query (5/5) - let historyQuery5 = HistoryQuery( - cursor: queryResponse4.get().cursor, + let historyQuery5 = StoreQueryRequest( + paginationCursor: queryResponse4.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 2, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(2)), ) # When making the next history query @@ -219,22 +223,22 @@ suite "Waku Store - End to End - Sorted Archive": asyncTest "Pagination with Large Page Size": # Given the first query (1/2) - historyQuery.pageSize = 8 + storeQuery.paginationLimit = some(uint64(8)) # When making a history query - let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: queryResponse1.get().messages == archiveMessages[0 ..< 8] # Given the next query (2/2) - let historyQuery2 = HistoryQuery( - cursor: queryResponse1.get().cursor, + let historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse1.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 8, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(8)), ) # When making the next history query @@ -246,10 +250,10 @@ suite "Waku Store - End to End - Sorted Archive": asyncTest "Pagination with Excessive Page Size": # Given the first query (1/1) - historyQuery.pageSize = 100 + storeQuery.paginationLimit = some(uint64(100)) # When making a history query - let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: @@ -257,22 +261,22 @@ suite "Waku Store - End to End - Sorted Archive": asyncTest "Pagination with Mixed Page Size": # Given the first query (1/3) - historyQuery.pageSize = 2 + storeQuery.paginationLimit = some(uint64(2)) # When making a history query - let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: queryResponse1.get().messages == archiveMessages[0 ..< 2] # Given the next query (2/3) - let historyQuery2 = HistoryQuery( - cursor: queryResponse1.get().cursor, + let historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse1.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 4, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(4)), ) # When making the next history query @@ -283,12 +287,12 @@ 
suite "Waku Store - End to End - Sorted Archive": queryResponse2.get().messages == archiveMessages[2 ..< 6] # Given the next query (3/3) - let historyQuery3 = HistoryQuery( - cursor: queryResponse2.get().cursor, + let historyQuery3 = StoreQueryRequest( + paginationCursor: queryResponse2.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 6, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(6)), ) # When making the next history query @@ -305,7 +309,8 @@ suite "Waku Store - End to End - Sorted Archive": "This test requires a store with more than (DefaultPageSize) messages" let missingMessagesAmount = archive.DefaultPageSize - currentStoreLen + 5 - let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp + let lastMessageTimestamp = + archiveMessages[archiveMessages.len - 1].message.timestamp var extraMessages: seq[WakuMessage] = @[] for i in 0 ..< missingMessagesAmount: let @@ -316,25 +321,31 @@ suite "Waku Store - End to End - Sorted Archive": extraMessages.add(message) discard archiveDriver.put(pubsubTopic, extraMessages) - let totalMessages = archiveMessages & extraMessages + let totalMessages = + archiveMessages & + extraMessages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), message: it + ) + ) # Given the a query with zero page size (1/2) - historyQuery.pageSize = 0 + storeQuery.paginationLimit = none(uint64) # When making a history query - let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the archive.DefaultPageSize messages check: queryResponse1.get().messages == totalMessages[0 ..< archive.DefaultPageSize] # Given the next query (2/2) - let historyQuery2 = HistoryQuery( - cursor: queryResponse1.get().cursor, + let historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse1.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 0, + paginationForward: PagingDirection.FORWARD, + paginationLimit: none(uint64), ) # When making the next history query @@ -352,7 +363,8 @@ suite "Waku Store - End to End - Sorted Archive": "This test requires a store with more than (DefaultPageSize) messages" let missingMessagesAmount = archive.DefaultPageSize - currentStoreLen + 5 - let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp + let lastMessageTimestamp = + archiveMessages[archiveMessages.len - 1].message.timestamp var extraMessages: seq[WakuMessage] = @[] for i in 0 ..< missingMessagesAmount: let @@ -363,28 +375,34 @@ suite "Waku Store - End to End - Sorted Archive": extraMessages.add(message) discard archiveDriver.put(pubsubTopic, extraMessages) - let totalMessages = archiveMessages & extraMessages + let totalMessages = + archiveMessages & + extraMessages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), message: it + ) + ) # Given a query with default page size (1/2) - historyQuery = HistoryQuery( + storeQuery = StoreQueryRequest( pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, + paginationForward: PagingDirection.FORWARD, ) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, 
serverRemotePeerInfo) # Then the response contains the messages check: queryResponse.get().messages == totalMessages[0 ..< archive.DefaultPageSize] # Given the next query (2/2) - let historyQuery2 = HistoryQuery( - cursor: queryResponse.get().cursor, + let historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, + paginationForward: PagingDirection.FORWARD, ) # When making the next history query @@ -397,39 +415,39 @@ suite "Waku Store - End to End - Sorted Archive": suite "Pagination with Different Cursors": asyncTest "Starting Cursor": - # Given a cursor pointing to the first message - let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[0]) - historyQuery.cursor = some(cursor) - historyQuery.pageSize = 1 + # Given a paginationCursor pointing to the first message + let paginationCursor = archiveMessages[0].messageHash + storeQuery.paginationCursor = some(paginationCursor) + storeQuery.paginationLimit = some(uint64(1)) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the message check: queryResponse.get().messages == archiveMessages[1 ..< 2] asyncTest "Middle Cursor": - # Given a cursor pointing to the middle message1 - let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[5]) - historyQuery.cursor = some(cursor) - historyQuery.pageSize = 1 + # Given a paginationCursor pointing to the middle message1 + let paginationCursor = archiveMessages[5].messageHash + storeQuery.paginationCursor = some(paginationCursor) + storeQuery.paginationLimit = some(uint64(1)) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the message check: queryResponse.get().messages == archiveMessages[6 ..< 7] asyncTest "Ending Cursor": - # Given a cursor pointing to the last message - let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[9]) - historyQuery.cursor = some(cursor) - historyQuery.pageSize = 1 + # Given a paginationCursor pointing to the last message + let paginationCursor = archiveMessages[9].messageHash + storeQuery.paginationCursor = some(paginationCursor) + storeQuery.paginationLimit = some(uint64(1)) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains no messages check: @@ -440,7 +458,7 @@ suite "Waku Store - End to End - Sorted Archive": # Given a different server node with the same archive let otherArchiveDriverWithMessages = - newArchiveDriverWithMessages(pubsubTopic, archiveMessages) + newArchiveDriverWithMessages(pubsubTopic, archiveMessages.mapIt(it.message)) otherServerKey = generateSecp256k1Key() otherServer = newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0)) @@ -454,22 +472,22 @@ suite "Waku Store - End to End - Sorted Archive": let otherServerRemotePeerInfo = otherServer.peerInfo.toRemotePeerInfo() # When making a history query to the first server node - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the 
messages check: queryResponse.get().messages == archiveMessages[0 ..< 5] - # Given the cursor from the first query - let cursor = queryResponse.get().cursor + # Given the paginationCursor from the first query + let paginationCursor = queryResponse.get().paginationCursor # When making a history query to the second server node - let otherHistoryQuery = HistoryQuery( - cursor: cursor, + let otherHistoryQuery = StoreQueryRequest( + paginationCursor: paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 5, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), ) let otherQueryResponse = await client.query(otherHistoryQuery, otherServerRemotePeerInfo) @@ -486,8 +504,8 @@ suite "Waku Store - End to End - Unsorted Archive": var contentTopic {.threadvar.}: ContentTopic var contentTopicSeq {.threadvar.}: seq[ContentTopic] - var historyQuery {.threadvar.}: HistoryQuery - var unsortedArchiveMessages {.threadvar.}: seq[WakuMessage] + var storeQuery {.threadvar.}: StoreQueryRequest + var unsortedArchiveMessages {.threadvar.}: seq[WakuMessageKeyValue] var server {.threadvar.}: WakuNode var client {.threadvar.}: WakuNode @@ -499,27 +517,30 @@ suite "Waku Store - End to End - Unsorted Archive": contentTopic = DefaultContentTopic contentTopicSeq = @[contentTopic] - historyQuery = HistoryQuery( + storeQuery = StoreQueryRequest( pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 5, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), ) let timeOrigin = now() - unsortedArchiveMessages = - @[ # SortIndex (by timestamp and digest) - fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), # 1 - fakeWakuMessage(@[byte 03], ts = ts(00, timeOrigin)), # 2 - fakeWakuMessage(@[byte 08], ts = ts(00, timeOrigin)), # 0 - fakeWakuMessage(@[byte 07], ts = ts(10, timeOrigin)), # 4 - fakeWakuMessage(@[byte 02], ts = ts(10, timeOrigin)), # 3 - fakeWakuMessage(@[byte 09], ts = ts(10, timeOrigin)), # 5 - fakeWakuMessage(@[byte 06], ts = ts(20, timeOrigin)), # 6 - fakeWakuMessage(@[byte 01], ts = ts(20, timeOrigin)), # 9 - fakeWakuMessage(@[byte 04], ts = ts(20, timeOrigin)), # 7 - fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)), # 8 + let messages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)), ] + unsortedArchiveMessages = messages.mapIt( + WakuMessageKeyValue(messageHash: computeMessageHash(pubsubTopic, it), message: it) + ) let serverKey = generateSecp256k1Key() @@ -530,7 +551,7 @@ suite "Waku Store - End to End - Unsorted Archive": let unsortedArchiveDriverWithMessages = - newArchiveDriverWithMessages(pubsubTopic, unsortedArchiveMessages) + newArchiveDriverWithMessages(pubsubTopic, messages) mountUnsortedArchiveResult = server.mountArchive(unsortedArchiveDriverWithMessages) @@ -546,81 +567,270 @@ suite "Waku Store - End to End - Unsorted Archive": asyncTeardown: waitFor allFutures(client.stop(), server.stop()) - 
asyncTest "Basic (Timestamp and Digest) Sorting Validation": + asyncTest "Basic (Timestamp and Hash) Sorting Validation": # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) - # Then the response contains the messages + # Check the ordering check: - queryResponse.get().messages == - @[ - unsortedArchiveMessages[2], - unsortedArchiveMessages[0], - unsortedArchiveMessages[1], - unsortedArchiveMessages[4], - unsortedArchiveMessages[3], - ] + queryResponse.get().messages.len == 5 + + queryResponse.get().messages[0].message.timestamp == + queryResponse.get().messages[1].message.timestamp + + queryResponse.get().messages[1].message.timestamp == + queryResponse.get().messages[2].message.timestamp + + queryResponse.get().messages[2].message.timestamp < + queryResponse.get().messages[3].message.timestamp + + queryResponse.get().messages[3].message.timestamp == + queryResponse.get().messages[4].message.timestamp + + toHex(queryResponse.get().messages[0].messageHash) < + toHex(queryResponse.get().messages[1].messageHash) + + toHex(queryResponse.get().messages[1].messageHash) < + toHex(queryResponse.get().messages[2].messageHash) + + toHex(queryResponse.get().messages[3].messageHash) < + toHex(queryResponse.get().messages[4].messageHash) # Given the next query - var historyQuery2 = HistoryQuery( - cursor: queryResponse.get().cursor, + var historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse.get().paginationCursor, pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 5, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), ) # When making the next history query let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) - # Then the response contains the messages + # Check the ordering check: - queryResponse2.get().messages == - @[ - unsortedArchiveMessages[5], - unsortedArchiveMessages[6], - unsortedArchiveMessages[8], - unsortedArchiveMessages[9], - unsortedArchiveMessages[7], - ] + queryResponse2.get().messages[0].message.timestamp < + queryResponse2.get().messages[1].message.timestamp + + queryResponse2.get().messages[1].message.timestamp == + queryResponse2.get().messages[2].message.timestamp + + queryResponse2.get().messages[2].message.timestamp == + queryResponse2.get().messages[3].message.timestamp + + queryResponse2.get().messages[3].message.timestamp == + queryResponse2.get().messages[4].message.timestamp + + toHex(queryResponse2.get().messages[1].messageHash) < + toHex(queryResponse2.get().messages[2].messageHash) + + toHex(queryResponse2.get().messages[2].messageHash) < + toHex(queryResponse2.get().messages[3].messageHash) + + toHex(queryResponse2.get().messages[3].messageHash) < + toHex(queryResponse2.get().messages[4].messageHash) asyncTest "Backward pagination with Ascending Sorting": # Given a history query with backward pagination - let cursor = computeHistoryCursor(pubsubTopic, unsortedArchiveMessages[4]) - historyQuery.direction = PagingDirection.BACKWARD - historyQuery.cursor = some(cursor) + + # Pick the right cursor based on the ordering + var cursor = unsortedArchiveMessages[3].messageHash + if toHex(cursor) > toHex(unsortedArchiveMessages[4].messageHash): + cursor = unsortedArchiveMessages[4].messageHash + if toHex(cursor) > toHex(unsortedArchiveMessages[5].messageHash): + cursor = unsortedArchiveMessages[5].messageHash + + 
storeQuery.paginationForward = PagingDirection.BACKWARD + storeQuery.paginationCursor = some(cursor) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) - # Then the response contains the messages + # Then check the response ordering check: - queryResponse.get().messages == - @[ - unsortedArchiveMessages[2], - unsortedArchiveMessages[0], - unsortedArchiveMessages[1], - ] + queryResponse.get().messages.len == 3 + + queryResponse.get().messages[0].message.timestamp == + queryResponse.get().messages[1].message.timestamp + + queryResponse.get().messages[1].message.timestamp == + queryResponse.get().messages[2].message.timestamp + + toHex(queryResponse.get().messages[0].messageHash) < + toHex(queryResponse.get().messages[1].messageHash) + + toHex(queryResponse.get().messages[1].messageHash) < + toHex(queryResponse.get().messages[2].messageHash) asyncTest "Forward Pagination with Ascending Sorting": # Given a history query with forward pagination - let cursor = computeHistoryCursor(pubsubTopic, unsortedArchiveMessages[4]) - historyQuery.direction = PagingDirection.FORWARD - historyQuery.cursor = some(cursor) + + # Pick the right cursor based on the ordering + var cursor = unsortedArchiveMessages[3].messageHash + if toHex(cursor) > toHex(unsortedArchiveMessages[4].messageHash): + cursor = unsortedArchiveMessages[4].messageHash + if toHex(cursor) > toHex(unsortedArchiveMessages[5].messageHash): + cursor = unsortedArchiveMessages[5].messageHash + + storeQuery.paginationForward = PagingDirection.FORWARD + storeQuery.paginationCursor = some(cursor) + storeQuery.paginationLimit = some(uint64(6)) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) - # Then the response contains the messages + # Then check the response ordering check: - queryResponse.get().messages == - @[ - unsortedArchiveMessages[3], - unsortedArchiveMessages[5], - unsortedArchiveMessages[6], - unsortedArchiveMessages[8], - unsortedArchiveMessages[9], - ] + queryResponse.get().messages.len == 6 + + queryResponse.get().messages[0].message.timestamp == + queryResponse.get().messages[1].message.timestamp + + queryResponse.get().messages[1].message.timestamp < + queryResponse.get().messages[2].message.timestamp + + queryResponse.get().messages[2].message.timestamp == + queryResponse.get().messages[3].message.timestamp + + queryResponse.get().messages[3].message.timestamp == + queryResponse.get().messages[4].message.timestamp + + queryResponse.get().messages[4].message.timestamp == + queryResponse.get().messages[5].message.timestamp + + toHex(queryResponse.get().messages[0].messageHash) < + toHex(queryResponse.get().messages[1].messageHash) + + toHex(queryResponse.get().messages[2].messageHash) < + toHex(queryResponse.get().messages[3].messageHash) + + toHex(queryResponse.get().messages[3].messageHash) < + toHex(queryResponse.get().messages[4].messageHash) + + toHex(queryResponse.get().messages[4].messageHash) < + toHex(queryResponse.get().messages[5].messageHash) + +suite "Waku Store - End to End - Unsorted Archive without provided Timestamp": + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var storeQuery {.threadvar.}: StoreQueryRequest + var unsortedArchiveMessages {.threadvar.}: 
seq[WakuMessageKeyValue] + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + storeQuery = StoreQueryRequest( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + let messages = + @[ # Not providing explicit timestamp means it will be set in "arrive" order + fakeWakuMessage(@[byte 09]), + fakeWakuMessage(@[byte 07]), + fakeWakuMessage(@[byte 05]), + fakeWakuMessage(@[byte 03]), + fakeWakuMessage(@[byte 01]), + fakeWakuMessage(@[byte 00]), + fakeWakuMessage(@[byte 02]), + fakeWakuMessage(@[byte 04]), + fakeWakuMessage(@[byte 06]), + fakeWakuMessage(@[byte 08]), + ] + unsortedArchiveMessages = messages.mapIt( + WakuMessageKeyValue(messageHash: computeMessageHash(pubsubTopic, it), message: it) + ) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) + + let + unsortedArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, messages) + mountUnsortedArchiveResult = + server.mountArchive(unsortedArchiveDriverWithMessages) + + assert mountUnsortedArchiveResult.isOk() + + waitFor server.mountStore() + client.mountStoreClient() + + waitFor allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + asyncTeardown: + waitFor allFutures(client.stop(), server.stop()) + + asyncTest "Sorting using receiverTime": + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + check: + queryResponse.get().messages.len == 5 + + queryResponse.get().messages[0].message.timestamp <= + queryResponse.get().messages[1].message.timestamp + + queryResponse.get().messages[1].message.timestamp <= + queryResponse.get().messages[2].message.timestamp + + queryResponse.get().messages[2].message.timestamp <= + queryResponse.get().messages[3].message.timestamp + + queryResponse.get().messages[3].message.timestamp <= + queryResponse.get().messages[4].message.timestamp + + # Given the next query + var historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Timestamps are quite random in this case. + # Those are the only assumptions we can make in ALL cases. 
+ let setA = toHashSet(queryResponse.get().messages) + let setB = toHashSet(queryResponse2.get().messages) + let setC = intersection(setA, setB) + + check: + setC.len == 0 + + queryResponse2.get().messages.len == 5 + + queryResponse2.get().messages[0].message.timestamp <= + queryResponse2.get().messages[1].message.timestamp + + queryResponse2.get().messages[1].message.timestamp <= + queryResponse2.get().messages[2].message.timestamp + + queryResponse2.get().messages[2].message.timestamp <= + queryResponse2.get().messages[3].message.timestamp + + queryResponse2.get().messages[3].message.timestamp <= + queryResponse2.get().messages[4].message.timestamp suite "Waku Store - End to End - Archive with Multiple Topics": var pubsubTopic {.threadvar.}: PubsubTopic @@ -631,9 +841,9 @@ suite "Waku Store - End to End - Archive with Multiple Topics": var contentTopicSpecials {.threadvar.}: ContentTopic var contentTopicSeq {.threadvar.}: seq[ContentTopic] - var historyQuery {.threadvar.}: HistoryQuery + var storeQuery {.threadvar.}: StoreQueryRequest var originTs {.threadvar.}: proc(offset: int): Timestamp {.gcsafe, raises: [].} - var archiveMessages {.threadvar.}: seq[WakuMessage] + var archiveMessages {.threadvar.}: seq[WakuMessageKeyValue] var server {.threadvar.}: WakuNode var client {.threadvar.}: WakuNode @@ -650,18 +860,18 @@ suite "Waku Store - End to End - Archive with Multiple Topics": contentTopicSeq = @[contentTopic, contentTopicB, contentTopicC, contentTopicSpecials] - historyQuery = HistoryQuery( + storeQuery = StoreQueryRequest( pubsubTopic: some(pubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 5, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), ) let timeOrigin = now() originTs = proc(offset = 0): Timestamp {.gcsafe, raises: [].} = ts(offset, timeOrigin) - archiveMessages = + let messages = @[ fakeWakuMessage(@[byte 00], ts = originTs(00), contentTopic = contentTopic), fakeWakuMessage(@[byte 01], ts = originTs(10), contentTopic = contentTopicB), @@ -677,6 +887,14 @@ suite "Waku Store - End to End - Archive with Multiple Topics": ), ] + archiveMessages = messages.mapIt( + WakuMessageKeyValue(messageHash: computeMessageHash(pubsubTopic, it), message: it) + ) + + for i in 6 ..< 10: + archiveMessages[i].messagehash = + computeMessageHash(pubsubTopicB, archiveMessages[i].message) + let serverKey = generateSecp256k1Key() clientKey = generateSecp256k1Key() @@ -684,12 +902,12 @@ suite "Waku Store - End to End - Archive with Multiple Topics": server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0)) client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0)) - let archiveDriver = newSqliteArchiveDriver() - .put(pubsubTopic, archiveMessages[0 ..< 6]) - .put(pubsubTopicB, archiveMessages[6 ..< 10]) - let mountSortedArchiveResult = server.mountArchive(archiveDriver) + let archiveDriver = newSqliteArchiveDriver().put(pubsubTopic, messages[0 ..< 6]).put( + pubsubTopicB, messages[6 ..< 10] + ) + let mountUnsortedArchiveResult = server.mountArchive(archiveDriver) - assert mountSortedArchiveResult.isOk() + assert mountUnsortedArchiveResult.isOk() waitFor server.mountStore() client.mountStoreClient() @@ -704,10 +922,10 @@ suite "Waku Store - End to End - Archive with Multiple Topics": suite "Validation of Content Filtering": asyncTest "Basic Content Filtering": # Given a history query with content filtering - historyQuery.contentTopics = @[contentTopic] + storeQuery.contentTopics = 
@[contentTopic] # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: @@ -715,10 +933,10 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Multiple Content Filters": # Given a history query with multiple content filtering - historyQuery.contentTopics = @[contentTopic, contentTopicB] + storeQuery.contentTopics = @[contentTopic, contentTopicB] # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: @@ -732,22 +950,22 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Empty Content Filtering": # Given a history query with empty content filtering - historyQuery.contentTopics = @[] + storeQuery.contentTopics = @[] # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: queryResponse.get().messages == archiveMessages[0 ..< 5] # Given the next query - let historyQuery2 = HistoryQuery( - cursor: queryResponse.get().cursor, + let historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse.get().paginationCursor, pubsubTopic: none(PubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 5, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), ) # When making the next history query @@ -759,10 +977,10 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Non-Existent Content Topic": # Given a history query with non-existent content filtering - historyQuery.contentTopics = @["non-existent-topic"] + storeQuery.contentTopics = @["non-existent-topic"] # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains no messages check: @@ -770,11 +988,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Special Characters in Content Filtering": # Given a history query with special characters in content filtering - historyQuery.pubsubTopic = some(pubsubTopicB) - historyQuery.contentTopics = @["!@#$%^&*()_+"] + storeQuery.pubsubTopic = some(pubsubTopicB) + storeQuery.contentTopics = @["!@#$%^&*()_+"] # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains no messages check: @@ -782,10 +1000,10 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "PubsubTopic Specified": # Given a history query with pubsub topic specified - historyQuery.pubsubTopic = some(pubsubTopicB) + storeQuery.pubsubTopic = some(pubsubTopicB) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: @@ -799,22 +1017,22 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "PubsubTopic Left Empty": # Given a history query with 
pubsub topic left empty - historyQuery.pubsubTopic = none(PubsubTopic) + storeQuery.pubsubTopic = none(PubsubTopic) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: queryResponse.get().messages == archiveMessages[0 ..< 5] # Given the next query - let historyQuery2 = HistoryQuery( - cursor: queryResponse.get().cursor, + let historyQuery2 = StoreQueryRequest( + paginationCursor: queryResponse.get().paginationCursor, pubsubTopic: none(PubsubTopic), contentTopics: contentTopicSeq, - direction: PagingDirection.FORWARD, - pageSize: 5, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), ) # When making the next history query @@ -827,11 +1045,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics": suite "Validation of Time-based Filtering": asyncTest "Basic Time Filtering": # Given a history query with start and end time - historyQuery.startTime = some(originTs(20)) - historyQuery.endTime = some(originTs(40)) + storeQuery.startTime = some(originTs(20)) + storeQuery.endTime = some(originTs(40)) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: @@ -840,11 +1058,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Only Start Time Specified": # Given a history query with only start time - historyQuery.startTime = some(originTs(20)) - historyQuery.endTime = none(Timestamp) + storeQuery.startTime = some(originTs(20)) + storeQuery.endTime = none(Timestamp) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: @@ -858,11 +1076,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Only End Time Specified": # Given a history query with only end time - historyQuery.startTime = none(Timestamp) - historyQuery.endTime = some(originTs(40)) + storeQuery.startTime = none(Timestamp) + storeQuery.endTime = some(originTs(40)) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains no messages check: @@ -877,11 +1095,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Invalid Time Range": # Given a history query with invalid time range - historyQuery.startTime = some(originTs(60)) - historyQuery.endTime = some(originTs(40)) + storeQuery.startTime = some(originTs(60)) + storeQuery.endTime = some(originTs(40)) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains no messages check: @@ -889,12 +1107,12 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Time Filtering with Content Filtering": # Given a history query with time and content filtering - historyQuery.startTime = some(originTs(20)) - historyQuery.endTime = some(originTs(60)) - historyQuery.contentTopics = @[contentTopicC] + storeQuery.startTime = some(originTs(20)) + 
storeQuery.endTime = some(originTs(60)) + storeQuery.contentTopics = @[contentTopicC] # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains the messages check: @@ -902,11 +1120,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Messages Outside of Time Range": # Given a history query with a valid time range which does not contain any messages - historyQuery.startTime = some(originTs(100)) - historyQuery.endTime = some(originTs(200)) + storeQuery.startTime = some(originTs(100)) + storeQuery.endTime = some(originTs(200)) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response contains no messages check: @@ -940,8 +1158,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let ephemeralServerRemotePeerInfo = ephemeralServer.peerInfo.toRemotePeerInfo() # When making a history query to the server with only ephemeral messages - let queryResponse = - await client.query(historyQuery, ephemeralServerRemotePeerInfo) + let queryResponse = await client.query(storeQuery, ephemeralServerRemotePeerInfo) # Then the response contains no messages check: @@ -982,7 +1199,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let mixedServerRemotePeerInfo = mixedServer.peerInfo.toRemotePeerInfo() # When making a history query to the server with mixed messages - let queryResponse = await client.query(historyQuery, mixedServerRemotePeerInfo) + let queryResponse = await client.query(storeQuery, mixedServerRemotePeerInfo) # Then the response contains the non-ephemeral messages check: @@ -1009,7 +1226,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let emptyServerRemotePeerInfo = emptyServer.peerInfo.toRemotePeerInfo() # When making a history query to the server with an empty archive - let queryResponse = await client.query(historyQuery, emptyServerRemotePeerInfo) + let queryResponse = await client.query(storeQuery, emptyServerRemotePeerInfo) # Then the response contains no messages check: @@ -1020,12 +1237,19 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Voluminous Message Store": # Given a voluminous archive (1M+ messages) - var voluminousArchiveMessages: seq[WakuMessage] = @[] + var messages: seq[WakuMessage] = @[] for i in 0 ..< 100000: let topic = "topic" & $i - voluminousArchiveMessages.add(fakeWakuMessage(@[byte i], contentTopic = topic)) + messages.add(fakeWakuMessage(@[byte i], contentTopic = topic)) + + let voluminousArchiveMessages = messages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), message: it + ) + ) + let voluminousArchiveDriverWithMessages = - newArchiveDriverWithMessages(pubsubTopic, voluminousArchiveMessages) + newArchiveDriverWithMessages(pubsubTopic, messages) # And a server node with the voluminous archive let @@ -1041,12 +1265,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics": let voluminousServerRemotePeerInfo = voluminousServer.peerInfo.toRemotePeerInfo() # Given the following history query - historyQuery.contentTopics = + storeQuery.contentTopics = @["topic10000", "topic30000", "topic50000", "topic70000", "topic90000"] # When making a history query to the server with a voluminous archive - let queryResponse = - await 
client.query(historyQuery, voluminousServerRemotePeerInfo) + let queryResponse = await client.query(storeQuery, voluminousServerRemotePeerInfo) # Then the response contains the messages check: @@ -1064,13 +1287,13 @@ suite "Waku Store - End to End - Archive with Multiple Topics": asyncTest "Large contentFilters Array": # Given a history query with the max contentFilters len, 10 - historyQuery.contentTopics = @[contentTopic] + storeQuery.contentTopics = @[contentTopic] for i in 0 ..< 9: let topic = "topic" & $i - historyQuery.contentTopics.add(topic) + storeQuery.contentTopics.add(topic) # When making a history query - let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) # Then the response should trigger no errors check: diff --git a/tests/testlib/futures.nim b/tests/testlib/futures.nim index fcc37d909a..cfb6431e14 100644 --- a/tests/testlib/futures.nim +++ b/tests/testlib/futures.nim @@ -1,6 +1,6 @@ import chronos -import ../../../waku/[waku_core/message, waku_store] +import ../../../waku/[waku_core/message, waku_store, waku_store_legacy] const FUTURE_TIMEOUT* = 1.seconds @@ -13,8 +13,11 @@ proc newPushHandlerFuture*(): Future[(string, WakuMessage)] = proc newBoolFuture*(): Future[bool] = newFuture[bool]() -proc newHistoryFuture*(): Future[HistoryQuery] = - newFuture[HistoryQuery]() +proc newHistoryFuture*(): Future[StoreQueryRequest] = + newFuture[StoreQueryRequest]() + +proc newLegacyHistoryFuture*(): Future[waku_store_legacy.HistoryQuery] = + newFuture[waku_store_legacy.HistoryQuery]() proc toResult*[T](future: Future[T]): Result[T, string] = if future.cancelled(): diff --git a/tests/waku_archive/test_driver_queue.nim b/tests/waku_archive/test_driver_queue.nim index e4fa7425f3..d57f0ee1d2 100644 --- a/tests/waku_archive/test_driver_queue.nim +++ b/tests/waku_archive/test_driver_queue.nim @@ -17,11 +17,13 @@ proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) = let message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i)) + topic = "test-pubsub-topic" cursor = Index( receiverTime: Timestamp(i), senderTime: Timestamp(i), digest: MessageDigest(data: data), - pubsubTopic: "test-pubsub-topic", + pubsubTopic: topic, + hash: computeMessageHash(topic, message), ) (cursor, message) diff --git a/tests/waku_archive/test_driver_queue_index.nim b/tests/waku_archive/test_driver_queue_index.nim index 2af986557b..2f1e685c7c 100644 --- a/tests/waku_archive/test_driver_queue_index.nim +++ b/tests/waku_archive/test_driver_queue_index.nim @@ -1,8 +1,10 @@ {.used.} -import std/times, stew/byteutils, testutils/unittests, nimcrypto +import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto import ../../../waku/waku_core, ../../../waku/waku_archive/driver/queue_driver/index +var rng = initRand() + ## Helpers proc getTestTimestamp(offset = 0): Timestamp = @@ -19,6 +21,15 @@ proc hashFromStr(input: string): MDigest[256] = return hashed +proc randomHash(): WakuMessageHash = + var hash: WakuMessageHash + + for i in 0 ..< hash.len: + let numb: byte = byte(rng.next()) + hash[i] = numb + + hash + suite "Queue Driver - index": ## Test vars let @@ -26,67 +37,79 @@ suite "Queue Driver - index": digest: hashFromStr("1234"), receiverTime: getNanosecondTime(0), senderTime: getNanosecondTime(1000), + hash: randomHash(), ) smallIndex2 = Index( digest: hashFromStr("1234567"), # digest is less significant than senderTime receiverTime: getNanosecondTime(0), senderTime: getNanosecondTime(1000), + 
hash: randomHash(), ) largeIndex1 = Index( digest: hashFromStr("1234"), receiverTime: getNanosecondTime(0), senderTime: getNanosecondTime(9000), + hash: randomHash(), ) # only senderTime differ from smallIndex1 largeIndex2 = Index( digest: hashFromStr("12345"), # only digest differs from smallIndex1 receiverTime: getNanosecondTime(0), senderTime: getNanosecondTime(1000), + hash: randomHash(), ) eqIndex1 = Index( digest: hashFromStr("0003"), receiverTime: getNanosecondTime(0), senderTime: getNanosecondTime(54321), + hash: randomHash(), ) eqIndex2 = Index( digest: hashFromStr("0003"), receiverTime: getNanosecondTime(0), senderTime: getNanosecondTime(54321), + hash: randomHash(), ) eqIndex3 = Index( digest: hashFromStr("0003"), receiverTime: getNanosecondTime(9999), # receiverTime difference should have no effect on comparisons senderTime: getNanosecondTime(54321), + hash: randomHash(), ) diffPsTopic = Index( digest: hashFromStr("1234"), receiverTime: getNanosecondTime(0), senderTime: getNanosecondTime(1000), pubsubTopic: "zzzz", + hash: randomHash(), ) noSenderTime1 = Index( digest: hashFromStr("1234"), receiverTime: getNanosecondTime(1100), senderTime: getNanosecondTime(0), pubsubTopic: "zzzz", + hash: randomHash(), ) noSenderTime2 = Index( digest: hashFromStr("1234"), receiverTime: getNanosecondTime(10000), senderTime: getNanosecondTime(0), pubsubTopic: "zzzz", + hash: randomHash(), ) noSenderTime3 = Index( digest: hashFromStr("1234"), receiverTime: getNanosecondTime(1200), senderTime: getNanosecondTime(0), pubsubTopic: "aaaa", + hash: randomHash(), ) noSenderTime4 = Index( digest: hashFromStr("0"), receiverTime: getNanosecondTime(1200), senderTime: getNanosecondTime(0), pubsubTopic: "zzzz", + hash: randomHash(), ) test "Index comparison": diff --git a/tests/waku_archive/test_driver_queue_pagination.nim b/tests/waku_archive/test_driver_queue_pagination.nim index 4dbde2ef3d..6ce1d6d560 100644 --- a/tests/waku_archive/test_driver_queue_pagination.nim +++ b/tests/waku_archive/test_driver_queue_pagination.nim @@ -24,6 +24,7 @@ proc getTestQueueDriver(numMessages: int): QueueDriver = receiverTime: Timestamp(i), senderTime: Timestamp(i), digest: MessageDigest(data: data), + hash: computeMessageHash(DefaultPubsubTopic, msg), ) discard testQueueDriver.add(index, msg) diff --git a/tests/waku_store/store_utils.nim b/tests/waku_store/store_utils.nim index 9f9fa7402b..595466fb84 100644 --- a/tests/waku_store/store_utils.nim +++ b/tests/waku_store/store_utils.nim @@ -7,7 +7,7 @@ import ../testlib/[common, wakucore] proc newTestWakuStore*( - switch: Switch, handler: HistoryQueryHandler + switch: Switch, handler: StoreQueryRequestHandler ): Future[WakuStore] {.async.} = let peerManager = PeerManager.new(switch) @@ -21,13 +21,3 @@ proc newTestWakuStore*( proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient = let peerManager = PeerManager.new(switch) WakuStoreClient.new(peerManager, rng) - -proc computeHistoryCursor*( - pubsubTopic: PubsubTopic, message: WakuMessage -): HistoryCursor = - HistoryCursor( - pubsubTopic: pubsubTopic, - senderTime: message.timestamp, - storeTime: message.timestamp, - digest: waku_store.computeDigest(message), - ) diff --git a/tests/waku_store/test_all.nim b/tests/waku_store/test_all.nim index b495310f27..da990f4128 100644 --- a/tests/waku_store/test_all.nim +++ b/tests/waku_store/test_all.nim @@ -1,8 +1,3 @@ {.used.} -import - ./test_client, - ./test_resume, - ./test_rpc_codec, - ./test_waku_store, - ./test_wakunode_store +import ./test_client, ./test_rpc_codec, 
./test_waku_store, ./test_wakunode_store diff --git a/tests/waku_store/test_client.nim b/tests/waku_store/test_client.nim index 9b675106b4..bee963eb2a 100644 --- a/tests/waku_store/test_client.nim +++ b/tests/waku_store/test_client.nim @@ -12,10 +12,13 @@ suite "Store Client": var message1 {.threadvar.}: WakuMessage var message2 {.threadvar.}: WakuMessage var message3 {.threadvar.}: WakuMessage - var messageSeq {.threadvar.}: seq[WakuMessage] - var handlerFuture {.threadvar.}: Future[HistoryQuery] - var handler {.threadvar.}: HistoryQueryHandler - var historyQuery {.threadvar.}: HistoryQuery + var hash1 {.threadvar.}: WakuMessageHash + var hash2 {.threadvar.}: WakuMessageHash + var hash3 {.threadvar.}: WakuMessageHash + var messageSeq {.threadvar.}: seq[WakuMessageKeyValue] + var handlerFuture {.threadvar.}: Future[StoreQueryRequest] + var handler {.threadvar.}: StoreQueryRequestHandler + var storeQuery {.threadvar.}: StoreQueryRequest var serverSwitch {.threadvar.}: Switch var clientSwitch {.threadvar.}: Switch @@ -30,15 +33,25 @@ suite "Store Client": message1 = fakeWakuMessage(contentTopic = DefaultContentTopic) message2 = fakeWakuMessage(contentTopic = DefaultContentTopic) message3 = fakeWakuMessage(contentTopic = DefaultContentTopic) - messageSeq = @[message1, message2, message3] + hash1 = computeMessageHash(DefaultPubsubTopic, message1) + hash2 = computeMessageHash(DefaultPubsubTopic, message2) + hash3 = computeMessageHash(DefaultPubsubTopic, message3) + messageSeq = + @[ + WakuMessageKeyValue(messageHash: hash1, message: message1), + WakuMessageKeyValue(messageHash: hash2, message: message2), + WakuMessageKeyValue(messageHash: hash3, message: message3), + ] handlerFuture = newHistoryFuture() - handler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} = - handlerFuture.complete(req) - return ok(HistoryResponse(messages: messageSeq)) - historyQuery = HistoryQuery( + handler = proc(req: StoreQueryRequest): Future[StoreQueryResult] {.async, gcsafe.} = + var request = req + request.requestId = "" + handlerFuture.complete(request) + return ok(StoreQueryResponse(messages: messageSeq)) + storeQuery = StoreQueryRequest( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], - direction: PagingDirection.FORWARD, + paginationForward: PagingDirection.FORWARD, ) serverSwitch = newTestSwitch() @@ -55,15 +68,15 @@ suite "Store Client": asyncTeardown: await allFutures(serverSwitch.stop(), clientSwitch.stop()) - suite "HistoryQuery Creation and Execution": + suite "StoreQueryRequest Creation and Execution": asyncTest "Valid Queries": # When a valid query is sent to the server - let queryResponse = await client.query(historyQuery, peer = serverPeerInfo) + let queryResponse = await client.query(storeQuery, peer = serverPeerInfo) # Then the query is processed successfully assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) check: - handlerFuture.read() == historyQuery + handlerFuture.read() == storeQuery queryResponse.get().messages == messageSeq asyncTest "Invalid Queries": @@ -73,33 +86,33 @@ suite "Store Client": # Given some invalid queries let - invalidQuery1 = HistoryQuery( + invalidQuery1 = StoreQueryRequest( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[], - direction: PagingDirection.FORWARD, + paginationForward: PagingDirection.FORWARD, ) - invalidQuery2 = HistoryQuery( + invalidQuery2 = StoreQueryRequest( pubsubTopic: PubsubTopic.none(), contentTopics: @[DefaultContentTopic], - direction: PagingDirection.FORWARD, + paginationForward: 
PagingDirection.FORWARD, ) - invalidQuery3 = HistoryQuery( + invalidQuery3 = StoreQueryRequest( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], - pageSize: 0, + paginationLimit: some(uint64(0)), ) - invalidQuery4 = HistoryQuery( + invalidQuery4 = StoreQueryRequest( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], - pageSize: 0, + paginationLimit: some(uint64(0)), ) - invalidQuery5 = HistoryQuery( + invalidQuery5 = StoreQueryRequest( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], startTime: some(0.Timestamp), endTime: some(0.Timestamp), ) - invalidQuery6 = HistoryQuery( + invalidQuery6 = StoreQueryRequest( pubsubTopic: some(DefaultPubsubTopic), contentTopics: @[DefaultContentTopic], startTime: some(0.Timestamp), @@ -165,15 +178,15 @@ suite "Store Client": handlerFuture.read() == invalidQuery6 queryResponse6.get().messages == messageSeq - suite "Verification of HistoryResponse Payload": + suite "Verification of StoreQueryResponse Payload": asyncTest "Positive Responses": # When a valid query is sent to the server - let queryResponse = await client.query(historyQuery, peer = serverPeerInfo) + let queryResponse = await client.query(storeQuery, peer = serverPeerInfo) # Then the query is processed successfully, and is of the expected type check: await handlerFuture.withTimeout(FUTURE_TIMEOUT) - type(queryResponse.get()) is HistoryResponse + type(queryResponse.get()) is StoreQueryResponse asyncTest "Negative Responses - PeerDialFailure": # Given a stopped peer @@ -182,10 +195,10 @@ suite "Store Client": otherServerPeerInfo = otherServerSwitch.peerInfo.toRemotePeerInfo() # When a query is sent to the stopped peer - let queryResponse = await client.query(historyQuery, peer = otherServerPeerInfo) + let queryResponse = await client.query(storeQuery, peer = otherServerPeerInfo) # Then the query is not processed check: not await handlerFuture.withTimeout(FUTURE_TIMEOUT) queryResponse.isErr() - queryResponse.error.kind == HistoryErrorKind.PEER_DIAL_FAILURE + queryResponse.error.kind == ErrorCode.PEER_DIAL_FAILURE diff --git a/tests/waku_store/test_rpc_codec.nim b/tests/waku_store/test_rpc_codec.nim index d8b3e28a65..d88da84b1b 100644 --- a/tests/waku_store/test_rpc_codec.nim +++ b/tests/waku_store/test_rpc_codec.nim @@ -5,113 +5,29 @@ import ../../../waku/common/protobuf, ../../../waku/common/paging, ../../../waku/waku_core, - ../../../waku/waku_store/rpc, + ../../../waku/waku_store/common, ../../../waku/waku_store/rpc_codec, - ../testlib/common, ../testlib/wakucore procSuite "Waku Store - RPC codec": - test "PagingIndexRPC protobuf codec": + test "StoreQueryRequest protobuf codec": ## Given - let index = PagingIndexRPC.compute( - fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + let query = StoreQueryRequest( + requestId: "0", + includeData: false, + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + startTime: some(Timestamp(10)), + endTime: some(Timestamp(11)), + messageHashes: @[], + paginationCursor: none(WakuMessageHash), + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(DefaultPageSize), ) - ## When - let encodedIndex = index.encode() - let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer) - - ## Then - check: - decodedIndexRes.isOk() - - let decodedIndex = decodedIndexRes.tryGet() - check: - # The fields of decodedIndex must be the same as the original index - decodedIndex == index - - test "PagingIndexRPC protobuf codec 
- empty index": - ## Given - let emptyIndex = PagingIndexRPC() - - let encodedIndex = emptyIndex.encode() - let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer) - - ## Then - check: - decodedIndexRes.isOk() - - let decodedIndex = decodedIndexRes.tryGet() - check: - # Check the correctness of init and encode for an empty PagingIndexRPC - decodedIndex == emptyIndex - - test "PagingInfoRPC protobuf codec": - ## Given - let - index = PagingIndexRPC.compute( - fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic - ) - pagingInfo = PagingInfoRPC( - pageSize: some(1'u64), - cursor: some(index), - direction: some(PagingDirection.FORWARD), - ) - - ## When - let pb = pagingInfo.encode() - let decodedPagingInfo = PagingInfoRPC.decode(pb.buffer) - - ## Then - check: - decodedPagingInfo.isOk() - - check: - # The fields of decodedPagingInfo must be the same as the original pagingInfo - decodedPagingInfo.value == pagingInfo - decodedPagingInfo.value.direction == pagingInfo.direction - - test "PagingInfoRPC protobuf codec - empty paging info": - ## Given - let emptyPagingInfo = PagingInfoRPC() - - ## When - let pb = emptyPagingInfo.encode() - let decodedEmptyPagingInfo = PagingInfoRPC.decode(pb.buffer) - - ## Then - check: - decodedEmptyPagingInfo.isOk() - - check: - # check the correctness of init and encode for an empty PagingInfoRPC - decodedEmptyPagingInfo.value == emptyPagingInfo - - test "HistoryQueryRPC protobuf codec": - ## Given - let - index = PagingIndexRPC.compute( - fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic - ) - pagingInfo = PagingInfoRPC( - pageSize: some(1'u64), - cursor: some(index), - direction: some(PagingDirection.BACKWARD), - ) - query = HistoryQueryRPC( - contentFilters: - @[ - HistoryContentFilterRPC(contentTopic: DefaultContentTopic), - HistoryContentFilterRPC(contentTopic: DefaultContentTopic), - ], - pagingInfo: some(pagingInfo), - startTime: some(Timestamp(10)), - endTime: some(Timestamp(11)), - ) - ## When let pb = query.encode() - let decodedQuery = HistoryQueryRPC.decode(pb.buffer) + let decodedQuery = StoreQueryRequest.decode(pb.buffer) ## Then check: @@ -121,13 +37,13 @@ procSuite "Waku Store - RPC codec": # the fields of decoded query decodedQuery must be the same as the original query query decodedQuery.value == query - test "HistoryQueryRPC protobuf codec - empty history query": + test "StoreQueryRequest protobuf codec - empty history query": ## Given - let emptyQuery = HistoryQueryRPC() + let emptyQuery = StoreQueryRequest() ## When let pb = emptyQuery.encode() - let decodedEmptyQuery = HistoryQueryRPC.decode(pb.buffer) + let decodedEmptyQuery = StoreQueryRequest.decode(pb.buffer) ## Then check: @@ -137,27 +53,23 @@ procSuite "Waku Store - RPC codec": # check the correctness of init and encode for an empty HistoryQueryRPC decodedEmptyQuery.value == emptyQuery - test "HistoryResponseRPC protobuf codec": + test "StoreQueryResponse protobuf codec": ## Given let message = fakeWakuMessage() - index = PagingIndexRPC.compute( - message, receivedTime = ts(), pubsubTopic = DefaultPubsubTopic - ) - pagingInfo = PagingInfoRPC( - pageSize: some(1'u64), - cursor: some(index), - direction: some(PagingDirection.BACKWARD), - ) - res = HistoryResponseRPC( - messages: @[message], - pagingInfo: some(pagingInfo), - error: HistoryResponseErrorRPC.INVALID_CURSOR, + hash = computeMessageHash(DefaultPubsubTopic, message) + keyValue = WakuMessageKeyValue(messageHash: hash, message: message) + res = StoreQueryResponse( + requestId: 
"1", + statusCode: 200, + statusDesc: "it's fine", + messages: @[keyValue], + paginationCursor: none(WakuMessageHash), ) ## When let pb = res.encode() - let decodedRes = HistoryResponseRPC.decode(pb.buffer) + let decodedRes = StoreQueryResponse.decode(pb.buffer) ## Then check: @@ -167,13 +79,13 @@ procSuite "Waku Store - RPC codec": # the fields of decoded response decodedRes must be the same as the original response res decodedRes.value == res - test "HistoryResponseRPC protobuf codec - empty history response": + test "StoreQueryResponse protobuf codec - empty history response": ## Given - let emptyRes = HistoryResponseRPC() + let emptyRes = StoreQueryResponse() ## When let pb = emptyRes.encode() - let decodedEmptyRes = HistoryResponseRPC.decode(pb.buffer) + let decodedEmptyRes = StoreQueryResponse.decode(pb.buffer) ## Then check: diff --git a/tests/waku_store/test_waku_store.nim b/tests/waku_store/test_waku_store.nim index da70fa1c35..56ca9a314f 100644 --- a/tests/waku_store/test_waku_store.nim +++ b/tests/waku_store/test_waku_store.nim @@ -3,8 +3,15 @@ import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto import - ../../../waku/ - [common/paging, node/peer_manager, waku_core, waku_store, waku_store/client], + ../../../waku/[ + common/paging, + node/peer_manager, + waku_core, + waku_core/message/digest, + waku_store, + waku_store/client, + waku_store/common, + ], ../testlib/[common, wakucore], ./store_utils @@ -21,21 +28,25 @@ suite "Waku Store - query handler": let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() let msg = fakeWakuMessage(contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + let kv = WakuMessageKeyValue(messageHash: hash, message: msg) - var queryHandlerFut = newFuture[(HistoryQuery)]() + var queryHandlerFut = newFuture[(StoreQueryRequest)]() let queryHandler = proc( - req: HistoryQuery - ): Future[HistoryResult] {.async, gcsafe.} = - queryHandlerFut.complete(req) - return ok(HistoryResponse(messages: @[msg])) + req: StoreQueryRequest + ): Future[StoreQueryResult] {.async, gcsafe.} = + var request = req + request.requestId = "" # Must remove the id for equality + queryHandlerFut.complete(request) + return ok(StoreQueryResponse(messages: @[kv])) let server = await newTestWakuStore(serverSwitch, handler = queryhandler) client = newTestWakuStoreClient(clientSwitch) - let req = HistoryQuery( - contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD + let req = StoreQueryRequest( + contentTopics: @[DefaultContentTopic], paginationForward: PagingDirection.FORWARD ) ## When @@ -53,7 +64,7 @@ suite "Waku Store - query handler": let response = queryRes.tryGet() check: response.messages.len == 1 - response.messages == @[msg] + response.messages == @[kv] ## Cleanup await allFutures(serverSwitch.stop(), clientSwitch.stop()) @@ -69,19 +80,21 @@ suite "Waku Store - query handler": ## Given let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() - var queryHandlerFut = newFuture[(HistoryQuery)]() + var queryHandlerFut = newFuture[(StoreQueryRequest)]() let queryHandler = proc( - req: HistoryQuery - ): Future[HistoryResult] {.async, gcsafe.} = - queryHandlerFut.complete(req) - return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST)) + req: StoreQueryRequest + ): Future[StoreQueryResult] {.async, gcsafe.} = + var request = req + request.requestId = "" # Must remove the id for equality + queryHandlerFut.complete(request) + return err(StoreError(kind: ErrorCode.BAD_REQUEST)) let 
server = await newTestWakuStore(serverSwitch, handler = queryhandler) client = newTestWakuStoreClient(clientSwitch) - let req = HistoryQuery( - contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD + let req = StoreQueryRequest( + contentTopics: @[DefaultContentTopic], paginationForward: PagingDirection.FORWARD ) ## When @@ -98,7 +111,7 @@ suite "Waku Store - query handler": let error = queryRes.tryError() check: - error.kind == HistoryErrorKind.BAD_REQUEST + error.kind == ErrorCode.BAD_REQUEST ## Cleanup await allFutures(serverSwitch.stop(), clientSwitch.stop()) diff --git a/tests/waku_store/test_wakunode_store.nim b/tests/waku_store/test_wakunode_store.nim index cab0b545e4..bce7bacb88 100644 --- a/tests/waku_store/test_wakunode_store.nim +++ b/tests/waku_store/test_wakunode_store.nim @@ -1,6 +1,7 @@ {.used.} import + std/sequtils, stew/shims/net as stewNet, testutils/unittests, chronicles, @@ -13,7 +14,6 @@ import libp2p/protocols/pubsub/pubsub, libp2p/protocols/pubsub/gossipsub import - ../../../waku/common/databases/db_sqlite, ../../../waku/common/paging, ../../../waku/waku_core, ../../../waku/waku_core/message/digest, @@ -27,7 +27,6 @@ import ../../../waku/waku_node, ../waku_store/store_utils, ../waku_archive/archive_utils, - ../testlib/common, ../testlib/wakucore, ../testlib/wakunode @@ -48,14 +47,21 @@ procSuite "WakuNode - Store": fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), ] + let hashes = msgListA.mapIt(computeMessageHash(DefaultPubsubTopic, it)) + + let kvs = + zip(hashes, msgListA).mapIt(WakuMessageKeyValue(messageHash: it[0], message: it[1])) + let archiveA = block: let driver = newSqliteArchiveDriver() - for msg in msgListA: - let msg_digest = waku_archive.computeDigest(msg) - let msg_hash = computeMessageHash(DefaultPubsubTopic, msg) + for kv in kvs: + let msg_digest = computeDigest(kv.message) require ( - waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp) + waitFor driver.put( + DefaultPubsubTopic, kv.message, msg_digest, kv.messageHash, + kv.message.timestamp, + ) ).isOk() driver @@ -78,7 +84,7 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Given - let req = HistoryQuery(contentTopics: @[DefaultContentTopic]) + let req = StoreQueryRequest(contentTopics: @[DefaultContentTopic]) let serverPeer = server.peerInfo.toRemotePeerInfo() ## When @@ -89,7 +95,7 @@ procSuite "WakuNode - Store": let response = queryRes.get() check: - response.messages == msgListA + response.messages == kvs # Cleanup waitFor allFutures(client.stop(), server.stop()) @@ -112,18 +118,18 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Given - let req = HistoryQuery( + let req = StoreQueryRequest( contentTopics: @[DefaultContentTopic], - pageSize: 7, - direction: PagingDirection.FORWARD, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(7)), ) let serverPeer = server.peerInfo.toRemotePeerInfo() ## When var nextReq = req # copy - var pages = newSeq[seq[WakuMessage]](2) - var cursors = newSeq[Option[HistoryCursor]](2) + var pages = newSeq[seq[WakuMessageKeyValue]](2) + var cursors = newSeq[Option[WakuMessageHash]](2) for i in 0 ..< 2: let res = waitFor client.query(nextReq, peer = serverPeer) @@ -132,19 +138,19 @@ procSuite "WakuNode - Store": # Keep query response content let response = res.get() pages[i] = response.messages - cursors[i] = response.cursor + cursors[i] = response.paginationCursor # Set/update the request cursor - nextReq.cursor = cursors[i] + nextReq.paginationCursor = cursors[i] 
## Then check: - cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[6])) - cursors[1] == none(HistoryCursor) + cursors[0] == some(kvs[6].messageHash) + cursors[1] == none(WakuMessageHash) check: - pages[0] == msgListA[0 .. 6] - pages[1] == msgListA[7 .. 9] + pages[0] == kvs[0 .. 6] + pages[1] == kvs[7 .. 9] # Cleanup waitFor allFutures(client.stop(), server.stop()) @@ -167,18 +173,18 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Given - let req = HistoryQuery( + let req = StoreQueryRequest( contentTopics: @[DefaultContentTopic], - pageSize: 7, - direction: PagingDirection.BACKWARD, + paginationLimit: some(uint64(7)), + paginationForward: PagingDirection.BACKWARD, ) let serverPeer = server.peerInfo.toRemotePeerInfo() ## When var nextReq = req # copy - var pages = newSeq[seq[WakuMessage]](2) - var cursors = newSeq[Option[HistoryCursor]](2) + var pages = newSeq[seq[WakuMessageKeyValue]](2) + var cursors = newSeq[Option[WakuMessageHash]](2) for i in 0 ..< 2: let res = waitFor client.query(nextReq, peer = serverPeer) @@ -187,19 +193,19 @@ procSuite "WakuNode - Store": # Keep query response content let response = res.get() pages[i] = response.messages - cursors[i] = response.cursor + cursors[i] = response.paginationCursor # Set/update the request cursor - nextReq.cursor = cursors[i] + nextReq.paginationCursor = cursors[i] ## Then check: - cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[3])) - cursors[1] == none(HistoryCursor) + cursors[0] == some(kvs[3].messageHash) + cursors[1] == none(WakuMessageHash) check: - pages[0] == msgListA[3 .. 9] - pages[1] == msgListA[0 .. 2] + pages[0] == kvs[3 .. 9] + pages[1] == kvs[0 .. 2] # Cleanup waitFor allFutures(client.stop(), server.stop()) @@ -230,6 +236,7 @@ procSuite "WakuNode - Store": ## Given let message = fakeWakuMessage() + let hash = computeMessageHash(DefaultPubSubTopic, message) let serverPeer = server.peerInfo.toRemotePeerInfo() filterSourcePeer = filterSource.peerInfo.toRemotePeerInfo() @@ -254,9 +261,8 @@ procSuite "WakuNode - Store": # Wait for the server filter to receive the push message require waitFor filterFut.withTimeout(5.seconds) - let res = waitFor client.query( - HistoryQuery(contentTopics: @[DefaultContentTopic]), peer = serverPeer - ) + let req = StoreQueryRequest(contentTopics: @[DefaultContentTopic]) + let res = waitFor client.query(req, serverPeer) ## Then check res.isOk() @@ -264,7 +270,7 @@ procSuite "WakuNode - Store": let response = res.get() check: response.messages.len == 1 - response.messages[0] == message + response.messages[0] == WakuMessageKeyValue(messageHash: hash, message: message) let (handledPubsubTopic, handledMsg) = filterFut.read() check: @@ -292,28 +298,27 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Forcing a bad cursor with empty digest data - var data: array[32, byte] = [ + var cursor: WakuMessageHash = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ] - let cursor = HistoryCursor( - pubsubTopic: "pubsubTopic", - senderTime: now(), - storeTime: now(), - digest: waku_archive.MessageDigest(data: data), - ) ## Given - let req = HistoryQuery(contentTopics: @[DefaultContentTopic], cursor: some(cursor)) + let req = StoreQueryRequest( + contentTopics: @[DefaultContentTopic], paginationCursor: some(cursor) + ) let serverPeer = server.peerInfo.toRemotePeerInfo() ## When let queryRes = waitFor client.query(req, peer = serverPeer) ## Then - check not queryRes.isOk() + check queryRes.isOk() - 
check queryRes.error == "BAD_REQUEST: invalid cursor" + let response = queryRes.get() + + check response.statusCode == 400 + check response.statusDesc == "BAD_REQUEST: invalid cursor" # Cleanup waitFor allFutures(client.stop(), server.stop()) @@ -336,7 +341,7 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Given - let req = HistoryQuery(contentTopics: @[DefaultContentTopic]) + let req = StoreQueryRequest(contentTopics: @[DefaultContentTopic]) let serverPeer = server.peerInfo.toRemotePeerInfo() let requestProc = proc() {.async.} = @@ -346,7 +351,7 @@ procSuite "WakuNode - Store": let response = queryRes.get() check: - response.messages == msgListA + response.messages.mapIt(it.message) == msgListA for count in 0 ..< 4: waitFor requestProc() @@ -379,23 +384,24 @@ procSuite "WakuNode - Store": client.mountStoreClient() ## Given - let req = HistoryQuery(contentTopics: @[DefaultContentTopic]) + let req = StoreQueryRequest(contentTopics: @[DefaultContentTopic]) let serverPeer = server.peerInfo.toRemotePeerInfo() let successProc = proc() {.async.} = let queryRes = waitFor client.query(req, peer = serverPeer) check queryRes.isOk() - let response = queryRes.get() check: - response.messages == msgListA + response.messages.mapIt(it.message) == msgListA let failsProc = proc() {.async.} = let queryRes = waitFor client.query(req, peer = serverPeer) - check queryRes.isErr() - check queryRes.error == "TOO_MANY_REQUESTS" + check queryRes.isOk() + let response = queryRes.get() + + check response.statusCode == 429 for count in 0 ..< 3: waitFor successProc() diff --git a/tests/waku_store_legacy/store_utils.nim b/tests/waku_store_legacy/store_utils.nim new file mode 100644 index 0000000000..20dcf66935 --- /dev/null +++ b/tests/waku_store_legacy/store_utils.nim @@ -0,0 +1,34 @@ +{.used.} + +import std/options, chronos, chronicles, libp2p/crypto/crypto + +import + ../../../waku/ + [node/peer_manager, waku_core, waku_store_legacy, waku_store_legacy/client], + ../testlib/[common, wakucore] + +proc newTestWakuStore*( + switch: Switch, handler: HistoryQueryHandler +): Future[WakuStore] {.async.} = + let + peerManager = PeerManager.new(switch) + proto = WakuStore.new(peerManager, rng, handler) + + await proto.start() + switch.mount(proto) + + return proto + +proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient = + let peerManager = PeerManager.new(switch) + WakuStoreClient.new(peerManager, rng) + +proc computeHistoryCursor*( + pubsubTopic: PubsubTopic, message: WakuMessage +): HistoryCursor = + HistoryCursor( + pubsubTopic: pubsubTopic, + senderTime: message.timestamp, + storeTime: message.timestamp, + digest: computeDigest(message), + ) diff --git a/tests/waku_store_legacy/test_all.nim b/tests/waku_store_legacy/test_all.nim new file mode 100644 index 0000000000..b495310f27 --- /dev/null +++ b/tests/waku_store_legacy/test_all.nim @@ -0,0 +1,8 @@ +{.used.} + +import + ./test_client, + ./test_resume, + ./test_rpc_codec, + ./test_waku_store, + ./test_wakunode_store diff --git a/tests/waku_store_legacy/test_client.nim b/tests/waku_store_legacy/test_client.nim new file mode 100644 index 0000000000..66ff8955d4 --- /dev/null +++ b/tests/waku_store_legacy/test_client.nim @@ -0,0 +1,196 @@ +{.used.} + +import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto + +import + ../../../waku/[ + node/peer_manager, + waku_core, + waku_store_legacy, + waku_store_legacy/client, + common/paging, + ], + ../testlib/[common, wakucore, testasync, futures], + ./store_utils + +suite "Store 
Client": + var message1 {.threadvar.}: WakuMessage + var message2 {.threadvar.}: WakuMessage + var message3 {.threadvar.}: WakuMessage + var messageSeq {.threadvar.}: seq[WakuMessage] + var handlerFuture {.threadvar.}: Future[HistoryQuery] + var handler {.threadvar.}: HistoryQueryHandler + var historyQuery {.threadvar.}: HistoryQuery + + var serverSwitch {.threadvar.}: Switch + var clientSwitch {.threadvar.}: Switch + + var server {.threadvar.}: WakuStore + var client {.threadvar.}: WakuStoreClient + + var serverPeerInfo {.threadvar.}: RemotePeerInfo + var clientPeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + message1 = fakeWakuMessage(contentTopic = DefaultContentTopic) + message2 = fakeWakuMessage(contentTopic = DefaultContentTopic) + message3 = fakeWakuMessage(contentTopic = DefaultContentTopic) + messageSeq = @[message1, message2, message3] + handlerFuture = newLegacyHistoryFuture() + handler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} = + handlerFuture.complete(req) + return ok(HistoryResponse(messages: messageSeq)) + historyQuery = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + direction: PagingDirection.FORWARD, + ) + + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + server = await newTestWakuStore(serverSwitch, handler = handler) + client = newTestWakuStoreClient(clientSwitch) + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerInfo = clientSwitch.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await allFutures(serverSwitch.stop(), clientSwitch.stop()) + + suite "HistoryQuery Creation and Execution": + asyncTest "Valid Queries": + # When a valid query is sent to the server + let queryResponse = await client.query(historyQuery, peer = serverPeerInfo) + + # Then the query is processed successfully + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == historyQuery + queryResponse.get().messages == messageSeq + + asyncTest "Invalid Queries": + # TODO: IMPROVE: We can't test "actual" invalid queries because + # it directly depends on the handler implementation, to achieve + # proper coverage we'd need an example implementation. 
+ + # Given some invalid queries + let + invalidQuery1 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[], + direction: PagingDirection.FORWARD, + ) + invalidQuery2 = HistoryQuery( + pubsubTopic: PubsubTopic.none(), + contentTopics: @[DefaultContentTopic], + direction: PagingDirection.FORWARD, + ) + invalidQuery3 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + pageSize: 0, + ) + invalidQuery4 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + pageSize: 0, + ) + invalidQuery5 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + startTime: some(0.Timestamp), + endTime: some(0.Timestamp), + ) + invalidQuery6 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + startTime: some(0.Timestamp), + endTime: some(-1.Timestamp), + ) + + # When the query is sent to the server + let queryResponse1 = await client.query(invalidQuery1, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery1 + queryResponse1.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse2 = await client.query(invalidQuery2, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery2 + queryResponse2.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse3 = await client.query(invalidQuery3, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery3 + queryResponse3.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse4 = await client.query(invalidQuery4, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery4 + queryResponse4.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse5 = await client.query(invalidQuery5, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery5 + queryResponse5.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse6 = await client.query(invalidQuery6, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery6 + queryResponse6.get().messages == messageSeq + + suite "Verification of HistoryResponse Payload": + asyncTest "Positive Responses": + # When a valid query is sent to the server + let queryResponse = await client.query(historyQuery, peer = serverPeerInfo) + + # Then the query is processed successfully, and is of the expected type + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + type(queryResponse.get()) is HistoryResponse + + asyncTest "Negative Responses - PeerDialFailure": + # Given a stopped peer + let + otherServerSwitch = 
newTestSwitch() + otherServerPeerInfo = otherServerSwitch.peerInfo.toRemotePeerInfo() + + # When a query is sent to the stopped peer + let queryResponse = await client.query(historyQuery, peer = otherServerPeerInfo) + + # Then the query is not processed + check: + not await handlerFuture.withTimeout(FUTURE_TIMEOUT) + queryResponse.isErr() + queryResponse.error.kind == HistoryErrorKind.PEER_DIAL_FAILURE diff --git a/tests/waku_store/test_resume.nim b/tests/waku_store_legacy/test_resume.nim similarity index 99% rename from tests/waku_store/test_resume.nim rename to tests/waku_store_legacy/test_resume.nim index c15aa175ba..d061481f64 100644 --- a/tests/waku_store/test_resume.nim +++ b/tests/waku_store_legacy/test_resume.nim @@ -13,8 +13,8 @@ import ../../waku/node/peer_manager, ../../waku/waku_core, ../../waku/waku_core/message/digest, - ../../waku/waku_store, - ../waku_store/store_utils, + ../../waku/waku_store_legacy, + ../waku_store_legacy/store_utils, ../waku_archive/archive_utils, ./testlib/common, ./testlib/switch diff --git a/tests/waku_store_legacy/test_rpc_codec.nim b/tests/waku_store_legacy/test_rpc_codec.nim new file mode 100644 index 0000000000..211aecb6c5 --- /dev/null +++ b/tests/waku_store_legacy/test_rpc_codec.nim @@ -0,0 +1,184 @@ +{.used.} + +import std/options, testutils/unittests, chronos +import + ../../../waku/common/protobuf, + ../../../waku/common/paging, + ../../../waku/waku_core, + ../../../waku/waku_store_legacy/rpc, + ../../../waku/waku_store_legacy/rpc_codec, + ../testlib/common, + ../testlib/wakucore + +procSuite "Waku Store - RPC codec": + test "PagingIndexRPC protobuf codec": + ## Given + let index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + + ## When + let encodedIndex = index.encode() + let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer) + + ## Then + check: + decodedIndexRes.isOk() + + let decodedIndex = decodedIndexRes.tryGet() + check: + # The fields of decodedIndex must be the same as the original index + decodedIndex == index + + test "PagingIndexRPC protobuf codec - empty index": + ## Given + let emptyIndex = PagingIndexRPC() + + let encodedIndex = emptyIndex.encode() + let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer) + + ## Then + check: + decodedIndexRes.isOk() + + let decodedIndex = decodedIndexRes.tryGet() + check: + # Check the correctness of init and encode for an empty PagingIndexRPC + decodedIndex == emptyIndex + + test "PagingInfoRPC protobuf codec": + ## Given + let + index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.FORWARD), + ) + + ## When + let pb = pagingInfo.encode() + let decodedPagingInfo = PagingInfoRPC.decode(pb.buffer) + + ## Then + check: + decodedPagingInfo.isOk() + + check: + # The fields of decodedPagingInfo must be the same as the original pagingInfo + decodedPagingInfo.value == pagingInfo + decodedPagingInfo.value.direction == pagingInfo.direction + + test "PagingInfoRPC protobuf codec - empty paging info": + ## Given + let emptyPagingInfo = PagingInfoRPC() + + ## When + let pb = emptyPagingInfo.encode() + let decodedEmptyPagingInfo = PagingInfoRPC.decode(pb.buffer) + + ## Then + check: + decodedEmptyPagingInfo.isOk() + + check: + # check the correctness of init and encode for an empty PagingInfoRPC + decodedEmptyPagingInfo.value == emptyPagingInfo + + test 
"HistoryQueryRPC protobuf codec": + ## Given + let + index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.BACKWARD), + ) + query = HistoryQueryRPC( + contentFilters: + @[ + HistoryContentFilterRPC(contentTopic: DefaultContentTopic), + HistoryContentFilterRPC(contentTopic: DefaultContentTopic), + ], + pagingInfo: some(pagingInfo), + startTime: some(Timestamp(10)), + endTime: some(Timestamp(11)), + ) + + ## When + let pb = query.encode() + let decodedQuery = HistoryQueryRPC.decode(pb.buffer) + + ## Then + check: + decodedQuery.isOk() + + check: + # the fields of decoded query decodedQuery must be the same as the original query query + decodedQuery.value == query + + test "HistoryQueryRPC protobuf codec - empty history query": + ## Given + let emptyQuery = HistoryQueryRPC() + + ## When + let pb = emptyQuery.encode() + let decodedEmptyQuery = HistoryQueryRPC.decode(pb.buffer) + + ## Then + check: + decodedEmptyQuery.isOk() + + check: + # check the correctness of init and encode for an empty HistoryQueryRPC + decodedEmptyQuery.value == emptyQuery + + test "HistoryResponseRPC protobuf codec": + ## Given + let + message = fakeWakuMessage() + index = PagingIndexRPC.compute( + message, receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.BACKWARD), + ) + res = HistoryResponseRPC( + messages: @[message], + pagingInfo: some(pagingInfo), + error: HistoryResponseErrorRPC.INVALID_CURSOR, + ) + + ## When + let pb = res.encode() + let decodedRes = HistoryResponseRPC.decode(pb.buffer) + + ## Then + check: + decodedRes.isOk() + + check: + # the fields of decoded response decodedRes must be the same as the original response res + decodedRes.value == res + + test "HistoryResponseRPC protobuf codec - empty history response": + ## Given + let emptyRes = HistoryResponseRPC() + + ## When + let pb = emptyRes.encode() + let decodedEmptyRes = HistoryResponseRPC.decode(pb.buffer) + + ## Then + check: + decodedEmptyRes.isOk() + + check: + # check the correctness of init and encode for an empty HistoryResponseRPC + decodedEmptyRes.value == emptyRes diff --git a/tests/waku_store_legacy/test_waku_store.nim b/tests/waku_store_legacy/test_waku_store.nim new file mode 100644 index 0000000000..9b4bdaa1c0 --- /dev/null +++ b/tests/waku_store_legacy/test_waku_store.nim @@ -0,0 +1,109 @@ +{.used.} + +import std/options, testutils/unittests, chronos, chronicles, libp2p/crypto/crypto + +import + ../../../waku/[ + common/paging, + node/peer_manager, + waku_core, + waku_store_legacy, + waku_store_legacy/client, + ], + ../testlib/[common, wakucore], + ./store_utils + +suite "Waku Store - query handler": + asyncTest "history query handler should be called": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + + let msg = fakeWakuMessage(contentTopic = DefaultContentTopic) + + var queryHandlerFut = newFuture[(HistoryQuery)]() + + let queryHandler = proc( + req: HistoryQuery + ): Future[HistoryResult] {.async, gcsafe.} = + queryHandlerFut.complete(req) + return ok(HistoryResponse(messages: @[msg])) + + let + server = await newTestWakuStore(serverSwitch, handler = queryhandler) + 
client = newTestWakuStoreClient(clientSwitch) + + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD + ) + + ## When + let queryRes = await client.query(req, peer = serverPeerInfo) + + ## Then + check: + not queryHandlerFut.failed() + queryRes.isOk() + + let request = queryHandlerFut.read() + check: + request == req + + let response = queryRes.tryGet() + check: + response.messages.len == 1 + response.messages == @[msg] + + ## Cleanup + await allFutures(serverSwitch.stop(), clientSwitch.stop()) + + asyncTest "history query handler should be called and return an error": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + + var queryHandlerFut = newFuture[(HistoryQuery)]() + let queryHandler = proc( + req: HistoryQuery + ): Future[HistoryResult] {.async, gcsafe.} = + queryHandlerFut.complete(req) + return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST)) + + let + server = await newTestWakuStore(serverSwitch, handler = queryhandler) + client = newTestWakuStoreClient(clientSwitch) + + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], direction: PagingDirection.FORWARD + ) + + ## When + let queryRes = await client.query(req, peer = serverPeerInfo) + + ## Then + check: + not queryHandlerFut.failed() + queryRes.isErr() + + let request = queryHandlerFut.read() + check: + request == req + + let error = queryRes.tryError() + check: + error.kind == HistoryErrorKind.BAD_REQUEST + + ## Cleanup + await allFutures(serverSwitch.stop(), clientSwitch.stop()) diff --git a/tests/waku_store_legacy/test_wakunode_store.nim b/tests/waku_store_legacy/test_wakunode_store.nim new file mode 100644 index 0000000000..8d7f20d28a --- /dev/null +++ b/tests/waku_store_legacy/test_wakunode_store.nim @@ -0,0 +1,320 @@ +{.used.} + +import + stew/shims/net as stewNet, + testutils/unittests, + chronicles, + chronos, + libp2p/crypto/crypto, + libp2p/peerid, + libp2p/multiaddress, + libp2p/switch, + libp2p/protocols/pubsub/rpc/messages, + libp2p/protocols/pubsub/pubsub, + libp2p/protocols/pubsub/gossipsub +import + ../../../waku/common/databases/db_sqlite, + ../../../waku/common/paging, + ../../../waku/waku_core, + ../../../waku/waku_core/message/digest, + ../../../waku/waku_core/subscription, + ../../../waku/node/peer_manager, + ../../../waku/waku_archive, + ../../../waku/waku_archive/driver/sqlite_driver, + ../../../waku/waku_filter_v2, + ../../../waku/waku_filter_v2/client, + ../../../waku/waku_store_legacy, + ../../../waku/waku_node, + ../waku_store_legacy/store_utils, + ../waku_archive/archive_utils, + ../testlib/common, + ../testlib/wakucore, + ../testlib/wakunode + +procSuite "WakuNode - Store": + ## Fixtures + let timeOrigin = now() + let msgListA = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] + + let archiveA = block: + let driver = newSqliteArchiveDriver() + + for msg 
in msgListA: + let msg_digest = waku_archive.computeDigest(msg) + let msg_hash = computeMessageHash(DefaultPubsubTopic, msg) + require ( + waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp) + ).isOk() + + driver + + test "Store protocol returns expected messages": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + + client.mountLegacyStoreClient() + + ## Given + let req = HistoryQuery(contentTopics: @[DefaultContentTopic]) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + let queryRes = waitFor client.query(req, peer = serverPeer) + + ## Then + check queryRes.isOk() + + let response = queryRes.get() + check: + response.messages == msgListA + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store node history response - forward pagination": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + + client.mountLegacyStoreClient() + + ## Given + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], + pageSize: 7, + direction: PagingDirection.FORWARD, + ) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessage]](2) + var cursors = newSeq[Option[HistoryCursor]](2) + + for i in 0 ..< 2: + let res = waitFor client.query(nextReq, peer = serverPeer) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.cursor + + # Set/update the request cursor + nextReq.cursor = cursors[i] + + ## Then + check: + cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[6])) + cursors[1] == none(HistoryCursor) + + check: + pages[0] == msgListA[0 .. 6] + pages[1] == msgListA[7 .. 
9] + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store node history response - backward pagination": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + + client.mountLegacyStoreClient() + + ## Given + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], + pageSize: 7, + direction: PagingDirection.BACKWARD, + ) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessage]](2) + var cursors = newSeq[Option[HistoryCursor]](2) + + for i in 0 ..< 2: + let res = waitFor client.query(nextReq, peer = serverPeer) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.cursor + + # Set/update the request cursor + nextReq.cursor = cursors[i] + + ## Then + check: + cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[3])) + cursors[1] == none(HistoryCursor) + + check: + pages[0] == msgListA[3 .. 9] + pages[1] == msgListA[0 .. 2] + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store protocol returns expected message when relay is disabled and filter enabled": + ## See nwaku issue #937: 'Store: ability to decouple store from relay' + ## Setup + let + filterSourceKey = generateSecp256k1Key() + filterSource = + newTestWakuNode(filterSourceKey, parseIpAddress("0.0.0.0"), Port(0)) + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start(), filterSource.start()) + + waitFor filterSource.mountFilter() + let driver = newSqliteArchiveDriver() + + let mountArchiveRes = server.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + waitFor server.mountFilterClient() + client.mountLegacyStoreClient() + + ## Given + let message = fakeWakuMessage() + let + serverPeer = server.peerInfo.toRemotePeerInfo() + filterSourcePeer = filterSource.peerInfo.toRemotePeerInfo() + + ## Then + let filterFut = newFuture[(PubsubTopic, WakuMessage)]() + proc filterHandler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = + await server.wakuArchive.handleMessage(pubsubTopic, msg) + filterFut.complete((pubsubTopic, msg)) + + server.wakuFilterClient.registerPushHandler(filterHandler) + let resp = waitFor server.filterSubscribe( + some(DefaultPubsubTopic), DefaultContentTopic, peer = filterSourcePeer + ) + + waitFor sleepAsync(100.millis) + + waitFor filterSource.wakuFilter.handleMessage(DefaultPubsubTopic, message) + + # Wait for the server filter to receive the push message + require waitFor filterFut.withTimeout(5.seconds) + + let res = waitFor client.query( + HistoryQuery(contentTopics: @[DefaultContentTopic]), peer = serverPeer + ) + + ## Then + check res.isOk() + + let response = res.get() + check: + response.messages.len == 1 + response.messages[0] == message + + let (handledPubsubTopic, handledMsg) = 
filterFut.read() + check: + handledPubsubTopic == DefaultPubsubTopic + handledMsg == message + + ## Cleanup + waitFor allFutures(client.stop(), server.stop(), filterSource.stop()) + + test "history query should return INVALID_CURSOR if the cursor has empty data in the request": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + + client.mountLegacyStoreClient() + + ## Forcing a bad cursor with empty digest data + var data: array[32, byte] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + ] + let cursor = HistoryCursor( + pubsubTopic: "pubsubTopic", + senderTime: now(), + storeTime: now(), + digest: waku_archive.MessageDigest(data: data), + ) + + ## Given + let req = HistoryQuery(contentTopics: @[DefaultContentTopic], cursor: some(cursor)) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + let queryRes = waitFor client.query(req, peer = serverPeer) + + ## Then + check not queryRes.isOk() + + check queryRes.error == + "legacy store client query error: BAD_REQUEST: invalid cursor" + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) diff --git a/tests/wakunode_rest/test_rest_store.nim b/tests/wakunode_rest/test_rest_store.nim index 85a92c885b..d5adabb7eb 100644 --- a/tests/wakunode_rest/test_rest_store.nim +++ b/tests/wakunode_rest/test_rest_store.nim @@ -26,7 +26,6 @@ import ../../../waku/waku_archive/driver/queue_driver, ../../../waku/waku_store as waku_store, ../../../waku/common/base64, - ../testlib/common, ../testlib/wakucore, ../testlib/wakunode @@ -37,7 +36,7 @@ proc put( store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage ): Future[Result[void, string]] = let - digest = waku_archive.computeDigest(message) + digest = computeDigest(message) msgHash = computeMessageHash(pubsubTopic, message) receivedTime = if message.timestamp > 0: @@ -60,25 +59,30 @@ proc testWakuNode(): WakuNode = ################################################################################ # Beginning of the tests ################################################################################ -procSuite "Waku v2 Rest API - Store": - asyncTest "MessageDigest <-> string conversions": - # Validate MessageDigest conversion from a WakuMessage obj +procSuite "Waku Rest API - Store v3": + asyncTest "MessageHash <-> string conversions": + # Validate MessageHash conversion from a WakuMessage obj let wakuMsg = WakuMessage( contentTopic: "Test content topic", payload: @[byte('H'), byte('i'), byte('!')] ) - let messageDigest = waku_store.computeDigest(wakuMsg) - let restMsgDigest = some(messageDigest.toRestStringMessageDigest()) - let parsedMsgDigest = restMsgDigest.parseMsgDigest().value + let messageHash = computeMessageHash(DefaultPubsubTopic, wakuMsg) + let restMsgHash = some(messageHash.toRestStringWakuMessageHash()) + + let parsedMsgHashRes = parseHash(restMsgHash) + assert parsedMsgHashRes.isOk(), $parsedMsgHashRes.error check: - messageDigest == parsedMsgDigest.get() + messageHash == parsedMsgHashRes.get().get() # Random validation. 
Obtained the raw values manually - let expected = some("ZjNhM2Q2NDkwMTE0MjMzNDg0MzJlMDdiZGI3NzIwYTc%3D") - let msgDigest = expected.parseMsgDigest().value + let expected = some("f6za9OzG1xSiEZagZc2b3litRbkd3zRl61rezDd3pgQ%3D") + + let msgHashRes = parseHash(expected) + assert msgHashRes.isOk(), $msgHashRes.error + check: - expected.get() == msgDigest.get().toRestStringMessageDigest() + expected.get() == msgHashRes.get().get().toRestStringWakuMessageHash() asyncTest "Filter by start and end time": let node = testWakuNode() @@ -127,17 +131,17 @@ procSuite "Waku v2 Rest API - Store": let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId # Apply filter by start and end timestamps - var response = await client.getStoreMessagesV1( + var response = await client.getStoreMessagesV3( encodeUrl(fullAddr), + "true", # include data encodeUrl(DefaultPubsubTopic), "", # empty content topics. Don't filter by this field "3", # start time "6", # end time - "", # sender time - "", # store time - "", # base64-encoded digest - "", # empty implies default page size + "", # hashes + "", # base64-encoded hash "true", # ascending + "", # empty implies default page size ) check: @@ -200,39 +204,35 @@ procSuite "Waku v2 Rest API - Store": var pages = newSeq[seq[WakuMessage]](2) - # Fields that compose a HistoryCursor object - var reqPubsubTopic = DefaultPubsubTopic - var reqSenderTime = Timestamp(0) - var reqStoreTime = Timestamp(0) - var reqDigest = waku_store.MessageDigest() + var reqHash = none(WakuMessageHash) for i in 0 ..< 2: - let response = await client.getStoreMessagesV1( + let response = await client.getStoreMessagesV3( encodeUrl(fullAddr), - encodeUrl(reqPubsubTopic), + "true", # include data + encodeUrl(DefaultPubsubTopic), "", # content topics. Empty ignores the field. "", # start time. Empty ignores the field. "", # end time. Empty ignores the field. - encodeUrl($reqSenderTime), # sender time - encodeUrl($reqStoreTime), # store time - reqDigest.toRestStringMessageDigest(), - # base64-encoded digest. Empty ignores the field. - "7", # page size. Empty implies default page size. + "", # hashes + if reqHash.isSome(): + reqHash.get().toRestStringWakuMessageHash() + else: + "" + , # base64-encoded digest. Empty ignores the field. "true", # ascending + "7", # page size. Empty implies default page size. 
) var wakuMessages = newSeq[WakuMessage](0) for j in 0 ..< response.data.messages.len: - wakuMessages.add(response.data.messages[j].toWakuMessage()) + wakuMessages.add(response.data.messages[j].message) pages[i] = wakuMessages # populate the cursor for next page - if response.data.cursor.isSome(): - reqPubsubTopic = response.data.cursor.get().pubsubTopic - reqDigest = response.data.cursor.get().digest - reqSenderTime = response.data.cursor.get().senderTime - reqStoreTime = response.data.cursor.get().storeTime + if response.data.paginationCursor.isSome(): + reqHash = some(response.data.paginationCursor.get()) check: response.status == 200 @@ -289,8 +289,8 @@ procSuite "Waku v2 Rest API - Store": let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId # Filtering by a known pubsub topic - var response = await client.getStoreMessagesV1( - encodeUrl($fullAddr), encodeUrl(DefaultPubsubTopic) + var response = await client.getStoreMessagesV3( + encodeUrl($fullAddr), "true", encodeUrl(DefaultPubsubTopic) ) check: @@ -299,15 +299,15 @@ procSuite "Waku v2 Rest API - Store": response.data.messages.len == 3 # Get all the messages by specifying an empty pubsub topic - response = await client.getStoreMessagesV1(encodeUrl($fullAddr)) + response = await client.getStoreMessagesV3(encodeUrl($fullAddr), "true") check: response.status == 200 $response.contentType == $MIMETYPE_JSON response.data.messages.len == 3 # Receiving no messages by filtering with a random pubsub topic - response = await client.getStoreMessagesV1( - encodeUrl($fullAddr), encodeUrl("random pubsub topic") + response = await client.getStoreMessagesV3( + encodeUrl($fullAddr), "true", encodeUrl("random pubsub topic") ) check: response.status == 200 @@ -362,8 +362,8 @@ procSuite "Waku v2 Rest API - Store": # Filtering by a known pubsub topic. # We also pass the store-node address in the request. - var response = await client.getStoreMessagesV1( - encodeUrl(fullAddr), encodeUrl(DefaultPubsubTopic) + var response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), "true", encodeUrl(DefaultPubsubTopic) ) check: response.status == 200 @@ -372,7 +372,8 @@ procSuite "Waku v2 Rest API - Store": # Get all the messages by specifying an empty pubsub topic # We also pass the store-node address in the request. - response = await client.getStoreMessagesV1(encodeUrl(fullAddr), encodeUrl("")) + response = + await client.getStoreMessagesV3(encodeUrl(fullAddr), "true", encodeUrl("")) check: response.status == 200 $response.contentType == $MIMETYPE_JSON @@ -380,8 +381,8 @@ procSuite "Waku v2 Rest API - Store": # Receiving no messages by filtering with a random pubsub topic # We also pass the store-node address in the request. 
- response = await client.getStoreMessagesV1( - encodeUrl(fullAddr), encodeUrl("random pubsub topic") + response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), "true", encodeUrl("random pubsub topic") ) check: response.status == 200 @@ -389,14 +390,16 @@ procSuite "Waku v2 Rest API - Store": response.data.messages.len == 0 # Receiving 400 response if setting wrong store-node address - response = await client.getStoreMessagesV1( - encodeUrl("incorrect multi address format"), encodeUrl("random pubsub topic") + response = await client.getStoreMessagesV3( + encodeUrl("incorrect multi address format"), + "true", + encodeUrl("random pubsub topic"), ) check: response.status == 400 $response.contentType == $MIMETYPE_TEXT response.data.messages.len == 0 - response.data.error_message.get == + response.data.statusDesc == "Failed parsing remote peer info [MultiAddress.init [multiaddress: Invalid MultiAddress, must start with `/`]]" await restServer.stop() @@ -446,8 +449,8 @@ procSuite "Waku v2 Rest API - Store": let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId # Filtering by content topic - let response = await client.getStoreMessagesV1( - encodeUrl(fullAddr), encodeUrl(DefaultPubsubTopic), encodeUrl("ct1,ct2") + let response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), "true", encodeUrl(DefaultPubsubTopic), encodeUrl("ct1,ct2") ) check: response.status == 200 @@ -471,50 +474,50 @@ procSuite "Waku v2 Rest API - Store": installStoreApiHandlers(restServer.router, node) restServer.start() - # WakuStore setup - let driver: ArchiveDriver = QueueDriver.new() - let mountArchiveRes = node.mountArchive(driver) - assert mountArchiveRes.isOk(), mountArchiveRes.error - - await node.mountStore() node.mountStoreClient() let key = generateEcdsaKey() var peerSwitch = newStandardSwitch(some(key)) await peerSwitch.start() - peerSwitch.mount(node.wakuStore) - - # Now prime it with some history before tests - let msgList = - @[ - fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0), - fakeWakuMessage(@[byte 1], ts = 1), - fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9), - ] - for msg in msgList: - require (waitFor driver.put(DefaultPubsubTopic, msg)).isOk() - let client = newRestHttpClient(initTAddress(restAddress, restPort)) let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() - let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId # Sending no peer-store node address - var response = - await client.getStoreMessagesV1(encodeUrl(""), encodeUrl(DefaultPubsubTopic)) + var response = await client.getStoreMessagesV3( + encodeUrl(""), "true", encodeUrl(DefaultPubsubTopic) + ) check: response.status == 412 $response.contentType == $MIMETYPE_TEXT response.data.messages.len == 0 - response.data.error_message.get == NoPeerNoDiscError.errobj.message + response.data.statusDesc == NoPeerNoDiscError.errobj.message # Now add the storenode from "config" node.peerManager.addServicePeer(remotePeerInfo, WakuStoreCodec) + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + + # Now prime it with some history before tests + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9), + ] + for msg in msgList: + require (waitFor 
driver.put(DefaultPubsubTopic, msg)).isOk() + # Sending no peer-store node address - response = - await client.getStoreMessagesV1(encodeUrl(""), encodeUrl(DefaultPubsubTopic)) + response = await client.getStoreMessagesV3( + encodeUrl(""), "true", encodeUrl(DefaultPubsubTopic) + ) check: response.status == 200 $response.contentType == $MIMETYPE_JSON @@ -561,8 +564,9 @@ procSuite "Waku v2 Rest API - Store": let client = newRestHttpClient(initTAddress(restAddress, restPort)) # Filtering by a known pubsub topic. - var response = - await client.getStoreMessagesV1(none[string](), encodeUrl(DefaultPubsubTopic)) + var response = await client.getStoreMessagesV3( + includeData = "true", pubsubTopic = encodeUrl(DefaultPubsubTopic) + ) check: response.status == 200 @@ -570,15 +574,17 @@ procSuite "Waku v2 Rest API - Store": response.data.messages.len == 3 # Get all the messages by specifying an empty pubsub topic - response = await client.getStoreMessagesV1(none[string](), encodeUrl("")) + response = + await client.getStoreMessagesV3(includeData = "true", pubsubTopic = encodeUrl("")) check: response.status == 200 $response.contentType == $MIMETYPE_JSON response.data.messages.len == 3 # Receiving no messages by filtering with a random pubsub topic - response = - await client.getStoreMessagesV1(none[string](), encodeUrl("random pubsub topic")) + response = await client.getStoreMessagesV3( + includeData = "true", pubsubTopic = encodeUrl("random pubsub topic") + ) check: response.status == 200 $response.contentType == $MIMETYPE_JSON @@ -615,31 +621,24 @@ procSuite "Waku v2 Rest API - Store": # Filtering by a known pubsub topic. var response = - await client.getStoreMessagesV1(none[string](), encodeUrl(DefaultPubsubTopic)) + await client.getStoreMessagesV3(pubsubTopic = encodeUrl(DefaultPubsubTopic)) check: response.status == 200 $response.contentType == $MIMETYPE_JSON response.data.messages.len == 1 - let storeMessage = response.data.messages[0] + let storeMessage = response.data.messages[0].message check: - storeMessage.contentTopic.isSome() - storeMessage.version.isSome() - storeMessage.timestamp.isSome() - storeMessage.ephemeral.isSome() - storeMessage.meta.isSome() - - check: - storeMessage.payload == base64.encode(msg.payload) - storeMessage.contentTopic.get() == msg.contentTopic - storeMessage.version.get() == msg.version - storeMessage.timestamp.get() == msg.timestamp - storeMessage.ephemeral.get() == msg.ephemeral - storeMessage.meta.get() == base64.encode(msg.meta) - - asyncTest "Rate limit store node history query": + storeMessage.payload == msg.payload + storeMessage.contentTopic == msg.contentTopic + storeMessage.version == msg.version + storeMessage.timestamp == msg.timestamp + storeMessage.ephemeral == msg.ephemeral + storeMessage.meta == msg.meta + + asyncTest "Rate limit store node store query": # Test adapted from the analogous present at waku_store/test_wakunode_store.nim let node = testWakuNode() await node.start() @@ -690,39 +689,36 @@ procSuite "Waku v2 Rest API - Store": var pages = newSeq[seq[WakuMessage]](2) - # Fields that compose a HistoryCursor object var reqPubsubTopic = DefaultPubsubTopic - var reqSenderTime = Timestamp(0) - var reqStoreTime = Timestamp(0) - var reqDigest = waku_store.MessageDigest() + var reqHash = none(WakuMessageHash) for i in 0 ..< 2: - let response = await client.getStoreMessagesV1( + let response = await client.getStoreMessagesV3( encodeUrl(fullAddr), + "true", # include data encodeUrl(reqPubsubTopic), "", # content topics. Empty ignores the field. 
"", # start time. Empty ignores the field. "", # end time. Empty ignores the field. - encodeUrl($reqSenderTime), # sender time - encodeUrl($reqStoreTime), # store time - reqDigest.toRestStringMessageDigest(), - # base64-encoded digest. Empty ignores the field. - "3", # page size. Empty implies default page size. + "", # hashes + if reqHash.isSome(): + reqHash.get().toRestStringWakuMessageHash() + else: + "" + , # base64-encoded digest. Empty ignores the field. "true", # ascending + "3", # page size. Empty implies default page size. ) var wakuMessages = newSeq[WakuMessage](0) for j in 0 ..< response.data.messages.len: - wakuMessages.add(response.data.messages[j].toWakuMessage()) + wakuMessages.add(response.data.messages[j].message) pages[i] = wakuMessages # populate the cursor for next page - if response.data.cursor.isSome(): - reqPubsubTopic = response.data.cursor.get().pubsubTopic - reqDigest = response.data.cursor.get().digest - reqSenderTime = response.data.cursor.get().senderTime - reqStoreTime = response.data.cursor.get().storeTime + if response.data.paginationCursor.isSome(): + reqHash = response.data.paginationCursor check: response.status == 200 @@ -733,38 +729,44 @@ procSuite "Waku v2 Rest API - Store": pages[1] == msgList[3 .. 5] # request last third will lead to rate limit rejection - var response = await client.getStoreMessagesV1( + var response = await client.getStoreMessagesV3( encodeUrl(fullAddr), + "true", # include data encodeUrl(reqPubsubTopic), "", # content topics. Empty ignores the field. "", # start time. Empty ignores the field. "", # end time. Empty ignores the field. - encodeUrl($reqSenderTime), # sender time - encodeUrl($reqStoreTime), # store time - reqDigest.toRestStringMessageDigest(), - # base64-encoded digest. Empty ignores the field. + "", # hashes + if reqHash.isSome(): + reqHash.get().toRestStringWakuMessageHash() + else: + "" + , # base64-encoded digest. Empty ignores the field. ) check: response.status == 429 $response.contentType == $MIMETYPE_TEXT - response.data.error_message.get == "Request rate limmit reached" + response.data.statusDesc == "Request rate limit reached" await sleepAsync(500.millis) # retry after respective amount of time shall succeed - response = await client.getStoreMessagesV1( + response = await client.getStoreMessagesV3( encodeUrl(fullAddr), + "true", # include data encodeUrl(reqPubsubTopic), "", # content topics. Empty ignores the field. "", # start time. Empty ignores the field. "", # end time. Empty ignores the field. - encodeUrl($reqSenderTime), # sender time - encodeUrl($reqStoreTime), # store time - reqDigest.toRestStringMessageDigest(), - # base64-encoded digest. Empty ignores the field. - "5", # page size. Empty implies default page size. + "", # hashes + if reqHash.isSome(): + reqHash.get().toRestStringWakuMessageHash() + else: + "" + , # base64-encoded digest. Empty ignores the field. "true", # ascending + "5", # page size. Empty implies default page size. ) check: @@ -773,7 +775,7 @@ procSuite "Waku v2 Rest API - Store": var wakuMessages = newSeq[WakuMessage](0) for j in 0 ..< response.data.messages.len: - wakuMessages.add(response.data.messages[j].toWakuMessage()) + wakuMessages.add(response.data.messages[j].message) check wakuMessages == msgList[6 .. 
9] diff --git a/waku/factory/app.nim b/waku/factory/app.nim index e9d0e281bd..3900cbb997 100644 --- a/waku/factory/app.nim +++ b/waku/factory/app.nim @@ -32,6 +32,7 @@ import ../../waku/waku_api/rest/filter/handlers as rest_filter_api, ../../waku/waku_api/rest/lightpush/handlers as rest_lightpush_api, ../../waku/waku_api/rest/store/handlers as rest_store_api, + ../../waku/waku_api/rest/legacy_store/handlers as rest_legacy_store_api, ../../waku/waku_api/rest/health/handlers as rest_health_api, ../../waku/waku_api/rest/admin/handlers as rest_admin_api, ../../waku/waku_archive, diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index 1dfbbf9da9..638e3df291 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -19,6 +19,9 @@ import ../discovery/waku_dnsdisc, ../waku_archive, ../waku_store, + ../waku_store/common as store_common, + ../waku_store_legacy, + ../waku_store_legacy/common as legacy_common, ../waku_filter_v2, ../waku_peer_exchange, ../node/peer_manager, @@ -248,14 +251,28 @@ proc setupProtocols( except CatchableError: return err("failed to mount waku store protocol: " & getCurrentExceptionMsg()) + try: + await mountLegacyStore(node) + except CatchableError: + return + err("failed to mount waku legacy store protocol: " & getCurrentExceptionMsg()) + mountStoreClient(node) if conf.storenode != "": let storeNode = parsePeerInfo(conf.storenode) if storeNode.isOk(): - node.peerManager.addServicePeer(storeNode.value, WakuStoreCodec) + node.peerManager.addServicePeer(storeNode.value, store_common.WakuStoreCodec) else: return err("failed to set node waku store peer: " & storeNode.error) + mountLegacyStoreClient(node) + if conf.storenode != "": + let storeNode = parsePeerInfo(conf.storenode) + if storeNode.isOk(): + node.peerManager.addServicePeer(storeNode.value, legacy_common.WakuStoreCodec) + else: + return err("failed to set node waku legacy store peer: " & storeNode.error) + # NOTE Must be mounted after relay if conf.lightpush: try: diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index 7cb65ba224..a2be401d76 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -31,8 +31,12 @@ import ../waku_core/topics/sharding, ../waku_relay, ../waku_archive, - ../waku_store, + ../waku_store_legacy/protocol as legacy_store, + ../waku_store_legacy/client as legacy_store_client, + ../waku_store_legacy/common as legacy_store_common, + ../waku_store/protocol as store, ../waku_store/client as store_client, + ../waku_store/common as store_common, ../waku_filter_v2, ../waku_filter_v2/client as filter_client, ../waku_filter_v2/subscriptions as filter_subscriptions, @@ -87,8 +91,10 @@ type switch*: Switch wakuRelay*: WakuRelay wakuArchive*: WakuArchive - wakuStore*: WakuStore - wakuStoreClient*: WakuStoreClient + wakuLegacyStore*: legacy_store.WakuStore + wakuLegacyStoreClient*: legacy_store_client.WakuStoreClient + wakuStore*: store.WakuStore + wakuStoreClient*: store_client.WakuStoreClient wakuFilter*: waku_filter_v2.WakuFilter wakuFilterClient*: filter_client.WakuFilterClient wakuRlnRelay*: WakuRLNRelay @@ -651,10 +657,10 @@ proc mountArchive*( return ok() -## Waku store +## Legacy Waku Store # TODO: Review this mapping logic. 
Maybe, move it to the appplication code -proc toArchiveQuery(request: HistoryQuery): ArchiveQuery = +proc toArchiveQuery(request: legacy_store_common.HistoryQuery): ArchiveQuery = ArchiveQuery( pubsubTopic: request.pubsubTopic, contentTopics: request.contentTopics, @@ -674,7 +680,7 @@ proc toArchiveQuery(request: HistoryQuery): ArchiveQuery = ) # TODO: Review this mapping logic. Maybe, move it to the appplication code -proc toHistoryResult*(res: ArchiveResult): HistoryResult = +proc toHistoryResult*(res: ArchiveResult): legacy_store_common.HistoryResult = if res.isErr(): let error = res.error case res.error.kind @@ -699,51 +705,57 @@ proc toHistoryResult*(res: ArchiveResult): HistoryResult = ) ) -proc mountStore*( +proc mountLegacyStore*( node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit -) {.async, raises: [Defect, LPError].} = - info "mounting waku store protocol" +) {.async.} = + info "mounting waku legacy store protocol" if node.wakuArchive.isNil(): - error "failed to mount waku store protocol", error = "waku archive not set" + error "failed to mount waku legacy store protocol", error = "waku archive not set" return # TODO: Review this handler logic. Maybe, move it to the appplication code let queryHandler: HistoryQueryHandler = proc( request: HistoryQuery - ): Future[HistoryResult] {.async.} = + ): Future[legacy_store_common.HistoryResult] {.async.} = if request.cursor.isSome(): request.cursor.get().checkHistCursor().isOkOr: return err(error) let request = request.toArchiveQuery() - let response = await node.wakuArchive.findMessages(request) + let response = await node.wakuArchive.findMessagesV2(request) return response.toHistoryResult() - node.wakuStore = - WakuStore.new(node.peerManager, node.rng, queryHandler, some(rateLimit)) + node.wakuLegacyStore = legacy_store.WakuStore.new( + node.peerManager, node.rng, queryHandler, some(rateLimit) + ) if node.started: # Node has started already. Let's start store too. - await node.wakuStore.start() + await node.wakuLegacyStore.start() - node.switch.mount(node.wakuStore, protocolMatcher(WakuStoreCodec)) + node.switch.mount( + node.wakuLegacyStore, protocolMatcher(legacy_store_common.WakuStoreCodec) + ) -proc mountStoreClient*(node: WakuNode) = - info "mounting store client" +proc mountLegacyStoreClient*(node: WakuNode) = + info "mounting legacy store client" - node.wakuStoreClient = WakuStoreClient.new(node.peerManager, node.rng) + node.wakuLegacyStoreClient = + legacy_store_client.WakuStoreClient.new(node.peerManager, node.rng) proc query*( - node: WakuNode, query: HistoryQuery, peer: RemotePeerInfo -): Future[WakuStoreResult[HistoryResponse]] {.async, gcsafe.} = + node: WakuNode, query: legacy_store_common.HistoryQuery, peer: RemotePeerInfo +): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {. 
+ async, gcsafe +.} = ## Queries known nodes for historical messages - if node.wakuStoreClient.isNil(): - return err("waku store client is nil") + if node.wakuLegacyStoreClient.isNil(): + return err("waku legacy store client is nil") - let queryRes = await node.wakuStoreClient.query(query, peer) + let queryRes = await node.wakuLegacyStoreClient.query(query, peer) if queryRes.isErr(): - return err($queryRes.error) + return err("legacy store client query error: " & $queryRes.error) let response = queryRes.get() @@ -751,15 +763,15 @@ proc query*( # TODO: Move to application module (e.g., wakunode2.nim) proc query*( - node: WakuNode, query: HistoryQuery -): Future[WakuStoreResult[HistoryResponse]] {. + node: WakuNode, query: legacy_store_common.HistoryQuery +): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {. async, gcsafe, deprecated: "Use 'node.query()' with peer destination instead" .} = ## Queries known nodes for historical messages - if node.wakuStoreClient.isNil(): - return err("waku store client is nil") + if node.wakuLegacyStoreClient.isNil(): + return err("waku legacy store client is nil") - let peerOpt = node.peerManager.selectPeer(WakuStoreCodec) + let peerOpt = node.peerManager.selectPeer(legacy_store_common.WakuStoreCodec) if peerOpt.isNone(): error "no suitable remote peers" return err("peer_not_found_failure") @@ -779,10 +791,10 @@ when defined(waku_exp_store_resume): ## peerList indicates the list of peers to query from. The history is fetched from the first available peer in this list. Such candidates should be found through a discovery method (to be developed). ## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from. ## The history gets fetched successfully if the dialed peer has been online during the queried time window. 
- if node.wakuStoreClient.isNil(): + if node.wakuLegacyStoreClient.isNil(): return - let retrievedMessages = await node.wakuStoreClient.resume(peerList) + let retrievedMessages = await node.wakuLegacyStoreClient.resume(peerList) if retrievedMessages.isErr(): error "failed to resume store", error = retrievedMessages.error return @@ -790,6 +802,93 @@ when defined(waku_exp_store_resume): info "the number of retrieved messages since the last online time: ", number = retrievedMessages.value +## Waku Store + +proc toArchiveQuery(request: StoreQueryRequest): ArchiveQuery = + var query = ArchiveQuery() + + query.pubsubTopic = request.pubsubTopic + query.contentTopics = request.contentTopics + query.startTime = request.startTime + query.endTime = request.endTime + query.hashes = request.messageHashes + + if request.paginationCursor.isSome(): + var cursor = ArchiveCursor() + cursor.hash = request.paginationCursor.get() + query.cursor = some(cursor) + + query.direction = request.paginationForward + + if request.paginationLimit.isSome(): + query.pageSize = uint(request.paginationLimit.get()) + + return query + +proc toStoreResult(res: ArchiveResult): StoreQueryResult = + let response = res.valueOr: + return err(StoreError.new(300, "archive error: " & $error)) + + var res = StoreQueryResponse() + + res.statusCode = 200 + res.messages = response.hashes.zip(response.messages).mapIt( + WakuMessageKeyValue(messageHash: it[0], message: it[1]) + ) + + if response.cursor.isSome(): + res.paginationCursor = some(response.cursor.get().hash) + + return ok(res) + +proc mountStore*( + node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit +) {.async.} = + if node.wakuArchive.isNil(): + error "failed to mount waku store protocol", error = "waku archive not set" + return + + info "mounting waku store protocol" + + let requestHandler: StoreQueryRequestHandler = proc( + request: StoreQueryRequest + ): Future[StoreQueryResult] {.async.} = + let request = request.toArchiveQuery() + let response = await node.wakuArchive.findMessages(request) + + return response.toStoreResult() + + node.wakuStore = + store.WakuStore.new(node.peerManager, node.rng, requestHandler, some(rateLimit)) + + if node.started: + await node.wakuStore.start() + + node.switch.mount(node.wakuStore, protocolMatcher(store_common.WakuStoreCodec)) + +proc mountStoreClient*(node: WakuNode) = + info "mounting store client" + + node.wakuStoreClient = store_client.WakuStoreClient.new(node.peerManager, node.rng) + +proc query*( + node: WakuNode, request: store_common.StoreQueryRequest, peer: RemotePeerInfo +): Future[store_common.WakuStoreResult[store_common.StoreQueryResponse]] {. 
+ async, gcsafe +.} = + ## Queries known nodes for historical messages + if node.wakuStoreClient.isNil(): + return err("waku store v3 client is nil") + + let response = (await node.wakuStoreClient.query(request, peer)).valueOr: + var res = StoreQueryResponse() + res.statusCode = uint32(error.kind) + res.statusDesc = $error + + return ok(res) + + return ok(response) + ## Waku lightpush proc mountLightPush*( diff --git a/waku/waku_api/rest/admin/handlers.nim b/waku/waku_api/rest/admin/handlers.nim index 05724b1131..7574683ab6 100644 --- a/waku/waku_api/rest/admin/handlers.nim +++ b/waku/waku_api/rest/admin/handlers.nim @@ -4,7 +4,7 @@ else: {.push raises: [].} import - std/[strformat, sequtils, sets, tables], + std/[strformat, sequtils, tables], stew/byteutils, chronicles, json_serialization, @@ -13,7 +13,7 @@ import import ../../../waku_core, - ../../../waku_store, + ../../../waku_store_legacy/common, ../../../waku_filter_v2, ../../../waku_lightpush/common, ../../../waku_relay, @@ -66,7 +66,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = ) tuplesToWakuPeers(peers, filterV2Peers) - if not node.wakuStore.isNil(): + if not node.wakuLegacyStore.isNil(): # Map WakuStore peers to WakuPeers and add to return list let storePeers = node.peerManager.peerStore.peers(WakuStoreCodec).mapIt( ( diff --git a/waku/waku_api/rest/builder.nim b/waku/waku_api/rest/builder.nim index e5b4893341..9ab709afd3 100644 --- a/waku/waku_api/rest/builder.nim +++ b/waku/waku_api/rest/builder.nim @@ -18,6 +18,7 @@ import ../../waku/waku_api/rest/filter/handlers as rest_filter_api, ../../waku/waku_api/rest/lightpush/handlers as rest_lightpush_api, ../../waku/waku_api/rest/store/handlers as rest_store_api, + ../../waku/waku_api/rest/legacy_store/handlers as rest_store_legacy_api, ../../waku/waku_api/rest/health/handlers as rest_health_api, ../../waku/waku_api/rest/admin/handlers as rest_admin_api, ../../waku/waku_core/topics @@ -172,7 +173,8 @@ proc startRestServerProtocolSupport*( else: none(DiscoveryHandler) - installStoreApiHandlers(router, node, storeDiscoHandler) + rest_store_api.installStoreApiHandlers(router, node, storeDiscoHandler) + rest_store_legacy_api.installStoreApiHandlers(router, node, storeDiscoHandler) ## Light push API if conf.lightpushnode != "" and node.wakuLightpushClient != nil: diff --git a/waku/waku_api/rest/legacy_store/client.nim b/waku/waku_api/rest/legacy_store/client.nim new file mode 100644 index 0000000000..fa85b9b2eb --- /dev/null +++ b/waku/waku_api/rest/legacy_store/client.nim @@ -0,0 +1,78 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + chronicles, json_serialization, json_serialization/std/options, presto/[route, client] +import ../../../waku_store_legacy/common, ../serdes, ../responses, ./types + +export types + +logScope: + topics = "waku node rest legacy store_api" + +proc decodeBytes*( + t: typedesc[StoreResponseRest], + data: openArray[byte], + contentType: Opt[ContentTypeData], +): RestResult[StoreResponseRest] = + if MediaType.init($contentType) == MIMETYPE_JSON: + let decoded = ?decodeFromJsonBytes(StoreResponseRest, data) + return ok(decoded) + + if MediaType.init($contentType) == MIMETYPE_TEXT: + var res: string + if len(data) > 0: + res = newString(len(data)) + copyMem(addr res[0], unsafeAddr data[0], len(data)) + + return ok( + StoreResponseRest( + messages: newSeq[StoreWakuMessage](0), + cursor: none(HistoryCursorRest), + # field that contain error information + errorMessage: 
some(res), + ) + ) + + # If everything goes wrong + return err(cstring("Unsupported contentType " & $contentType)) + +proc getStoreMessagesV1*( + # URL-encoded reference to the store-node + peerAddr: string = "", + pubsubTopic: string = "", + # URL-encoded comma-separated list of content topics + contentTopics: string = "", + startTime: string = "", + endTime: string = "", + + # Optional cursor fields + senderTime: string = "", + storeTime: string = "", + digest: string = "", # base64-encoded digest + pageSize: string = "", + ascending: string = "", +): RestResponse[StoreResponseRest] {. + rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet +.} + +proc getStoreMessagesV1*( + # URL-encoded reference to the store-node + peerAddr: Option[string], + pubsubTopic: string = "", + # URL-encoded comma-separated list of content topics + contentTopics: string = "", + startTime: string = "", + endTime: string = "", + + # Optional cursor fields + senderTime: string = "", + storeTime: string = "", + digest: string = "", # base64-encoded digest + pageSize: string = "", + ascending: string = "", +): RestResponse[StoreResponseRest] {. + rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet +.} diff --git a/waku/waku_api/rest/legacy_store/handlers.nim b/waku/waku_api/rest/legacy_store/handlers.nim new file mode 100644 index 0000000000..fdf23958e1 --- /dev/null +++ b/waku/waku_api/rest/legacy_store/handlers.nim @@ -0,0 +1,258 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import std/strformat, stew/results, chronicles, uri, json_serialization, presto/route +import + ../../../waku_core, + ../../../waku_store_legacy/common, + ../../../waku_store_legacy/self_req_handler, + ../../../waku_node, + ../../../node/peer_manager, + ../../../common/paging, + ../../handlers, + ../responses, + ../serdes, + ./types + +export types + +logScope: + topics = "waku node rest legacy store_api" + +const futTimeout* = 5.seconds # Max time to wait for futures + +const NoPeerNoDiscError* = + RestApiResponse.preconditionFailed("No suitable service peer & no discovery method") + +# Queries the store-node with the query parameters and +# returns a RestApiResponse that is sent back to the api client. +proc performHistoryQuery( + selfNode: WakuNode, histQuery: HistoryQuery, storePeer: RemotePeerInfo +): Future[RestApiResponse] {.async.} = + let queryFut = selfNode.query(histQuery, storePeer) + if not await queryFut.withTimeout(futTimeout): + const msg = "No history response received (timeout)" + error msg + return RestApiResponse.internalServerError(msg) + + let res = queryFut.read() + if res.isErr(): + const msg = "Error occurred in queryFut.read()" + error msg, error = res.error + return RestApiResponse.internalServerError(fmt("{msg} [{res.error}]")) + + let storeResp = res.value.toStoreResponseRest() + let resp = RestApiResponse.jsonResponse(storeResp, status = Http200) + if resp.isErr(): + const msg = "Error building the json respose" + error msg, error = resp.error + return RestApiResponse.internalServerError(fmt("{msg} [{resp.error}]")) + + return resp.get() + +# Converts a string time representation into an Option[Timestamp]. 
+# Only positive time is considered a valid Timestamp in the request +proc parseTime(input: Option[string]): Result[Option[Timestamp], string] = + if input.isSome() and input.get() != "": + try: + let time = parseInt(input.get()) + if time > 0: + return ok(some(Timestamp(time))) + except ValueError: + return err("Problem parsing time [" & getCurrentExceptionMsg() & "]") + + return ok(none(Timestamp)) + +# Generates a history query cursor as per the given params +proc parseCursor( + parsedPubsubTopic: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], +): Result[Option[HistoryCursor], string] = + # Parse sender time + let parsedSenderTime = parseTime(senderTime) + if not parsedSenderTime.isOk(): + return err(parsedSenderTime.error) + + # Parse store time + let parsedStoreTime = parseTime(storeTime) + if not parsedStoreTime.isOk(): + return err(parsedStoreTime.error) + + # Parse message digest + let parsedMsgDigest = parseMsgDigest(digest) + if not parsedMsgDigest.isOk(): + return err(parsedMsgDigest.error) + + # Parse cursor information + if parsedPubsubTopic.isSome() and parsedSenderTime.value.isSome() and + parsedStoreTime.value.isSome() and parsedMsgDigest.value.isSome(): + return ok( + some( + HistoryCursor( + pubsubTopic: parsedPubsubTopic.get(), + senderTime: parsedSenderTime.value.get(), + storeTime: parsedStoreTime.value.get(), + digest: parsedMsgDigest.value.get(), + ) + ) + ) + else: + return ok(none(HistoryCursor)) + +# Creates a HistoryQuery from the given params +proc createHistoryQuery( + pubsubTopic: Option[string], + contentTopics: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], + startTime: Option[string], + endTime: Option[string], + pageSize: Option[string], + direction: Option[string], +): Result[HistoryQuery, string] = + # Parse pubsubTopic parameter + var parsedPubsubTopic = none(string) + if pubsubTopic.isSome(): + let decodedPubsubTopic = decodeUrl(pubsubTopic.get()) + if decodedPubsubTopic != "": + parsedPubsubTopic = some(decodedPubsubTopic) + + # Parse the content topics + var parsedContentTopics = newSeq[ContentTopic](0) + if contentTopics.isSome(): + let ctList = decodeUrl(contentTopics.get()) + if ctList != "": + for ct in ctList.split(','): + parsedContentTopics.add(ct) + + # Parse cursor information + let parsedCursor = ?parseCursor(parsedPubsubTopic, senderTime, storeTime, digest) + + # Parse page size field + var parsedPagedSize = DefaultPageSize + if pageSize.isSome() and pageSize.get() != "": + try: + parsedPagedSize = uint64(parseInt(pageSize.get())) + except CatchableError: + return err("Problem parsing page size [" & getCurrentExceptionMsg() & "]") + + # Parse start time + let parsedStartTime = ?parseTime(startTime) + + # Parse end time + let parsedEndTime = ?parseTime(endTime) + + # Parse ascending field + var parsedDirection = default() + if direction.isSome() and direction.get() != "": + parsedDirection = direction.get().into() + + return ok( + HistoryQuery( + pubsubTopic: parsedPubsubTopic, + contentTopics: parsedContentTopics, + startTime: parsedStartTime, + endTime: parsedEndTime, + direction: parsedDirection, + pageSize: parsedPagedSize, + cursor: parsedCursor, + ) + ) + +# Simple type conversion. The "Option[Result[string, cstring]]" +# type is used by the nim-presto library. 
+proc toOpt(self: Option[Result[string, cstring]]): Option[string] = + if not self.isSome() or self.get().value == "": + return none(string) + if self.isSome() and self.get().value != "": + return some(self.get().value) + +proc retrieveMsgsFromSelfNode( + self: WakuNode, histQuery: HistoryQuery +): Future[RestApiResponse] {.async.} = + ## Performs a "store" request to the local node (self node). + ## Notice that this doesn't follow the regular store libp2p channel because a node + ## is not allowed to libp2p-dial itself, by default. + ## + + let selfResp = (await self.wakuLegacyStore.handleSelfStoreRequest(histQuery)).valueOr: + return RestApiResponse.internalServerError($error) + + let storeResp = selfResp.toStoreResponseRest() + let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr: + const msg = "Error building the json response" + error msg, error = error + return RestApiResponse.internalServerError(fmt("{msg} [{error}]")) + + return resp + +# Subscribes the rest handler to serve "/store/v1/messages" requests +proc installStoreApiHandlers*( + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + # Handles the store-query request according to the passed parameters + router.api(MethodGet, "/store/v1/messages") do( + peerAddr: Option[string], + pubsubTopic: Option[string], + contentTopics: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], + startTime: Option[string], + endTime: Option[string], + pageSize: Option[string], + ascending: Option[string] + ) -> RestApiResponse: + debug "REST-GET /store/v1/messages ", peer_addr = $peerAddr + + # All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding) + # Example: + # /store/v1/messages?peerAddr=%2Fip4%2F127.0.0.1%2Ftcp%2F60001%2Fp2p%2F16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\&pubsubTopic=my-waku-topic + + # Parse the rest of the parameters and create a HistoryQuery + let histQuery = createHistoryQuery( + pubsubTopic.toOpt(), + contentTopics.toOpt(), + senderTime.toOpt(), + storeTime.toOpt(), + digest.toOpt(), + startTime.toOpt(), + endTime.toOpt(), + pageSize.toOpt(), + ascending.toOpt(), + ) + + if not histQuery.isOk(): + return RestApiResponse.badRequest(histQuery.error) + + if peerAddr.isNone() and not node.wakuLegacyStore.isNil(): + ## The user didn't specify a peer address and self-node is configured as a store node. + ## In this case we assume that the user is willing to retrieve the messages stored by + ## the local/self store node. 
+ return await node.retrieveMsgsFromSelfNode(histQuery.get()) + + # Parse the peer address parameter + let parsedPeerAddr = parseUrlPeerAddr(peerAddr.toOpt()).valueOr: + return RestApiResponse.badRequest(error) + + let peerAddr = parsedPeerAddr.valueOr: + node.peerManager.selectPeer(WakuStoreCodec).valueOr: + let handler = discHandler.valueOr: + return NoPeerNoDiscError + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError($error) + + peerOp.valueOr: + return RestApiResponse.preconditionFailed( + "No suitable service peer & none discovered" + ) + + return await node.performHistoryQuery(histQuery.value, peerAddr) diff --git a/waku/waku_api/rest/legacy_store/types.nim b/waku/waku_api/rest/legacy_store/types.nim new file mode 100644 index 0000000000..2669b8481a --- /dev/null +++ b/waku/waku_api/rest/legacy_store/types.nim @@ -0,0 +1,383 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[sets, strformat, uri], + stew/byteutils, + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client, common] +import + ../../../waku_store_legacy/common as waku_store_common, + ../../../common/base64, + ../../../waku_core, + ../serdes + +#### Types + +type + HistoryCursorRest* = object + pubsubTopic*: PubsubTopic + senderTime*: Timestamp + storeTime*: Timestamp + digest*: waku_store_common.MessageDigest + + StoreRequestRest* = object + # inspired by https://github.com/waku-org/nwaku/blob/f95147f5b7edfd45f914586f2d41cd18fb0e0d18/waku/v2//waku_store/common.nim#L52 + pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + cursor*: Option[HistoryCursorRest] + startTime*: Option[Timestamp] + endTime*: Option[Timestamp] + pageSize*: uint64 + ascending*: bool + + StoreWakuMessage* = object + payload*: Base64String + contentTopic*: Option[ContentTopic] + version*: Option[uint32] + timestamp*: Option[Timestamp] + ephemeral*: Option[bool] + meta*: Option[Base64String] + + StoreResponseRest* = object # inspired by https://rfc.vac.dev/spec/16/#storeresponse + messages*: seq[StoreWakuMessage] + cursor*: Option[HistoryCursorRest] + # field that contains error information + errorMessage*: Option[string] + +createJsonFlavor RestJson + +Json.setWriter JsonWriter, PreferredOutput = string + +#### Type conversion + +# Converts a URL-encoded-base64 string into a 'MessageDigest' +proc parseMsgDigest*( + input: Option[string] +): Result[Option[waku_store_common.MessageDigest], string] = + if not input.isSome() or input.get() == "": + return ok(none(waku_store_common.MessageDigest)) + + let decodedUrl = decodeUrl(input.get()) + let base64Decoded = base64.decode(Base64String(decodedUrl)) + var messageDigest = waku_store_common.MessageDigest() + + if not base64Decoded.isOk(): + return err(base64Decoded.error) + + let base64DecodedArr = base64Decoded.get() + # Next snippet inspired by "nwaku/waku/waku_archive/archive.nim" + # TODO: Improve coherence of MessageDigest type + messageDigest = block: + var data: array[32, byte] + for i in 0 ..< min(base64DecodedArr.len, 32): + data[i] = base64DecodedArr[i] + + waku_store_common.MessageDigest(data: data) + + return ok(some(messageDigest)) + +# Converts a given MessageDigest object into a suitable +# Base64-URL-encoded string suitable to be transmitted in a Rest +# request-response. The MessageDigest is first base64 encoded +# and this result is URL-encoded. 
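
# Round-trip sketch of this encoding (hypothetical digest bytes): base64
# encode the 32-byte digest, URL-encode the result, and parseMsgDigest above
# recovers the same digest from the query parameter.
when isMainModule:
  var sketchDigest = waku_store_common.MessageDigest()
  for i in 0 ..< 32:
    sketchDigest.data[i] = byte(i)
  let restString = encodeUrl($base64.encode(sketchDigest.data))
  let parsed = parseMsgDigest(some(restString))
  doAssert parsed.isOk()
  doAssert parsed.get().get().data == sketchDigest.data
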
+proc toRestStringMessageDigest*(self: waku_store_common.MessageDigest): string = + let base64Encoded = base64.encode(self.data) + encodeUrl($base64Encoded) + +proc toWakuMessage*(message: StoreWakuMessage): WakuMessage = + WakuMessage( + payload: base64.decode(message.payload).get(), + contentTopic: message.contentTopic.get(), + version: message.version.get(), + timestamp: message.timestamp.get(), + ephemeral: message.ephemeral.get(), + meta: message.meta.get(Base64String("")).decode().get(), + ) + +# Converts a 'HistoryResponse' object to an 'StoreResponseRest' +# that can be serialized to a json object. +proc toStoreResponseRest*(histResp: HistoryResponse): StoreResponseRest = + proc toStoreWakuMessage(message: WakuMessage): StoreWakuMessage = + StoreWakuMessage( + payload: base64.encode(message.payload), + contentTopic: some(message.contentTopic), + version: some(message.version), + timestamp: some(message.timestamp), + ephemeral: some(message.ephemeral), + meta: + if message.meta.len > 0: + some(base64.encode(message.meta)) + else: + none(Base64String) + , + ) + + var storeWakuMsgs: seq[StoreWakuMessage] + for m in histResp.messages: + storeWakuMsgs.add(m.toStoreWakuMessage()) + + var cursor = none(HistoryCursorRest) + if histResp.cursor.isSome: + cursor = some( + HistoryCursorRest( + pubsubTopic: histResp.cursor.get().pubsubTopic, + senderTime: histResp.cursor.get().senderTime, + storeTime: histResp.cursor.get().storeTime, + digest: histResp.cursor.get().digest, + ) + ) + + StoreResponseRest(messages: storeWakuMsgs, cursor: cursor) + +## Beginning of StoreWakuMessage serde + +proc writeValue*( + writer: var JsonWriter, value: StoreWakuMessage +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + writer.writeField("payload", $value.payload) + if value.contentTopic.isSome(): + writer.writeField("content_topic", value.contentTopic.get()) + if value.version.isSome(): + writer.writeField("version", value.version.get()) + if value.timestamp.isSome(): + writer.writeField("timestamp", value.timestamp.get()) + if value.ephemeral.isSome(): + writer.writeField("ephemeral", value.ephemeral.get()) + if value.meta.isSome(): + writer.writeField("meta", value.meta.get()) + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var StoreWakuMessage +) {.gcsafe, raises: [SerializationError, IOError].} = + var + payload = none(Base64String) + contentTopic = none(ContentTopic) + version = none(uint32) + timestamp = none(Timestamp) + ephemeral = none(bool) + meta = none(Base64String) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "StoreWakuMessage") + + case fieldName + of "payload": + payload = some(reader.readValue(Base64String)) + of "content_topic": + contentTopic = some(reader.readValue(ContentTopic)) + of "version": + version = some(reader.readValue(uint32)) + of "timestamp": + timestamp = some(reader.readValue(Timestamp)) + of "ephemeral": + ephemeral = some(reader.readValue(bool)) + of "meta": + meta = some(reader.readValue(Base64String)) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if payload.isNone(): + reader.raiseUnexpectedValue("Field `payload` is missing") + + value = StoreWakuMessage( + payload: payload.get(), + contentTopic: contentTopic, + version: version, + 
timestamp: timestamp, + ephemeral: ephemeral, + meta: meta, + ) + +## End of StoreWakuMessage serde + +## Beginning of MessageDigest serde + +proc writeValue*( + writer: var JsonWriter, value: waku_store_common.MessageDigest +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + writer.writeField("data", base64.encode(value.data)) + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var waku_store_common.MessageDigest +) {.gcsafe, raises: [SerializationError, IOError].} = + var data = none(seq[byte]) + + for fieldName in readObjectFields(reader): + case fieldName + of "data": + if data.isSome(): + reader.raiseUnexpectedField("Multiple `data` fields found", "MessageDigest") + let decoded = base64.decode(reader.readValue(Base64String)) + if not decoded.isOk(): + reader.raiseUnexpectedField("Failed decoding data", "MessageDigest") + data = some(decoded.get()) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if data.isNone(): + reader.raiseUnexpectedValue("Field `data` is missing") + + for i in 0 ..< 32: + value.data[i] = data.get()[i] + +## End of MessageDigest serde + +## Beginning of HistoryCursorRest serde + +proc writeValue*( + writer: var JsonWriter, value: HistoryCursorRest +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + writer.writeField("pubsub_topic", value.pubsubTopic) + writer.writeField("sender_time", value.senderTime) + writer.writeField("store_time", value.storeTime) + writer.writeField("digest", value.digest) + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var HistoryCursorRest +) {.gcsafe, raises: [SerializationError, IOError].} = + var + pubsubTopic = none(PubsubTopic) + senderTime = none(Timestamp) + storeTime = none(Timestamp) + digest = none(waku_store_common.MessageDigest) + + for fieldName in readObjectFields(reader): + case fieldName + of "pubsub_topic": + if pubsubTopic.isSome(): + reader.raiseUnexpectedField( + "Multiple `pubsub_topic` fields found", "HistoryCursorRest" + ) + pubsubTopic = some(reader.readValue(PubsubTopic)) + of "sender_time": + if senderTime.isSome(): + reader.raiseUnexpectedField( + "Multiple `sender_time` fields found", "HistoryCursorRest" + ) + senderTime = some(reader.readValue(Timestamp)) + of "store_time": + if storeTime.isSome(): + reader.raiseUnexpectedField( + "Multiple `store_time` fields found", "HistoryCursorRest" + ) + storeTime = some(reader.readValue(Timestamp)) + of "digest": + if digest.isSome(): + reader.raiseUnexpectedField( + "Multiple `digest` fields found", "HistoryCursorRest" + ) + digest = some(reader.readValue(waku_store_common.MessageDigest)) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if pubsubTopic.isNone(): + reader.raiseUnexpectedValue("Field `pubsub_topic` is missing") + + if senderTime.isNone(): + reader.raiseUnexpectedValue("Field `sender_time` is missing") + + if storeTime.isNone(): + reader.raiseUnexpectedValue("Field `store_time` is missing") + + if digest.isNone(): + reader.raiseUnexpectedValue("Field `digest` is missing") + + value = HistoryCursorRest( + pubsubTopic: pubsubTopic.get(), + senderTime: senderTime.get(), + storeTime: storeTime.get(), + digest: digest.get(), + ) + +## End of HistoryCursorRest serde + +## Beginning of StoreResponseRest serde + +proc writeValue*( + writer: var JsonWriter, value: StoreResponseRest +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + writer.writeField("messages", value.messages) + if value.cursor.isSome(): + 
writer.writeField("cursor", value.cursor.get()) + if value.errorMessage.isSome(): + writer.writeField("error_message", value.errorMessage.get()) + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var StoreResponseRest +) {.gcsafe, raises: [SerializationError, IOError].} = + var + messages = none(seq[StoreWakuMessage]) + cursor = none(HistoryCursorRest) + errorMessage = none(string) + + for fieldName in readObjectFields(reader): + case fieldName + of "messages": + if messages.isSome(): + reader.raiseUnexpectedField( + "Multiple `messages` fields found", "StoreResponseRest" + ) + messages = some(reader.readValue(seq[StoreWakuMessage])) + of "cursor": + if cursor.isSome(): + reader.raiseUnexpectedField( + "Multiple `cursor` fields found", "StoreResponseRest" + ) + cursor = some(reader.readValue(HistoryCursorRest)) + of "error_message": + if errorMessage.isSome(): + reader.raiseUnexpectedField( + "Multiple `error_message` fields found", "StoreResponseRest" + ) + errorMessage = some(reader.readValue(string)) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if messages.isNone(): + reader.raiseUnexpectedValue("Field `messages` is missing") + + value = StoreResponseRest( + messages: messages.get(), cursor: cursor, errorMessage: errorMessage + ) + +## End of StoreResponseRest serde + +## Beginning of StoreRequestRest serde + +proc writeValue*( + writer: var JsonWriter, value: StoreRequestRest +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + if value.pubsubTopic.isSome(): + writer.writeField("pubsub_topic", value.pubsubTopic.get()) + writer.writeField("content_topics", value.contentTopics) + if value.startTime.isSome(): + writer.writeField("start_time", value.startTime.get()) + if value.endTime.isSome(): + writer.writeField("end_time", value.endTime.get()) + writer.writeField("page_size", value.pageSize) + writer.writeField("ascending", value.ascending) + writer.endRecord() + +## End of StoreRequestRest serde diff --git a/waku/waku_api/rest/store/client.nim b/waku/waku_api/rest/store/client.nim index fb4e08a45c..90fab2f57d 100644 --- a/waku/waku_api/rest/store/client.nim +++ b/waku/waku_api/rest/store/client.nim @@ -6,7 +6,11 @@ else: import chronicles, json_serialization, json_serialization/std/options, presto/[route, client] import - ../../../waku_store/common, ../../../common/base64, ../serdes, ../responses, ./types + ../../../waku_store/common, + ../../../waku_core/message/digest, + ../serdes, + ../responses, + ./types export types @@ -14,12 +18,12 @@ logScope: topics = "waku node rest store_api" proc decodeBytes*( - t: typedesc[StoreResponseRest], + t: typedesc[StoreQueryResponse], data: openArray[byte], contentType: Opt[ContentTypeData], -): RestResult[StoreResponseRest] = +): RestResult[StoreQueryResponse] = if MediaType.init($contentType) == MIMETYPE_JSON: - let decoded = ?decodeFromJsonBytes(StoreResponseRest, data) + let decoded = ?decodeFromJsonBytes(StoreQueryResponse, data) return ok(decoded) if MediaType.init($contentType) == MIMETYPE_TEXT: @@ -29,51 +33,34 @@ proc decodeBytes*( copyMem(addr res[0], unsafeAddr data[0], len(data)) return ok( - StoreResponseRest( - messages: newSeq[StoreWakuMessage](0), - cursor: none(HistoryCursorRest), - # field that contain error information - errorMessage: some(res), + StoreQueryResponse( + statusCode: uint32(ErrorCode.BAD_RESPONSE), + statusDesc: res, + messages: newSeq[WakuMessageKeyValue](0), + paginationCursor: none(WakuMessageHash), ) ) # If everything goes wrong return 
err(cstring("Unsupported contentType " & $contentType)) -proc getStoreMessagesV1*( +proc getStoreMessagesV3*( # URL-encoded reference to the store-node peerAddr: string = "", + includeData: string = "", pubsubTopic: string = "", # URL-encoded comma-separated list of content topics contentTopics: string = "", startTime: string = "", endTime: string = "", - # Optional cursor fields - senderTime: string = "", - storeTime: string = "", - digest: string = "", # base64-encoded digest - pageSize: string = "", - ascending: string = "", -): RestResponse[StoreResponseRest] {. - rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet -.} - -proc getStoreMessagesV1*( - # URL-encoded reference to the store-node - peerAddr: Option[string], - pubsubTopic: string = "", - # URL-encoded comma-separated list of content topics - contentTopics: string = "", - startTime: string = "", - endTime: string = "", + # URL-encoded comma-separated list of message hashes + hashes: string = "", # Optional cursor fields - senderTime: string = "", - storeTime: string = "", - digest: string = "", # base64-encoded digest - pageSize: string = "", + cursor: string = "", # base64-encoded hash ascending: string = "", -): RestResponse[StoreResponseRest] {. - rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet + pageSize: string = "", +): RestResponse[StoreQueryResponse] {. + rest, endpoint: "/store/v3/messages", meth: HttpMethod.MethodGet .} diff --git a/waku/waku_api/rest/store/handlers.nim b/waku/waku_api/rest/store/handlers.nim index e25bbe4561..23817118b9 100644 --- a/waku/waku_api/rest/store/handlers.nim +++ b/waku/waku_api/rest/store/handlers.nim @@ -28,35 +28,35 @@ const NoPeerNoDiscError* = # Queries the store-node with the query parameters and # returns a RestApiResponse that is sent back to the api client. 
-proc performHistoryQuery( - selfNode: WakuNode, histQuery: HistoryQuery, storePeer: RemotePeerInfo +proc performStoreQuery( + selfNode: WakuNode, storeQuery: StoreQueryRequest, storePeer: RemotePeerInfo ): Future[RestApiResponse] {.async.} = - let queryFut = selfNode.query(histQuery, storePeer) + let queryFut = selfNode.query(storeQuery, storePeer) + if not await queryFut.withTimeout(futTimeout): const msg = "No history response received (timeout)" error msg return RestApiResponse.internalServerError(msg) - let res = queryFut.read() - if res.isErr(): - const TooManyRequestErrorStr = - $HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS) - if res.error == TooManyRequestErrorStr: - debug "Request rate limmit reached on peer ", storePeer - return RestApiResponse.tooManyRequests("Request rate limmit reached") - else: - const msg = "Error occurred in queryFut.read()" - error msg, error = res.error - return RestApiResponse.internalServerError(fmt("{msg} [{res.error}]")) - - let storeResp = res.value.toStoreResponseRest() - let resp = RestApiResponse.jsonResponse(storeResp, status = Http200) - if resp.isErr(): + let futRes = queryFut.read() + + if futRes.isErr(): + const msg = "Error occurred in queryFut.read()" + error msg, error = futRes.error + return RestApiResponse.internalServerError(fmt("{msg} [{futRes.error}]")) + + let res = futRes.get() + + if res.statusCode == uint32(ErrorCode.TOO_MANY_REQUESTS): + debug "Request rate limit reached on peer ", storePeer + return RestApiResponse.tooManyRequests("Request rate limit reached") + + let resp = RestApiResponse.jsonResponse(res, status = Http200).valueOr: const msg = "Error building the json respose" - error msg, error = resp.error - return RestApiResponse.internalServerError(fmt("{msg} [{resp.error}]")) + error msg, error = error + return RestApiResponse.internalServerError(fmt("{msg} [{error}]")) - return resp.get() + return resp # Converts a string time representation into an Option[Timestamp]. 
# Only positive time is considered a valid Timestamp in the request @@ -67,60 +67,34 @@ proc parseTime(input: Option[string]): Result[Option[Timestamp], string] = if time > 0: return ok(some(Timestamp(time))) except ValueError: - return err("Problem parsing time [" & getCurrentExceptionMsg() & "]") + return err("time parsing error: " & getCurrentExceptionMsg()) return ok(none(Timestamp)) -# Generates a history query cursor as per the given params -proc parseCursor( - parsedPubsubTopic: Option[string], - senderTime: Option[string], - storeTime: Option[string], - digest: Option[string], -): Result[Option[HistoryCursor], string] = - # Parse sender time - let parsedSenderTime = parseTime(senderTime) - if not parsedSenderTime.isOk(): - return err(parsedSenderTime.error) - - # Parse store time - let parsedStoreTime = parseTime(storeTime) - if not parsedStoreTime.isOk(): - return err(parsedStoreTime.error) - - # Parse message digest - let parsedMsgDigest = parseMsgDigest(digest) - if not parsedMsgDigest.isOk(): - return err(parsedMsgDigest.error) +proc parseIncludeData(input: Option[string]): Result[bool, string] = + var includeData = false + if input.isSome() and input.get() != "": + try: + includeData = parseBool(input.get()) + except ValueError: + return err("include data parsing error: " & getCurrentExceptionMsg()) - # Parse cursor information - if parsedPubsubTopic.isSome() and parsedSenderTime.value.isSome() and - parsedStoreTime.value.isSome() and parsedMsgDigest.value.isSome(): - return ok( - some( - HistoryCursor( - pubsubTopic: parsedPubsubTopic.get(), - senderTime: parsedSenderTime.value.get(), - storeTime: parsedStoreTime.value.get(), - digest: parsedMsgDigest.value.get(), - ) - ) - ) - else: - return ok(none(HistoryCursor)) + return ok(includeData) # Creates a HistoryQuery from the given params -proc createHistoryQuery( +proc createStoreQuery( + includeData: Option[string], pubsubTopic: Option[string], contentTopics: Option[string], - senderTime: Option[string], - storeTime: Option[string], - digest: Option[string], startTime: Option[string], endTime: Option[string], - pageSize: Option[string], + hashes: Option[string], + cursor: Option[string], direction: Option[string], -): Result[HistoryQuery, string] = + pageSize: Option[string], +): Result[StoreQueryRequest, string] = + var parsedIncludeData = ?parseIncludeData(includeData) + # Parse pubsubTopic parameter var parsedPubsubTopic = none(string) if pubsubTopic.isSome(): @@ -136,37 +110,41 @@ proc createHistoryQuery( for ct in ctList.split(','): parsedContentTopics.add(ct) - # Parse cursor information - let parsedCursor = ?parseCursor(parsedPubsubTopic, senderTime, storeTime, digest) - - # Parse page size field - var parsedPagedSize = DefaultPageSize - if pageSize.isSome() and pageSize.get() != "": - try: - parsedPagedSize = uint64(parseInt(pageSize.get())) - except CatchableError: - return err("Problem parsing page size [" & getCurrentExceptionMsg() & "]") - # Parse start time let parsedStartTime = ?parseTime(startTime) # Parse end time let parsedEndTime = ?parseTime(endTime) + var parsedHashes = ?parseHashes(hashes) + + # Parse cursor information + let parsedCursor = ?parseHash(cursor) + # Parse ascending field var parsedDirection = default() if direction.isSome() and direction.get() != "": parsedDirection = direction.get().into() + # Parse page size field + var parsedPagedSize = none(uint64) + if pageSize.isSome() and pageSize.get() != "": + try: + parsedPagedSize = some(uint64(parseInt(pageSize.get()))) + except CatchableError: + 
return err("page size parsing error: " & getCurrentExceptionMsg()) + return ok( - HistoryQuery( + StoreQueryRequest( + includeData: parsedIncludeData, pubsubTopic: parsedPubsubTopic, contentTopics: parsedContentTopics, startTime: parsedStartTime, endTime: parsedEndTime, - direction: parsedDirection, - pageSize: parsedPagedSize, - cursor: parsedCursor, + messageHashes: parsedHashes, + paginationCursor: parsedCursor, + paginationForward: parsedDirection, + paginationLimit: parsedPagedSize, ) ) @@ -179,17 +157,16 @@ proc toOpt(self: Option[Result[string, cstring]]): Option[string] = return some(self.get().value) proc retrieveMsgsFromSelfNode( - self: WakuNode, histQuery: HistoryQuery + self: WakuNode, storeQuery: StoreQueryRequest ): Future[RestApiResponse] {.async.} = ## Performs a "store" request to the local node (self node.) ## Notice that this doesn't follow the regular store libp2p channel because a node ## it is not allowed to libp2p-dial a node to itself, by default. ## - let selfResp = (await self.wakuStore.handleSelfStoreRequest(histQuery)).valueOr: + let storeResp = (await self.wakuStore.handleSelfStoreRequest(storeQuery)).valueOr: return RestApiResponse.internalServerError($error) - let storeResp = selfResp.toStoreResponseRest() let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr: const msg = "Error building the json respose" error msg, error = error @@ -204,51 +181,51 @@ proc installStoreApiHandlers*( discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), ) = # Handles the store-query request according to the passed parameters - router.api(MethodGet, "/store/v1/messages") do( + router.api(MethodGet, "/store/v3/messages") do( peerAddr: Option[string], + includeData: Option[string], pubsubTopic: Option[string], contentTopics: Option[string], - senderTime: Option[string], - storeTime: Option[string], - digest: Option[string], startTime: Option[string], endTime: Option[string], - pageSize: Option[string], - ascending: Option[string] + hashes: Option[string], + cursor: Option[string], + ascending: Option[string], + pageSize: Option[string] ) -> RestApiResponse: - debug "REST-GET /store/v1/messages ", peer_addr = $peerAddr + let peer = peerAddr.toOpt() + + debug "REST-GET /store/v3/messages ", peer_addr = $peer # All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding) # Example: # /store/v1/messages?peerAddr=%2Fip4%2F127.0.0.1%2Ftcp%2F60001%2Fp2p%2F16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\&pubsubTopic=my-waku-topic # Parse the rest of the parameters and create a HistoryQuery - let histQuery = createHistoryQuery( + let storeQuery = createStoreQuery( + includeData.toOpt(), pubsubTopic.toOpt(), contentTopics.toOpt(), - senderTime.toOpt(), - storeTime.toOpt(), - digest.toOpt(), startTime.toOpt(), endTime.toOpt(), - pageSize.toOpt(), + hashes.toOpt(), + cursor.toOpt(), ascending.toOpt(), - ) - - if not histQuery.isOk(): - return RestApiResponse.badRequest(histQuery.error) + pageSize.toOpt(), + ).valueOr: + return RestApiResponse.badRequest(error) - if peerAddr.isNone() and not node.wakuStore.isNil(): + if peer.isNone() and not node.wakuStore.isNil(): ## The user didn't specify a peer address and self-node is configured as a store node. ## In this case we assume that the user is willing to retrieve the messages stored by ## the local/self store node. 
- return await node.retrieveMsgsFromSelfNode(histQuery.get()) + return await node.retrieveMsgsFromSelfNode(storeQuery) # Parse the peer address parameter - let parsedPeerAddr = parseUrlPeerAddr(peerAddr.toOpt()).valueOr: + let parsedPeerAddr = parseUrlPeerAddr(peer).valueOr: return RestApiResponse.badRequest(error) - let peerAddr = parsedPeerAddr.valueOr: + let peerInfo = parsedPeerAddr.valueOr: node.peerManager.selectPeer(WakuStoreCodec).valueOr: let handler = discHandler.valueOr: return NoPeerNoDiscError @@ -261,4 +238,4 @@ proc installStoreApiHandlers*( "No suitable service peer & none discovered" ) - return await node.performHistoryQuery(histQuery.value, peerAddr) + return await node.performStoreQuery(storeQuery, peerInfo) diff --git a/waku/waku_api/rest/store/types.nim b/waku/waku_api/rest/store/types.nim index 7519b4fc69..55367b754f 100644 --- a/waku/waku_api/rest/store/types.nim +++ b/waku/waku_api/rest/store/types.nim @@ -4,163 +4,99 @@ else: {.push raises: [].} import - std/[sets, strformat, uri], - stew/byteutils, + std/[sets, strformat, uri, options], + stew/[byteutils, arrayops], chronicles, json_serialization, json_serialization/std/options, presto/[route, client, common] -import - ../../../waku_store/common as waku_store_common, - ../../../common/base64, - ../../../waku_core, - ../serdes +import ../../../waku_store/common, ../../../common/base64, ../../../waku_core, ../serdes #### Types -type - HistoryCursorRest* = object - pubsubTopic*: PubsubTopic - senderTime*: Timestamp - storeTime*: Timestamp - digest*: MessageDigest - - StoreRequestRest* = object - # inspired by https://github.com/waku-org/nwaku/blob/f95147f5b7edfd45f914586f2d41cd18fb0e0d18/waku/v2//waku_store/common.nim#L52 - pubsubTopic*: Option[PubsubTopic] - contentTopics*: seq[ContentTopic] - cursor*: Option[HistoryCursorRest] - startTime*: Option[Timestamp] - endTime*: Option[Timestamp] - pageSize*: uint64 - ascending*: bool - - StoreWakuMessage* = object - payload*: Base64String - contentTopic*: Option[ContentTopic] - version*: Option[uint32] - timestamp*: Option[Timestamp] - ephemeral*: Option[bool] - meta*: Option[Base64String] - - StoreResponseRest* = object # inspired by https://rfc.vac.dev/spec/16/#storeresponse - messages*: seq[StoreWakuMessage] - cursor*: Option[HistoryCursorRest] - # field that contains error information - errorMessage*: Option[string] - createJsonFlavor RestJson Json.setWriter JsonWriter, PreferredOutput = string #### Type conversion -# Converts a URL-encoded-base64 string into a 'MessageDigest' -proc parseMsgDigest*(input: Option[string]): Result[Option[MessageDigest], string] = +proc parseHash*(input: Option[string]): Result[Option[WakuMessageHash], string] = + let base64UrlEncoded = + if input.isSome(): + input.get() + else: + return ok(none(WakuMessageHash)) + + if base64UrlEncoded == "": + return ok(none(WakuMessageHash)) + + let base64Encoded = decodeUrl(base64UrlEncoded) + + let decodedBytes = base64.decode(Base64String(base64Encoded)).valueOr: + return err("waku message hash parsing error: " & error) + + let hash: WakuMessageHash = fromBytes(decodedBytes) + + return ok(some(hash)) + +proc parseHashes*(input: Option[string]): Result[seq[WakuMessageHash], string] = + var hashes: seq[WakuMessageHash] = @[] + if not input.isSome() or input.get() == "": - return ok(none(MessageDigest)) + return ok(hashes) let decodedUrl = decodeUrl(input.get()) - let base64Decoded = base64.decode(Base64String(decodedUrl)) - var messageDigest = MessageDigest() - if not base64Decoded.isOk(): - return 
err(base64Decoded.error) + if decodedUrl != "": + for subString in decodedUrl.split(','): + let hash = ?parseHash(some(subString)) - let base64DecodedArr = base64Decoded.get() - # Next snippet inspired by "nwaku/waku/waku_archive/archive.nim" - # TODO: Improve coherence of MessageDigest type - messageDigest = block: - var data: array[32, byte] - for i in 0 ..< min(base64DecodedArr.len, 32): - data[i] = base64DecodedArr[i] + if hash.isSome(): + hashes.add(hash.get()) - MessageDigest(data: data) - - return ok(some(messageDigest)) + return ok(hashes) # Converts a given MessageDigest object into a suitable # Base64-URL-encoded string suitable to be transmitted in a Rest # request-response. The MessageDigest is first base64 encoded # and this result is URL-encoded. -proc toRestStringMessageDigest*(self: MessageDigest): string = - let base64Encoded = base64.encode(self.data) +proc toRestStringWakuMessageHash*(self: WakuMessageHash): string = + let base64Encoded = base64.encode(self) encodeUrl($base64Encoded) -proc toWakuMessage*(message: StoreWakuMessage): WakuMessage = - WakuMessage( - payload: base64.decode(message.payload).get(), - contentTopic: message.contentTopic.get(), - version: message.version.get(), - timestamp: message.timestamp.get(), - ephemeral: message.ephemeral.get(), - meta: message.meta.get(Base64String("")).decode().get(), - ) - -# Converts a 'HistoryResponse' object to an 'StoreResponseRest' -# that can be serialized to a json object. -proc toStoreResponseRest*(histResp: HistoryResponse): StoreResponseRest = - proc toStoreWakuMessage(message: WakuMessage): StoreWakuMessage = - StoreWakuMessage( - payload: base64.encode(message.payload), - contentTopic: some(message.contentTopic), - version: some(message.version), - timestamp: some(message.timestamp), - ephemeral: some(message.ephemeral), - meta: - if message.meta.len > 0: - some(base64.encode(message.meta)) - else: - none(Base64String) - , - ) - - var storeWakuMsgs: seq[StoreWakuMessage] - for m in histResp.messages: - storeWakuMsgs.add(m.toStoreWakuMessage()) - - var cursor = none(HistoryCursorRest) - if histResp.cursor.isSome: - cursor = some( - HistoryCursorRest( - pubsubTopic: histResp.cursor.get().pubsubTopic, - senderTime: histResp.cursor.get().senderTime, - storeTime: histResp.cursor.get().storeTime, - digest: histResp.cursor.get().digest, - ) - ) - - StoreResponseRest(messages: storeWakuMsgs, cursor: cursor) - -## Beginning of StoreWakuMessage serde +## WakuMessage serde proc writeValue*( - writer: var JsonWriter, value: StoreWakuMessage + writer: var JsonWriter, msg: WakuMessage ) {.gcsafe, raises: [IOError].} = writer.beginRecord() - writer.writeField("payload", $value.payload) - if value.contentTopic.isSome(): - writer.writeField("contentTopic", value.contentTopic.get()) - if value.version.isSome(): - writer.writeField("version", value.version.get()) - if value.timestamp.isSome(): - writer.writeField("timestamp", value.timestamp.get()) - if value.ephemeral.isSome(): - writer.writeField("ephemeral", value.ephemeral.get()) - if value.meta.isSome(): - writer.writeField("meta", value.meta.get()) + + writer.writeField("payload", base64.encode(msg.payload)) + writer.writeField("contentTopic", msg.contentTopic) + + if msg.meta.len > 0: + writer.writeField("meta", base64.encode(msg.meta)) + + writer.writeField("version", msg.version) + writer.writeField("timestamp", msg.timestamp) + writer.writeField("ephemeral", msg.ephemeral) + + if msg.proof.len > 0: + writer.writeField("proof", base64.encode(msg.proof)) + 
writer.endRecord() proc readValue*( - reader: var JsonReader, value: var StoreWakuMessage + reader: var JsonReader, value: var WakuMessage ) {.gcsafe, raises: [SerializationError, IOError].} = var - payload = none(Base64String) - contentTopic = none(ContentTopic) - version = none(uint32) - timestamp = none(Timestamp) - ephemeral = none(bool) - meta = none(Base64String) + payload: seq[byte] + contentTopic: ContentTopic + version: uint32 + timestamp: Timestamp + ephemeral: bool + meta: seq[byte] + proof: seq[byte] var keys = initHashSet[string]() for fieldName in readObjectFields(reader): @@ -171,49 +107,56 @@ proc readValue*( fmt"Multiple `{fieldName}` fields found" except CatchableError: "Multiple fields with the same name found" - reader.raiseUnexpectedField(err, "StoreWakuMessage") + reader.raiseUnexpectedField(err, "WakuMessage") case fieldName of "payload": - payload = some(reader.readValue(Base64String)) + let base64String = reader.readValue(Base64String) + payload = base64.decode(base64String).valueOr: + reader.raiseUnexpectedField("Failed decoding data", "payload") of "contentTopic": - contentTopic = some(reader.readValue(ContentTopic)) + contentTopic = reader.readValue(ContentTopic) of "version": - version = some(reader.readValue(uint32)) + version = reader.readValue(uint32) of "timestamp": - timestamp = some(reader.readValue(Timestamp)) + timestamp = reader.readValue(Timestamp) of "ephemeral": - ephemeral = some(reader.readValue(bool)) + ephemeral = reader.readValue(bool) of "meta": - meta = some(reader.readValue(Base64String)) + let base64String = reader.readValue(Base64String) + meta = base64.decode(base64String).valueOr: + reader.raiseUnexpectedField("Failed decoding data", "meta") + of "proof": + let base64String = reader.readValue(Base64String) + proof = base64.decode(base64String).valueOr: + reader.raiseUnexpectedField("Failed decoding data", "proof") else: reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) - if payload.isNone(): + if payload.len == 0: reader.raiseUnexpectedValue("Field `payload` is missing") - value = StoreWakuMessage( - payload: payload.get(), + value = WakuMessage( + payload: payload, contentTopic: contentTopic, version: version, timestamp: timestamp, ephemeral: ephemeral, meta: meta, + proof: proof, ) -## End of StoreWakuMessage serde - -## Beginning of MessageDigest serde +## WakuMessageHash serde proc writeValue*( - writer: var JsonWriter, value: MessageDigest + writer: var JsonWriter, value: WakuMessageHash ) {.gcsafe, raises: [IOError].} = writer.beginRecord() - writer.writeField("data", base64.encode(value.data)) + writer.writeField("data", base64.encode(value)) writer.endRecord() proc readValue*( - reader: var JsonReader, value: var MessageDigest + reader: var JsonReader, value: var WakuMessageHash ) {.gcsafe, raises: [SerializationError, IOError].} = var data = none(seq[byte]) @@ -221,10 +164,10 @@ proc readValue*( case fieldName of "data": if data.isSome(): - reader.raiseUnexpectedField("Multiple `data` fields found", "MessageDigest") + reader.raiseUnexpectedField("Multiple `data` fields found", "WakuMessageHash") let decoded = base64.decode(reader.readValue(Base64String)) if not decoded.isOk(): - reader.raiseUnexpectedField("Failed decoding data", "MessageDigest") + reader.raiseUnexpectedField("Failed decoding data", "WakuMessageHash") data = some(decoded.get()) else: reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) @@ -233,149 +176,165 @@ proc readValue*( reader.raiseUnexpectedValue("Field `data` is 
missing") for i in 0 ..< 32: - value.data[i] = data.get()[i] + value[i] = data.get()[i] -## End of MessageDigest serde - -## Beginning of HistoryCursorRest serde +## WakuMessageKeyValue serde proc writeValue*( - writer: var JsonWriter, value: HistoryCursorRest + writer: var JsonWriter, value: WakuMessageKeyValue ) {.gcsafe, raises: [IOError].} = writer.beginRecord() - writer.writeField("pubsub_topic", value.pubsubTopic) - writer.writeField("sender_time", value.senderTime) - writer.writeField("store_time", value.storeTime) - writer.writeField("digest", value.digest) + + writer.writeField("message_hash", value.messageHash) + writer.writeField("message", value.message) + writer.endRecord() proc readValue*( - reader: var JsonReader, value: var HistoryCursorRest + reader: var JsonReader, value: var WakuMessageKeyValue ) {.gcsafe, raises: [SerializationError, IOError].} = var - pubsubTopic = none(PubsubTopic) - senderTime = none(Timestamp) - storeTime = none(Timestamp) - digest = none(MessageDigest) + messageHash = none(WakuMessageHash) + message = none(WakuMessage) for fieldName in readObjectFields(reader): case fieldName - of "pubsub_topic": - if pubsubTopic.isSome(): + of "message_hash": + if messageHash.isSome(): reader.raiseUnexpectedField( - "Multiple `pubsub_topic` fields found", "HistoryCursorRest" + "Multiple `message_hash` fields found", "WakuMessageKeyValue" ) - pubsubTopic = some(reader.readValue(PubsubTopic)) - of "sender_time": - if senderTime.isSome(): + messageHash = some(reader.readValue(WakuMessageHash)) + of "message": + if message.isSome(): reader.raiseUnexpectedField( - "Multiple `sender_time` fields found", "HistoryCursorRest" + "Multiple `message` fields found", "WakuMessageKeyValue" ) - senderTime = some(reader.readValue(Timestamp)) - of "store_time": - if storeTime.isSome(): - reader.raiseUnexpectedField( - "Multiple `store_time` fields found", "HistoryCursorRest" - ) - storeTime = some(reader.readValue(Timestamp)) - of "digest": - if digest.isSome(): - reader.raiseUnexpectedField( - "Multiple `digest` fields found", "HistoryCursorRest" - ) - digest = some(reader.readValue(MessageDigest)) + message = some(reader.readValue(WakuMessage)) else: reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) - if pubsubTopic.isNone(): - reader.raiseUnexpectedValue("Field `pubsub_topic` is missing") - - if senderTime.isNone(): - reader.raiseUnexpectedValue("Field `sender_time` is missing") - - if storeTime.isNone(): - reader.raiseUnexpectedValue("Field `store_time` is missing") - - if digest.isNone(): - reader.raiseUnexpectedValue("Field `digest` is missing") + if messageHash.isNone(): + reader.raiseUnexpectedValue("Field `message_hash` is missing") - value = HistoryCursorRest( - pubsubTopic: pubsubTopic.get(), - senderTime: senderTime.get(), - storeTime: storeTime.get(), - digest: digest.get(), - ) + if message.isNone(): + reader.raiseUnexpectedValue("Field `message` is missing") -## End of HistoryCursorRest serde + value = WakuMessageKeyValue(messageHash: messageHash.get(), message: message.get()) -## Beginning of StoreResponseRest serde +## StoreQueryResponse serde proc writeValue*( - writer: var JsonWriter, value: StoreResponseRest + writer: var JsonWriter, value: StoreQueryResponse ) {.gcsafe, raises: [IOError].} = writer.beginRecord() + + writer.writeField("request_id", value.requestId) + + writer.writeField("status_code", value.statusCode) + writer.writeField("status_desc", value.statusDesc) + writer.writeField("messages", value.messages) - if 
value.cursor.isSome(): - writer.writeField("cursor", value.cursor.get()) - if value.errorMessage.isSome(): - writer.writeField("error_message", value.errorMessage.get()) + + if value.paginationCursor.isSome(): + writer.writeField("pagination_cursor", value.paginationCursor.get()) + writer.endRecord() proc readValue*( - reader: var JsonReader, value: var StoreResponseRest + reader: var JsonReader, value: var StoreQueryResponse ) {.gcsafe, raises: [SerializationError, IOError].} = var - messages = none(seq[StoreWakuMessage]) - cursor = none(HistoryCursorRest) - errorMessage = none(string) + requestId = none(string) + code = none(uint32) + desc = none(string) + messages = none(seq[WakuMessageKeyValue]) + cursor = none(WakuMessageHash) for fieldName in readObjectFields(reader): case fieldName + of "request_id": + if requestId.isSome(): + reader.raiseUnexpectedField( + "Multiple `request_id` fields found", "StoreQueryResponse" + ) + requestId = some(reader.readValue(string)) + of "status_code": + if code.isSome(): + reader.raiseUnexpectedField( + "Multiple `status_code` fields found", "StoreQueryResponse" + ) + code = some(reader.readValue(uint32)) + of "status_desc": + if desc.isSome(): + reader.raiseUnexpectedField( + "Multiple `status_desc` fields found", "StoreQueryResponse" + ) + desc = some(reader.readValue(string)) of "messages": if messages.isSome(): reader.raiseUnexpectedField( - "Multiple `messages` fields found", "StoreResponseRest" + "Multiple `messages` fields found", "StoreQueryResponse" ) - messages = some(reader.readValue(seq[StoreWakuMessage])) - of "cursor": + messages = some(reader.readValue(seq[WakuMessageKeyValue])) + of "pagination_cursor": if cursor.isSome(): reader.raiseUnexpectedField( - "Multiple `cursor` fields found", "StoreResponseRest" + "Multiple `pagination_cursor` fields found", "StoreQueryResponse" ) - cursor = some(reader.readValue(HistoryCursorRest)) - of "error_message": - if errorMessage.isSome(): - reader.raiseUnexpectedField( - "Multiple `error_message` fields found", "StoreResponseRest" - ) - errorMessage = some(reader.readValue(string)) + cursor = some(reader.readValue(WakuMessageHash)) else: reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + if requestId.isNone(): + reader.raiseUnexpectedValue("Field `request_id` is missing") + + if code.isNone(): + reader.raiseUnexpectedValue("Field `status_code` is missing") + + if desc.isNone(): + reader.raiseUnexpectedValue("Field `status_desc` is missing") + if messages.isNone(): reader.raiseUnexpectedValue("Field `messages` is missing") - value = StoreResponseRest( - messages: messages.get(), cursor: cursor, errorMessage: errorMessage + value = StoreQueryResponse( + requestId: requestId.get(), + statusCode: code.get(), + statusDesc: desc.get(), + messages: messages.get(), + paginationCursor: cursor, ) -## End of StoreResponseRest serde - -## Beginning of StoreRequestRest serde +## StoreRequestRest serde proc writeValue*( - writer: var JsonWriter, value: StoreRequestRest + writer: var JsonWriter, req: StoreQueryRequest ) {.gcsafe, raises: [IOError].} = writer.beginRecord() - if value.pubsubTopic.isSome(): - writer.writeField("pubsub_topic", value.pubsubTopic.get()) - writer.writeField("content_topics", value.contentTopics) - if value.startTime.isSome(): - writer.writeField("start_time", value.startTime.get()) - if value.endTime.isSome(): - writer.writeField("end_time", value.endTime.get()) - writer.writeField("page_size", value.pageSize) - writer.writeField("ascending", value.ascending) - 
writer.endRecord() -## End of StoreRequestRest serde + writer.writeField("request_id", req.requestId) + writer.writeField("include_data", req.includeData) + + if req.pubsubTopic.isSome(): + writer.writeField("pubsub_topic", req.pubsubTopic.get()) + + writer.writeField("content_topics", req.contentTopics) + + if req.startTime.isSome(): + writer.writeField("start_time", req.startTime.get()) + + if req.endTime.isSome(): + writer.writeField("end_time", req.endTime.get()) + + writer.writeField("message_hashes", req.messageHashes) + + if req.paginationCursor.isSome(): + writer.writeField("pagination_cursor", req.paginationCursor.get()) + + writer.writeField("pagination_forward", req.paginationForward) + + if req.paginationLimit.isSome(): + writer.writeField("pagination_limit", req.paginationLimit.get()) + + writer.endRecord() diff --git a/waku/waku_archive/archive.nim b/waku/waku_archive/archive.nim index 6f5330f652..efbf17e0a9 100644 --- a/waku/waku_archive/archive.nim +++ b/waku/waku_archive/archive.nim @@ -193,6 +193,74 @@ proc findMessages*( return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor)) +proc findMessagesV2*( + self: WakuArchive, query: ArchiveQuery +): Future[ArchiveResult] {.async, gcsafe.} = + ## Search the archive to return a single page of messages matching the query criteria + + let maxPageSize = + if query.pageSize <= 0: + DefaultPageSize + else: + min(query.pageSize, MaxPageSize) + + let isAscendingOrder = query.direction.into() + + if query.contentTopics.len > 10: + return err(ArchiveError.invalidQuery("too many content topics")) + + let queryStartTime = getTime().toUnixFloat() + + let rows = ( + await self.driver.getMessagesV2( + contentTopic = query.contentTopics, + pubsubTopic = query.pubsubTopic, + cursor = query.cursor, + startTime = query.startTime, + endTime = query.endTime, + maxPageSize = maxPageSize + 1, + ascendingOrder = isAscendingOrder, + ) + ).valueOr: + return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error)) + + let queryDuration = getTime().toUnixFloat() - queryStartTime + waku_archive_query_duration_seconds.observe(queryDuration) + + var messages = newSeq[WakuMessage]() + var cursor = none(ArchiveCursor) + + if rows.len == 0: + return ok(ArchiveResponse(messages: messages, cursor: cursor)) + + ## Messages + let pageSize = min(rows.len, int(maxPageSize)) + + messages = rows[0 ..< pageSize].mapIt(it[1]) + + ## Cursor + if rows.len > int(maxPageSize): + ## Build last message cursor + ## The cursor is built from the last message INCLUDED in the response + ## (i.e. 
the second last message in the rows list) + + let (pubsubTopic, message, digest, storeTimestamp, _) = rows[^2] + + cursor = some( + ArchiveCursor( + digest: MessageDigest.fromBytes(digest), + storeTime: storeTimestamp, + sendertime: message.timestamp, + pubsubTopic: pubsubTopic, + ) + ) + + # All messages MUST be returned in chronological order + if not isAscendingOrder: + reverse(messages) + + return ok(ArchiveResponse(messages: messages, cursor: cursor)) + proc periodicRetentionPolicy(self: WakuArchive) {.async.} = debug "executing message retention policy" diff --git a/waku/waku_archive/driver.nim b/waku/waku_archive/driver.nim index e91a98ff78..6b7f28fce2 100644 --- a/waku/waku_archive/driver.nim +++ b/waku/waku_archive/driver.nim @@ -32,6 +32,18 @@ method getAllMessages*( ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = discard +method getMessagesV2*( + driver: ArchiveDriver, + contentTopic = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = + discard + method getMessages*( driver: ArchiveDriver, contentTopic = newSeq[ContentTopic](0), diff --git a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim index 756590cf1a..0d38320e02 100644 --- a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim +++ b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim @@ -35,6 +35,48 @@ const InsertRowStmtDefinition = # TODO: get the sql queries from a file const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc" const SelectNoCursorAscStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + storedAt >= $4 AND + storedAt <= $5 + ORDER BY storedAt ASC, messageHash ASC LIMIT $6;""" + +const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc" +const SelectNoCursorDescStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + storedAt >= $4 AND + storedAt <= $5 + ORDER BY storedAt DESC, messageHash DESC LIMIT $6;""" + +const SelectWithCursorDescStmtName = "SelectWithCursorDesc" +const SelectWithCursorDescStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (storedAt, messageHash) < ($4,$5) AND + storedAt >= $6 AND + storedAt <= $7 + ORDER BY storedAt DESC, messageHash DESC LIMIT $8;""" + +const SelectWithCursorAscStmtName = "SelectWithCursorAsc" +const SelectWithCursorAscStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (storedAt, messageHash) > ($4,$5) AND + storedAt >= $6 AND + storedAt <= $7 + ORDER BY storedAt ASC, messageHash ASC LIMIT $8;""" + +const SelectNoCursorV2AscStmtName = "SelectWithoutCursorV2Asc" +const SelectNoCursorV2AscStmtDef = """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages WHERE contentTopic IN ($1) AND pubsubTopic = $2 
AND @@ -42,8 +84,8 @@ const SelectNoCursorAscStmtDef = storedAt <= $4 ORDER BY storedAt ASC LIMIT $5;""" -const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc" -const SelectNoCursorDescStmtDef = +const SelectNoCursorV2DescStmtName = "SelectWithoutCursorV2Desc" +const SelectNoCursorV2DescStmtDef = """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages WHERE contentTopic IN ($1) AND pubsubTopic = $2 AND @@ -51,8 +93,8 @@ const SelectNoCursorDescStmtDef = storedAt <= $4 ORDER BY storedAt DESC LIMIT $5;""" -const SelectWithCursorDescStmtName = "SelectWithCursorDesc" -const SelectWithCursorDescStmtDef = +const SelectWithCursorV2DescStmtName = "SelectWithCursorV2Desc" +const SelectWithCursorV2DescStmtDef = """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages WHERE contentTopic IN ($1) AND pubsubTopic = $2 AND @@ -61,8 +103,8 @@ const SelectWithCursorDescStmtDef = storedAt <= $6 ORDER BY storedAt DESC LIMIT $7;""" -const SelectWithCursorAscStmtName = "SelectWithCursorAsc" -const SelectWithCursorAscStmtDef = +const SelectWithCursorV2AscStmtName = "SelectWithCursorV2Asc" +const SelectWithCursorV2AscStmtDef = """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages WHERE contentTopic IN ($1) AND pubsubTopic = $2 AND @@ -289,6 +331,70 @@ proc getMessagesArbitraryQuery( statements.add("pubsubTopic = ?") args.add(pubsubTopic.get()) + if cursor.isSome(): + let comp = if ascendingOrder: ">" else: "<" + statements.add("(storedAt, messageHash) " & comp & " (?,?)") + args.add($cursor.get().storeTime) + args.add(toHex(cursor.get().hash)) + + if startTime.isSome(): + statements.add("storedAt >= ?") + args.add($startTime.get()) + + if endTime.isSome(): + statements.add("storedAt <= ?") + args.add($endTime.get()) + + if statements.len > 0: + query &= " WHERE " & statements.join(" AND ") + + var direction: string + if ascendingOrder: + direction = "ASC" + else: + direction = "DESC" + + query &= " ORDER BY storedAt " & direction & ", messageHash " & direction + + query &= " LIMIT ?" + args.add($maxPageSize) + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + (await s.readConnPool.pgQuery(query, args, rowCallback)).isOkOr: + return err("failed to run query: " & $error) + + return ok(rows) + +proc getMessagesV2ArbitraryQuery( + s: PostgresDriver, + contentTopic: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## This proc allows to handle atypical queries. We don't use prepared statements for those. 
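
  # For orientation, the arbitrary-query builder above (getMessagesArbitraryQuery)
  # produces SQL of roughly this shape for a descending page with a pubsub-topic
  # filter, a time range and a (storedAt, messageHash) keyset cursor; the
  # placeholders are bound from `args` in the same order:
  #
  #   SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages
  #   WHERE pubsubTopic = ? AND (storedAt, messageHash) < (?,?) AND
  #         storedAt >= ? AND storedAt <= ?
  #   ORDER BY storedAt DESC, messageHash DESC LIMIT ?
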
+ + var query = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages""" + var statements: seq[string] + var args: seq[string] + + if contentTopic.len > 0: + let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")" + statements.add(cstmt) + for t in contentTopic: + args.add(t) + + if pubsubTopic.isSome(): + statements.add("pubsubTopic = ?") + args.add(pubsubTopic.get()) + if cursor.isSome(): let comp = if ascendingOrder: ">" else: "<" statements.add("(storedAt, id) " & comp & " (?,?)") @@ -333,6 +439,7 @@ proc getMessagesPreparedStmt( cursor = none(ArchiveCursor), startTime: Timestamp, endTime: Timestamp, + hashes: string, maxPageSize = DefaultPageSize, ascOrder = true, ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = @@ -355,6 +462,85 @@ proc getMessagesPreparedStmt( var stmtDef = if ascOrder: SelectWithCursorAscStmtDef else: SelectWithCursorDescStmtDef + let hash = toHex(cursor.get().hash) + let storeTime = $cursor.get().storeTime + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[ + contentTopic, hashes, pubsubTopic, storeTime, hash, startTimeStr, endTimeStr, + limit, + ], + @[ + int32(contentTopic.len), + int32(pubsubTopic.len), + int32(storeTime.len), + int32(hash.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + ) + ).isOkOr: + return err("failed to run query with cursor: " & $error) + else: + var stmtName = + if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName + var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit], + @[ + int32(contentTopic.len), + int32(pubsubTopic.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + ) + ).isOkOr: + return err("failed to run query without cursor: " & $error) + + return ok(rows) + +proc getMessagesV2PreparedStmt( + s: PostgresDriver, + contentTopic: string, + pubsubTopic: PubsubTopic, + cursor = none(ArchiveCursor), + startTime: Timestamp, + endTime: Timestamp, + maxPageSize = DefaultPageSize, + ascOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## This proc aims to run the most typical queries in a more performant way, i.e. by means of + ## prepared statements. + ## + ## contentTopic - string with list of conten topics. 
e.g: "'ctopic1','ctopic2','ctopic3'" + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + let startTimeStr = $startTime + let endTimeStr = $endTime + let limit = $maxPageSize + + if cursor.isSome(): + var stmtName = + if ascOrder: SelectWithCursorV2AscStmtName else: SelectWithCursorV2DescStmtName + var stmtDef = + if ascOrder: SelectWithCursorV2AscStmtDef else: SelectWithCursorV2DescStmtDef + let digest = toHex(cursor.get().digest.data) let storeTime = $cursor.get().storeTime @@ -379,8 +565,9 @@ proc getMessagesPreparedStmt( return err("failed to run query with cursor: " & $error) else: var stmtName = - if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName - var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef + if ascOrder: SelectNoCursorV2AscStmtName else: SelectNoCursorV2DescStmtName + var stmtDef = + if ascOrder: SelectNoCursorV2AscStmtDef else: SelectNoCursorV2DescStmtDef ( await s.readConnPool.runStmt( @@ -415,8 +602,8 @@ method getMessages*( ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = let hexHashes = hashes.mapIt(toHex(it)) - if contentTopicSeq.len == 1 and pubsubTopic.isSome() and startTime.isSome() and - endTime.isSome(): + if contentTopicSeq.len == 1 and hexHashes.len == 1 and pubsubTopic.isSome() and + startTime.isSome() and endTime.isSome(): ## Considered the most common query. Therefore, we use prepared statements to optimize it. return await s.getMessagesPreparedStmt( contentTopicSeq.join(","), @@ -424,6 +611,7 @@ method getMessages*( cursor, startTime.get(), endTime.get(), + hexHashes.join(","), maxPageSize, ascendingOrder, ) @@ -434,6 +622,35 @@ method getMessages*( ascendingOrder, ) +method getMessagesV2*( + s: PostgresDriver, + contentTopicSeq = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + if contentTopicSeq.len == 1 and pubsubTopic.isSome() and startTime.isSome() and + endTime.isSome(): + ## Considered the most common query. Therefore, we use prepared statements to optimize it. + return await s.getMessagesV2PreparedStmt( + contentTopicSeq.join(","), + PubsubTopic(pubsubTopic.get()), + cursor, + startTime.get(), + endTime.get(), + maxPageSize, + ascendingOrder, + ) + else: + ## We will run atypical query. 
In this case we don't use prepared statemets + return await s.getMessagesV2ArbitraryQuery( + contentTopicSeq, pubsubTopic, cursor, startTime, endTime, maxPageSize, + ascendingOrder, + ) + proc getStr( s: PostgresDriver, query: string ): Future[ArchiveDriverResult[string]] {.async.} = diff --git a/waku/waku_archive/driver/queue_driver/index.nim b/waku/waku_archive/driver/queue_driver/index.nim index c01862a4cd..d34b550c85 100644 --- a/waku/waku_archive/driver/queue_driver/index.nim +++ b/waku/waku_archive/driver/queue_driver/index.nim @@ -52,8 +52,10 @@ proc toIndex*(index: ArchiveCursor): Index = proc `==`*(x, y: Index): bool = ## receiverTime plays no role in index equality return - (x.senderTime == y.senderTime) and (x.digest == y.digest) and - (x.pubsubTopic == y.pubsubTopic) + ( + (x.senderTime == y.senderTime) and (x.digest == y.digest) and + (x.pubsubTopic == y.pubsubTopic) + ) or (x.hash == y.hash) # this applies to store v3 queries only proc cmp*(x, y: Index): int = ## compares x and y diff --git a/waku/waku_archive/driver/sqlite_driver/queries.nim b/waku/waku_archive/driver/sqlite_driver/queries.nim index ab1fa1e9f1..30e8af02b2 100644 --- a/waku/waku_archive/driver/sqlite_driver/queries.nim +++ b/waku/waku_archive/driver/sqlite_driver/queries.nim @@ -3,7 +3,7 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import std/[options, sequtils], stew/[results, byteutils, arrayops], sqlite3_abi +import std/[options, sequtils], stew/[results, byteutils], sqlite3_abi import ../../../common/databases/db_sqlite, ../../../common/databases/common, @@ -285,21 +285,21 @@ proc combineClauses(clauses: varargs[Option[string]]): Option[string] = where &= " AND " & clause return some(where) -proc whereClause( - cursor: Option[DbCursor], +proc whereClausev2( + cursor: bool, pubsubTopic: Option[PubsubTopic], contentTopic: seq[ContentTopic], startTime: Option[Timestamp], endTime: Option[Timestamp], - hashes: seq[WakuMessageHash], ascending: bool, ): Option[string] = let cursorClause = - if cursor.isNone(): - none(string) - else: + if cursor: let comp = if ascending: ">" else: "<" + some("(storedAt, id) " & comp & " (?, ?)") + else: + none(string) let pubsubTopicClause = if pubsubTopic.isNone(): @@ -330,24 +330,12 @@ proc whereClause( else: some("storedAt <= (?)") - let hashesClause = - if hashes.len <= 0: - none(string) - else: - var where = "messageHash IN (" - where &= "?" - for _ in 1 ..< hashes.len: - where &= ", ?" 
- where &= ")" - some(where) - return combineClauses( - cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause, - hashesClause, + cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause ) -proc selectMessagesWithLimitQuery( - table: string, where: Option[string], limit: uint, ascending = true +proc selectMessagesWithLimitQueryv2( + table: string, where: Option[string], limit: uint, ascending = true, v3 = false ): SqlQueryStr = let order = if ascending: "ASC" else: "DESC" @@ -361,25 +349,25 @@ proc selectMessagesWithLimitQuery( query &= " WHERE " & where.get() query &= " ORDER BY storedAt " & order & ", id " & order + query &= " LIMIT " & $limit & ";" return query -proc prepareSelectMessagesWithlimitStmt( +proc prepareStmt( db: SqliteDatabase, stmt: string ): DatabaseResult[SqliteStmt[void, void]] = var s: RawStmtPtr checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil) return ok(SqliteStmt[void, void](s)) -proc execSelectMessagesWithLimitStmt( +proc execSelectMessagesV2WithLimitStmt( s: SqliteStmt, cursor: Option[DbCursor], pubsubTopic: Option[PubsubTopic], contentTopic: seq[ContentTopic], startTime: Option[Timestamp], endTime: Option[Timestamp], - hashes: seq[WakuMessageHash], onRowCallback: DataProc, ): DatabaseResult[void] = let s = RawStmtPtr(s) @@ -387,7 +375,7 @@ proc execSelectMessagesWithLimitStmt( # Bind params var paramIndex = 1 - if cursor.isSome(): # cursor = storedAt, id, pubsubTopic + if cursor.isSome(): let (storedAt, id, _) = cursor.get() checkErr bindParam(s, paramIndex, storedAt) paramIndex += 1 @@ -403,14 +391,202 @@ proc execSelectMessagesWithLimitStmt( checkErr bindParam(s, paramIndex, topic.toBytes()) paramIndex += 1 - for hash in hashes: - let bytes: array[32, byte] = hash - var byteSeq: seq[byte] + if startTime.isSome(): + let time = startTime.get() + checkErr bindParam(s, paramIndex, time) + paramIndex += 1 + + if endTime.isSome(): + let time = endTime.get() + checkErr bindParam(s, paramIndex, time) + paramIndex += 1 + + try: + while true: + let v = sqlite3_step(s) + case v + of SQLITE_ROW: + onRowCallback(s) + of SQLITE_DONE: + return ok() + else: + return err($sqlite3_errstr(v)) + finally: + # release implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible - let byteCount = copyFrom(byteSeq, bytes) - assert byteCount == 32 +proc execSelectMessageByHash( + s: SqliteStmt, hash: WakuMessageHash, onRowCallback: DataProc +): DatabaseResult[void] = + let s = RawStmtPtr(s) - checkErr bindParam(s, paramIndex, byteSeq) + checkErr bindParam(s, 1, toSeq(hash)) + + try: + while true: + let v = sqlite3_step(s) + case v + of SQLITE_ROW: + onRowCallback(s) + of SQLITE_DONE: + return ok() + else: + return err($sqlite3_errstr(v)) + finally: + # release implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) + # no errors possible + +proc selectMessagesByHistoryQueryWithLimit*( + db: SqliteDatabase, + contentTopic: seq[ContentTopic], + pubsubTopic: Option[PubsubTopic], + cursor: Option[DbCursor], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + limit: uint, + ascending: bool, +): DatabaseResult[ + seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] +] = + var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] = + @[] + + proc queryRowCallback(s: ptr sqlite3_stmt) = + let + pubsubTopic = 
queryRowPubsubTopicCallback(s, pubsubTopicCol = 3) + message = queryRowWakuMessageCallback( + s, contentTopicCol = 1, payloadCol = 2, versionCol = 4, senderTimestampCol = 5 + ) + digest = queryRowDigestCallback(s, digestCol = 6) + storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0) + hash = queryRowWakuMessageHashCallback(s, hashCol = 7) + + messages.add((pubsubTopic, message, digest, storedAt, hash)) + + let query = block: + let where = whereClausev2( + cursor.isSome(), pubsubTopic, contentTopic, startTime, endTime, ascending + ) + + selectMessagesWithLimitQueryv2(DbTable, where, limit, ascending) + + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessagesV2WithLimitStmt( + cursor, pubsubTopic, contentTopic, startTime, endTime, queryRowCallback + ) + dbStmt.dispose() + + return ok(messages) + +### Store v3 ### + +proc selectMessageByHashQuery(): SqlQueryStr = + var query: string + + query = "SELECT contentTopic, payload, version, timestamp, messageHash" + query &= " FROM " & DbTable + query &= " WHERE messageHash = (?)" + + return query + +proc whereClause( + cursor: bool, + pubsubTopic: Option[PubsubTopic], + contentTopic: seq[ContentTopic], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + ascending: bool, +): Option[string] = + let cursorClause = + if cursor: + let comp = if ascending: ">" else: "<" + + some("(timestamp, messageHash) " & comp & " (?, ?)") + else: + none(string) + + let pubsubTopicClause = + if pubsubTopic.isNone(): + none(string) + else: + some("pubsubTopic = (?)") + + let contentTopicClause = + if contentTopic.len <= 0: + none(string) + else: + var where = "contentTopic IN (" + where &= "?" + for _ in 1 ..< contentTopic.len: + where &= ", ?" + where &= ")" + some(where) + + let startTimeClause = + if startTime.isNone(): + none(string) + else: + some("storedAt >= (?)") + + let endTimeClause = + if endTime.isNone(): + none(string) + else: + some("storedAt <= (?)") + + let hashesClause = + if hashes.len <= 0: + none(string) + else: + var where = "messageHash IN (" + where &= "?" + for _ in 1 ..< hashes.len: + where &= ", ?" 
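+      # One "?" placeholder is emitted per requested hash, so e.g. three
+      # hashes yield the SQL fragment: messageHash IN (?, ?, ?)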
+ where &= ")" + some(where) + + return combineClauses( + cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause, + hashesClause, + ) + +proc execSelectMessagesWithLimitStmt( + s: SqliteStmt, + cursor: Option[(Timestamp, WakuMessageHash)], + pubsubTopic: Option[PubsubTopic], + contentTopic: seq[ContentTopic], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + onRowCallback: DataProc, +): DatabaseResult[void] = + let s = RawStmtPtr(s) + + # Bind params + var paramIndex = 1 + + if cursor.isSome(): + let (time, hash) = cursor.get() + checkErr bindParam(s, paramIndex, time) + paramIndex += 1 + checkErr bindParam(s, paramIndex, toSeq(hash)) + paramIndex += 1 + + if pubsubTopic.isSome(): + let pubsubTopic = toBytes(pubsubTopic.get()) + checkErr bindParam(s, paramIndex, pubsubTopic) + paramIndex += 1 + + for topic in contentTopic: + checkErr bindParam(s, paramIndex, topic.toBytes()) + paramIndex += 1 + + for hash in hashes: + checkErr bindParam(s, paramIndex, toSeq(hash)) paramIndex += 1 if startTime.isSome(): @@ -438,11 +614,31 @@ proc execSelectMessagesWithLimitStmt( discard sqlite3_reset(s) # same return information as step discard sqlite3_clear_bindings(s) # no errors possible -proc selectMessagesByHistoryQueryWithLimit*( +proc selectMessagesWithLimitQuery( + table: string, where: Option[string], limit: uint, ascending = true, v3 = false +): SqlQueryStr = + let order = if ascending: "ASC" else: "DESC" + + var query: string + + query = + "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash" + query &= " FROM " & table + + if where.isSome(): + query &= " WHERE " & where.get() + + query &= " ORDER BY storedAt " & order & ", messageHash " & order + + query &= " LIMIT " & $limit & ";" + + return query + +proc selectMessagesByStoreQueryWithLimit*( db: SqliteDatabase, contentTopic: seq[ContentTopic], pubsubTopic: Option[PubsubTopic], - cursor: Option[DbCursor], + cursor: Option[WakuMessageHash], startTime: Option[Timestamp], endTime: Option[Timestamp], hashes: seq[WakuMessageHash], @@ -451,8 +647,32 @@ proc selectMessagesByHistoryQueryWithLimit*( ): DatabaseResult[ seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] ] = + # Must first get the message timestamp before paginating by time + let newCursor = + if cursor.isSome() and cursor.get() != EmptyWakuMessageHash: + let hash: WakuMessageHash = cursor.get() + + var wakuMessage: WakuMessage + + proc queryRowCallback(s: ptr sqlite3_stmt) = + wakuMessage = queryRowWakuMessageCallback( + s, contentTopicCol = 0, payloadCol = 1, versionCol = 2, senderTimestampCol = 3 + ) + + let query = selectMessageByHashQuery() + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessageByHash(hash, queryRowCallback) + dbStmt.dispose() + + let time: Timestamp = wakuMessage.timestamp + + some((time, hash)) + else: + none((Timestamp, WakuMessageHash)) + var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] = @[] + proc queryRowCallback(s: ptr sqlite3_stmt) = let pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3) @@ -467,13 +687,20 @@ proc selectMessagesByHistoryQueryWithLimit*( let query = block: let where = whereClause( - cursor, pubsubTopic, contentTopic, startTime, endTime, hashes, ascending + newCursor.isSome(), + pubsubTopic, + contentTopic, + startTime, + endTime, + hashes, + ascending, ) - selectMessagesWithLimitQuery(DbTable, where, limit, ascending) - let dbStmt = 
?db.prepareSelectMessagesWithlimitStmt(query) + selectMessagesWithLimitQuery(DbTable, where, limit, ascending, true) + + let dbStmt = ?db.prepareStmt(query) ?dbStmt.execSelectMessagesWithLimitStmt( - cursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback + newCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback ) dbStmt.dispose() diff --git a/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim b/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim index 5331f2ea47..5a67b5778e 100644 --- a/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim +++ b/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim @@ -83,20 +83,50 @@ method getAllMessages*( ## Retrieve all messages from the store. return s.db.selectAllMessages() -method getMessages*( +method getMessagesV2*( s: SqliteDriver, contentTopic = newSeq[ContentTopic](0), pubsubTopic = none(PubsubTopic), cursor = none(ArchiveCursor), startTime = none(Timestamp), endTime = none(Timestamp), - hashes = newSeq[WakuMessageHash](0), maxPageSize = DefaultPageSize, ascendingOrder = true, ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + echo "here" + let cursor = cursor.map(toDbCursor) let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit( + contentTopic, + pubsubTopic, + cursor, + startTime, + endTime, + limit = maxPageSize, + ascending = ascendingOrder, + ) + + return rowsRes + +method getMessages*( + s: SqliteDriver, + contentTopic = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + let cursor = + if cursor.isSome(): + some(cursor.get().hash) + else: + none(WakuMessageHash) + + let rowsRes = s.db.selectMessagesByStoreQueryWithLimit( contentTopic, pubsubTopic, cursor, diff --git a/waku/waku_core/message/digest.nim b/waku/waku_core/message/digest.nim index 342b256172..48d89d5e3e 100644 --- a/waku/waku_core/message/digest.nim +++ b/waku/waku_core/message/digest.nim @@ -11,6 +11,11 @@ import ../topics, ./message type WakuMessageHash* = array[32, byte] +const EmptyWakuMessageHash*: WakuMessageHash = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, +] + converter fromBytes*(array: openArray[byte]): WakuMessageHash = var hash: WakuMessageHash let copiedBytes = copyFrom(hash, array) diff --git a/waku/waku_store/client.nim b/waku/waku_store/client.nim index b0fac6fd21..4f378ddbcf 100644 --- a/waku/waku_store/client.nim +++ b/waku/waku_store/client.nim @@ -5,17 +5,7 @@ else: import std/options, stew/results, chronicles, chronos, metrics, bearssl/rand import - ../node/peer_manager, - ../utils/requests, - ./protocol_metrics, - ./common, - ./rpc, - ./rpc_codec - -when defined(waku_exp_store_resume): - import std/[sequtils, times] - import ../waku_archive - import ../waku_core/message/digest + ../node/peer_manager, ../utils/requests, ./protocol_metrics, ./common, ./rpc_codec logScope: topics = "waku store client" @@ -27,216 +17,48 @@ type WakuStoreClient* = ref object peerManager: PeerManager rng: ref rand.HmacDrbgContext - # TODO: Move outside of the client - when defined(waku_exp_store_resume): - store: ArchiveDriver - proc new*( T: type WakuStoreClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext ): T = WakuStoreClient(peerManager: peerManager, rng: rng) -proc 
sendHistoryQueryRPC( - w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo -): Future[HistoryResult] {.async, gcsafe.} = - let connOpt = await w.peerManager.dialPeer(peer, WakuStoreCodec) - if connOpt.isNone(): - waku_store_errors.inc(labelValues = [dialFailure]) - return err(HistoryError(kind: HistoryErrorKind.PEER_DIAL_FAILURE, address: $peer)) +proc sendStoreRequest( + self: WakuStoreClient, request: StoreQueryRequest, connection: Connection +): Future[StoreQueryResult] {.async, gcsafe.} = + var req = request - let connection = connOpt.get() + req.requestId = generateRequestId(self.rng) - let reqRpc = HistoryRPC(requestId: generateRequestId(w.rng), query: some(req.toRPC())) - await connection.writeLP(reqRpc.encode().buffer) + let writeRes = catch: + await connection.writeLP(req.encode().buffer) + if writeRes.isErr(): + return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: writeRes.error.msg)) - #TODO: I see a challenge here, if storeNode uses a different MaxRPCSize this read will fail. - # Need to find a workaround for this. - let buf = await connection.readLp(DefaultMaxRpcSize.int) - let respDecodeRes = HistoryRPC.decode(buf) - if respDecodeRes.isErr(): - waku_store_errors.inc(labelValues = [decodeRpcFailure]) - return - err(HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: decodeRpcFailure)) + let readRes = catch: + await connection.readLp(DefaultMaxRpcSize.int) - let respRpc = respDecodeRes.get() + let buf = readRes.valueOr: + return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: error.msg)) - # Disabled ,for now, since the default response is a possible case (no messages, pagesize = 0, error = NONE(0)) - # TODO: Rework the RPC protocol to differentiate the default value from an empty value (e.g., status = 200 (OK)) - # and rework the protobuf parsing to return Option[T] when empty values are received - if respRpc.response.isNone(): - waku_store_errors.inc(labelValues = [emptyRpcResponseFailure]) - return err( - HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: emptyRpcResponseFailure) - ) + let res = StoreQueryResponse.decode(buf).valueOr: + waku_store_errors.inc(labelValues = [decodeRpcFailure]) + return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: decodeRpcFailure)) - let resp = respRpc.response.get() + if res.statusCode != uint32(StatusCode.SUCCESS): + waku_store_errors.inc(labelValues = [res.statusDesc]) + return err(StoreError.new(res.statusCode, res.statusDesc)) - return resp.toAPI() + return ok(res) proc query*( - w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo -): Future[HistoryResult] {.async, gcsafe.} = - return await w.sendHistoryQueryRPC(req, peer) - -# TODO: Move outside of the client -when defined(waku_exp_store_resume): - ## Resume store - - const StoreResumeTimeWindowOffset: Timestamp = getNanosecondTime(20) - ## Adjust the time window with an offset of 20 seconds - - proc new*( - T: type WakuStoreClient, - peerManager: PeerManager, - rng: ref rand.HmacDrbgContext, - store: ArchiveDriver, - ): T = - WakuStoreClient(peerManager: peerManager, rng: rng, store: store) - - proc queryAll( - w: WakuStoreClient, query: HistoryQuery, peer: RemotePeerInfo - ): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} = - ## A thin wrapper for query. Sends the query to the given peer. when the query has a valid pagingInfo, - ## it retrieves the historical messages in pages. 
- ## Returns all the fetched messages, if error occurs, returns an error string - - # Make a copy of the query - var req = query - - var messageList: seq[WakuMessage] = @[] - - while true: - let queryRes = await w.query(req, peer) - if queryRes.isErr(): - return err($queryRes.error) - - let response = queryRes.get() - - messageList.add(response.messages) - - # Check whether it is the last page - if response.cursor.isNone(): - break - - # Update paging cursor - req.cursor = response.cursor - - return ok(messageList) - - proc queryLoop( - w: WakuStoreClient, req: HistoryQuery, peers: seq[RemotePeerInfo] - ): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} = - ## Loops through the peers candidate list in order and sends the query to each - ## - ## Once all responses have been received, the retrieved messages are consolidated into one deduplicated list. - ## if no messages have been retrieved, the returned future will resolve into a result holding an empty seq. - let queryFuturesList = peers.mapIt(w.queryAll(req, it)) - - await allFutures(queryFuturesList) - - let messagesList = queryFuturesList - .map( - proc(fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] = - try: - # fut.read() can raise a CatchableError - # These futures have been awaited before using allFutures(). Call completed() just as a sanity check. - if not fut.completed() or fut.read().isErr(): - return @[] - - fut.read().value - except CatchableError: - return @[] - ) - .concat() - .deduplicate() - - return ok(messagesList) - - proc put( - store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage - ): Result[void, string] = - let - digest = waku_archive.computeDigest(message) - messageHash = computeMessageHash(pubsubTopic, message) - receivedTime = - if message.timestamp > 0: - message.timestamp - else: - getNanosecondTime(getTime().toUnixFloat()) - - store.put(pubsubTopic, message, digest, messageHash, receivedTime) - - proc resume*( - w: WakuStoreClient, - peerList = none(seq[RemotePeerInfo]), - pageSize = DefaultPageSize, - pubsubTopic = DefaultPubsubTopic, - ): Future[WakuStoreResult[uint64]] {.async, gcsafe.} = - ## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku store node has been online - ## messages are stored in the store node's messages field and in the message db - ## the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message - ## an offset of 20 second is added to the time window to count for nodes asynchrony - ## peerList indicates the list of peers to query from. - ## The history is fetched from all available peers in this list and then consolidated into one deduplicated list. - ## Such candidates should be found through a discovery method (to be developed). - ## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from. - ## The history gets fetched successfully if the dialed peer has been online during the queried time window. - ## the resume proc returns the number of retrieved messages if no error occurs, otherwise returns the error string - - # If store has not been provided, don't even try - if w.store.isNil(): - return err("store not provided (nil)") - - # NOTE: Original implementation is based on the message's sender timestamp. 
At the moment - # of writing, the sqlite store implementation returns the last message's receiver - # timestamp. - # lastSeenTime = lastSeenItem.get().msg.timestamp - let - lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0)) - now = getNanosecondTime(getTime().toUnixFloat()) - - debug "resuming with offline time window", - lastSeenTime = lastSeenTime, currentTime = now - - let - queryEndTime = now + StoreResumeTimeWindowOffset - queryStartTime = max(lastSeenTime - StoreResumeTimeWindowOffset, 0) - - let req = HistoryQuery( - pubsubTopic: some(pubsubTopic), - startTime: some(queryStartTime), - endTime: some(queryEndTime), - pageSize: uint64(pageSize), - direction: default(), - ) - - var res: WakuStoreResult[seq[WakuMessage]] - if peerList.isSome(): - debug "trying the candidate list to fetch the history" - res = await w.queryLoop(req, peerList.get()) - else: - debug "no candidate list is provided, selecting a random peer" - # if no peerList is set then query from one of the peers stored in the peer manager - let peerOpt = w.peerManager.selectPeer(WakuStoreCodec) - if peerOpt.isNone(): - warn "no suitable remote peers" - waku_store_errors.inc(labelValues = [peerNotFoundFailure]) - return err("no suitable remote peers") - - debug "a peer is selected from peer manager" - res = await w.queryAll(req, peerOpt.get()) - - if res.isErr(): - debug "failed to resume the history" - return err("failed to resume the history") - - # Save the retrieved messages in the store - var added: uint = 0 - for msg in res.get(): - let putStoreRes = w.store.put(pubsubTopic, msg) - if putStoreRes.isErr(): - continue - - added.inc() - - return ok(added) + self: WakuStoreClient, request: StoreQueryRequest, peer: RemotePeerInfo +): Future[StoreQueryResult] {.async, gcsafe.} = + if request.paginationCursor.isSome() and request.paginationCursor.get() == EmptyCursor: + return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: "invalid cursor")) + + let connection = (await self.peerManager.dialPeer(peer, WakuStoreCodec)).valueOr: + waku_store_errors.inc(labelValues = [dialFailure]) + + return err(StoreError(kind: ErrorCode.PEER_DIAL_FAILURE, address: $peer)) + + return await self.sendStoreRequest(request, connection) diff --git a/waku/waku_store/common.nim b/waku/waku_store/common.nim index 67af41a68a..b078e5574e 100644 --- a/waku/waku_store/common.nim +++ b/waku/waku_store/common.nim @@ -3,107 +3,118 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import std/[options, sequtils], stew/results, stew/byteutils, nimcrypto/sha2 +import std/[options], stew/results import ../waku_core, ../common/paging const - WakuStoreCodec* = "/vac/waku/store/2.0.0-beta4" + WakuStoreCodec* = "/vac/waku/store-query/3.0.0" DefaultPageSize*: uint64 = 20 MaxPageSize*: uint64 = 100 -type WakuStoreResult*[T] = Result[T, string] - -## Waku message digest - -type MessageDigest* = MDigest[256] + EmptyCursor*: WakuMessageHash = EmptyWakuMessageHash -proc computeDigest*(msg: WakuMessage): MessageDigest = - var ctx: sha256 - ctx.init() - defer: - ctx.clear() - - ctx.update(msg.contentTopic.toBytes()) - ctx.update(msg.payload) - - # Computes the hash - return ctx.finish() +type WakuStoreResult*[T] = Result[T, string] ## Public API types type - HistoryCursor* = object - pubsubTopic*: PubsubTopic - senderTime*: Timestamp - storeTime*: Timestamp - digest*: MessageDigest + StoreQueryRequest* = object + requestId*: string + includeData*: bool - HistoryQuery* = object pubsubTopic*: Option[PubsubTopic] contentTopics*: 
seq[ContentTopic] - cursor*: Option[HistoryCursor] startTime*: Option[Timestamp] endTime*: Option[Timestamp] - pageSize*: uint64 - direction*: PagingDirection - HistoryResponse* = object - messages*: seq[WakuMessage] - cursor*: Option[HistoryCursor] + messageHashes*: seq[WakuMessageHash] + + paginationCursor*: Option[WakuMessageHash] + paginationForward*: PagingDirection + paginationLimit*: Option[uint64] + + WakuMessageKeyValue* = object + messageHash*: WakuMessageHash + message*: WakuMessage + + StoreQueryResponse* = object + requestId*: string + + statusCode*: uint32 + statusDesc*: string + + messages*: seq[WakuMessageKeyValue] + + paginationCursor*: Option[WakuMessageHash] - HistoryErrorKind* {.pure.} = enum + StatusCode* {.pure.} = enum UNKNOWN = uint32(000) + SUCCESS = uint32(200) BAD_RESPONSE = uint32(300) BAD_REQUEST = uint32(400) TOO_MANY_REQUESTS = uint32(429) SERVICE_UNAVAILABLE = uint32(503) PEER_DIAL_FAILURE = uint32(504) - HistoryError* = object - case kind*: HistoryErrorKind - of PEER_DIAL_FAILURE: + ErrorCode* {.pure.} = enum + UNKNOWN = uint32(000) + BAD_RESPONSE = uint32(300) + BAD_REQUEST = uint32(400) + TOO_MANY_REQUESTS = uint32(429) + SERVICE_UNAVAILABLE = uint32(503) + PEER_DIAL_FAILURE = uint32(504) + + StoreError* = object + case kind*: ErrorCode + of ErrorCode.PEER_DIAL_FAILURE: address*: string - of BAD_RESPONSE, BAD_REQUEST: + of ErrorCode.BAD_RESPONSE, ErrorCode.BAD_REQUEST: cause*: string else: discard - HistoryResult* = Result[HistoryResponse, HistoryError] + StoreQueryResult* = Result[StoreQueryResponse, StoreError] + +proc into*(errCode: ErrorCode): StatusCode = + StatusCode(uint32(errCode)) + +proc new*(T: type StoreError, code: uint32, desc: string): T = + let kind = ErrorCode.parse(code) -proc parse*(T: type HistoryErrorKind, kind: uint32): T = case kind - of 000, 200, 300, 400, 429, 503: - HistoryErrorKind(kind) + of ErrorCode.BAD_RESPONSE: + return StoreError(kind: kind, cause: desc) + of ErrorCode.BAD_REQUEST: + return StoreError(kind: kind, cause: desc) + of ErrorCode.TOO_MANY_REQUESTS: + return StoreError(kind: kind) + of ErrorCode.SERVICE_UNAVAILABLE: + return StoreError(kind: kind) + of ErrorCode.PEER_DIAL_FAILURE: + return StoreError(kind: kind, address: desc) + of ErrorCode.UNKNOWN: + return StoreError(kind: kind) + +proc parse*(T: type ErrorCode, kind: uint32): T = + case kind + of 000, 300, 400, 429, 503, 504: + ErrorCode(kind) else: - HistoryErrorKind.UNKNOWN + ErrorCode.UNKNOWN -proc `$`*(err: HistoryError): string = +proc `$`*(err: StoreError): string = case err.kind - of HistoryErrorKind.PEER_DIAL_FAILURE: + of ErrorCode.PEER_DIAL_FAILURE: "PEER_DIAL_FAILURE: " & err.address - of HistoryErrorKind.BAD_RESPONSE: + of ErrorCode.BAD_RESPONSE: "BAD_RESPONSE: " & err.cause - of HistoryErrorKind.BAD_REQUEST: + of ErrorCode.BAD_REQUEST: "BAD_REQUEST: " & err.cause - of HistoryErrorKind.TOO_MANY_REQUESTS: + of ErrorCode.TOO_MANY_REQUESTS: "TOO_MANY_REQUESTS" - of HistoryErrorKind.SERVICE_UNAVAILABLE: + of ErrorCode.SERVICE_UNAVAILABLE: "SERVICE_UNAVAILABLE" - of HistoryErrorKind.UNKNOWN: + of ErrorCode.UNKNOWN: "UNKNOWN" - -proc checkHistCursor*(self: HistoryCursor): Result[void, HistoryError] = - if self.pubsubTopic.len == 0: - return err(HistoryError(kind: BAD_REQUEST, cause: "empty pubsubTopic")) - if self.senderTime == 0: - return err(HistoryError(kind: BAD_REQUEST, cause: "invalid senderTime")) - if self.storeTime == 0: - return err(HistoryError(kind: BAD_REQUEST, cause: "invalid storeTime")) - if self.digest.data.all( - proc(x: byte): bool = 
- x == 0 - ): - return err(HistoryError(kind: BAD_REQUEST, cause: "empty digest")) - return ok() diff --git a/waku/waku_store/protocol.nim b/waku/waku_store/protocol.nim index 6818d42133..22210a64a3 100644 --- a/waku/waku_store/protocol.nim +++ b/waku/waku_store/protocol.nim @@ -21,7 +21,6 @@ import ../waku_core, ../node/peer_manager, ./common, - ./rpc, ./rpc_codec, ./protocol_metrics, ../common/ratelimit, @@ -33,105 +32,109 @@ logScope: const MaxMessageTimestampVariance* = getNanoSecondTime(20) # 20 seconds maximum allowable sender timestamp "drift" -type HistoryQueryHandler* = - proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} +type StoreQueryRequestHandler* = + proc(req: StoreQueryRequest): Future[StoreQueryResult] {.async, gcsafe.} type WakuStore* = ref object of LPProtocol peerManager: PeerManager rng: ref rand.HmacDrbgContext - queryHandler*: HistoryQueryHandler + requestHandler*: StoreQueryRequestHandler requestRateLimiter*: Option[TokenBucket] ## Protocol -proc initProtocolHandler(ws: WakuStore) = - proc handler(conn: Connection, proto: string) {.async.} = - let buf = await conn.readLp(DefaultMaxRpcSize.int) +proc handleQueryRequest*( + self: WakuStore, requestor: PeerId, raw_request: seq[byte] +): Future[seq[byte]] {.async.} = + var res = StoreQueryResponse() - let decodeRes = HistoryRPC.decode(buf) - if decodeRes.isErr(): - error "failed to decode rpc", peerId = $conn.peerId - waku_store_errors.inc(labelValues = [decodeRpcFailure]) - # TODO: Return (BAD_REQUEST, cause: "decode rpc failed") - return + let req = StoreQueryRequest.decode(raw_request).valueOr: + error "failed to decode rpc", peerId = requestor + waku_store_errors.inc(labelValues = [decodeRpcFailure]) - let reqRpc = decodeRes.value + res.statusCode = uint32(ErrorCode.BAD_REQUEST) + res.statusDesc = "decode rpc failed" - if reqRpc.query.isNone(): - error "empty query rpc", peerId = $conn.peerId, requestId = reqRpc.requestId - waku_store_errors.inc(labelValues = [emptyRpcQueryFailure]) - # TODO: Return (BAD_REQUEST, cause: "empty query") - return + return res.encode().buffer - if ws.requestRateLimiter.isSome() and not ws.requestRateLimiter.get().tryConsume(1): - trace "store query request rejected due rate limit exceeded", - peerId = $conn.peerId, requestId = reqRpc.requestId - let error = HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS).toRPC() - let response = HistoryResponseRPC(error: error) - let rpc = HistoryRPC(requestId: reqRpc.requestId, response: some(response)) - await conn.writeLp(rpc.encode().buffer) - waku_service_requests_rejected.inc(labelValues = ["Store"]) - return + let requestId = req.requestId - waku_service_requests.inc(labelValues = ["Store"]) + if self.requestRateLimiter.isSome() and not self.requestRateLimiter.get().tryConsume( + 1 + ): + debug "store query request rejected due rate limit exceeded", + peerId = $requestor, requestId = requestId - let - requestId = reqRpc.requestId - request = reqRpc.query.get().toAPI() + res.statusCode = uint32(ErrorCode.TOO_MANY_REQUESTS) + res.statusDesc = $ErrorCode.TOO_MANY_REQUESTS - info "received history query", - peerId = conn.peerId, requestId = requestId, query = request - waku_store_queries.inc() + waku_service_requests_rejected.inc(labelValues = ["Store"]) - var responseRes: HistoryResult - try: - responseRes = await ws.queryHandler(request) - except Exception: - error "history query failed", - peerId = $conn.peerId, requestId = requestId, error = getCurrentExceptionMsg() + return res.encode().buffer - let error = 
HistoryError(kind: HistoryErrorKind.UNKNOWN).toRPC() - let response = HistoryResponseRPC(error: error) - let rpc = HistoryRPC(requestId: requestId, response: some(response)) - await conn.writeLp(rpc.encode().buffer) - return + waku_service_requests.inc(labelValues = ["Store"]) + + info "received store query request", + peerId = requestor, requestId = requestId, request = req + waku_store_queries.inc() + + let queryResult = await self.requestHandler(req) + + res = queryResult.valueOr: + error "store query failed", + peerId = requestor, requestId = requestId, error = queryResult.error + + res.statusCode = uint32(queryResult.error.kind) + res.statusDesc = $queryResult.error - if responseRes.isErr(): - error "history query failed", - peerId = $conn.peerId, requestId = requestId, error = responseRes.error + return res.encode().buffer - let response = responseRes.toRPC() - let rpc = HistoryRPC(requestId: requestId, response: some(response)) - await conn.writeLp(rpc.encode().buffer) + res.requestId = requestId + res.statusCode = 200 + + info "sending store query response", + peerId = requestor, requestId = requestId, messages = res.messages.len + + return res.encode().buffer + +proc initProtocolHandler(self: WakuStore) = + proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} = + let readRes = catch: + await conn.readLp(DefaultMaxRpcSize.int) + + let reqBuf = readRes.valueOr: + error "Connection read error", error = error.msg return - let response = responseRes.toRPC() + let resBuf = await self.handleQueryRequest(conn.peerId, reqBuf) - info "sending history response", - peerId = conn.peerId, requestId = requestId, messages = response.messages.len + let writeRes = catch: + await conn.writeLp(resBuf) - let rpc = HistoryRPC(requestId: requestId, response: some(response)) - await conn.writeLp(rpc.encode().buffer) + if writeRes.isErr(): + error "Connection write error", error = writeRes.error.msg + return - ws.handler = handler - ws.codec = WakuStoreCodec + self.handler = handler + self.codec = WakuStoreCodec proc new*( T: type WakuStore, peerManager: PeerManager, rng: ref rand.HmacDrbgContext, - queryHandler: HistoryQueryHandler, + requestHandler: StoreQueryRequestHandler, rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), ): T = - # Raise a defect if history query handler is nil - if queryHandler.isNil(): + if requestHandler.isNil(): # TODO use an Option instead ??? 
raise newException(NilAccessDefect, "history query handler is nil") - let ws = WakuStore( + let store = WakuStore( rng: rng, peerManager: peerManager, - queryHandler: queryHandler, + requestHandler: requestHandler, requestRateLimiter: newTokenBucket(rateLimitSetting), ) - ws.initProtocolHandler() - ws + + store.initProtocolHandler() + + return store diff --git a/waku/waku_store/rpc_codec.nim b/waku/waku_store/rpc_codec.nim index 2d5867e00b..cfe7423a26 100644 --- a/waku/waku_store/rpc_codec.nim +++ b/waku/waku_store/rpc_codec.nim @@ -3,256 +3,208 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import std/options, nimcrypto/hash -import ../common/[protobuf, paging], ../waku_core, ./common, ./rpc +import std/options, stew/arrayops, nimcrypto/hash +import ../common/[protobuf, paging], ../waku_core, ./common const DefaultMaxRpcSize* = -1 -## Pagination +### Request ### -proc encode*(index: PagingIndexRPC): ProtoBuffer = - ## Encode an Index object into a ProtoBuffer - ## returns the resultant ProtoBuffer +proc encode*(req: StoreQueryRequest): ProtoBuffer = var pb = initProtoBuffer() - pb.write3(1, index.digest.data) - pb.write3(2, zint64(index.receiverTime)) - pb.write3(3, zint64(index.senderTime)) - pb.write3(4, index.pubsubTopic) - pb.finish3() - - pb - -proc decode*(T: type PagingIndexRPC, buffer: seq[byte]): ProtobufResult[T] = - ## creates and returns an Index object out of buffer - var rpc = PagingIndexRPC() - let pb = initProtoBuffer(buffer) - - var data: seq[byte] - if not ?pb.getField(1, data): - return err(ProtobufError.missingRequiredField("digest")) - else: - var digest = MessageDigest() - for count, b in data: - digest.data[count] = b - - rpc.digest = digest - - var receiverTime: zint64 - if not ?pb.getField(2, receiverTime): - return err(ProtobufError.missingRequiredField("receiver_time")) - else: - rpc.receiverTime = int64(receiverTime) - - var senderTime: zint64 - if not ?pb.getField(3, senderTime): - return err(ProtobufError.missingRequiredField("sender_time")) - else: - rpc.senderTime = int64(senderTime) - - var pubsubTopic: string - if not ?pb.getField(4, pubsubTopic): - return err(ProtobufError.missingRequiredField("pubsub_topic")) - else: - rpc.pubsubTopic = pubsubTopic - - ok(rpc) - -proc encode*(rpc: PagingInfoRPC): ProtoBuffer = - ## Encodes a PagingInfo object into a ProtoBuffer - ## returns the resultant ProtoBuffer - var pb = initProtoBuffer() - - pb.write3(1, rpc.pageSize) - pb.write3(2, rpc.cursor.map(encode)) - pb.write3( - 3, - rpc.direction.map( - proc(d: PagingDirection): uint32 = - uint32(ord(d)) - ), - ) - pb.finish3() - - pb - -proc decode*(T: type PagingInfoRPC, buffer: seq[byte]): ProtobufResult[T] = - ## creates and returns a PagingInfo object out of buffer - var rpc = PagingInfoRPC() - let pb = initProtoBuffer(buffer) - - var pageSize: uint64 - if not ?pb.getField(1, pageSize): - rpc.pageSize = none(uint64) - else: - rpc.pageSize = some(pageSize) - - var cursorBuffer: seq[byte] - if not ?pb.getField(2, cursorBuffer): - rpc.cursor = none(PagingIndexRPC) - else: - let cursor = ?PagingIndexRPC.decode(cursorBuffer) - rpc.cursor = some(cursor) - - var direction: uint32 - if not ?pb.getField(3, direction): - rpc.direction = none(PagingDirection) - else: - rpc.direction = some(PagingDirection(direction)) - - ok(rpc) - -## Wire protocol - -proc encode*(rpc: HistoryContentFilterRPC): ProtoBuffer = - var pb = initProtoBuffer() - - pb.write3(1, rpc.contentTopic) - pb.finish3() - - pb + pb.write3(1, req.requestId) + pb.write3(2, req.includeData) 
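+  # Field numbers for the remaining StoreQueryRequest members written below:
+  #   3 pubsubTopic, 4 contentTopics (repeated), 5 startTime, 6 endTime,
+  #   7 messageHashes (repeated), 8 paginationCursor, 9 paginationForward,
+  #   10 paginationLimit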
-proc decode*(T: type HistoryContentFilterRPC, buffer: seq[byte]): ProtobufResult[T] = - let pb = initProtoBuffer(buffer) - - var contentTopic: ContentTopic - if not ?pb.getField(1, contentTopic): - return err(ProtobufError.missingRequiredField("content_topic")) - ok(HistoryContentFilterRPC(contentTopic: contentTopic)) + pb.write3(3, req.pubsubTopic) -proc encode*(rpc: HistoryQueryRPC): ProtoBuffer = - var pb = initProtoBuffer() - pb.write3(2, rpc.pubsubTopic) + for contentTopic in req.contentTopics: + pb.write3(4, contentTopic) - for filter in rpc.contentFilters: - pb.write3(3, filter.encode()) - - pb.write3(4, rpc.pagingInfo.map(encode)) pb.write3( 5, - rpc.startTime.map( + req.startTime.map( proc(time: int64): zint64 = zint64(time) ), ) pb.write3( 6, - rpc.endTime.map( + req.endTime.map( proc(time: int64): zint64 = zint64(time) ), ) + + for hash in req.messagehashes: + pb.write3(7, hash) + + pb.write3(8, req.paginationCursor) + pb.write3(9, uint32(req.paginationForward)) + pb.write3(10, req.paginationLimit) + pb.finish3() - pb + return pb -proc decode*(T: type HistoryQueryRPC, buffer: seq[byte]): ProtobufResult[T] = - var rpc = HistoryQueryRPC() +proc decode*( + T: type StoreQueryRequest, buffer: seq[byte] +): ProtobufResult[StoreQueryRequest] = + var req = StoreQueryRequest() let pb = initProtoBuffer(buffer) - var pubsubTopic: string - if not ?pb.getField(2, pubsubTopic): - rpc.pubsubTopic = none(string) + if not ?pb.getField(1, req.requestId): + return err(ProtobufError.missingRequiredField("request_id")) + + var inclData: uint + if not ?pb.getField(2, inclData): + req.includeData = false else: - rpc.pubsubTopic = some(pubsubTopic) + req.includeData = inclData == 1 - var buffs: seq[seq[byte]] - if not ?pb.getRepeatedField(3, buffs): - rpc.contentFilters = @[] + var pubsubTopic: string + if not ?pb.getField(3, pubsubTopic): + req.pubsubTopic = none(string) else: - for pb in buffs: - let filter = ?HistoryContentFilterRPC.decode(pb) - rpc.contentFilters.add(filter) + req.pubsubTopic = some(pubsubTopic) - var pagingInfoBuffer: seq[byte] - if not ?pb.getField(4, pagingInfoBuffer): - rpc.pagingInfo = none(PagingInfoRPC) + var topics: seq[string] + if not ?pb.getRepeatedField(4, topics): + req.contentTopics = @[] else: - let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer) - rpc.pagingInfo = some(pagingInfo) + req.contentTopics = topics - var startTime: zint64 - if not ?pb.getField(5, startTime): - rpc.startTime = none(int64) + var start: zint64 + if not ?pb.getField(5, start): + req.startTime = none(Timestamp) else: - rpc.startTime = some(int64(startTime)) + req.startTime = some(Timestamp(int64(start))) var endTime: zint64 if not ?pb.getField(6, endTime): - rpc.endTime = none(int64) + req.endTime = none(Timestamp) else: - rpc.endTime = some(int64(endTime)) + req.endTime = some(Timestamp(int64(endTime))) - ok(rpc) + var buffer: seq[seq[byte]] + if not ?pb.getRepeatedField(7, buffer): + req.messageHashes = @[] + else: + req.messageHashes = newSeqOfCap[WakuMessageHash](buffer.len) + for buf in buffer: + var hash: WakuMessageHash + discard copyFrom[byte](hash, buf) + req.messageHashes.add(hash) -proc encode*(response: HistoryResponseRPC): ProtoBuffer = - var pb = initProtoBuffer() + var cursor: seq[byte] + if not ?pb.getField(8, cursor): + req.paginationCursor = none(WakuMessageHash) + else: + var hash: WakuMessageHash + discard copyFrom[byte](hash, cursor) + req.paginationCursor = some(hash) - for rpc in response.messages: - pb.write3(2, rpc.encode()) + var paging: uint32 + if not 
?pb.getField(9, paging): + req.paginationForward = PagingDirection.default() + else: + req.paginationForward = PagingDirection(paging) - pb.write3(3, response.pagingInfo.map(encode)) - pb.write3(4, uint32(ord(response.error))) - pb.finish3() + var limit: uint64 + if not ?pb.getField(10, limit): + req.paginationLimit = none(uint64) + else: + req.paginationLimit = some(limit) - pb + return ok(req) -proc decode*(T: type HistoryResponseRPC, buffer: seq[byte]): ProtobufResult[T] = - var rpc = HistoryResponseRPC() - let pb = initProtoBuffer(buffer) +### Response ### - var messages: seq[seq[byte]] - if ?pb.getRepeatedField(2, messages): - for pb in messages: - let message = ?WakuMessage.decode(pb) - rpc.messages.add(message) - else: - rpc.messages = @[] +proc encode*(keyValue: WakuMessageKeyValue): ProtoBuffer = + var pb = initProtoBuffer() - var pagingInfoBuffer: seq[byte] - if ?pb.getField(3, pagingInfoBuffer): - let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer) - rpc.pagingInfo = some(pagingInfo) - else: - rpc.pagingInfo = none(PagingInfoRPC) + pb.write3(1, keyValue.messageHash) + pb.write3(2, keyValue.message.encode()) - var error: uint32 - if not ?pb.getField(4, error): - return err(ProtobufError.missingRequiredField("error")) - else: - rpc.error = HistoryResponseErrorRPC.parse(error) + pb.finish3() - ok(rpc) + return pb -proc encode*(rpc: HistoryRPC): ProtoBuffer = +proc encode*(res: StoreQueryResponse): ProtoBuffer = var pb = initProtoBuffer() - pb.write3(1, rpc.requestId) - pb.write3(2, rpc.query.map(encode)) - pb.write3(3, rpc.response.map(encode)) + pb.write3(1, res.requestId) + + pb.write3(2, res.statusCode) + pb.write3(3, res.statusDesc) + + for msg in res.messages: + pb.write3(4, msg.encode()) + + pb.write3(5, res.paginationCursor) + pb.finish3() - pb + return pb -proc decode*(T: type HistoryRPC, buffer: seq[byte]): ProtobufResult[T] = - var rpc = HistoryRPC() +proc decode*( + T: type WakuMessageKeyValue, buffer: seq[byte] +): ProtobufResult[WakuMessageKeyValue] = + var keyValue = WakuMessageKeyValue() let pb = initProtoBuffer(buffer) - if not ?pb.getField(1, rpc.requestId): + var buf: seq[byte] + if not ?pb.getField(1, buf): + return err(ProtobufError.missingRequiredField("message_hash")) + else: + var hash: WakuMessageHash + discard copyFrom[byte](hash, buf) + keyValue.messagehash = hash + + var proto: ProtoBuffer + if not ?pb.getField(2, proto): + return err(ProtobufError.missingRequiredField("message")) + else: + keyValue.message = ?WakuMessage.decode(proto.buffer) + + return ok(keyValue) + +proc decode*( + T: type StoreQueryResponse, buffer: seq[byte] +): ProtobufResult[StoreQueryResponse] = + var res = StoreQueryResponse() + let pb = initProtoBuffer(buffer) + + if not ?pb.getField(1, res.requestId): return err(ProtobufError.missingRequiredField("request_id")) - var queryBuffer: seq[byte] - if not ?pb.getField(2, queryBuffer): - rpc.query = none(HistoryQueryRPC) + var code: uint32 + if not ?pb.getField(2, code): + return err(ProtobufError.missingRequiredField("status_code")) + else: + res.statusCode = code + + var desc: string + if not ?pb.getField(3, desc): + return err(ProtobufError.missingRequiredField("status_desc")) + else: + res.statusDesc = desc + + var buffer: seq[seq[byte]] + if not ?pb.getRepeatedField(4, buffer): + res.messages = @[] else: - let query = ?HistoryQueryRPC.decode(queryBuffer) - rpc.query = some(query) + res.messages = newSeqOfCap[WakuMessageKeyValue](buffer.len) + for buf in buffer: + let msg = ?WakuMessageKeyValue.decode(buf) + 
res.messages.add(msg) - var responseBuffer: seq[byte] - if not ?pb.getField(3, responseBuffer): - rpc.response = none(HistoryResponseRPC) + var cursor: seq[byte] + if not ?pb.getField(5, cursor): + res.paginationCursor = none(WakuMessageHash) else: - let response = ?HistoryResponseRPC.decode(responseBuffer) - rpc.response = some(response) + var hash: WakuMessageHash + discard copyFrom[byte](hash, cursor) + res.paginationCursor = some(hash) - ok(rpc) + return ok(res) diff --git a/waku/waku_store/self_req_handler.nim b/waku/waku_store/self_req_handler.nim index 183de1223c..426ae72b7b 100644 --- a/waku/waku_store/self_req_handler.nim +++ b/waku/waku_store/self_req_handler.nim @@ -13,19 +13,25 @@ ## stored by that local store node. ## -import stew/results, chronos, chronicles +import stew/results, chronos import ./protocol, ./common proc handleSelfStoreRequest*( - self: WakuStore, histQuery: HistoryQuery -): Future[WakuStoreResult[HistoryResponse]] {.async.} = + self: WakuStore, req: StoreQueryRequest +): Future[WakuStoreResult[StoreQueryResponse]] {.async.} = ## Handles the store requests made by the node to itself. ## Normally used in REST-store requests - try: - let resp: HistoryResponse = (await self.queryHandler(histQuery)).valueOr: - return err("error in handleSelfStoreRequest: " & $error) + let handlerResult = catch: + await self.requestHandler(req) - return WakuStoreResult[HistoryResponse].ok(resp) - except Exception: - return err("exception in handleSelfStoreRequest: " & getCurrentExceptionMsg()) + let resResult = + if handlerResult.isErr(): + return err("exception in handleSelfStoreRequest: " & handlerResult.error.msg) + else: + handlerResult.get() + + let res = resResult.valueOr: + return err("error in handleSelfStoreRequest: " & $error) + + return ok(res) diff --git a/waku/waku_store_legacy.nim b/waku/waku_store_legacy.nim new file mode 100644 index 0000000000..9dac194c73 --- /dev/null +++ b/waku/waku_store_legacy.nim @@ -0,0 +1,3 @@ +import ./waku_store_legacy/common, ./waku_store_legacy/protocol + +export common, protocol diff --git a/waku/waku_store/README.md b/waku/waku_store_legacy/README.md similarity index 100% rename from waku/waku_store/README.md rename to waku/waku_store_legacy/README.md diff --git a/waku/waku_store_legacy/client.nim b/waku/waku_store_legacy/client.nim new file mode 100644 index 0000000000..1ad0069e9b --- /dev/null +++ b/waku/waku_store_legacy/client.nim @@ -0,0 +1,242 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import std/options, stew/results, chronicles, chronos, metrics, bearssl/rand +import + ../node/peer_manager, + ../utils/requests, + ./protocol_metrics, + ./common, + ./rpc, + ./rpc_codec + +when defined(waku_exp_store_resume): + import std/[sequtils, times] + import ../waku_archive + import ../waku_core/message/digest + +logScope: + topics = "waku legacy store client" + +const DefaultPageSize*: uint = 20 + # A recommended default number of waku messages per page + +type WakuStoreClient* = ref object + peerManager: PeerManager + rng: ref rand.HmacDrbgContext + + # TODO: Move outside of the client + when defined(waku_exp_store_resume): + store: ArchiveDriver + +proc new*( + T: type WakuStoreClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext +): T = + WakuStoreClient(peerManager: peerManager, rng: rng) + +proc sendHistoryQueryRPC( + w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo +): Future[HistoryResult] {.async, gcsafe.} = + let connOpt = await 
w.peerManager.dialPeer(peer, WakuStoreCodec) + if connOpt.isNone(): + waku_legacy_store_errors.inc(labelValues = [dialFailure]) + return err(HistoryError(kind: HistoryErrorKind.PEER_DIAL_FAILURE, address: $peer)) + + let connection = connOpt.get() + + let reqRpc = HistoryRPC(requestId: generateRequestId(w.rng), query: some(req.toRPC())) + await connection.writeLP(reqRpc.encode().buffer) + + #TODO: I see a challenge here, if storeNode uses a different MaxRPCSize this read will fail. + # Need to find a workaround for this. + let buf = await connection.readLp(DefaultMaxRpcSize.int) + let respDecodeRes = HistoryRPC.decode(buf) + if respDecodeRes.isErr(): + waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure]) + return + err(HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: decodeRpcFailure)) + + let respRpc = respDecodeRes.get() + + # Disabled ,for now, since the default response is a possible case (no messages, pagesize = 0, error = NONE(0)) + # TODO: Rework the RPC protocol to differentiate the default value from an empty value (e.g., status = 200 (OK)) + # and rework the protobuf parsing to return Option[T] when empty values are received + if respRpc.response.isNone(): + waku_legacy_store_errors.inc(labelValues = [emptyRpcResponseFailure]) + return err( + HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: emptyRpcResponseFailure) + ) + + let resp = respRpc.response.get() + + return resp.toAPI() + +proc query*( + w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo +): Future[HistoryResult] {.async, gcsafe.} = + return await w.sendHistoryQueryRPC(req, peer) + +# TODO: Move outside of the client +when defined(waku_exp_store_resume): + ## Resume store + + const StoreResumeTimeWindowOffset: Timestamp = getNanosecondTime(20) + ## Adjust the time window with an offset of 20 seconds + + proc new*( + T: type WakuStoreClient, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, + store: ArchiveDriver, + ): T = + WakuStoreClient(peerManager: peerManager, rng: rng, store: store) + + proc queryAll( + w: WakuStoreClient, query: HistoryQuery, peer: RemotePeerInfo + ): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} = + ## A thin wrapper for query. Sends the query to the given peer. when the query has a valid pagingInfo, + ## it retrieves the historical messages in pages. + ## Returns all the fetched messages, if error occurs, returns an error string + + # Make a copy of the query + var req = query + + var messageList: seq[WakuMessage] = @[] + + while true: + let queryRes = await w.query(req, peer) + if queryRes.isErr(): + return err($queryRes.error) + + let response = queryRes.get() + + messageList.add(response.messages) + + # Check whether it is the last page + if response.cursor.isNone(): + break + + # Update paging cursor + req.cursor = response.cursor + + return ok(messageList) + + proc queryLoop( + w: WakuStoreClient, req: HistoryQuery, peers: seq[RemotePeerInfo] + ): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} = + ## Loops through the peers candidate list in order and sends the query to each + ## + ## Once all responses have been received, the retrieved messages are consolidated into one deduplicated list. + ## if no messages have been retrieved, the returned future will resolve into a result holding an empty seq. 
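+    # One queryAll future is started per candidate peer; once all of them have
+    # completed, the per-peer results are flattened and deduplicated below.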
+ let queryFuturesList = peers.mapIt(w.queryAll(req, it)) + + await allFutures(queryFuturesList) + + let messagesList = queryFuturesList + .map( + proc(fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] = + try: + # fut.read() can raise a CatchableError + # These futures have been awaited before using allFutures(). Call completed() just as a sanity check. + if not fut.completed() or fut.read().isErr(): + return @[] + + fut.read().value + except CatchableError: + return @[] + ) + .concat() + .deduplicate() + + return ok(messagesList) + + proc put( + store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage + ): Result[void, string] = + let + digest = waku_archive.computeDigest(message) + messageHash = computeMessageHash(pubsubTopic, message) + receivedTime = + if message.timestamp > 0: + message.timestamp + else: + getNanosecondTime(getTime().toUnixFloat()) + + store.put(pubsubTopic, message, digest, messageHash, receivedTime) + + proc resume*( + w: WakuStoreClient, + peerList = none(seq[RemotePeerInfo]), + pageSize = DefaultPageSize, + pubsubTopic = DefaultPubsubTopic, + ): Future[WakuStoreResult[uint64]] {.async, gcsafe.} = + ## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku store node has been online + ## messages are stored in the store node's messages field and in the message db + ## the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message + ## an offset of 20 second is added to the time window to count for nodes asynchrony + ## peerList indicates the list of peers to query from. + ## The history is fetched from all available peers in this list and then consolidated into one deduplicated list. + ## Such candidates should be found through a discovery method (to be developed). + ## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from. + ## The history gets fetched successfully if the dialed peer has been online during the queried time window. + ## the resume proc returns the number of retrieved messages if no error occurs, otherwise returns the error string + + # If store has not been provided, don't even try + if w.store.isNil(): + return err("store not provided (nil)") + + # NOTE: Original implementation is based on the message's sender timestamp. At the moment + # of writing, the sqlite store implementation returns the last message's receiver + # timestamp. 
+ # lastSeenTime = lastSeenItem.get().msg.timestamp + let + lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0)) + now = getNanosecondTime(getTime().toUnixFloat()) + + debug "resuming with offline time window", + lastSeenTime = lastSeenTime, currentTime = now + + let + queryEndTime = now + StoreResumeTimeWindowOffset + queryStartTime = max(lastSeenTime - StoreResumeTimeWindowOffset, 0) + + let req = HistoryQuery( + pubsubTopic: some(pubsubTopic), + startTime: some(queryStartTime), + endTime: some(queryEndTime), + pageSize: uint64(pageSize), + direction: default(), + ) + + var res: WakuStoreResult[seq[WakuMessage]] + if peerList.isSome(): + debug "trying the candidate list to fetch the history" + res = await w.queryLoop(req, peerList.get()) + else: + debug "no candidate list is provided, selecting a random peer" + # if no peerList is set then query from one of the peers stored in the peer manager + let peerOpt = w.peerManager.selectPeer(WakuStoreCodec) + if peerOpt.isNone(): + warn "no suitable remote peers" + waku_legacy_store_errors.inc(labelValues = [peerNotFoundFailure]) + return err("no suitable remote peers") + + debug "a peer is selected from peer manager" + res = await w.queryAll(req, peerOpt.get()) + + if res.isErr(): + debug "failed to resume the history" + return err("failed to resume the history") + + # Save the retrieved messages in the store + var added: uint = 0 + for msg in res.get(): + let putStoreRes = w.store.put(pubsubTopic, msg) + if putStoreRes.isErr(): + continue + + added.inc() + + return ok(added) diff --git a/waku/waku_store_legacy/common.nim b/waku/waku_store_legacy/common.nim new file mode 100644 index 0000000000..67af41a68a --- /dev/null +++ b/waku/waku_store_legacy/common.nim @@ -0,0 +1,109 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import std/[options, sequtils], stew/results, stew/byteutils, nimcrypto/sha2 +import ../waku_core, ../common/paging + +const + WakuStoreCodec* = "/vac/waku/store/2.0.0-beta4" + + DefaultPageSize*: uint64 = 20 + + MaxPageSize*: uint64 = 100 + +type WakuStoreResult*[T] = Result[T, string] + +## Waku message digest + +type MessageDigest* = MDigest[256] + +proc computeDigest*(msg: WakuMessage): MessageDigest = + var ctx: sha256 + ctx.init() + defer: + ctx.clear() + + ctx.update(msg.contentTopic.toBytes()) + ctx.update(msg.payload) + + # Computes the hash + return ctx.finish() + +## Public API types + +type + HistoryCursor* = object + pubsubTopic*: PubsubTopic + senderTime*: Timestamp + storeTime*: Timestamp + digest*: MessageDigest + + HistoryQuery* = object + pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + cursor*: Option[HistoryCursor] + startTime*: Option[Timestamp] + endTime*: Option[Timestamp] + pageSize*: uint64 + direction*: PagingDirection + + HistoryResponse* = object + messages*: seq[WakuMessage] + cursor*: Option[HistoryCursor] + + HistoryErrorKind* {.pure.} = enum + UNKNOWN = uint32(000) + BAD_RESPONSE = uint32(300) + BAD_REQUEST = uint32(400) + TOO_MANY_REQUESTS = uint32(429) + SERVICE_UNAVAILABLE = uint32(503) + PEER_DIAL_FAILURE = uint32(504) + + HistoryError* = object + case kind*: HistoryErrorKind + of PEER_DIAL_FAILURE: + address*: string + of BAD_RESPONSE, BAD_REQUEST: + cause*: string + else: + discard + + HistoryResult* = Result[HistoryResponse, HistoryError] + +proc parse*(T: type HistoryErrorKind, kind: uint32): T = + case kind + of 000, 200, 300, 400, 429, 503: + HistoryErrorKind(kind) + else: + 
HistoryErrorKind.UNKNOWN + +proc `$`*(err: HistoryError): string = + case err.kind + of HistoryErrorKind.PEER_DIAL_FAILURE: + "PEER_DIAL_FAILURE: " & err.address + of HistoryErrorKind.BAD_RESPONSE: + "BAD_RESPONSE: " & err.cause + of HistoryErrorKind.BAD_REQUEST: + "BAD_REQUEST: " & err.cause + of HistoryErrorKind.TOO_MANY_REQUESTS: + "TOO_MANY_REQUESTS" + of HistoryErrorKind.SERVICE_UNAVAILABLE: + "SERVICE_UNAVAILABLE" + of HistoryErrorKind.UNKNOWN: + "UNKNOWN" + +proc checkHistCursor*(self: HistoryCursor): Result[void, HistoryError] = + if self.pubsubTopic.len == 0: + return err(HistoryError(kind: BAD_REQUEST, cause: "empty pubsubTopic")) + if self.senderTime == 0: + return err(HistoryError(kind: BAD_REQUEST, cause: "invalid senderTime")) + if self.storeTime == 0: + return err(HistoryError(kind: BAD_REQUEST, cause: "invalid storeTime")) + if self.digest.data.all( + proc(x: byte): bool = + x == 0 + ): + return err(HistoryError(kind: BAD_REQUEST, cause: "empty digest")) + return ok() diff --git a/waku/waku_store_legacy/protocol.nim b/waku/waku_store_legacy/protocol.nim new file mode 100644 index 0000000000..c50d8f9388 --- /dev/null +++ b/waku/waku_store_legacy/protocol.nim @@ -0,0 +1,137 @@ +## Waku Store protocol for historical messaging support. +## See spec for more details: +## https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-store.md +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/options, + stew/results, + chronicles, + chronos, + bearssl/rand, + libp2p/crypto/crypto, + libp2p/protocols/protocol, + libp2p/protobuf/minprotobuf, + libp2p/stream/connection, + metrics +import + ../waku_core, + ../node/peer_manager, + ./common, + ./rpc, + ./rpc_codec, + ./protocol_metrics, + ../common/ratelimit, + ../common/waku_service_metrics + +logScope: + topics = "waku legacy store" + +const MaxMessageTimestampVariance* = getNanoSecondTime(20) + # 20 seconds maximum allowable sender timestamp "drift" + +type HistoryQueryHandler* = + proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} + +type WakuStore* = ref object of LPProtocol + peerManager: PeerManager + rng: ref rand.HmacDrbgContext + queryHandler*: HistoryQueryHandler + requestRateLimiter*: Option[TokenBucket] + +## Protocol + +proc initProtocolHandler(ws: WakuStore) = + proc handler(conn: Connection, proto: string) {.async.} = + let buf = await conn.readLp(DefaultMaxRpcSize.int) + + let decodeRes = HistoryRPC.decode(buf) + if decodeRes.isErr(): + error "failed to decode rpc", peerId = $conn.peerId + waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure]) + # TODO: Return (BAD_REQUEST, cause: "decode rpc failed") + return + + let reqRpc = decodeRes.value + + if reqRpc.query.isNone(): + error "empty query rpc", peerId = $conn.peerId, requestId = reqRpc.requestId + waku_legacy_store_errors.inc(labelValues = [emptyRpcQueryFailure]) + # TODO: Return (BAD_REQUEST, cause: "empty query") + return + + if ws.requestRateLimiter.isSome() and not ws.requestRateLimiter.get().tryConsume(1): + trace "store query request rejected due rate limit exceeded", + peerId = $conn.peerId, requestId = reqRpc.requestId + let error = HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS).toRPC() + let response = HistoryResponseRPC(error: error) + let rpc = HistoryRPC(requestId: reqRpc.requestId, response: some(response)) + await conn.writeLp(rpc.encode().buffer) + waku_service_requests_rejected.inc(labelValues = ["Store"]) + return + + waku_service_requests.inc(labelValues = 
["Store"]) + + let + requestId = reqRpc.requestId + request = reqRpc.query.get().toAPI() + + info "received history query", + peerId = conn.peerId, requestId = requestId, query = request + waku_legacy_store_queries.inc() + + var responseRes: HistoryResult + try: + responseRes = await ws.queryHandler(request) + except Exception: + error "history query failed", + peerId = $conn.peerId, requestId = requestId, error = getCurrentExceptionMsg() + + let error = HistoryError(kind: HistoryErrorKind.UNKNOWN).toRPC() + let response = HistoryResponseRPC(error: error) + let rpc = HistoryRPC(requestId: requestId, response: some(response)) + await conn.writeLp(rpc.encode().buffer) + return + + if responseRes.isErr(): + error "history query failed", + peerId = $conn.peerId, requestId = requestId, error = responseRes.error + + let response = responseRes.toRPC() + let rpc = HistoryRPC(requestId: requestId, response: some(response)) + await conn.writeLp(rpc.encode().buffer) + return + + let response = responseRes.toRPC() + + info "sending history response", + peerId = conn.peerId, requestId = requestId, messages = response.messages.len + + let rpc = HistoryRPC(requestId: requestId, response: some(response)) + await conn.writeLp(rpc.encode().buffer) + + ws.handler = handler + ws.codec = WakuStoreCodec + +proc new*( + T: type WakuStore, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, + queryHandler: HistoryQueryHandler, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): T = + # Raise a defect if history query handler is nil + if queryHandler.isNil(): + raise newException(NilAccessDefect, "history query handler is nil") + + let ws = WakuStore( + rng: rng, + peerManager: peerManager, + queryHandler: queryHandler, + requestRateLimiter: newTokenBucket(rateLimitSetting), + ) + ws.initProtocolHandler() + ws diff --git a/waku/waku_store_legacy/protocol_metrics.nim b/waku/waku_store_legacy/protocol_metrics.nim new file mode 100644 index 0000000000..d854150232 --- /dev/null +++ b/waku/waku_store_legacy/protocol_metrics.nim @@ -0,0 +1,18 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import metrics + +declarePublicGauge waku_legacy_store_errors, + "number of legacy store protocol errors", ["type"] +declarePublicGauge waku_legacy_store_queries, "number of legacy store queries received" + +# Error types (metric label values) +const + dialFailure* = "dial_failure" + decodeRpcFailure* = "decode_rpc_failure" + peerNotFoundFailure* = "peer_not_found_failure" + emptyRpcQueryFailure* = "empty_rpc_query_failure" + emptyRpcResponseFailure* = "empty_rpc_response_failure" diff --git a/waku/waku_store/rpc.nim b/waku/waku_store_legacy/rpc.nim similarity index 100% rename from waku/waku_store/rpc.nim rename to waku/waku_store_legacy/rpc.nim diff --git a/waku/waku_store_legacy/rpc_codec.nim b/waku/waku_store_legacy/rpc_codec.nim new file mode 100644 index 0000000000..2d5867e00b --- /dev/null +++ b/waku/waku_store_legacy/rpc_codec.nim @@ -0,0 +1,258 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import std/options, nimcrypto/hash +import ../common/[protobuf, paging], ../waku_core, ./common, ./rpc + +const DefaultMaxRpcSize* = -1 + +## Pagination + +proc encode*(index: PagingIndexRPC): ProtoBuffer = + ## Encode an Index object into a ProtoBuffer + ## returns the resultant ProtoBuffer + var pb = initProtoBuffer() + + pb.write3(1, index.digest.data) + pb.write3(2, zint64(index.receiverTime)) + 
+  pb.write3(3, zint64(index.senderTime))
+  pb.write3(4, index.pubsubTopic)
+  pb.finish3()
+
+  pb
+
+proc decode*(T: type PagingIndexRPC, buffer: seq[byte]): ProtobufResult[T] =
+  ## creates and returns an Index object out of buffer
+  var rpc = PagingIndexRPC()
+  let pb = initProtoBuffer(buffer)
+
+  var data: seq[byte]
+  if not ?pb.getField(1, data):
+    return err(ProtobufError.missingRequiredField("digest"))
+  else:
+    var digest = MessageDigest()
+    for count, b in data:
+      digest.data[count] = b
+
+    rpc.digest = digest
+
+  var receiverTime: zint64
+  if not ?pb.getField(2, receiverTime):
+    return err(ProtobufError.missingRequiredField("receiver_time"))
+  else:
+    rpc.receiverTime = int64(receiverTime)
+
+  var senderTime: zint64
+  if not ?pb.getField(3, senderTime):
+    return err(ProtobufError.missingRequiredField("sender_time"))
+  else:
+    rpc.senderTime = int64(senderTime)
+
+  var pubsubTopic: string
+  if not ?pb.getField(4, pubsubTopic):
+    return err(ProtobufError.missingRequiredField("pubsub_topic"))
+  else:
+    rpc.pubsubTopic = pubsubTopic
+
+  ok(rpc)
+
+proc encode*(rpc: PagingInfoRPC): ProtoBuffer =
+  ## Encodes a PagingInfo object into a ProtoBuffer
+  ## returns the resultant ProtoBuffer
+  var pb = initProtoBuffer()
+
+  pb.write3(1, rpc.pageSize)
+  pb.write3(2, rpc.cursor.map(encode))
+  pb.write3(
+    3,
+    rpc.direction.map(
+      proc(d: PagingDirection): uint32 =
+        uint32(ord(d))
+    ),
+  )
+  pb.finish3()
+
+  pb
+
+proc decode*(T: type PagingInfoRPC, buffer: seq[byte]): ProtobufResult[T] =
+  ## creates and returns a PagingInfo object out of buffer
+  var rpc = PagingInfoRPC()
+  let pb = initProtoBuffer(buffer)
+
+  var pageSize: uint64
+  if not ?pb.getField(1, pageSize):
+    rpc.pageSize = none(uint64)
+  else:
+    rpc.pageSize = some(pageSize)
+
+  var cursorBuffer: seq[byte]
+  if not ?pb.getField(2, cursorBuffer):
+    rpc.cursor = none(PagingIndexRPC)
+  else:
+    let cursor = ?PagingIndexRPC.decode(cursorBuffer)
+    rpc.cursor = some(cursor)
+
+  var direction: uint32
+  if not ?pb.getField(3, direction):
+    rpc.direction = none(PagingDirection)
+  else:
+    rpc.direction = some(PagingDirection(direction))
+
+  ok(rpc)
+
+## Wire protocol
+
+proc encode*(rpc: HistoryContentFilterRPC): ProtoBuffer =
+  var pb = initProtoBuffer()
+
+  pb.write3(1, rpc.contentTopic)
+  pb.finish3()
+
+  pb
+
+proc decode*(T: type HistoryContentFilterRPC, buffer: seq[byte]): ProtobufResult[T] =
+  let pb = initProtoBuffer(buffer)
+
+  var contentTopic: ContentTopic
+  if not ?pb.getField(1, contentTopic):
+    return err(ProtobufError.missingRequiredField("content_topic"))
+  ok(HistoryContentFilterRPC(contentTopic: contentTopic))
+
+proc encode*(rpc: HistoryQueryRPC): ProtoBuffer =
+  var pb = initProtoBuffer()
+  pb.write3(2, rpc.pubsubTopic)
+
+  for filter in rpc.contentFilters:
+    pb.write3(3, filter.encode())
+
+  pb.write3(4, rpc.pagingInfo.map(encode))
+  pb.write3(
+    5,
+    rpc.startTime.map(
+      proc(time: int64): zint64 =
+        zint64(time)
+    ),
+  )
+  pb.write3(
+    6,
+    rpc.endTime.map(
+      proc(time: int64): zint64 =
+        zint64(time)
+    ),
+  )
+  pb.finish3()
+
+  pb
+
+proc decode*(T: type HistoryQueryRPC, buffer: seq[byte]): ProtobufResult[T] =
+  var rpc = HistoryQueryRPC()
+  let pb = initProtoBuffer(buffer)
+
+  var pubsubTopic: string
+  if not ?pb.getField(2, pubsubTopic):
+    rpc.pubsubTopic = none(string)
+  else:
+    rpc.pubsubTopic = some(pubsubTopic)
+
+  var buffs: seq[seq[byte]]
+  if not ?pb.getRepeatedField(3, buffs):
+    rpc.contentFilters = @[]
+  else:
+    for pb in buffs:
+      let filter = ?HistoryContentFilterRPC.decode(pb)
+      rpc.contentFilters.add(filter)
+
+  var pagingInfoBuffer: seq[byte]
+  if not ?pb.getField(4, pagingInfoBuffer):
+    rpc.pagingInfo = none(PagingInfoRPC)
+  else:
+    let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer)
+    rpc.pagingInfo = some(pagingInfo)
+
+  var startTime: zint64
+  if not ?pb.getField(5, startTime):
+    rpc.startTime = none(int64)
+  else:
+    rpc.startTime = some(int64(startTime))
+
+  var endTime: zint64
+  if not ?pb.getField(6, endTime):
+    rpc.endTime = none(int64)
+  else:
+    rpc.endTime = some(int64(endTime))
+
+  ok(rpc)
+
+proc encode*(response: HistoryResponseRPC): ProtoBuffer =
+  var pb = initProtoBuffer()
+
+  for rpc in response.messages:
+    pb.write3(2, rpc.encode())
+
+  pb.write3(3, response.pagingInfo.map(encode))
+  pb.write3(4, uint32(ord(response.error)))
+  pb.finish3()
+
+  pb
+
+proc decode*(T: type HistoryResponseRPC, buffer: seq[byte]): ProtobufResult[T] =
+  var rpc = HistoryResponseRPC()
+  let pb = initProtoBuffer(buffer)
+
+  var messages: seq[seq[byte]]
+  if ?pb.getRepeatedField(2, messages):
+    for pb in messages:
+      let message = ?WakuMessage.decode(pb)
+      rpc.messages.add(message)
+  else:
+    rpc.messages = @[]
+
+  var pagingInfoBuffer: seq[byte]
+  if ?pb.getField(3, pagingInfoBuffer):
+    let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer)
+    rpc.pagingInfo = some(pagingInfo)
+  else:
+    rpc.pagingInfo = none(PagingInfoRPC)
+
+  var error: uint32
+  if not ?pb.getField(4, error):
+    return err(ProtobufError.missingRequiredField("error"))
+  else:
+    rpc.error = HistoryResponseErrorRPC.parse(error)
+
+  ok(rpc)
+
+proc encode*(rpc: HistoryRPC): ProtoBuffer =
+  var pb = initProtoBuffer()
+
+  pb.write3(1, rpc.requestId)
+  pb.write3(2, rpc.query.map(encode))
+  pb.write3(3, rpc.response.map(encode))
+  pb.finish3()
+
+  pb
+
+proc decode*(T: type HistoryRPC, buffer: seq[byte]): ProtobufResult[T] =
+  var rpc = HistoryRPC()
+  let pb = initProtoBuffer(buffer)
+
+  if not ?pb.getField(1, rpc.requestId):
+    return err(ProtobufError.missingRequiredField("request_id"))
+
+  var queryBuffer: seq[byte]
+  if not ?pb.getField(2, queryBuffer):
+    rpc.query = none(HistoryQueryRPC)
+  else:
+    let query = ?HistoryQueryRPC.decode(queryBuffer)
+    rpc.query = some(query)
+
+  var responseBuffer: seq[byte]
+  if not ?pb.getField(3, responseBuffer):
+    rpc.response = none(HistoryResponseRPC)
+  else:
+    let response = ?HistoryResponseRPC.decode(responseBuffer)
+    rpc.response = some(response)
+
+  ok(rpc)
diff --git a/waku/waku_store_legacy/self_req_handler.nim b/waku/waku_store_legacy/self_req_handler.nim
new file mode 100644
index 0000000000..183de1223c
--- /dev/null
+++ b/waku/waku_store_legacy/self_req_handler.nim
@@ -0,0 +1,31 @@
+##
+## This file is aimed at serving requests that come directly
+## from the 'self' node. It is expected to handle the store requests that
+## come from the REST-store endpoint when those requests don't indicate
+## any store-peer address.
+##
+## Notice that REST-store requests normally assume that the REST
+## server is acting as a store-client. In this module, we allow such a
+## REST-store node to act as a store-server as well by retrieving
+## its own stored messages. The typical use case for that is
+## `nwaku-compose`, which spawns a Waku node connected to a local
+## database, where the user is interested in retrieving the messages
+## stored by that local store node.
+##
+
+import stew/results, chronos, chronicles
+import ./protocol, ./common
+
+proc handleSelfStoreRequest*(
+    self: WakuStore, histQuery: HistoryQuery
+): Future[WakuStoreResult[HistoryResponse]] {.async.} =
+  ## Handles the store requests made by the node to itself.
+  ## Normally used to serve REST-store requests.
+
+  try:
+    let resp: HistoryResponse = (await self.queryHandler(histQuery)).valueOr:
+      return err("error in handleSelfStoreRequest: " & $error)
+
+    return WakuStoreResult[HistoryResponse].ok(resp)
+  except Exception:
+    # Catch-all: surface any exception raised by the handler as an error result
+    return err("exception in handleSelfStoreRequest: " & getCurrentExceptionMsg())
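A minimal usage sketch of the self-request path introduced above (illustrative only, not part of the diff): it assumes a node that has already mounted the legacy store protocol with a working queryHandler backed by the local archive. The proc and variable names (queryOwnArchive, wakuStore, selfQuery) and the content topic are hypothetical, and the import paths need to be adjusted to where the snippet lives in the tree.

# Hypothetical example; not part of the nwaku sources.
import chronos, stew/results
import waku/waku_store_legacy/[common, protocol, self_req_handler], waku/common/paging

proc queryOwnArchive(wakuStore: WakuStore) {.async.} =
  # Build a legacy HistoryQuery; no store-peer address is involved,
  # so the node will answer from its own archive.
  let selfQuery = HistoryQuery(
    contentTopics: @["/waku/2/default-content/proto"], # example content topic
    pageSize: DefaultPageSize,
    direction: PagingDirection.BACKWARD,
  )

  let res = await wakuStore.handleSelfStoreRequest(selfQuery)
  if res.isErr():
    echo "self store request failed: ", res.error
    return

  echo "retrieved ", res.get().messages.len, " locally stored messages"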