diff --git a/tests/waku_archive/archive_utils.nim b/tests/waku_archive/archive_utils.nim
index df2f80846e..09bb4d6e92 100644
--- a/tests/waku_archive/archive_utils.nim
+++ b/tests/waku_archive/archive_utils.nim
@@ -7,7 +7,6 @@ import
     node/peer_manager,
     waku_core,
     waku_archive,
-    waku_archive/common,
     waku_archive/driver/sqlite_driver,
     common/databases/db_sqlite,
   ],
@@ -23,26 +22,12 @@ proc newSqliteArchiveDriver*(): ArchiveDriver =
 proc newWakuArchive*(driver: ArchiveDriver): WakuArchive =
   WakuArchive.new(driver).get()
 
-proc computeArchiveCursor*(
-    pubsubTopic: PubsubTopic, message: WakuMessage
-): ArchiveCursor =
-  ArchiveCursor(
-    pubsubTopic: pubsubTopic,
-    senderTime: message.timestamp,
-    storeTime: message.timestamp,
-    digest: computeDigest(message),
-    hash: computeMessageHash(pubsubTopic, message),
-  )
-
 proc put*(
     driver: ArchiveDriver, pubsubTopic: PubSubTopic, msgList: seq[WakuMessage]
 ): ArchiveDriver =
   for msg in msgList:
-    let
-      msgDigest = computeDigest(msg)
-      msgHash = computeMessageHash(pubsubTopic, msg)
-
-    _ = waitFor driver.put(pubsubTopic, msg, msgDigest, msgHash, msg.timestamp)
-      # discard crashes
+    discard waitFor driver.put(computeMessageHash(pubsubTopic, msg), pubsubTopic, msg)
+
   return driver
 
 proc newArchiveDriverWithMessages*(
diff --git a/tests/waku_archive/test_driver_postgres.nim b/tests/waku_archive/test_driver_postgres.nim
index 75afb226fa..a5a3e5b04f 100644
--- a/tests/waku_archive/test_driver_postgres.nim
+++ b/tests/waku_archive/test_driver_postgres.nim
@@ -10,15 +10,6 @@ import
   ../testlib/testasync,
   ../testlib/postgres
 
-proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
-  ArchiveCursor(
-    pubsubTopic: pubsubTopic,
-    senderTime: message.timestamp,
-    storeTime: message.timestamp,
-    digest: computeDigest(message),
-    hash: computeMessageHash(pubsubTopic, message),
-  )
-
 suite "Postgres driver":
   ## Unique driver instance
   var driver {.threadvar.}: PostgresDriver
@@ -58,11 +49,8 @@ suite "Postgres driver":
 
     let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta)
 
-    let computedDigest = computeDigest(msg)
-    let computedHash = computeMessageHash(DefaultPubsubTopic, msg)
-
     let putRes = await driver.put(
-      DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp
+      computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
     )
     assert putRes.isOk(), putRes.error
 
@@ -70,12 +58,10 @@ suite "Postgres driver":
 
     assert storedMsg.len == 1
 
-    let (pubsubTopic, actualMsg, digest, _, hash) = storedMsg[0]
+    let (_, pubsubTopic, actualMsg) = storedMsg[0]
     assert actualMsg.contentTopic == contentTopic
     assert pubsubTopic == DefaultPubsubTopic
-    assert toHex(computedDigest.data) == toHex(digest)
     assert toHex(actualMsg.payload) == toHex(msg.payload)
-    assert toHex(computedHash) == toHex(hash)
     assert toHex(actualMsg.meta) == toHex(msg.meta)
 
   asyncTest "Insert and query message":
@@ -86,24 +72,14 @@ suite "Postgres driver":
 
     let msg1 = fakeWakuMessage(contentTopic = contentTopic1)
 
-    var putRes = await driver.put(
-      pubsubTopic1,
-      msg1,
-      computeDigest(msg1),
-      computeMessageHash(pubsubTopic1, msg1),
-      msg1.timestamp,
-    )
+    var putRes =
+      await driver.put(computeMessageHash(pubsubTopic1, msg1), pubsubTopic1, msg1)
     assert putRes.isOk(), putRes.error
 
     let msg2 = fakeWakuMessage(contentTopic = contentTopic2)
 
-    putRes = await driver.put(
-      pubsubTopic2,
-      msg2,
-      computeDigest(msg2),
-      computeMessageHash(pubsubTopic2, msg2),
-      msg2.timestamp,
-    )
+    putRes =
+      await driver.put(computeMessageHash(pubsubTopic2, msg2), pubsubTopic2, msg2)
     assert putRes.isOk(), putRes.error
 
     let countMessagesRes = await driver.getMessagesCount()
@@ -111,49 +87,49 @@ suite "Postgres driver":
 
     assert countMessagesRes.isOk(), $countMessagesRes.error
     assert countMessagesRes.get() == 2
 
-    var messagesRes = await driver.getMessages(contentTopic = @[contentTopic1])
+    var messagesRes = await driver.getMessages(contentTopics = @[contentTopic1])
 
     assert messagesRes.isOk(), $messagesRes.error
     assert messagesRes.get().len == 1
 
     # Get both content topics, check ordering
     messagesRes =
-      await driver.getMessages(contentTopic = @[contentTopic1, contentTopic2])
+      await driver.getMessages(contentTopics = @[contentTopic1, contentTopic2])
     assert messagesRes.isOk(), messagesRes.error
 
     assert messagesRes.get().len == 2
-    assert messagesRes.get()[0][1].contentTopic == contentTopic1
+    assert messagesRes.get()[0][2].contentTopic == contentTopic1
 
     # Descending order
     messagesRes = await driver.getMessages(
-      contentTopic = @[contentTopic1, contentTopic2], ascendingOrder = false
+      contentTopics = @[contentTopic1, contentTopic2], ascendingOrder = false
     )
     assert messagesRes.isOk(), messagesRes.error
 
     assert messagesRes.get().len == 2
-    assert messagesRes.get()[0][1].contentTopic == contentTopic2
+    assert messagesRes.get()[0][2].contentTopic == contentTopic2
 
     # cursor
     # Get both content topics
     messagesRes = await driver.getMessages(
-      contentTopic = @[contentTopic1, contentTopic2],
-      cursor = some(computeTestCursor(pubsubTopic1, messagesRes.get()[1][1])),
+      contentTopics = @[contentTopic1, contentTopic2],
+      cursor = some(computeMessageHash(pubsubTopic1, messagesRes.get()[1][2])),
     )
     assert messagesRes.isOk()
     assert messagesRes.get().len == 1
 
     # Get both content topics but one pubsub topic
     messagesRes = await driver.getMessages(
-      contentTopic = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1)
+      contentTopics = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1)
    )
     assert messagesRes.isOk(), messagesRes.error
 
     assert messagesRes.get().len == 1
-    assert messagesRes.get()[0][1].contentTopic == contentTopic1
+    assert messagesRes.get()[0][2].contentTopic == contentTopic1
 
     # Limit
     messagesRes = await driver.getMessages(
-      contentTopic = @[contentTopic1, contentTopic2], maxPageSize = 1
+      contentTopics = @[contentTopic1, contentTopic2], maxPageSize = 1
     )
     assert messagesRes.isOk(), messagesRes.error
     assert messagesRes.get().len == 1
 
@@ -170,11 +146,7 @@ suite "Postgres driver":
       raiseAssert "could not get num mgs correctly: " & $error
 
     var putRes = await driver.put(
-      DefaultPubsubTopic,
-      msg1,
-      computeDigest(msg1),
-      computeMessageHash(DefaultPubsubTopic, msg1),
-      msg1.timestamp,
+      computeMessageHash(DefaultPubsubTopic, msg1), DefaultPubsubTopic, msg1
     )
     assert putRes.isOk(), putRes.error
 
@@ -185,11 +157,7 @@ suite "Postgres driver":
       "wrong number of messages: " & $newNumMsgs
 
     putRes = await driver.put(
-      DefaultPubsubTopic,
-      msg2,
-      computeDigest(msg2),
-      computeMessageHash(DefaultPubsubTopic, msg2),
-      msg2.timestamp,
+      computeMessageHash(DefaultPubsubTopic, msg2), DefaultPubsubTopic, msg2
     )
     assert putRes.isOk()
diff --git a/tests/waku_archive/test_driver_postgres_query.nim b/tests/waku_archive/test_driver_postgres_query.nim
index 8614b6af1f..7cdd1c95ad 100644
--- a/tests/waku_archive/test_driver_postgres_query.nim
+++ b/tests/waku_archive/test_driver_postgres_query.nim
@@ -6,11 +6,8 @@ import
   chronos,
   chronicles
 import
-  ../../../waku/waku_archive,
-  ../../../waku/waku_archive/driver as driver_module,
   ../../../waku/waku_archive/driver/postgres_driver,
   ../../../waku/waku_core,
-  ../../../waku/waku_core/message/digest,
   ../testlib/common,
   ../testlib/wakucore,
   ../testlib/testasync,
@@ -25,15 +22,6 @@ logScope:
 # Initialize the random number generator
 common.randomize()
 
-proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
-  ArchiveCursor(
-    pubsubTopic: pubsubTopic,
-    senderTime: message.timestamp,
-    storeTime: message.timestamp,
-    digest: computeDigest(message),
-    hash: computeMessageHash(pubsubTopic, message),
-  )
-
 suite "Postgres driver - queries":
   ## Unique driver instance
   var driver {.threadvar.}: PostgresDriver
@@ -75,11 +63,7 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
         )
       ).isOk()
 
@@ -89,7 +73,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[0 .. 4]
 
@@ -116,23 +100,19 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
+      contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
     )
 
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 3]
 
@@ -171,23 +151,19 @@ suite "Postgres driver - queries":
    for msg in messages:
      require (
        await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
+      contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
     )
 
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 3]
 
@@ -214,23 +190,19 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false
+      contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false
     )
 
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[6 .. 7].reversed()
 
@@ -259,17 +231,13 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     var res = await driver.getMessages(
-      contentTopic = @[contentTopic1, contentTopic2],
+      contentTopics = @[contentTopic1, contentTopic2],
       pubsubTopic = some(DefaultPubsubTopic),
       maxPageSize = 2,
       ascendingOrder = true,
@@ -279,14 +247,14 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    var filteredMessages = res.tryGet().mapIt(it[1])
+    var filteredMessages = res.tryGet().mapIt(it[2])
     check filteredMessages == expected[2 .. 3]
 
     ## When
     ## This is very similar to the previous one but we enforce to use the prepared
     ## statement by querying one single content topic
     res = await driver.getMessages(
-      contentTopic = @[contentTopic1],
+      contentTopics = @[contentTopic1],
       pubsubTopic = some(DefaultPubsubTopic),
       maxPageSize = 2,
       ascendingOrder = true,
     )
@@ -296,7 +264,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    filteredMessages = res.tryGet().mapIt(it[1])
+    filteredMessages = res.tryGet().mapIt(it[2])
     check filteredMessages == @[expected[2]]
 
   asyncTest "single content topic - no results":
@@ -319,23 +287,19 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
+      contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
     )
 
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages.len == 0
 
@@ -347,17 +311,13 @@ suite "Postgres driver - queries":
       let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t))
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[DefaultContentTopic],
+      contentTopics = @[DefaultContentTopic],
       maxPageSize = pageSize,
       ascendingOrder = true,
     )
@@ -411,11 +371,7 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
     ## When
     let res = await driver.getMessages(
@@ -426,7 +382,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[4 .. 5]
 
@@ -472,11 +428,7 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
     ## When
     let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true)
@@ -485,7 +437,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[0 .. 1]
 
@@ -531,15 +483,11 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       pubsubTopic = some(pubsubTopic),
       maxPageSize = 2,
       ascendingOrder = true,
@@ -549,7 +497,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[4 .. 5]
 
@@ -577,15 +525,11 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
 
     ## When
     let res = await driver.getMessages(
@@ -595,7 +539,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[5 .. 6]
 
@@ -623,15 +567,11 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
 
     ## When
     let res = await driver.getMessages(
@@ -641,7 +581,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 3].reversed()
 
@@ -667,21 +607,16 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
-    let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
-    let cursor = ArchiveCursor(hash: fakeCursor)
+    let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
 
     ## When
     let res = await driver.getMessages(
       includeData = true,
-      contentTopicSeq = @[DefaultContentTopic],
+      contentTopics = @[DefaultContentTopic],
       pubsubTopic = none(PubsubTopic),
       cursor = some(cursor),
       startTime = none(Timestamp),
@@ -721,19 +656,15 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       cursor = some(cursor),
       maxPageSize = 10,
       ascendingOrder = true,
@@ -742,7 +673,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[5 .. 6]
 
@@ -770,19 +701,15 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[6])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       cursor = some(cursor),
       maxPageSize = 10,
       ascendingOrder = false,
@@ -791,7 +718,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 5].reversed()
 
@@ -862,13 +789,9 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
-    let cursor = computeTestCursor(expected[5][0], expected[5][1])
+    let cursor = computeMessageHash(expected[5][0], expected[5][1])
 
     ## When
     let res = await driver.getMessages(
@@ -882,7 +805,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[6 .. 7]
 
@@ -953,13 +876,9 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
-    let cursor = computeTestCursor(expected[6][0], expected[6][1])
+    let cursor = computeMessageHash(expected[6][0], expected[6][1])
 
     ## When
     let res = await driver.getMessages(
@@ -973,7 +892,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[4 .. 5].reversed()
 
@@ -1001,11 +920,7 @@ suite "Postgres driver - queries":
     let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it))
 
     for (msg, hash) in messages.zip(hashes):
-      require (
-        await driver.put(
-          DefaultPubsubTopic, msg, computeDigest(msg), hash, msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(hash, DefaultPubsubTopic, msg)).isOk()
 
     ## When
     let res = await driver.getMessages(hashes = hashes, ascendingOrder = false)
@@ -1014,7 +929,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.reversed()
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages
 
@@ -1042,11 +957,7 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
@@ -1058,7 +969,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 6]
 
@@ -1086,11 +997,7 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
@@ -1102,7 +1009,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[0 .. 4]
 
@@ -1175,11 +1082,7 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
     ## When
     let res = await driver.getMessages(
@@ -1193,7 +1096,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[2 .. 4]
 
@@ -1222,17 +1125,13 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       startTime = some(ts(45, timeOrigin)),
       endTime = some(ts(15, timeOrigin)),
       maxPageSize = 2,
@@ -1241,7 +1140,7 @@ suite "Postgres driver - queries":
 
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages.len == 0
 
@@ -1269,17 +1168,13 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       startTime = some(ts(15, timeOrigin)),
       maxPageSize = 10,
       ascendingOrder = true,
@@ -1287,7 +1182,7 @@ suite "Postgres driver - queries":
 
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 6]
 
@@ -1318,17 +1213,13 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       startTime = some(ts(15, timeOrigin)),
       maxPageSize = 10,
       ascendingOrder = false,
@@ -1336,7 +1227,7 @@ suite "Postgres driver - queries":
 
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 6].reversed()
 
@@ -1368,19 +1259,15 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[3])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[3])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       cursor = some(cursor),
       startTime = some(ts(15, timeOrigin)),
       maxPageSize = 10,
@@ -1389,7 +1276,7 @@ suite "Postgres driver - queries":
 
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[4 .. 9]
 
@@ -1421,19 +1308,15 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[6])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       cursor = some(cursor),
       startTime = some(ts(15, timeOrigin)),
       maxPageSize = 10,
@@ -1442,7 +1325,7 @@ suite "Postgres driver - queries":
 
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[3 .. 4].reversed()
 
@@ -1506,17 +1389,13 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[1][1])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       pubsubTopic = some(pubsubTopic),
       cursor = some(cursor),
       startTime = some(ts(0, timeOrigin)),
@@ -1528,7 +1407,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[3 .. 4]
 
@@ -1591,17 +1470,13 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
-    let cursor = computeTestCursor(expected[7][0], expected[7][1])
+    let cursor = computeMessageHash(expected[7][0], expected[7][1])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       pubsubTopic = some(pubsubTopic),
       cursor = some(cursor),
       startTime = some(ts(35, timeOrigin)),
@@ -1613,7 +1488,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[4 .. 5].reversed()
 
@@ -1677,17 +1552,13 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
-    let cursor = computeTestCursor(expected[1][0], expected[1][1])
+    let cursor = computeMessageHash(expected[1][0], expected[1][1])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       pubsubTopic = some(pubsubTopic),
       cursor = some(cursor),
       startTime = some(ts(35, timeOrigin)),
@@ -1700,7 +1571,7 @@ suite "Postgres driver - queries":
     assert res.isOk(), res.error
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[4 .. 5]
 
@@ -1764,17 +1635,13 @@ suite "Postgres driver - queries":
     for row in messages:
       let (topic, msg) = row
-      require (
-        await driver.put(
-          topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-        )
-      ).isOk()
+      require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk()
 
-    let cursor = computeTestCursor(expected[1][0], expected[1][1])
+    let cursor = computeMessageHash(expected[1][0], expected[1][1])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       pubsubTopic = some(pubsubTopic),
       cursor = some(cursor),
       startTime = some(ts(35, timeOrigin)),
@@ -1786,7 +1653,7 @@ suite "Postgres driver - queries":
     ## Then
     assert res.isOk(), res.error
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages.len == 0
 
@@ -1814,11 +1681,7 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
@@ -1865,11 +1728,7 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
 
@@ -1906,11 +1765,7 @@ suite "Postgres driver - queries":
     for msg in messages:
       require (
         await driver.put(
-          DefaultPubsubTopic,
-          msg,
-          computeDigest(msg),
-          computeMessageHash(DefaultPubsubTopic, msg),
-          msg.timestamp,
+          computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
        )
      ).isOk()
diff --git a/tests/waku_archive/test_driver_queue.nim b/tests/waku_archive/test_driver_queue.nim
index d57f0ee1d2..bbb5f835bf 100644
--- a/tests/waku_archive/test_driver_queue.nim
+++ b/tests/waku_archive/test_driver_queue.nim
@@ -18,13 +18,8 @@ proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =
   let
     message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
     topic = "test-pubsub-topic"
-    cursor = Index(
-      receiverTime: Timestamp(i),
-      senderTime: Timestamp(i),
-      digest: MessageDigest(data: data),
-      pubsubTopic: topic,
-      hash: computeMessageHash(topic, message),
-    )
+    cursor =
+      Index(time: Timestamp(i), hash: computeMessageHash(topic, message), topic: topic)
 
   (cursor, message)
 
@@ -70,7 +65,7 @@ procSuite "Sorted driver queue":
     # Attempt to add message with older value than oldest in queue should fail
     let
-      oldestTimestamp = driver.first().get().senderTime
+      oldestTimestamp = driver.first().get().time
       (index, message) = genIndexedWakuMessage(oldestTimestamp.int8 - 1)
       addRes = driver.add(index, message)
 
@@ -119,7 +114,7 @@ procSuite "Sorted driver queue":
 
     let first = firstRes.tryGet()
     check:
-      first.senderTime == Timestamp(1)
+      first.time == Timestamp(1)
 
   test "get first item from empty queue should fail":
     ## Given
@@ -150,7 +145,7 @@ procSuite "Sorted driver queue":
 
     let last = lastRes.tryGet()
     check:
-      last.senderTime == Timestamp(5)
+      last.time == Timestamp(5)
 
   test "get last item from empty queue should fail":
     ## Given
diff --git a/tests/waku_archive/test_driver_queue_index.nim b/tests/waku_archive/test_driver_queue_index.nim
index 2f1e685c7c..bd55996553 100644
--- a/tests/waku_archive/test_driver_queue_index.nim
+++ b/tests/waku_archive/test_driver_queue_index.nim
@@ -1,26 +1,12 @@
 {.used.}
 
-import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto
+import std/random, testutils/unittests
 import ../../../waku/waku_core, ../../../waku/waku_archive/driver/queue_driver/index
 
 var rng = initRand()
 
 ## Helpers
-proc getTestTimestamp(offset = 0): Timestamp =
-  let now = getNanosecondTime(epochTime() + float(offset))
-  Timestamp(now)
-
-proc hashFromStr(input: string): MDigest[256] =
-  var ctx: sha256
-
-  ctx.init()
-  ctx.update(input.toBytes())
-  let hashed = ctx.finish()
-  ctx.clear()
-
-  return hashed
-
 proc randomHash(): WakuMessageHash =
   var hash: WakuMessageHash
 
@@ -33,187 +19,29 @@ proc randomHash(): WakuMessageHash =
 suite "Queue Driver - index":
   ## Test vars
   let
-    smallIndex1 = Index(
-      digest: hashFromStr("1234"),
-      receiverTime: getNanosecondTime(0),
-      senderTime: getNanosecondTime(1000),
-      hash: randomHash(),
-    )
-    smallIndex2 = Index(
-      digest: hashFromStr("1234567"), # digest is less significant than senderTime
-      receiverTime: getNanosecondTime(0),
-      senderTime: getNanosecondTime(1000),
-      hash: randomHash(),
-    )
-    largeIndex1 = Index(
-      digest: hashFromStr("1234"),
-      receiverTime: getNanosecondTime(0),
-      senderTime: getNanosecondTime(9000),
-      hash: randomHash(),
-    ) # only senderTime differ from smallIndex1
-    largeIndex2 = Index(
-      digest: hashFromStr("12345"), # only digest differs from smallIndex1
-      receiverTime: getNanosecondTime(0),
-      senderTime: getNanosecondTime(1000),
-      hash: randomHash(),
-    )
-    eqIndex1 = Index(
-      digest: hashFromStr("0003"),
-      receiverTime: getNanosecondTime(0),
-      senderTime: getNanosecondTime(54321),
-      hash: randomHash(),
-    )
-    eqIndex2 = Index(
-      digest: hashFromStr("0003"),
-      receiverTime: getNanosecondTime(0),
-      senderTime: getNanosecondTime(54321),
-      hash: randomHash(),
-    )
-    eqIndex3 = Index(
-      digest: hashFromStr("0003"),
-      receiverTime: getNanosecondTime(9999),
-      # receiverTime difference should have no effect on comparisons
-      senderTime: getNanosecondTime(54321),
-      hash: randomHash(),
-    )
-    diffPsTopic = Index(
-      digest: hashFromStr("1234"),
-      receiverTime: getNanosecondTime(0),
-      senderTime: getNanosecondTime(1000),
-      pubsubTopic: "zzzz",
-      hash: randomHash(),
-    )
-    noSenderTime1 = Index(
-      digest: hashFromStr("1234"),
-      receiverTime: getNanosecondTime(1100),
-      senderTime: getNanosecondTime(0),
-      pubsubTopic: "zzzz",
-      hash: randomHash(),
-    )
-    noSenderTime2 = Index(
-      digest: hashFromStr("1234"),
-      receiverTime: getNanosecondTime(10000),
-      senderTime: getNanosecondTime(0),
-      pubsubTopic: "zzzz",
-      hash: randomHash(),
-    )
-    noSenderTime3 = Index(
-      digest: hashFromStr("1234"),
-      receiverTime: getNanosecondTime(1200),
-      senderTime: getNanosecondTime(0),
-      pubsubTopic: "aaaa",
-      hash: randomHash(),
-    )
-    noSenderTime4 = Index(
-      digest: hashFromStr("0"),
-      receiverTime: getNanosecondTime(1200),
-      senderTime: getNanosecondTime(0),
-      pubsubTopic: "zzzz",
-      hash: randomHash(),
-    )
+    hash = randomHash()
+    eqIndex1 = Index(time: getNanosecondTime(54321), hash: hash)
+    eqIndex2 = Index(time: getNanosecondTime(54321), hash: hash)
+    eqIndex3 = Index(time: getNanosecondTime(54321), hash: randomHash())
+    eqIndex4 = Index(time: getNanosecondTime(65432), hash: hash)
 
   test "Index comparison":
-    # Index comparison with senderTime diff
-    check:
-      cmp(smallIndex1, largeIndex1) < 0
-      cmp(smallIndex2, largeIndex1) < 0
-
-    # Index comparison with digest diff
-    check:
-      cmp(smallIndex1, smallIndex2) < 0
-      cmp(smallIndex1, largeIndex2) < 0
-      cmp(smallIndex2, largeIndex2) > 0
-      cmp(largeIndex1, largeIndex2) > 0
-
-    # Index comparison when equal
     check:
+      # equality
       cmp(eqIndex1, eqIndex2) == 0
+      cmp(eqIndex1, eqIndex3) != 0
+      cmp(eqIndex1, eqIndex4) != 0
 
-    # pubsubTopic difference
-    check:
-      cmp(smallIndex1, diffPsTopic) < 0
-
-    # receiverTime diff plays no role when senderTime set
-    check:
-      cmp(eqIndex1, eqIndex3) == 0
+      # ordering
+      cmp(eqIndex3, eqIndex4) < 0
+      cmp(eqIndex4, eqIndex3) > 0 # Test symmetry
 
-    # receiverTime diff plays no role when digest/pubsubTopic equal
-    check:
-      cmp(noSenderTime1, noSenderTime2) == 0
-
-    # sort on receiverTime with no senderTimestamp and unequal pubsubTopic
-    check:
-      cmp(noSenderTime1, noSenderTime3) < 0
-
-    # sort on receiverTime with no senderTimestamp and unequal digest
-    check:
-      cmp(noSenderTime1, noSenderTime4) < 0
-
-    # sort on receiverTime if no senderTimestamp on only one side
-    check:
-      cmp(smallIndex1, noSenderTime1) < 0
-      cmp(noSenderTime1, smallIndex1) > 0 # Test symmetry
-      cmp(noSenderTime2, eqIndex3) < 0
-      cmp(eqIndex3, noSenderTime2) > 0 # Test symmetry
+      cmp(eqIndex2, eqIndex4) < 0
+      cmp(eqIndex4, eqIndex2) > 0 # Test symmetry
 
   test "Index equality":
-    # Exactly equal
     check:
       eqIndex1 == eqIndex2
-
-    # Receiver time plays no role, even without sender time
-    check:
-      eqIndex1 == eqIndex3
-      noSenderTime1 == noSenderTime2 # only receiver time differs, indices are equal
-      noSenderTime1 != noSenderTime3 # pubsubTopics differ
-      noSenderTime1 != noSenderTime4 # digests differ
-
-    # Unequal sender time
-    check:
-      smallIndex1 != largeIndex1
-
-    # Unequal digest
-    check:
-      smallIndex1 != smallIndex2
-
-    # Unequal hash and digest
-    check:
-      smallIndex1 != eqIndex1
-
-    # Unequal pubsubTopic
-    check:
-      smallIndex1 != diffPsTopic
-
-  test "Index computation should not be empty":
-    ## Given
-    let ts = getTestTimestamp()
-    let wm = WakuMessage(payload: @[byte 1, 2, 3], timestamp: ts)
-
-    ## When
-    let ts2 = getTestTimestamp() + 10
-    let index = Index.compute(wm, ts2, DefaultContentTopic)
-
-    ## Then
-    check:
-      index.digest.data.len != 0
-      index.digest.data.len == 32 # sha2 output length in bytes
-      index.receiverTime == ts2 # the receiver timestamp should be a non-zero value
-      index.senderTime == ts
-      index.pubsubTopic == DefaultContentTopic
-
-  test "Index digest of two identical messsage should be the same":
-    ## Given
-    let topic = ContentTopic("test-content-topic")
-    let
-      wm1 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
-      wm2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
-
-    ## When
-    let ts = getTestTimestamp()
-    let
-      index1 = Index.compute(wm1, ts, DefaultPubsubTopic)
-      index2 = Index.compute(wm2, ts, DefaultPubsubTopic)
-
-    ## Then
-    check:
-      index1.digest == index2.digest
+      eqIndex1 == eqIndex4
+      eqIndex2 != eqIndex3
+      eqIndex4 != eqIndex3
diff --git a/tests/waku_archive/test_driver_queue_pagination.nim b/tests/waku_archive/test_driver_queue_pagination.nim
index 6ce1d6d560..fce8662d70 100644
--- a/tests/waku_archive/test_driver_queue_pagination.nim
+++ b/tests/waku_archive/test_driver_queue_pagination.nim
@@ -21,10 +21,9 @@ proc getTestQueueDriver(numMessages: int): QueueDriver =
     let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
 
     let index = Index(
-      receiverTime: Timestamp(i),
-      senderTime: Timestamp(i),
-      digest: MessageDigest(data: data),
+      time: Timestamp(i),
       hash: computeMessageHash(DefaultPubsubTopic, msg),
+      topic: DefaultPubsubTopic,
     )
 
     discard testQueueDriver.add(index, msg)
@@ -48,7 +47,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 2
       data == msgList[4 .. 5]
@@ -64,7 +63,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 2
       data == msgList[0 .. 1]
@@ -80,7 +79,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 10
       data == msgList[0 .. 9]
@@ -97,7 +96,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 0
 
@@ -112,7 +111,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 6
       data == msgList[4 .. 9]
@@ -128,7 +127,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       uint(data.len) <= MaxPageSize
 
@@ -143,19 +142,14 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 0
 
   test "Forward pagination - invalid cursor":
     ## Given
     let msg = fakeWakuMessage(payload = @[byte 10])
-    let index = ArchiveCursor(
-      pubsubTopic: DefaultPubsubTopic,
-      senderTime: msg.timestamp,
-      storeTime: msg.timestamp,
-      digest: computeDigest(msg),
-    ).toIndex()
+    let index = Index(hash: computeMessageHash(DefaultPubsubTopic, msg))
 
     let pageSize: uint = 10
 
@@ -182,7 +176,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 1
 
@@ -198,7 +192,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 0
 
@@ -218,7 +212,7 @@ procSuite "Queue driver - pagination":
     )
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.mapIt(it.timestamp.int) == @[0, 2, 4]
 
@@ -233,7 +227,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data == msgList[1 .. 2].reversed
 
@@ -249,7 +243,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 0
 
@@ -264,7 +258,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 2
       data == msgList[8 .. 9].reversed
@@ -280,7 +274,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 10
       data == msgList[0 .. 9].reversed
@@ -296,7 +290,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data == msgList[0 .. 2].reversed
 
@@ -311,7 +305,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       uint(data.len) <= MaxPageSize
 
@@ -326,19 +320,14 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 0
 
   test "Backward pagination - invalid cursor":
     ## Given
     let msg = fakeWakuMessage(payload = @[byte 10])
-    let index = ArchiveCursor(
-      pubsubTopic: DefaultPubsubTopic,
-      senderTime: msg.timestamp,
-      storeTime: msg.timestamp,
-      digest: computeDigest(msg),
-    ).toIndex()
+    let index = Index(hash: computeMessageHash(DefaultPubsubTopic, msg))
 
     let pageSize: uint = 2
 
@@ -365,7 +354,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 1
 
@@ -381,7 +370,7 @@ procSuite "Queue driver - pagination":
     let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.len == 0
 
@@ -401,6 +390,6 @@ procSuite "Queue driver - pagination":
     )
 
     ## Then
-    let data = page.tryGet().mapIt(it[1])
+    let data = page.tryGet().mapIt(it[2])
     check:
       data.mapIt(it.timestamp.int) == @[5, 7, 9].reversed
diff --git a/tests/waku_archive/test_driver_queue_query.nim b/tests/waku_archive/test_driver_queue_query.nim
index 60016c771d..062bdd007d 100644
--- a/tests/waku_archive/test_driver_queue_query.nim
+++ b/tests/waku_archive/test_driver_queue_query.nim
@@ -19,15 +19,6 @@ common.randomize()
 proc newTestSqliteDriver(): ArchiveDriver =
   QueueDriver.new(capacity = 50)
 
-proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
-  ArchiveCursor(
-    pubsubTopic: pubsubTopic,
-    senderTime: message.timestamp,
-    storeTime: message.timestamp,
-    digest: computeDigest(message),
-    hash: computeMessageHash(pubsubTopic, message),
-  )
-
 suite "Queue driver - query by content topic":
   test "no content topic":
     ## Given
@@ -53,11 +44,7 @@ suite "Queue driver - query by content topic":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
@@ -68,7 +55,7 @@ suite "Queue driver - query by content topic":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[0 .. 4]
 
@@ -99,24 +86,20 @@ suite "Queue driver - query by content topic":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
+      contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
     )
 
     ## Then
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 3]
 
@@ -147,24 +130,20 @@ suite "Queue driver - query by content topic":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false
+      contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false
     )
 
     ## Then
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[6 .. 7].reversed()
 
@@ -197,17 +176,13 @@ suite "Queue driver - query by content topic":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic1, contentTopic2],
+      contentTopics = @[contentTopic1, contentTopic2],
       maxPageSize = 2,
       ascendingOrder = true,
     )
@@ -216,7 +191,7 @@ suite "Queue driver - query by content topic":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 3]
 
@@ -244,24 +219,20 @@ suite "Queue driver - query by content topic":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true
+      contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true
     )
 
     ## Then
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages.len == 0
 
@@ -277,17 +248,13 @@ suite "Queue driver - query by content topic":
     for t in 0 ..< 40:
       let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t))
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[DefaultContentTopic],
+      contentTopics = @[DefaultContentTopic],
       maxPageSize = pageSize,
       ascendingOrder = true,
     )
@@ -296,7 +263,7 @@ suite "Queue driver - query by content topic":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages.len == 40
 
@@ -348,9 +315,7 @@ suite "SQLite driver - query by pubsub topic":
     for row in messages:
       let (topic, msg) = row
-      let retFut = waitFor driver.put(
-        topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-      )
+      let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
       require retFut.isOk()
 
     ## When
@@ -363,7 +328,7 @@ suite "SQLite driver - query by pubsub topic":
       res.isOk()
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[4 .. 5]
 
@@ -414,9 +379,7 @@ suite "SQLite driver - query by pubsub topic":
     for row in messages:
       let (topic, msg) = row
-      let retFut = waitFor driver.put(
-        topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-      )
+      let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
       require retFut.isOk()
 
     ## When
@@ -427,7 +390,7 @@ suite "SQLite driver - query by pubsub topic":
       res.isOk()
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[0 .. 1]
 
@@ -478,14 +441,12 @@ suite "SQLite driver - query by pubsub topic":
     for row in messages:
       let (topic, msg) = row
-      let retFut = waitFor driver.put(
-        topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-      )
+      let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
       require retFut.isOk()
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       pubsubTopic = some(pubsubTopic),
       maxPageSize = 2,
       ascendingOrder = true,
@@ -496,7 +457,7 @@ suite "SQLite driver - query by pubsub topic":
       res.isOk()
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[4 .. 5]
 
@@ -529,15 +490,11 @@ suite "Queue driver - query by cursor":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
 
     ## When
     let res = waitFor driver.getMessages(
@@ -548,7 +505,7 @@ suite "Queue driver - query by cursor":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[5 .. 6]
 
@@ -580,15 +537,11 @@ suite "Queue driver - query by cursor":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
 
     ## When
     let res = waitFor driver.getMessages(
@@ -599,7 +552,7 @@ suite "Queue driver - query by cursor":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 3].reversed()
 
@@ -629,21 +582,16 @@ suite "Queue driver - query by cursor":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
-    let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
-    let cursor = ArchiveCursor(hash: fakeCursor)
+    let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage())
 
     ## When
     let res = waitFor driver.getMessages(
       includeData = true,
-      contentTopic = @[DefaultContentTopic],
+      contentTopics = @[DefaultContentTopic],
       pubsubTopic = none(PubsubTopic),
       cursor = some(cursor),
       startTime = none(Timestamp),
@@ -686,19 +634,15 @@ suite "Queue driver - query by cursor":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[4])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[4])
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       cursor = some(cursor),
       maxPageSize = 10,
       ascendingOrder = true,
@@ -708,7 +652,7 @@ suite "Queue driver - query by cursor":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[5 .. 6]
 
@@ -740,19 +684,15 @@ suite "Queue driver - query by cursor":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[6])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       cursor = some(cursor),
       maxPageSize = 10,
       ascendingOrder = false,
@@ -762,7 +702,7 @@ suite "Queue driver - query by cursor":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 5].reversed()
 
@@ -838,12 +778,10 @@ suite "Queue driver - query by cursor":
     for row in messages:
       let (topic, msg) = row
-      let retFut = waitFor driver.put(
-        topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-      )
+      let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
       require retFut.isOk()
 
-    let cursor = computeTestCursor(expected[5][0], expected[5][1])
+    let cursor = computeMessageHash(expected[5][0], expected[5][1])
 
     ## When
     let res = waitFor driver.getMessages(
@@ -858,7 +796,7 @@ suite "Queue driver - query by cursor":
       res.isOk()
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[6 .. 7]
 
@@ -934,12 +872,10 @@ suite "Queue driver - query by cursor":
     for row in messages:
       let (topic, msg) = row
-      let retFut = waitFor driver.put(
-        topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-      )
+      let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
      require retFut.isOk()
 
-    let cursor = computeTestCursor(expected[6][0], expected[6][1])
+    let cursor = computeMessageHash(expected[6][0], expected[6][1])
 
     ## When
     let res = waitFor driver.getMessages(
@@ -954,7 +890,7 @@ suite "Queue driver - query by cursor":
       res.isOk()
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[4 .. 5].reversed()
 
@@ -987,11 +923,7 @@ suite "Queue driver - query by time range":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
@@ -1004,7 +936,7 @@ suite "Queue driver - query by time range":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 6]
 
@@ -1036,11 +968,7 @@ suite "Queue driver - query by time range":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
@@ -1053,7 +981,7 @@ suite "Queue driver - query by time range":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[0 .. 4]
 
@@ -1131,9 +1059,7 @@ suite "Queue driver - query by time range":
     for row in messages:
       let (topic, msg) = row
-      let retFut = waitFor driver.put(
-        topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-      )
+      let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
       require retFut.isOk()
 
     ## When
@@ -1149,7 +1075,7 @@ suite "Queue driver - query by time range":
       res.isOk()
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[2 .. 4]
 
@@ -1182,17 +1108,13 @@ suite "Queue driver - query by time range":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       startTime = some(ts(45, timeOrigin)),
       endTime = some(ts(15, timeOrigin)),
       maxPageSize = 2,
@@ -1202,7 +1124,7 @@ suite "Queue driver - query by time range":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages.len == 0
 
@@ -1234,17 +1156,13 @@ suite "Queue driver - query by time range":
     for msg in messages:
       let retFut = await driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       startTime = some(ts(15, timeOrigin)),
       maxPageSize = 10,
       ascendingOrder = true,
@@ -1253,7 +1171,7 @@ suite "Queue driver - query by time range":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 6]
 
@@ -1288,17 +1206,13 @@ suite "Queue driver - query by time range":
     for msg in messages:
       let retFut = waitFor driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       startTime = some(ts(15, timeOrigin)),
       maxPageSize = 10,
       ascendingOrder = false,
@@ -1307,7 +1221,7 @@ suite "Queue driver - query by time range":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[2 .. 6].reversed()
 
@@ -1343,19 +1257,15 @@ suite "Queue driver - query by time range":
     for msg in messages:
       let retFut = await driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[3])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[3])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       cursor = some(cursor),
       startTime = some(ts(15, timeOrigin)),
       maxPageSize = 10,
@@ -1365,7 +1275,7 @@ suite "Queue driver - query by time range":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[4 .. 9]
 
@@ -1401,19 +1311,15 @@ suite "Queue driver - query by time range":
     for msg in messages:
       let retFut = await driver.put(
-        DefaultPubsubTopic,
-        msg,
-        computeDigest(msg),
-        computeMessageHash(DefaultPubsubTopic, msg),
-        msg.timestamp,
+        computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg
      )
      require retFut.isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[6])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[6])
 
     ## When
     let res = await driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       cursor = some(cursor),
       startTime = some(ts(15, timeOrigin)),
       maxPageSize = 10,
@@ -1423,7 +1329,7 @@ suite "Queue driver - query by time range":
     check:
       res.isOk()
 
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expected[3 .. 4].reversed()
 
@@ -1492,16 +1398,14 @@ suite "Queue driver - query by time range":
     for row in messages:
       let (topic, msg) = row
-      let retFut = waitFor driver.put(
-        topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp
-      )
+      let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg)
      require retFut.isOk()
 
-    let cursor = computeTestCursor(DefaultPubsubTopic, expected[1][1])
+    let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1])
 
     ## When
     let res = waitFor driver.getMessages(
-      contentTopic = @[contentTopic],
+      contentTopics = @[contentTopic],
       pubsubTopic = some(pubsubTopic),
       cursor = some(cursor),
       startTime = some(ts(0, timeOrigin)),
@@ -1514,7 +1418,7 @@ suite "Queue driver - query by time range":
       res.isOk()
 
     let expectedMessages = expected.mapIt(it[1])
-    let filteredMessages = res.tryGet().mapIt(it[1])
+    let filteredMessages = res.tryGet().mapIt(it[2])
     check:
       filteredMessages == expectedMessages[3 ..
4] @@ -1582,16 +1486,14 @@ suite "Queue driver - query by time range": for row in messages: let (topic, msg) = row - let retFut = waitFor driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) require retFut.isOk() - let cursor = computeTestCursor(expected[7][0], expected[7][1]) + let cursor = computeMessageHash(expected[7][0], expected[7][1]) ## When let res = waitFor driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], pubsubTopic = some(pubsubTopic), cursor = some(cursor), startTime = some(ts(35, timeOrigin)), @@ -1604,7 +1506,7 @@ suite "Queue driver - query by time range": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[4 .. 5].reversed() @@ -1673,16 +1575,14 @@ suite "Queue driver - query by time range": for row in messages: let (topic, msg) = row - let retFut = waitFor driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) require retFut.isOk() - let cursor = computeTestCursor(expected[1][0], expected[1][1]) + let cursor = computeMessageHash(expected[1][0], expected[1][1]) ## When let res = waitFor driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], pubsubTopic = some(pubsubTopic), cursor = some(cursor), startTime = some(ts(35, timeOrigin)), @@ -1696,7 +1596,7 @@ suite "Queue driver - query by time range": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[4 .. 
5] @@ -1765,16 +1665,14 @@ suite "Queue driver - query by time range": for row in messages: let (topic, msg) = row - let retFut = waitFor driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) require retFut.isOk() - let cursor = computeTestCursor(expected[1][0], expected[1][1]) + let cursor = computeMessageHash(expected[1][0], expected[1][1]) ## When let res = waitFor driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], pubsubTopic = some(pubsubTopic), cursor = some(cursor), startTime = some(ts(35, timeOrigin)), @@ -1787,7 +1685,7 @@ suite "Queue driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages.len == 0 diff --git a/tests/waku_archive/test_driver_sqlite.nim b/tests/waku_archive/test_driver_sqlite.nim index 72f560ca9a..cd478f89c4 100644 --- a/tests/waku_archive/test_driver_sqlite.nim +++ b/tests/waku_archive/test_driver_sqlite.nim @@ -2,12 +2,10 @@ import std/sequtils, testutils/unittests, chronos import - ../../../waku/common/databases/db_sqlite, ../../../waku/waku_archive, ../../../waku/waku_archive/driver/sqlite_driver, ../../../waku/waku_core, ../waku_archive/archive_utils, - ../testlib/common, ../testlib/wakucore suite "SQLite driver": @@ -40,9 +38,7 @@ suite "SQLite driver": let msgHash = computeMessageHash(DefaultPubsubTopic, msg) ## When - let putRes = waitFor driver.put( - DefaultPubsubTopic, msg, computeDigest(msg), msgHash, msg.timestamp - ) + let putRes = waitFor driver.put(msgHash, DefaultPubsubTopic, msg) ## Then check: @@ -52,7 +48,7 @@ suite "SQLite driver": check: storedMsg.len == 1 storedMsg.all do(item: auto) -> bool: - let (pubsubTopic, actualMsg, _, _, hash) = item + let (hash, pubsubTopic, actualMsg) = item actualMsg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic and hash == msgHash and msg.meta == actualMsg.meta diff --git a/tests/waku_archive/test_driver_sqlite_query.nim b/tests/waku_archive/test_driver_sqlite_query.nim index 58968b1b87..a029abb7e7 100644 --- a/tests/waku_archive/test_driver_sqlite_query.nim +++ b/tests/waku_archive/test_driver_sqlite_query.nim @@ -4,9 +4,7 @@ import std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles import - ../../../waku/common/databases/db_sqlite, ../../../waku/waku_archive, - ../../../waku/waku_archive/driver/sqlite_driver, ../../../waku/waku_core, ../../../waku/waku_core/message/digest, ../testlib/common, @@ -45,11 +43,7 @@ suite "SQLite driver - query by content topic": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() @@ -60,7 +54,7 @@ suite "SQLite driver - query by content topic": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[0 .. 
4] @@ -92,24 +86,20 @@ suite "SQLite driver - query by content topic": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true ) ## Then check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[2 .. 3] @@ -153,24 +143,20 @@ suite "SQLite driver - query by content topic": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true ) ## Then check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[2 .. 3] @@ -202,24 +188,20 @@ suite "SQLite driver - query by content topic": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false ) ## Then check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[6 .. 7].reversed() @@ -253,17 +235,13 @@ suite "SQLite driver - query by content topic": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic1, contentTopic2], + contentTopics = @[contentTopic1, contentTopic2], maxPageSize = 2, ascendingOrder = true, ) @@ -272,7 +250,7 @@ suite "SQLite driver - query by content topic": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[2 .. 
3] @@ -301,24 +279,20 @@ suite "SQLite driver - query by content topic": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true ) ## Then check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages.len == 0 @@ -335,17 +309,13 @@ suite "SQLite driver - query by content topic": let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t)) require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[DefaultContentTopic], + contentTopics = @[DefaultContentTopic], maxPageSize = pageSize, ascendingOrder = true, ) @@ -354,7 +324,7 @@ suite "SQLite driver - query by content topic": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages.len == 40 @@ -406,11 +376,7 @@ suite "SQLite driver - query by pubsub topic": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() ## When let res = await driver.getMessages( @@ -422,7 +388,7 @@ suite "SQLite driver - query by pubsub topic": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[4 .. 5] @@ -473,11 +439,7 @@ suite "SQLite driver - query by pubsub topic": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() ## When let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true) @@ -487,7 +449,7 @@ suite "SQLite driver - query by pubsub topic": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[0 .. 1] @@ -538,15 +500,11 @@ suite "SQLite driver - query by pubsub topic": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], pubsubTopic = some(pubsubTopic), maxPageSize = 2, ascendingOrder = true, @@ -557,7 +515,7 @@ suite "SQLite driver - query by pubsub topic": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[4 .. 
5] @@ -591,15 +549,11 @@ suite "SQLite driver - query by cursor": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() - let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) ## When let res = await driver.getMessages( @@ -610,7 +564,7 @@ suite "SQLite driver - query by cursor": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[5 .. 6] @@ -643,15 +597,11 @@ suite "SQLite driver - query by cursor": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() - let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) ## When let res = await driver.getMessages( @@ -662,7 +612,7 @@ suite "SQLite driver - query by cursor": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[2 .. 3].reversed() @@ -693,21 +643,16 @@ suite "SQLite driver - query by cursor": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() - let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) - let cursor = ArchiveCursor(hash: fakeCursor) + let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) ## When let res = await driver.getMessages( includeData = true, - contentTopic = @[DefaultContentTopic], + contentTopics = @[DefaultContentTopic], pubsubTopic = none(PubsubTopic), cursor = some(cursor), startTime = none(Timestamp), @@ -751,19 +696,15 @@ suite "SQLite driver - query by cursor": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() - let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], cursor = some(cursor), maxPageSize = 10, ascendingOrder = true, @@ -773,7 +714,7 @@ suite "SQLite driver - query by cursor": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[5 .. 
6] @@ -806,19 +747,15 @@ suite "SQLite driver - query by cursor": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() - let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[6]) + let cursor = computeMessageHash(DefaultPubsubTopic, expected[6]) ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], cursor = some(cursor), maxPageSize = 10, ascendingOrder = false, @@ -828,7 +765,7 @@ suite "SQLite driver - query by cursor": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[2 .. 5].reversed() @@ -904,13 +841,9 @@ suite "SQLite driver - query by cursor": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() - let cursor = computeArchiveCursor(expected[5][0], expected[5][1]) + let cursor = computeMessageHash(expected[5][0], expected[5][1]) ## When let res = await driver.getMessages( @@ -925,7 +858,7 @@ suite "SQLite driver - query by cursor": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[6 .. 7] @@ -1001,13 +934,9 @@ suite "SQLite driver - query by cursor": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() - let cursor = computeArchiveCursor(expected[6][0], expected[6][1]) + let cursor = computeMessageHash(expected[6][0], expected[6][1]) ## When let res = await driver.getMessages( @@ -1022,7 +951,7 @@ suite "SQLite driver - query by cursor": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[4 .. 5].reversed() @@ -1056,11 +985,7 @@ suite "SQLite driver - query by time range": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() @@ -1073,7 +998,7 @@ suite "SQLite driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[2 .. 6] @@ -1106,11 +1031,7 @@ suite "SQLite driver - query by time range": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() @@ -1123,7 +1044,7 @@ suite "SQLite driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[0 .. 
4] @@ -1201,11 +1122,7 @@ suite "SQLite driver - query by time range": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() ## When let res = await driver.getMessages( @@ -1220,7 +1137,7 @@ suite "SQLite driver - query by time range": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[2 .. 4] @@ -1254,17 +1171,13 @@ suite "SQLite driver - query by time range": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], startTime = some(ts(45, timeOrigin)), endTime = some(ts(15, timeOrigin)), maxPageSize = 2, @@ -1274,7 +1187,7 @@ suite "SQLite driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages.len == 0 @@ -1307,17 +1220,13 @@ suite "SQLite driver - query by time range": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = true, @@ -1326,7 +1235,7 @@ suite "SQLite driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[2 .. 6] @@ -1362,17 +1271,13 @@ suite "SQLite driver - query by time range": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = false, @@ -1381,7 +1286,7 @@ suite "SQLite driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[2 .. 
6].reversed() @@ -1418,19 +1323,15 @@ suite "SQLite driver - query by time range": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() - let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[3]) + let cursor = computeMessageHash(DefaultPubsubTopic, expected[3]) ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], cursor = some(cursor), startTime = some(ts(15, timeOrigin)), maxPageSize = 10, @@ -1440,7 +1341,7 @@ suite "SQLite driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[4 .. 9] @@ -1477,19 +1378,15 @@ suite "SQLite driver - query by time range": for msg in messages: require ( await driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() - let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[6]) + let cursor = computeMessageHash(DefaultPubsubTopic, expected[6]) ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], cursor = some(cursor), startTime = some(ts(15, timeOrigin)), maxPageSize = 10, @@ -1499,7 +1396,7 @@ suite "SQLite driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expected[3 .. 4].reversed() @@ -1568,17 +1465,13 @@ suite "SQLite driver - query by time range": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() - let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[1][1]) + let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1]) ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], pubsubTopic = some(pubsubTopic), cursor = some(cursor), startTime = some(ts(0, timeOrigin)), @@ -1591,7 +1484,7 @@ suite "SQLite driver - query by time range": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[3 .. 
4] @@ -1659,17 +1552,13 @@ suite "SQLite driver - query by time range": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() - let cursor = computeArchiveCursor(expected[7][0], expected[7][1]) + let cursor = computeMessageHash(expected[7][0], expected[7][1]) ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], pubsubTopic = some(pubsubTopic), cursor = some(cursor), startTime = some(ts(35, timeOrigin)), @@ -1682,7 +1571,7 @@ suite "SQLite driver - query by time range": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[4 .. 5].reversed() @@ -1751,17 +1640,13 @@ suite "SQLite driver - query by time range": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() - let cursor = computeArchiveCursor(expected[1][0], expected[1][1]) + let cursor = computeMessageHash(expected[1][0], expected[1][1]) ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], pubsubTopic = some(pubsubTopic), cursor = some(cursor), startTime = some(ts(35, timeOrigin)), @@ -1775,7 +1660,7 @@ suite "SQLite driver - query by time range": res.isOk() let expectedMessages = expected.mapIt(it[1]) - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages == expectedMessages[4 .. 
5] @@ -1844,17 +1729,13 @@ suite "SQLite driver - query by time range": for row in messages: let (topic, msg) = row - require ( - await driver.put( - topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp - ) - ).isOk() + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() - let cursor = computeArchiveCursor(expected[1][0], expected[1][1]) + let cursor = computeMessageHash(expected[1][0], expected[1][1]) ## When let res = await driver.getMessages( - contentTopic = @[contentTopic], + contentTopics = @[contentTopic], pubsubTopic = some(pubsubTopic), cursor = some(cursor), startTime = some(ts(35, timeOrigin)), @@ -1867,7 +1748,7 @@ suite "SQLite driver - query by time range": check: res.isOk() - let filteredMessages = res.tryGet().mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) check: filteredMessages.len == 0 diff --git a/tests/waku_archive/test_retention_policy.nim b/tests/waku_archive/test_retention_policy.nim index f0d6f82e4c..6f21afb655 100644 --- a/tests/waku_archive/test_retention_policy.nim +++ b/tests/waku_archive/test_retention_policy.nim @@ -33,13 +33,7 @@ suite "Waku Archive - Retention policy": payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i) ) putFutures.add( - driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, - ) + driver.put(computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg) ) discard waitFor allFinished(putFutures) @@ -86,13 +80,7 @@ suite "Waku Archive - Retention policy": payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i) ) putFutures.add( - driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, - ) + driver.put(computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg) ) # waitFor is used to synchronously wait for the futures to complete. 
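# --- editor's note: illustrative sketch, not part of the diff ----------------
# The test updates above all track the same two API changes: ArchiveDriver.put
# now takes the pre-computed message hash as its first argument (the digest and
# receivedTime arguments are gone), and a pagination cursor is now simply the
# WakuMessageHash of the last message already delivered. A minimal, hedged
# usage sketch under those signatures, assuming the helpers these tests already
# import (newSqliteArchiveDriver, fakeWakuMessage, ts, std/options,
# std/sequtils) and that fakeWakuMessage defaults to DefaultContentTopic:
let
  driver = newSqliteArchiveDriver()
  msgA = fakeWakuMessage(ts = ts(0))
  msgB = fakeWakuMessage(ts = ts(10))
  hashA = computeMessageHash(DefaultPubsubTopic, msgA)
  hashB = computeMessageHash(DefaultPubsubTopic, msgB)

assert (waitFor driver.put(hashA, DefaultPubsubTopic, msgA)).isOk()
assert (waitFor driver.put(hashB, DefaultPubsubTopic, msgB)).isOk()

# Resume pagination after msgA: its hash is the entire cursor.
let page = waitFor driver.getMessages(
  contentTopics = @[DefaultContentTopic],
  cursor = some(hashA),
  maxPageSize = 10,
  ascendingOrder = true,
)
assert page.isOk(), page.error
# Each row is now a (WakuMessageHash, PubsubTopic, WakuMessage) tuple.
assert page.tryGet().mapIt(it[2]) == @[msgB]
# ------------------------------------------------------------------------------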
@@ -148,11 +136,7 @@ suite "Waku Archive - Retention policy": for msg in messages: require ( waitFor driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() require (waitFor retentionPolicy.execute(driver)).isOk() @@ -162,7 +146,7 @@ suite "Waku Archive - Retention policy": check: storedMsg.len == capacity storedMsg.all do(item: auto) -> bool: - let (pubsubTopic, msg, _, _, _) = item + let (_, pubsubTopic, msg) = item msg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic ## Cleanup diff --git a/tests/waku_archive/test_waku_archive.nim b/tests/waku_archive/test_waku_archive.nim index 8408d3f3de..893909d35d 100644 --- a/tests/waku_archive/test_waku_archive.nim +++ b/tests/waku_archive/test_waku_archive.nim @@ -1,21 +1,13 @@ {.used.} -import - std/[options, sequtils], - testutils/unittests, - chronicles, - chronos, - libp2p/crypto/crypto +import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/crypto import - ../../../waku/common/databases/db_sqlite, ../../../waku/common/paging, ../../../waku/waku_core, ../../../waku/waku_core/message/digest, - ../../../waku/waku_archive/driver/sqlite_driver, ../../../waku/waku_archive, ../waku_archive/archive_utils, - ../testlib/common, ../testlib/wakucore suite "Waku Archive - message handling": @@ -158,11 +150,7 @@ procSuite "Waku Archive - find messages": for msg in msgListA: require ( waitFor driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() @@ -392,8 +380,8 @@ procSuite "Waku Archive - find messages": ## Then check: - cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[3])) - cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[7])) + cursors[0] == some(computeMessageHash(DefaultPubsubTopic, msgListA[3])) + cursors[1] == some(computeMessageHash(DefaultPubsubTopic, msgListA[7])) cursors[2] == none(ArchiveCursor) check: @@ -426,8 +414,8 @@ procSuite "Waku Archive - find messages": ## Then check: - cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[6])) - cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[2])) + cursors[0] == some(computeMessageHash(DefaultPubsubTopic, msgListA[6])) + cursors[1] == some(computeMessageHash(DefaultPubsubTopic, msgListA[2])) cursors[2] == none(ArchiveCursor) check: @@ -458,11 +446,7 @@ procSuite "Waku Archive - find messages": for msg in msgList: require ( waitFor driver.put( - DefaultPubsubTopic, - msg, - computeDigest(msg), - computeMessageHash(DefaultPubsubTopic, msg), - msg.timestamp, + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg ) ).isOk() diff --git a/tests/waku_store/test_wakunode_store.nim b/tests/waku_store/test_wakunode_store.nim index c9806971ac..c8195608c6 100644 --- a/tests/waku_store/test_wakunode_store.nim +++ b/tests/waku_store/test_wakunode_store.nim @@ -17,10 +17,8 @@ import ../../../waku/common/paging, ../../../waku/waku_core, ../../../waku/waku_core/message/digest, - ../../../waku/waku_core/subscription, ../../../waku/node/peer_manager, ../../../waku/waku_archive, - ../../../waku/waku_archive/driver/sqlite_driver, ../../../waku/waku_filter_v2, ../../../waku/waku_filter_v2/client, ../../../waku/waku_store, @@ -60,12 +58,7 @@ procSuite "WakuNode - Store": for kv in kvs: let 
message = kv.message.get() - let msg_digest = computeDigest(message) - require ( - waitFor driver.put( - DefaultPubsubTopic, message, msg_digest, kv.messageHash, message.timestamp - ) - ).isOk() + require (waitFor driver.put(kv.messageHash, DefaultPubsubTopic, message)).isOk() driver diff --git a/tests/waku_store_legacy/test_wakunode_store.nim b/tests/waku_store_legacy/test_wakunode_store.nim index ede1f943dc..16daf7569f 100644 --- a/tests/waku_store_legacy/test_wakunode_store.nim +++ b/tests/waku_store_legacy/test_wakunode_store.nim @@ -13,21 +13,17 @@ import libp2p/protocols/pubsub/pubsub, libp2p/protocols/pubsub/gossipsub import - ../../../waku/common/databases/db_sqlite, ../../../waku/common/paging, ../../../waku/waku_core, ../../../waku/waku_core/message/digest, - ../../../waku/waku_core/subscription, ../../../waku/node/peer_manager, ../../../waku/waku_archive, - ../../../waku/waku_archive/driver/sqlite_driver, ../../../waku/waku_filter_v2, ../../../waku/waku_filter_v2/client, ../../../waku/waku_store_legacy, ../../../waku/waku_node, ../waku_store_legacy/store_utils, ../waku_archive/archive_utils, - ../testlib/common, ../testlib/wakucore, ../testlib/wakunode @@ -52,11 +48,8 @@ procSuite "WakuNode - Store Legacy": let driver = newSqliteArchiveDriver() for msg in msgListA: - let msg_digest = waku_archive.computeDigest(msg) let msg_hash = computeMessageHash(DefaultPubsubTopic, msg) - require ( - waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp) - ).isOk() + require (waitFor driver.put(msg_hash, DefaultPubsubTopic, msg)).isOk() driver diff --git a/tests/wakunode_rest/test_rest_store.nim b/tests/wakunode_rest/test_rest_store.nim index cb5c0cd87b..75e42c0d70 100644 --- a/tests/wakunode_rest/test_rest_store.nim +++ b/tests/wakunode_rest/test_rest_store.nim @@ -38,16 +38,9 @@ logScope: proc put( store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage ): Future[Result[void, string]] = - let - digest = computeDigest(message) - msgHash = computeMessageHash(pubsubTopic, message) - receivedTime = - if message.timestamp > 0: - message.timestamp - else: - getNowInNanosecondTime() + let msgHash = computeMessageHash(pubsubTopic, message) - store.put(pubsubTopic, message, digest, msgHash, receivedTime) + store.put(msgHash, pubsubTopic, message) # Creates a new WakuNode proc testWakuNode(): WakuNode = diff --git a/waku/factory/external_config.nim b/waku/factory/external_config.nim index abcc7a0f7a..c290b4e90d 100644 --- a/waku/factory/external_config.nim +++ b/waku/factory/external_config.nim @@ -319,6 +319,12 @@ type WakuNodeConf* = object desc: "Enable/disable waku store protocol", defaultValue: false, name: "store" .}: bool + legacyStore* {. + desc: "Enable/disable waku store legacy mode", + defaultValue: false, + name: "legacy-store" + .}: bool + storenode* {. 
desc: "Peer multiaddress to query for storage", defaultValue: "", diff --git a/waku/factory/node_factory.nim b/waku/factory/node_factory.nim index f3cbb7fe74..87a1901197 100644 --- a/waku/factory/node_factory.nim +++ b/waku/factory/node_factory.nim @@ -230,7 +230,7 @@ proc setupProtocols( # Archive setup let archiveDriverRes = waitFor ArchiveDriver.new( conf.storeMessageDbUrl, conf.storeMessageDbVacuum, conf.storeMessageDbMigration, - conf.storeMaxNumDbConnections, onFatalErrorAction, + conf.storeMaxNumDbConnections, onFatalErrorAction, conf.legacyStore, ) if archiveDriverRes.isErr(): return err("failed to setup archive driver: " & archiveDriverRes.error) diff --git a/waku/node/waku_node.nim b/waku/node/waku_node.nim index 7221e7d8bd..5914998522 100644 --- a/waku/node/waku_node.nim +++ b/waku/node/waku_node.nim @@ -685,13 +685,15 @@ proc mountArchive*( ## Legacy Waku Store # TODO: Review this mapping logic. Maybe, move it to the appplication code -proc toArchiveQuery(request: legacy_store_common.HistoryQuery): ArchiveQuery = - ArchiveQuery( +proc toArchiveQuery( + request: legacy_store_common.HistoryQuery +): ArchiveQueryV2 {.deprecated.} = + ArchiveQueryV2( pubsubTopic: request.pubsubTopic, contentTopics: request.contentTopics, cursor: request.cursor.map( - proc(cursor: HistoryCursor): ArchiveCursor = - ArchiveCursor( + proc(cursor: HistoryCursor): ArchiveCursorV2 = + ArchiveCursorV2( pubsubTopic: cursor.pubsubTopic, senderTime: cursor.senderTime, storeTime: cursor.storeTime, @@ -705,7 +707,9 @@ proc toArchiveQuery(request: legacy_store_common.HistoryQuery): ArchiveQuery = ) # TODO: Review this mapping logic. Maybe, move it to the appplication code -proc toHistoryResult*(res: ArchiveResult): legacy_store_common.HistoryResult = +proc toHistoryResult*( + res: ArchiveResultV2 +): legacy_store_common.HistoryResult {.deprecated.} = if res.isErr(): let error = res.error case res.error.kind @@ -719,7 +723,7 @@ proc toHistoryResult*(res: ArchiveResult): legacy_store_common.HistoryResult = HistoryResponse( messages: response.messages, cursor: response.cursor.map( - proc(cursor: ArchiveCursor): HistoryCursor = + proc(cursor: ArchiveCursorV2): HistoryCursor = HistoryCursor( pubsubTopic: cursor.pubsubTopic, senderTime: cursor.senderTime, @@ -732,7 +736,7 @@ proc toHistoryResult*(res: ArchiveResult): legacy_store_common.HistoryResult = proc mountLegacyStore*( node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit -) {.async.} = +) {.async, deprecated.} = info "mounting waku legacy store protocol" if node.wakuArchive.isNil(): @@ -763,7 +767,7 @@ proc mountLegacyStore*( node.wakuLegacyStore, protocolMatcher(legacy_store_common.WakuLegacyStoreCodec) ) -proc mountLegacyStoreClient*(node: WakuNode) = +proc mountLegacyStoreClient*(node: WakuNode) {.deprecated.} = info "mounting legacy store client" node.wakuLegacyStoreClient = @@ -772,7 +776,7 @@ proc mountLegacyStoreClient*(node: WakuNode) = proc query*( node: WakuNode, query: legacy_store_common.HistoryQuery, peer: RemotePeerInfo ): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {. - async, gcsafe + async, gcsafe, deprecated .} = ## Queries known nodes for historical messages if node.wakuLegacyStoreClient.isNil(): @@ -790,7 +794,7 @@ proc query*( proc query*( node: WakuNode, query: legacy_store_common.HistoryQuery ): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {. 
- async, gcsafe, deprecated: "Use 'node.query()' with peer destination instead" + async, gcsafe, deprecated .} = ## Queries known nodes for historical messages if node.wakuLegacyStoreClient.isNil(): @@ -807,7 +811,7 @@ when defined(waku_exp_store_resume): # TODO: Move to application module (e.g., wakunode2.nim) proc resume*( node: WakuNode, peerList: Option[seq[RemotePeerInfo]] = none(seq[RemotePeerInfo]) - ) {.async, gcsafe.} = + ) {.async, gcsafe, deprecated.} = ## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku node has been online ## for resume to work properly the waku node must have the store protocol mounted in the full mode (i.e., persisting messages) ## messages are stored in the wakuStore's messages field and in the message db @@ -838,11 +842,7 @@ proc toArchiveQuery(request: StoreQueryRequest): ArchiveQuery = query.startTime = request.startTime query.endTime = request.endTime query.hashes = request.messageHashes - - if request.paginationCursor.isSome(): - var cursor = ArchiveCursor() - cursor.hash = request.paginationCursor.get() - query.cursor = some(cursor) + query.cursor = request.paginationCursor query.direction = request.paginationForward @@ -860,6 +860,8 @@ proc toStoreResult(res: ArchiveResult): StoreQueryResult = res.statusCode = 200 res.statusDesc = "OK" + res.paginationCursor = response.cursor + for i in 0 ..< response.hashes.len: let hash = response.hashes[i] @@ -871,9 +873,6 @@ proc toStoreResult(res: ArchiveResult): StoreQueryResult = res.messages[i].message = some(response.messages[i]) res.messages[i].pubsubTopic = some(response.topics[i]) - if response.cursor.isSome(): - res.paginationCursor = some(response.cursor.get().hash) - return ok(res) proc mountStore*( diff --git a/waku/waku_archive/archive.nim b/waku/waku_archive/archive.nim index fa395c62ec..ba14e5bc34 100644 --- a/waku/waku_archive/archive.nim +++ b/waku/waku_archive/archive.nim @@ -55,6 +55,24 @@ proc validate*(msg: WakuMessage): Result[void, string] = # Ephemeral message, do not store return + let + now = getNanosecondTime(getTime().toUnixFloat()) + lowerBound = now - MaxMessageTimestampVariance + upperBound = now + MaxMessageTimestampVariance + + if msg.timestamp < lowerBound: + return err(invalidMessageOld) + + if upperBound < msg.timestamp: + return err(invalidMessageFuture) + + return ok() + +proc validateV2*(msg: WakuMessage): Result[void, string] {.deprecated.} = + if msg.ephemeral: + # Ephemeral message, do not store + return + if msg.timestamp == 0: return ok() @@ -92,6 +110,35 @@ proc handleMessage*( waku_archive_errors.inc(labelValues = [error]) return + let msgHash = computeMessageHash(pubsubTopic, msg) + + let insertStartTime = getTime().toUnixFloat() + + (await self.driver.put(msgHash, pubsubTopic, msg)).isOkOr: + waku_archive_errors.inc(labelValues = [insertFailure]) + trace "failed to insert message", + hash = msgHash.to0xHex(), + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp, + error = error + + trace "message archived", + hash = msgHash.to0xHex(), + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp + + let insertDuration = getTime().toUnixFloat() - insertStartTime + waku_archive_insert_duration_seconds.observe(insertDuration) + +proc handleMessageV2*( + self: WakuArchive, pubsubTopic: PubsubTopic, msg: WakuMessage +) {.async, deprecated.} = + self.validator(msg).isOkOr: + waku_archive_errors.inc(labelValues = [error]) + return + 
let msgDigest = computeDigest(msg) msgDigestHex = msgDigest.data.to0xHex() @@ -113,7 +160,7 @@ proc handleMessage*( let insertStartTime = getTime().toUnixFloat() - (await self.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr: + (await self.driver.putV2(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr: waku_archive_errors.inc(labelValues = [insertFailure]) error "failed to insert message", error = error @@ -133,6 +180,18 @@ proc findMessages*( ): Future[ArchiveResult] {.async, gcsafe.} = ## Search the archive to return a single page of messages matching the query criteria + if query.contentTopics.len > 10: + return err(ArchiveError.invalidQuery("too many content topics")) + + if query.cursor.isSome(): + let cursor = query.cursor.get() + + if cursor.len != 32: + return err(ArchiveError.invalidQuery("cursor hash length not 32")) + + if cursor == EmptyWakuMessageHash: + return err(ArchiveError.invalidQuery("all zeroes cursor hash")) + let maxPageSize = if query.pageSize <= 0: DefaultPageSize @@ -141,18 +200,12 @@ proc findMessages*( let isAscendingOrder = query.direction.into() - if query.contentTopics.len > 10: - return err(ArchiveError.invalidQuery("too many content topics")) - - if query.cursor.isSome() and query.cursor.get().hash.len != 32: - return err(ArchiveError.invalidQuery("invalid cursor hash length")) - let queryStartTime = getTime().toUnixFloat() let rows = ( await self.driver.getMessages( includeData = query.includeData, - contentTopic = query.contentTopics, + contentTopics = query.contentTopics, pubsubTopic = query.pubsubTopic, cursor = query.cursor, startTime = query.startTime, @@ -167,58 +220,44 @@ proc findMessages*( let queryDuration = getTime().toUnixFloat() - queryStartTime waku_archive_query_duration_seconds.observe(queryDuration) + var cursor = none(ArchiveCursor) var hashes = newSeq[WakuMessageHash]() - var messages = newSeq[WakuMessage]() var topics = newSeq[PubsubTopic]() - var cursor = none(ArchiveCursor) + var messages = newSeq[WakuMessage]() if rows.len == 0: - return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor)) + return ok(ArchiveResponse()) - ## Messages let pageSize = min(rows.len, int(maxPageSize)) - #TODO once store v2 is removed, unzip instead of 2x map - #TODO once store v2 is removed, update driver to not return messages when not needed - if query.includeData: - topics = rows[0 ..< pageSize].mapIt(it[0]) - messages = rows[0 ..< pageSize].mapIt(it[1]) + hashes = rows[0 ..< pageSize].mapIt(it[0]) - hashes = rows[0 ..< pageSize].mapIt(it[4]) + if query.includeData: + topics = rows[0 ..< pageSize].mapIt(it[1]) + messages = rows[0 ..< pageSize].mapIt(it[2]) - ## Cursor if rows.len > int(maxPageSize): ## Build last message cursor ## The cursor is built from the last message INCLUDED in the response ## (i.e. 
the second last message in the rows list) - #TODO Once Store v2 is removed keep only message and hash - let (pubsubTopic, message, digest, storeTimestamp, hash) = rows[^2] + let (hash, _, _) = rows[^2] - #TODO Once Store v2 is removed, the cursor becomes the hash of the last message - cursor = some( - ArchiveCursor( - digest: MessageDigest.fromBytes(digest), - storeTime: storeTimestamp, - sendertime: message.timestamp, - pubsubTopic: pubsubTopic, - hash: hash, - ) - ) + cursor = some(hash) - # All messages MUST be returned in chronological order + # Messages MUST be returned in chronological order if not isAscendingOrder: reverse(hashes) - reverse(messages) reverse(topics) + reverse(messages) return ok( - ArchiveResponse(hashes: hashes, messages: messages, topics: topics, cursor: cursor) + ArchiveResponse(cursor: cursor, hashes: hashes, topics: topics, messages: messages) ) proc findMessagesV2*( - self: WakuArchive, query: ArchiveQuery -): Future[ArchiveResult] {.async, deprecated, gcsafe.} = + self: WakuArchive, query: ArchiveQueryV2 +): Future[ArchiveResultV2] {.async, deprecated, gcsafe.} = ## Search the archive to return a single page of messages matching the query criteria let maxPageSize = @@ -251,10 +290,10 @@ proc findMessagesV2*( waku_archive_query_duration_seconds.observe(queryDuration) var messages = newSeq[WakuMessage]() - var cursor = none(ArchiveCursor) + var cursor = none(ArchiveCursorV2) if rows.len == 0: - return ok(ArchiveResponse(messages: messages, cursor: cursor)) + return ok(ArchiveResponseV2()) ## Messages let pageSize = min(rows.len, int(maxPageSize)) @@ -270,7 +309,7 @@ proc findMessagesV2*( let (pubsubTopic, message, digest, storeTimestamp, _) = rows[^2] cursor = some( - ArchiveCursor( + ArchiveCursorV2( digest: MessageDigest.fromBytes(digest), storeTime: storeTimestamp, sendertime: message.timestamp, @@ -282,7 +321,7 @@ proc findMessagesV2*( if not isAscendingOrder: reverse(messages) - return ok(ArchiveResponse(messages: messages, cursor: cursor)) + return ok(ArchiveResponseV2(messages: messages, cursor: cursor)) proc periodicRetentionPolicy(self: WakuArchive) {.async.} = debug "executing message retention policy" diff --git a/waku/waku_archive/common.nim b/waku/waku_archive/common.nim index 5b14fb111c..703bffc4e9 100644 --- a/waku/waku_archive/common.nim +++ b/waku/waku_archive/common.nim @@ -8,9 +8,9 @@ import ../waku_core, ../common/paging ## Waku message digest -type MessageDigest* = MDigest[256] +type MessageDigest* {.deprecated.} = MDigest[256] -proc fromBytes*(T: type MessageDigest, src: seq[byte]): T = +proc fromBytes*(T: type MessageDigest, src: seq[byte]): T {.deprecated.} = var data: array[32, byte] let byteCount = copyFrom[byte](data, src) @@ -19,7 +19,7 @@ proc fromBytes*(T: type MessageDigest, src: seq[byte]): T = return MessageDigest(data: data) -proc computeDigest*(msg: WakuMessage): MessageDigest = +proc computeDigest*(msg: WakuMessage): MessageDigest {.deprecated.} = var ctx: sha256 ctx.init() defer: @@ -34,30 +34,48 @@ proc computeDigest*(msg: WakuMessage): MessageDigest = ## Public API types type - #TODO Once Store v2 is removed, the cursor becomes the hash of the last message - ArchiveCursor* = object + ArchiveCursor* = WakuMessageHash + + ArchiveQuery* = object + includeData*: bool # indicate if messages should be returned in addition to hashes. 
+ pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + cursor*: Option[ArchiveCursor] + startTime*: Option[Timestamp] + endTime*: Option[Timestamp] + hashes*: seq[WakuMessageHash] + pageSize*: uint + direction*: PagingDirection + + ArchiveResponse* = object + cursor*: Option[ArchiveCursor] + hashes*: seq[WakuMessageHash] + topics*: seq[PubsubTopic] + messages*: seq[WakuMessage] + + ArchiveCursorV2* {.deprecated.} = object digest*: MessageDigest storeTime*: Timestamp senderTime*: Timestamp pubsubTopic*: PubsubTopic hash*: WakuMessageHash - ArchiveQuery* = object + ArchiveQueryV2* {.deprecated.} = object includeData*: bool # indicate if messages should be returned in addition to hashes. pubsubTopic*: Option[PubsubTopic] contentTopics*: seq[ContentTopic] - cursor*: Option[ArchiveCursor] + cursor*: Option[ArchiveCursorV2] startTime*: Option[Timestamp] endTime*: Option[Timestamp] hashes*: seq[WakuMessageHash] pageSize*: uint direction*: PagingDirection - ArchiveResponse* = object + ArchiveResponseV2* {.deprecated.} = object hashes*: seq[WakuMessageHash] messages*: seq[WakuMessage] topics*: seq[PubsubTopic] - cursor*: Option[ArchiveCursor] + cursor*: Option[ArchiveCursorV2] ArchiveErrorKind* {.pure.} = enum UNKNOWN = uint32(0) @@ -74,6 +92,8 @@ type ArchiveResult* = Result[ArchiveResponse, ArchiveError] + ArchiveResultV2* {.deprecated.} = Result[ArchiveResponseV2, ArchiveError] + proc `$`*(err: ArchiveError): string = case err.kind of ArchiveErrorKind.DRIVER_ERROR: diff --git a/waku/waku_archive/driver.nim b/waku/waku_archive/driver.nim index 235c4b8a7c..f2c0a3a3f1 100644 --- a/waku/waku_archive/driver.nim +++ b/waku/waku_archive/driver.nim @@ -12,18 +12,19 @@ type ArchiveDriverResult*[T] = Result[T, string] ArchiveDriver* = ref object of RootObj -#TODO Once Store v2 is removed keep only messages and hashes -type ArchiveRow* = (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash) +type + ArchiveRow* = (WakuMessageHash, PubsubTopic, WakuMessage) + + ArchiveRowV2* {.deprecated.} = + (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash) # ArchiveDriver interface method put*( driver: ArchiveDriver, + messageHash: WakuMessageHash, pubsubTopic: PubsubTopic, message: WakuMessage, - digest: MessageDigest, - messageHash: WakuMessageHash, - receivedTime: Timestamp, ): Future[ArchiveDriverResult[void]] {.base, async.} = discard @@ -32,30 +33,45 @@ method getAllMessages*( ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = discard -method getMessagesV2*( +method getMessages*( driver: ArchiveDriver, - contentTopic = newSeq[ContentTopic](0), + includeData = true, + contentTopics = newSeq[ContentTopic](0), pubsubTopic = none(PubsubTopic), cursor = none(ArchiveCursor), startTime = none(Timestamp), endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), maxPageSize = DefaultPageSize, ascendingOrder = true, -): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, deprecated, async.} = +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = discard -method getMessages*( +method putV2*( + driver: ArchiveDriver, + pubsubTopic: PubsubTopic, + message: WakuMessage, + digest: MessageDigest, + messageHash: WakuMessageHash, + receivedTime: Timestamp, +): Future[ArchiveDriverResult[void]] {.base, deprecated, async.} = + discard + +method getAllMessagesV2*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.base, deprecated, async.} = + discard + +method getMessagesV2*( driver: ArchiveDriver, - includeData = false, 
contentTopic = newSeq[ContentTopic](0), pubsubTopic = none(PubsubTopic), - cursor = none(ArchiveCursor), + cursor = none(ArchiveCursorV2), startTime = none(Timestamp), endTime = none(Timestamp), - hashes = newSeq[WakuMessageHash](0), maxPageSize = DefaultPageSize, ascendingOrder = true, -): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.base, deprecated, async.} = discard method getMessagesCount*( diff --git a/waku/waku_archive/driver/builder.nim b/waku/waku_archive/driver/builder.nim index 1768774d24..44cfc892aa 100644 --- a/waku/waku_archive/driver/builder.nim +++ b/waku/waku_archive/driver/builder.nim @@ -28,6 +28,7 @@ proc new*( migrate: bool, maxNumConn: int, onFatalErrorAction: OnFatalErrorHandler, + legacy: bool = false, ): Future[Result[T, string]] {.async.} = ## url - string that defines the database ## vacuum - if true, a cleanup operation will be applied to the database @@ -78,48 +79,98 @@ proc new*( return err("error in migrate sqlite: " & $migrateRes.error) debug "setting up sqlite waku archive driver" - let res = SqliteDriver.new(db) - if res.isErr(): - return err("failed to init sqlite archive driver: " & res.error) - return ok(res.get()) - of "postgres": - when defined(postgres): - let res = PostgresDriver.new( - dbUrl = url, - maxConnections = maxNumConn, - onFatalErrorAction = onFatalErrorAction, - ) - if res.isErr(): - return err("failed to init postgres archive driver: " & res.error) - - let driver = res.get() + if legacy: + let res = LegacySqliteDriver.new(db) - # Database migration - if migrate: - let migrateRes = await archive_postgres_driver_migrations.migrate(driver) - if migrateRes.isErr(): - return err("ArchiveDriver build failed in migration: " & $migrateRes.error) - - ## This should be started once we make sure the 'messages' table exists - ## Hence, this should be run after the migration is completed. - asyncSpawn driver.startPartitionFactory(onFatalErrorAction) + if res.isErr(): + return err("failed to init sqlite archive driver: " & res.error) - info "waiting for a partition to be created" - for i in 0 ..< 100: - if driver.containsAnyPartition(): - break - await sleepAsync(chronos.milliseconds(100)) + return ok(res.get()) + else: + let res = SqliteDriver.new(db) - if not driver.containsAnyPartition(): - onFatalErrorAction("a partition could not be created") + if res.isErr(): + return err("failed to init sqlite archive driver: " & res.error) - return ok(driver) + return ok(res.get()) + of "postgres": + when defined(postgres): + if legacy: + let res = LegacyPostgresDriver.new( + dbUrl = url, + maxConnections = maxNumConn, + onFatalErrorAction = onFatalErrorAction, + ) + if res.isErr(): + return err("failed to init postgres archive driver: " & res.error) + + let driver = res.get() + + # Database migration + if migrate: + let migrateRes = await archive_postgres_driver_migrations.migrate(driver) + if migrateRes.isErr(): + return err("ArchiveDriver build failed in migration: " & $migrateRes.error) + + ## This should be started once we make sure the 'messages' table exists + ## Hence, this should be run after the migration is completed. 
+ asyncSpawn driver.startPartitionFactory(onFatalErrorAction) + + info "waiting for a partition to be created" + for i in 0 ..< 100: + if driver.containsAnyPartition(): + break + await sleepAsync(chronos.milliseconds(100)) + + if not driver.containsAnyPartition(): + onFatalErrorAction("a partition could not be created") + + return ok(driver) + else: + let res = PostgresDriver.new( + dbUrl = url, + maxConnections = maxNumConn, + onFatalErrorAction = onFatalErrorAction, + ) + if res.isErr(): + return err("failed to init postgres archive driver: " & res.error) + + let driver = res.get() + + # Database migration + if migrate: + let migrateRes = await archive_postgres_driver_migrations.migrate(driver) + if migrateRes.isErr(): + return err("ArchiveDriver build failed in migration: " & $migrateRes.error) + + ## This should be started once we make sure the 'messages' table exists + ## Hence, this should be run after the migration is completed. + asyncSpawn driver.startPartitionFactory(onFatalErrorAction) + + info "waiting for a partition to be created" + for i in 0 ..< 100: + if driver.containsAnyPartition(): + break + await sleepAsync(chronos.milliseconds(100)) + + if not driver.containsAnyPartition(): + onFatalErrorAction("a partition could not be created") + + return ok(driver) else: return err( "Postgres has been configured but not been compiled. Check compiler definitions." ) else: debug "setting up in-memory waku archive driver" - let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages - return ok(driver) + # Defaults to a capacity of 25.000 messages + + if legacy: + let driver = LegacyQueueDriver.new() + + return ok(driver) + else: + let driver = QueueDriver.new() + + return ok(driver) diff --git a/waku/waku_archive/driver/postgres_driver.nim b/waku/waku_archive/driver/postgres_driver.nim index a106eb2c40..ca5c9b8854 100644 --- a/waku/waku_archive/driver/postgres_driver.nim +++ b/waku/waku_archive/driver/postgres_driver.nim @@ -5,7 +5,8 @@ else: import ./postgres_driver/postgres_driver, + ./postgres_driver/postgres_driver_legacy, ./postgres_driver/partitions_manager, ./postgres_driver/postgres_healthcheck -export postgres_driver, partitions_manager, postgres_healthcheck +export postgres_driver, postgres_driver_legacy, partitions_manager, postgres_healthcheck diff --git a/waku/waku_archive/driver/postgres_driver/migrations.nim b/waku/waku_archive/driver/postgres_driver/migrations.nim index 254decd98e..3f1bf688ef 100644 --- a/waku/waku_archive/driver/postgres_driver/migrations.nim +++ b/waku/waku_archive/driver/postgres_driver/migrations.nim @@ -87,3 +87,38 @@ proc migrate*( debug "finished message store's postgres database migration" return ok() + +proc migrate*( + driver: LegacyPostgresDriver, targetVersion = SchemaVersion +): Future[DatabaseResult[void]] {.async.} = + debug "starting message store's postgres database migration" + + let currentVersion = (await driver.getCurrentVersion()).valueOr: + return err("migrate error could not retrieve current version: " & $error) + + if currentVersion == targetVersion: + debug "database schema is up to date", + currentVersion = currentVersion, targetVersion = targetVersion + return ok() + + info "database schema is outdated", + currentVersion = currentVersion, targetVersion = targetVersion + + # Load migration scripts + let scripts = pg_migration_manager.getMigrationScripts(currentVersion, targetVersion) + + # Run the migration scripts + for script in scripts: + for statement in script.breakIntoStatements(): + debug "executing 
migration statement", statement = statement + + (await driver.performWriteQuery(statement)).isOkOr: + error "failed to execute migration statement", + statement = statement, error = error + return err("failed to execute migration statement") + + debug "migration statement executed succesfully", statement = statement + + debug "finished message store's postgres database migration" + + return ok() diff --git a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim index efd43ba4ca..41dcc6af10 100644 --- a/waku/waku_archive/driver/postgres_driver/postgres_driver.nim +++ b/waku/waku_archive/driver/postgres_driver/postgres_driver.nim @@ -29,93 +29,107 @@ type PostgresDriver* = ref object of ArchiveDriver futLoopPartitionFactory: Future[void] const InsertRowStmtName = "InsertRow" -const InsertRowStmtDefinition = # TODO: get the sql queries from a file - """INSERT INTO messages (id, messageHash, storedAt, contentTopic, payload, pubsubTopic, - version, timestamp, meta) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, CASE WHEN $9 = '' THEN NULL ELSE $9 END) ON CONFLICT DO NOTHING;""" +const InsertRowStmtDefinition = + """INSERT INTO messages (messageHash, pubsubTopic, contentTopic, payload, + version, timestamp, meta) VALUES ($1, $2, $3, $4, $5, $6, CASE WHEN $7 = '' THEN NULL ELSE $7 END) ON CONFLICT DO NOTHING;""" const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc" const SelectNoCursorAscStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + """SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta + FROM messages WHERE contentTopic IN ($1) AND messageHash IN ($2) AND pubsubTopic = $3 AND - storedAt >= $4 AND - storedAt <= $5 - ORDER BY storedAt ASC, messageHash ASC LIMIT $6;""" + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp ASC, messageHash ASC LIMIT $6;""" + +const SelectNoCursorNoDataAscStmtName = "SelectWithoutCursorAndDataAsc" +const SelectNoCursorNoDataAscStmtDef = + """SELECT messageHash + FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp ASC, messageHash ASC LIMIT $6;""" const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc" const SelectNoCursorDescStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + """SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta + FROM messages WHERE contentTopic IN ($1) AND messageHash IN ($2) AND pubsubTopic = $3 AND - storedAt >= $4 AND - storedAt <= $5 - ORDER BY storedAt DESC, messageHash DESC LIMIT $6;""" + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp DESC, messageHash DESC LIMIT $6;""" + +const SelectNoCursorNoDataDescStmtName = "SelectWithoutCursorAndDataDesc" +const SelectNoCursorNoDataDescStmtDef = + """SELECT messageHash + FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp DESC, messageHash DESC LIMIT $6;""" const SelectWithCursorDescStmtName = "SelectWithCursorDesc" const SelectWithCursorDescStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + """SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta + FROM messages WHERE contentTopic IN ($1) AND messageHash IN 
($2) AND pubsubTopic = $3 AND - (storedAt, messageHash) < ($4,$5) AND - storedAt >= $6 AND - storedAt <= $7 - ORDER BY storedAt DESC, messageHash DESC LIMIT $8;""" + (timestamp, messageHash) < ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp DESC, messageHash DESC LIMIT $8;""" + +const SelectWithCursorNoDataDescStmtName = "SelectWithCursorNoDataDesc" +const SelectWithCursorNoDataDescStmtDef = + """SELECT messageHash + FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (timestamp, messageHash) < ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp DESC, messageHash DESC LIMIT $8;""" const SelectWithCursorAscStmtName = "SelectWithCursorAsc" const SelectWithCursorAscStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + """SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta + FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (timestamp, messageHash) > ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp ASC, messageHash ASC LIMIT $8;""" + +const SelectWithCursorNoDataAscStmtName = "SelectWithCursorNoDataAsc" +const SelectWithCursorNoDataAscStmtDef = + """SELECT messageHash + FROM messages WHERE contentTopic IN ($1) AND messageHash IN ($2) AND pubsubTopic = $3 AND - (storedAt, messageHash) > ($4,$5) AND - storedAt >= $6 AND - storedAt <= $7 - ORDER BY storedAt ASC, messageHash ASC LIMIT $8;""" + (timestamp, messageHash) > ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp ASC, messageHash ASC LIMIT $8;""" const SelectMessageByHashName = "SelectMessageByHash" const SelectMessageByHashDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages WHERE messageHash = $1""" - -const SelectNoCursorV2AscStmtName = "SelectWithoutCursorV2Asc" -const SelectNoCursorV2AscStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages - WHERE contentTopic IN ($1) AND - pubsubTopic = $2 AND - storedAt >= $3 AND - storedAt <= $4 - ORDER BY storedAt ASC LIMIT $5;""" - -const SelectNoCursorV2DescStmtName = "SelectWithoutCursorV2Desc" -const SelectNoCursorV2DescStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages - WHERE contentTopic IN ($1) AND - pubsubTopic = $2 AND - storedAt >= $3 AND - storedAt <= $4 - ORDER BY storedAt DESC LIMIT $5;""" - -const SelectWithCursorV2DescStmtName = "SelectWithCursorV2Desc" -const SelectWithCursorV2DescStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages - WHERE contentTopic IN ($1) AND - pubsubTopic = $2 AND - (storedAt, id) < ($3,$4) AND - storedAt >= $5 AND - storedAt <= $6 - ORDER BY storedAt DESC LIMIT $7;""" - -const SelectWithCursorV2AscStmtName = "SelectWithCursorV2Asc" -const SelectWithCursorV2AscStmtDef = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages - WHERE contentTopic IN ($1) AND - pubsubTopic = $2 AND - (storedAt, id) > ($3,$4) AND - storedAt >= $5 AND - storedAt <= $6 - ORDER BY storedAt ASC LIMIT $7;""" + """SELECT timestamp + FROM messages + WHERE messageHash = $1""" const DefaultMaxNumConns = 50 @@ -154,9 +168,47 @@ proc reset*(s: PostgresDriver): 
Future[ArchiveDriverResult[void]] {.async.} =
   let ret = await s.decreaseDatabaseSize(targetSize, forceRemoval)
   return ret
 
+proc timeCursorCallbackImpl(pqResult: ptr PGresult, timeCursor: var Option[Timestamp]) =
+  let numFields = pqResult.pqnfields()
+  if numFields != 1:
+    error "Wrong number of fields"
+    return
+
+  let catchable = catch:
+    parseInt($(pqgetvalue(pqResult, 0, 0)))
+
+  if catchable.isErr():
+    error "could not parse correctly", error = catchable.error.msg
+    return
+
+  let timestamp: Timestamp = catchable.get()
+
+  timeCursor = some(timestamp)
+
+proc hashCallbackImpl(
+    pqResult: ptr PGresult, rows: var seq[(WakuMessageHash, PubsubTopic, WakuMessage)]
+) =
+  let numFields = pqResult.pqnfields()
+  if numFields != 1:
+    error "Wrong number of fields"
+    return
+
+  for iRow in 0 ..< pqResult.pqNtuples():
+    let catchable = catch:
+      parseHexStr($(pqgetvalue(pqResult, iRow, 0)))
+
+    if catchable.isErr():
+      error "could not parse correctly", error = catchable.error.msg
+      return
+
+    let hashHex = catchable.get()
+    let msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31))
+
+    rows.add((msgHash, "", WakuMessage()))
+
 proc rowCallbackImpl(
     pqResult: ptr PGresult,
-    outRows: var seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)],
+    outRows: var seq[(WakuMessageHash, PubsubTopic, WakuMessage)],
 ) =
   ## Proc aimed to contain the logic of the callback passed to the `psasyncpool`.
   ## That callback is used in "SELECT" queries.
   ##
   ## pqResult - contains the query results
   ## outRows - seq of Store-rows. This is populated from the info contained in pqResult
 
   let numFields = pqResult.pqnfields()
-  if numFields != 9:
+  if numFields != 7:
     error "Wrong number of fields"
     return
 
   for iRow in 0 ..< pqResult.pqNtuples():
-    var wakuMessage: WakuMessage
-    var timestamp: Timestamp
-    var version: uint
-    var pubSubTopic: string
-    var contentTopic: string
-    var storedAt: int64
-    var digest: string
-    var payload: string
-    var hashHex: string
-    var msgHash: WakuMessageHash
-    var meta: string
+    var
+      hashHex: string
+      msgHash: WakuMessageHash
+
+      pubSubTopic: string
+
+      contentTopic: string
+      payload: string
+      version: uint
+      timestamp: Timestamp
+      meta: string
+      wakuMessage: WakuMessage
 
     try:
-      storedAt = parseInt($(pqgetvalue(pqResult, iRow, 0)))
-      contentTopic = $(pqgetvalue(pqResult, iRow, 1))
-      payload = parseHexStr($(pqgetvalue(pqResult, iRow, 2)))
-      pubSubTopic = $(pqgetvalue(pqResult, iRow, 3))
-      version = parseUInt($(pqgetvalue(pqResult, iRow, 4)))
-      timestamp = parseInt($(pqgetvalue(pqResult, iRow, 5)))
-      digest = parseHexStr($(pqgetvalue(pqResult, iRow, 6)))
-      hashHex = parseHexStr($(pqgetvalue(pqResult, iRow, 7)))
-      meta = parseHexStr($(pqgetvalue(pqResult, iRow, 8)))
+      hashHex = parseHexStr($(pqgetvalue(pqResult, iRow, 0)))
       msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31))
+
+      pubSubTopic = $(pqgetvalue(pqResult, iRow, 1))
+
+      contentTopic = $(pqgetvalue(pqResult, iRow, 2))
+      payload = parseHexStr($(pqgetvalue(pqResult, iRow, 3)))
+      version = parseUInt($(pqgetvalue(pqResult, iRow, 4)))
+      timestamp = parseInt($(pqgetvalue(pqResult, iRow, 5)))
+      meta = parseHexStr($(pqgetvalue(pqResult, iRow, 6)))
     except ValueError:
       error "could not parse correctly", error = getCurrentExceptionMsg()
 
-    wakuMessage.timestamp = timestamp
-    wakuMessage.version = uint32(version)
     wakuMessage.contentTopic = contentTopic
     wakuMessage.payload = @(payload.toOpenArrayByte(0, payload.high))
+    wakuMessage.version = uint32(version)
+    wakuMessage.timestamp = timestamp
     wakuMessage.meta = @(meta.toOpenArrayByte(0,
meta.high)) - outRows.add( - ( - pubSubTopic, - wakuMessage, - @(digest.toOpenArrayByte(0, digest.high)), - storedAt, - msgHash, - ) - ) + outRows.add((msgHash, pubSubTopic, wakuMessage)) method put*( s: PostgresDriver, + messageHash: WakuMessageHash, pubsubTopic: PubsubTopic, message: WakuMessage, - digest: MessageDigest, - messageHash: WakuMessageHash, - receivedTime: Timestamp, ): Future[ArchiveDriverResult[void]] {.async.} = - let digest = toHex(digest.data) let messageHash = toHex(messageHash) - let rxTime = $receivedTime + let contentTopic = message.contentTopic let payload = toHex(message.payload) let version = $message.version @@ -234,32 +276,17 @@ method put*( return await s.writeConnPool.runStmt( InsertRowStmtName, InsertRowStmtDefinition, + @[messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta], @[ - digest, messageHash, rxTime, contentTopic, payload, pubsubTopic, version, - timestamp, meta, - ], - @[ - int32(digest.len), int32(messageHash.len), - int32(rxTime.len), + int32(pubsubTopic.len), int32(contentTopic.len), int32(payload.len), - int32(pubsubTopic.len), int32(version.len), int32(timestamp.len), int32(meta.len), ], - @[ - int32(0), - int32(0), - int32(0), - int32(0), - int32(0), - int32(0), - int32(0), - int32(0), - int32(0), - ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], ) method getAllMessages*( @@ -267,15 +294,15 @@ method getAllMessages*( ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = ## Retrieve all messages from the store. - var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] proc rowCallback(pqResult: ptr PGresult) = rowCallbackImpl(pqResult, rows) ( await s.readConnPool.pgQuery( - """SELECT storedAt, contentTopic, - payload, pubsubTopic, version, timestamp, - id, messageHash, meta FROM messages ORDER BY storedAt ASC""", + """SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta + FROM messages + ORDER BY timestamp ASC, messageHash ASC""", newSeq[string](0), rowCallback, ) @@ -316,9 +343,28 @@ proc getPartitionsList( return ok(partitions) +proc getTimeCursor( + s: PostgresDriver, hashHex: string +): Future[ArchiveDriverResult[Option[Timestamp]]] {.async.} = + var timeCursor: Option[Timestamp] + + proc cursorCallback(pqResult: ptr PGresult) = + timeCursorCallbackImpl(pqResult, timeCursor) + + ?await s.readConnPool.runStmt( + SelectMessageByHashName, + SelectMessageByHashDef, + @[hashHex], + @[int32(hashHex.len)], + @[int32(0)], + cursorCallback, + ) + + return ok(timeCursor) + proc getMessagesArbitraryQuery( s: PostgresDriver, - contentTopic: seq[ContentTopic] = @[], + contentTopics: seq[ContentTopic] = @[], pubsubTopic = none(PubsubTopic), cursor = none(ArchiveCursor), startTime = none(Timestamp), @@ -330,14 +376,29 @@ proc getMessagesArbitraryQuery( ## This proc allows to handle atypical queries. We don't use prepared statements for those. 
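
One step worth spelling out before the query-building body below: the hash-only cursor works because rows are totally ordered by (timestamp, messageHash). A self-contained sanity sketch of the comparison that the SQL tuple predicate `(timestamp, messageHash) > ($4,$5)` encodes; the local type aliases and the `isAfterCursor` helper are illustrative, not part of the diff.

  type
    Timestamp = int64
    WakuMessageHash = array[32, byte]

  proc isAfterCursor(
      rowTime: Timestamp, rowHash: WakuMessageHash,
      curTime: Timestamp, curHash: WakuMessageHash,
  ): bool =
    # Lexicographic order on the composite key: timestamp first,
    # message-hash bytes as the tie-breaker.
    if rowTime != curTime:
      return rowTime > curTime
    for i in 0 ..< rowHash.len:
      if rowHash[i] != curHash[i]:
        return rowHash[i] > curHash[i]
    false

Descending pagination simply flips the comparison, matching the `<` variants of the statements above.
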
var query =
-    """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages"""
+    """SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta
+    FROM messages"""
   var statements: seq[string]
   var args: seq[string]
 
-  if contentTopic.len > 0:
-    let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")"
+  if cursor.isSome():
+    let hashHex = toHex(cursor.get())
+
+    let timeCursor = ?await s.getTimeCursor(hashHex)
+
+    if timeCursor.isNone():
+      return err("cursor not found")
+
+    let comp = if ascendingOrder: ">" else: "<"
+    statements.add("(timestamp, messageHash) " & comp & " (?,?)")
+
+    args.add($timeCursor.get())
+    args.add(hashHex)
+
+  if contentTopics.len > 0:
+    let cstmt = "contentTopic IN (" & "?".repeat(contentTopics.len).join(",") & ")"
     statements.add(cstmt)
-    for t in contentTopic:
+    for t in contentTopics:
       args.add(t)
 
   if hexHashes.len > 0:
@@ -350,41 +411,12 @@ proc getMessagesArbitraryQuery(
     statements.add("pubsubTopic = ?")
     args.add(pubsubTopic.get())
 
-  if cursor.isSome():
-    let hashHex = toHex(cursor.get().hash)
-
-    var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
-    proc entreeCallback(pqResult: ptr PGresult) =
-      rowCallbackImpl(pqResult, entree)
-
-    (
-      await s.readConnPool.runStmt(
-        SelectMessageByHashName,
-        SelectMessageByHashDef,
-        @[hashHex],
-        @[int32(hashHex.len)],
-        @[int32(0)],
-        entreeCallback,
-      )
-    ).isOkOr:
-      return err("failed to run query with cursor: " & $error)
-
-    if entree.len == 0:
-      return ok(entree)
-
-    let storetime = entree[0][3]
-
-    let comp = if ascendingOrder: ">" else: "<"
-    statements.add("(storedAt, messageHash) " & comp & " (?,?)")
-    args.add($storetime)
-    args.add(hashHex)
-
   if startTime.isSome():
-    statements.add("storedAt >= ?")
+    statements.add("timestamp >= ?")
     args.add($startTime.get())
 
   if endTime.isSome():
-    statements.add("storedAt <= ?")
+    statements.add("timestamp <= ?")
     args.add($endTime.get())
 
   if statements.len > 0:
@@ -396,12 +428,12 @@ proc getMessagesArbitraryQuery(
   else:
     direction = "DESC"
 
-  query &= " ORDER BY storedAt " & direction & ", messageHash " & direction
+  query &= " ORDER BY timestamp " & direction & ", messageHash " & direction
 
   query &= " LIMIT ?"
   args.add($maxPageSize)
 
-  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
+  var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)]
   proc rowCallback(pqResult: ptr PGresult) =
     rowCallbackImpl(pqResult, rows)
 
@@ -410,45 +442,61 @@ proc getMessagesArbitraryQuery(
 
   return ok(rows)
 
-proc getMessagesV2ArbitraryQuery(
+proc getMessageHashesArbitraryQuery(
   s: PostgresDriver,
-  contentTopic: seq[ContentTopic] = @[],
+  contentTopics: seq[ContentTopic] = @[],
   pubsubTopic = none(PubsubTopic),
   cursor = none(ArchiveCursor),
   startTime = none(Timestamp),
   endTime = none(Timestamp),
+  hexHashes: seq[string] = @[],
   maxPageSize = DefaultPageSize,
   ascendingOrder = true,
-): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
+): Future[ArchiveDriverResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]]] {.
+  async
+.} =
   ## This proc allows to handle atypical queries. We don't use prepared statements for those.
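
Presumably this hash-only variant is what `includeData = false` queries resolve to; a hedged call sketch through the public driver interface, before the body that follows (the enclosing module context and a constructed `driver` are assumed):

  proc fetchRecentHashes(
      driver: ArchiveDriver, topic: ContentTopic
  ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
    # With includeData = false only the hash field of each ArchiveRow is
    # meaningful; hashCallbackImpl fills the topic and message with empty values.
    return await driver.getMessages(
      includeData = false,
      contentTopics = @[topic],
      maxPageSize = 25,
      ascendingOrder = false,
    )
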
- var query = - """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages""" + var query = """SELECT messageHash FROM messages""" var statements: seq[string] var args: seq[string] - if contentTopic.len > 0: - let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")" + if cursor.isSome(): + let hashHex = toHex(cursor.get()) + + let timeCursor = ?await s.getTimeCursor(hashHex) + + if timeCursor.isNone(): + return err("cursor not found") + + let comp = if ascendingOrder: ">" else: "<" + statements.add("(timestamp, messageHash) " & comp & " (?,?)") + + args.add($timeCursor.get()) + args.add(hashHex) + + if contentTopics.len > 0: + let cstmt = "contentTopic IN (" & "?".repeat(contentTopics.len).join(",") & ")" statements.add(cstmt) - for t in contentTopic: + for t in contentTopics: + args.add(t) + + if hexHashes.len > 0: + let cstmt = "messageHash IN (" & "?".repeat(hexHashes.len).join(",") & ")" + statements.add(cstmt) + for t in hexHashes: args.add(t) if pubsubTopic.isSome(): statements.add("pubsubTopic = ?") args.add(pubsubTopic.get()) - if cursor.isSome(): - let comp = if ascendingOrder: ">" else: "<" - statements.add("(storedAt, id) " & comp & " (?,?)") - args.add($cursor.get().storeTime) - args.add(toHex(cursor.get().digest.data)) - if startTime.isSome(): - statements.add("storedAt >= ?") + statements.add("timestamp >= ?") args.add($startTime.get()) if endTime.isSome(): - statements.add("storedAt <= ?") + statements.add("timestamp <= ?") args.add($endTime.get()) if statements.len > 0: @@ -460,14 +508,14 @@ proc getMessagesV2ArbitraryQuery( else: direction = "DESC" - query &= " ORDER BY storedAt " & direction & ", id " & direction + query &= " ORDER BY timestamp " & direction & ", messageHash " & direction query &= " LIMIT ?" args.add($maxPageSize) - var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] proc rowCallback(pqResult: ptr PGresult) = - rowCallbackImpl(pqResult, rows) + hashCallbackImpl(pqResult, rows) (await s.readConnPool.pgQuery(query, args, rowCallback)).isOkOr: return err("failed to run query: " & $error) @@ -485,12 +533,11 @@ proc getMessagesPreparedStmt( maxPageSize = DefaultPageSize, ascOrder = true, ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = - ## This proc aims to run the most typical queries in a more performant way, i.e. by means of - ## prepared statements. - ## - ## contentTopic - string with list of conten topics. e.g: "'ctopic1','ctopic2','ctopic3'" + ## This proc aims to run the most typical queries in a more performant way, + ## i.e. by means of prepared statements. 
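
For readers unfamiliar with the `runStmt` plumbing used throughout this proc, a reduced sketch of its call shape (statement names as defined above; the in-scope variables are assumed): parameter values, their byte lengths, and their formats travel as parallel seqs, with 0 marking text format.

  (
    await s.readConnPool.runStmt(
      SelectNoCursorAscStmtName,
      SelectNoCursorAscStmtDef,
      @[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit],
      @[
        int32(contentTopic.len), int32(hashes.len), int32(pubsubTopic.len),
        int32(startTimeStr.len), int32(endTimeStr.len), int32(limit.len),
      ],
      @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], # all text format
      rowCallback,
    )
  ).isOkOr:
    return err("prepared select failed: " & $error)
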
+
+  var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)]
 
-  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
   proc rowCallback(pqResult: ptr PGresult) =
     rowCallbackImpl(pqResult, rows)
 
@@ -498,59 +545,7 @@ proc getMessagesPreparedStmt(
   let endTimeStr = $endTime
   let limit = $maxPageSize
 
-  if cursor.isSome():
-    let hash = toHex(cursor.get().hash)
-
-    var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
-
-    proc entreeCallback(pqResult: ptr PGresult) =
-      rowCallbackImpl(pqResult, entree)
-
-    (
-      await s.readConnPool.runStmt(
-        SelectMessageByHashName,
-        SelectMessageByHashDef,
-        @[hash],
-        @[int32(hash.len)],
-        @[int32(0)],
-        entreeCallback,
-      )
-    ).isOkOr:
-      return err("failed to run query with cursor: " & $error)
-
-    if entree.len == 0:
-      return ok(entree)
-
-    let storeTime = $entree[0][3]
-
-    var stmtName =
-      if ascOrder: SelectWithCursorAscStmtName else: SelectWithCursorDescStmtName
-    var stmtDef =
-      if ascOrder: SelectWithCursorAscStmtDef else: SelectWithCursorDescStmtDef
-
-    (
-      await s.readConnPool.runStmt(
-        stmtName,
-        stmtDef,
-        @[
-          contentTopic, hashes, pubsubTopic, storeTime, hash, startTimeStr, endTimeStr,
-          limit,
-        ],
-        @[
-          int32(contentTopic.len),
-          int32(pubsubTopic.len),
-          int32(storeTime.len),
-          int32(hash.len),
-          int32(startTimeStr.len),
-          int32(endTimeStr.len),
-          int32(limit.len),
-        ],
-        @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)],
-        rowCallback,
-      )
-    ).isOkOr:
-      return err("failed to run query with cursor: " & $error)
-  else:
+  if cursor.isNone():
     var stmtName =
       if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName
     var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef
@@ -571,91 +566,147 @@ proc getMessagesPreparedStmt(
         rowCallback,
       )
     ).isOkOr:
-      return err("failed to run query without cursor: " & $error)
+      return err(stmtName & ": " & $error)
+
+    return ok(rows)
+
+  let hashHex = toHex(cursor.get())
+
+  let timeCursor = ?await s.getTimeCursor(hashHex)
+
+  if timeCursor.isNone():
+    return err("cursor not found")
+
+  let timeString = $timeCursor.get()
+
+  var stmtName =
+    if ascOrder: SelectWithCursorAscStmtName else: SelectWithCursorDescStmtName
+  var stmtDef =
+    if ascOrder: SelectWithCursorAscStmtDef else: SelectWithCursorDescStmtDef
+
+  (
+    await s.readConnPool.runStmt(
+      stmtName,
+      stmtDef,
+      @[
+        contentTopic, hashes, pubsubTopic, timeString, hashHex, startTimeStr,
+        endTimeStr, limit,
+      ],
+      @[
+        int32(contentTopic.len),
+        int32(hashes.len),
+        int32(pubsubTopic.len),
+        int32(timeString.len),
+        int32(hashHex.len),
+        int32(startTimeStr.len),
+        int32(endTimeStr.len),
+        int32(limit.len),
+      ],
+      @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)],
+      rowCallback,
+    )
+  ).isOkOr:
+    return err(stmtName & ": " & $error)
 
   return ok(rows)
 
-proc getMessagesV2PreparedStmt(
+proc getMessageHashesPreparedStmt(
   s: PostgresDriver,
   contentTopic: string,
   pubsubTopic: PubsubTopic,
   cursor = none(ArchiveCursor),
   startTime: Timestamp,
   endTime: Timestamp,
+  hashes: string,
   maxPageSize = DefaultPageSize,
   ascOrder = true,
-): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
-  ## This proc aims to run the most typical queries in a more performant way, i.e. by means of
-  ## prepared statements.
-  ##
-  ## contentTopic - string with list of conten topics. e.g: "'ctopic1','ctopic2','ctopic3'"
e.g: "'ctopic1','ctopic2','ctopic3'" +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## This proc aims to run the most typical queries in a more performant way, + ## i.e. by means of prepared statements. + + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] - var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] proc rowCallback(pqResult: ptr PGresult) = - rowCallbackImpl(pqResult, rows) + hashCallbackImpl(pqResult, rows) let startTimeStr = $startTime let endTimeStr = $endTime let limit = $maxPageSize - if cursor.isSome(): + if cursor.isNone(): var stmtName = - if ascOrder: SelectWithCursorV2AscStmtName else: SelectWithCursorV2DescStmtName + if ascOrder: SelectNoCursorNoDataAscStmtName else: SelectNoCursorNoDataDescStmtName var stmtDef = - if ascOrder: SelectWithCursorV2AscStmtDef else: SelectWithCursorV2DescStmtDef - - let digest = toHex(cursor.get().digest.data) - let storeTime = $cursor.get().storeTime + if ascOrder: SelectNoCursorNoDataAscStmtDef else: SelectNoCursorNoDataDescStmtDef ( await s.readConnPool.runStmt( stmtName, stmtDef, - @[contentTopic, pubsubTopic, storeTime, digest, startTimeStr, endTimeStr, limit], + @[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit], @[ int32(contentTopic.len), + int32(hashes.len), int32(pubsubTopic.len), - int32(storeTime.len), - int32(digest.len), int32(startTimeStr.len), int32(endTimeStr.len), int32(limit.len), ], - @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], rowCallback, ) ).isOkOr: - return err("failed to run query with cursor: " & $error) - else: - var stmtName = - if ascOrder: SelectNoCursorV2AscStmtName else: SelectNoCursorV2DescStmtName - var stmtDef = - if ascOrder: SelectNoCursorV2AscStmtDef else: SelectNoCursorV2DescStmtDef + return err(stmtName & ": " & $error) - ( - await s.readConnPool.runStmt( - stmtName, - stmtDef, - @[contentTopic, pubsubTopic, startTimeStr, endTimeStr, limit], - @[ - int32(contentTopic.len), - int32(pubsubTopic.len), - int32(startTimeStr.len), - int32(endTimeStr.len), - int32(limit.len), - ], - @[int32(0), int32(0), int32(0), int32(0), int32(0)], - rowCallback, - ) - ).isOkOr: - return err("failed to run query without cursor: " & $error) + return ok(rows) + + let hashHex = toHex(cursor.get()) + + let timeCursor = ?await s.getTimeCursor(hashHex) + + if timeCursor.isNone(): + return err("cursor not found") + + let timeString = $timeCursor.get() + + var stmtName = + if ascOrder: + SelectWithCursorNoDataAscStmtName + else: + SelectWithCursorNoDataDescStmtName + var stmtDef = + if ascOrder: SelectWithCursorNoDataAscStmtDef else: SelectWithCursorNoDataDescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[ + contentTopic, hashes, pubsubTopic, hashHex, timeString, startTimeStr, + endTimeStr, limit, + ], + @[ + int32(contentTopic.len), + int32(hashes.len), + int32(pubsubTopic.len), + int32(timeString.len), + int32(hashHex.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + ) + ).isOkOr: + return err(stmtName & ": " & $error) return ok(rows) method getMessages*( s: PostgresDriver, - includeData = false, - contentTopicSeq = newSeq[ContentTopic](0), + includeData = true, + contentTopics = newSeq[ContentTopic](0), pubsubTopic = none(PubsubTopic), cursor = none(ArchiveCursor), startTime = none(Timestamp), @@ 
 ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
   let hexHashes = hashes.mapIt(toHex(it))
 
-  if contentTopicSeq.len == 1 and hexHashes.len == 1 and pubsubTopic.isSome() and
+  if contentTopics.len > 0 and hexHashes.len > 0 and pubsubTopic.isSome() and
       startTime.isSome() and endTime.isSome():
     ## Considered the most common query. Therefore, we use prepared statements to optimize it.
-    return await s.getMessagesPreparedStmt(
-      contentTopicSeq.join(","),
-      PubsubTopic(pubsubTopic.get()),
-      cursor,
-      startTime.get(),
-      endTime.get(),
-      hexHashes.join(","),
-      maxPageSize,
-      ascendingOrder,
-    )
-  else:
-    ## We will run atypical query. In this case we don't use prepared statemets
-    return await s.getMessagesArbitraryQuery(
-      contentTopicSeq, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize,
-      ascendingOrder,
-    )
-
-method getMessagesV2*(
-  s: PostgresDriver,
-  contentTopicSeq = newSeq[ContentTopic](0),
-  pubsubTopic = none(PubsubTopic),
-  cursor = none(ArchiveCursor),
-  startTime = none(Timestamp),
-  endTime = none(Timestamp),
-  maxPageSize = DefaultPageSize,
-  ascendingOrder = true,
-): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
-  if contentTopicSeq.len == 1 and pubsubTopic.isSome() and startTime.isSome() and
-      endTime.isSome():
-    ## Considered the most common query. Therefore, we use prepared statements to optimize it.
-    return await s.getMessagesV2PreparedStmt(
-      contentTopicSeq.join(","),
-      PubsubTopic(pubsubTopic.get()),
-      cursor,
-      startTime.get(),
-      endTime.get(),
-      maxPageSize,
-      ascendingOrder,
-    )
+    if includeData:
+      return await s.getMessagesPreparedStmt(
+        contentTopics.join(","),
+        PubsubTopic(pubsubTopic.get()),
+        cursor,
+        startTime.get(),
+        endTime.get(),
+        hexHashes.join(","),
+        maxPageSize,
+        ascendingOrder,
+      )
+    else:
+      return await s.getMessageHashesPreparedStmt(
+        contentTopics.join(","),
+        PubsubTopic(pubsubTopic.get()),
+        cursor,
+        startTime.get(),
+        endTime.get(),
+        hexHashes.join(","),
+        maxPageSize,
+        ascendingOrder,
+      )
   else:
-    ## We will run atypical query. In this case we don't use prepared statemets
-    return await s.getMessagesV2ArbitraryQuery(
-      contentTopicSeq, pubsubTopic, cursor, startTime, endTime, maxPageSize,
-      ascendingOrder,
-    )
+    if includeData:
+      ## We will run an atypical query. In this case we don't use prepared statements
+      return await s.getMessagesArbitraryQuery(
+        contentTopics, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize,
+        ascendingOrder,
+      )
+    else:
+      return await s.getMessageHashesArbitraryQuery(
+        contentTopics, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize,
+        ascendingOrder,
+      )
 
 proc getStr(
     s: PostgresDriver, query: string
@@ -785,7 +825,7 @@ method getOldestMessageTimestamp*(
 
   let oldestPartitionTimeNanoSec = oldestPartition.getPartitionStartTimeInNanosec()
 
-  let intRes = await s.getInt("SELECT MIN(storedAt) FROM messages")
+  let intRes = await s.getInt("SELECT MIN(timestamp) FROM messages")
   if intRes.isErr():
     ## Just return the oldest partition time considering the partitions set
     return ok(Timestamp(oldestPartitionTimeNanoSec))
@@ -795,7 +835,7 @@ method getNewestMessageTimestamp*(
 method getNewestMessageTimestamp*(
     s: PostgresDriver
 ): Future[ArchiveDriverResult[Timestamp]] {.async.} =
-  let intRes = await s.getInt("SELECT MAX(storedAt) FROM messages")
+  let intRes = await s.getInt("SELECT MAX(timestamp) FROM messages")
   if intRes.isErr():
     return err("error in getNewestMessageTimestamp: " & intRes.error)
 
@@ -807,7 +847,7 @@ method deleteOldestMessagesNotWithinLimit*(
   let execRes = await s.writeConnPool.pgQuery(
     """DELETE FROM messages WHERE id NOT IN
       (
-        SELECT id FROM messages ORDER BY storedAt DESC LIMIT ?
+        SELECT id FROM messages ORDER BY timestamp DESC LIMIT ?
       );""",
     @[$limit],
   )
@@ -818,7 +858,7 @@ method close*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} =
 method close*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} =
   ## Cancel the partition factory loop
-  s.futLoopPartitionFactory.cancel()
+  s.futLoopPartitionFactory.cancelSoon()
 
   ## Close the database connection
   let writeCloseRes = await s.writeConnPool.close()
@@ -867,7 +907,7 @@ proc addPartition(
     self: PostgresDriver, startTime: Timestamp, duration: timer.Duration
 ): Future[ArchiveDriverResult[void]] {.async.} =
   ## Creates a partition table that will store the messages that fall in the range
-  ## `startTime` <= storedAt < `startTime + duration`.
+  ## `startTime` <= timestamp < `startTime + duration`.
## `startTime` is measured in seconds since epoch let beginning = startTime @@ -1162,7 +1202,11 @@ method deleteMessagesOlderThanTimestamp*( (await s.removePartitionsOlderThan(tsNanoSec)).isOkOr: return err("error while removing older partitions: " & $error) - (await s.writeConnPool.pgQuery("DELETE FROM messages WHERE storedAt < " & $tsNanoSec)).isOkOr: + ( + await s.writeConnPool.pgQuery( + "DELETE FROM messages WHERE timestamp < " & $tsNanoSec + ) + ).isOkOr: return err("error in deleteMessagesOlderThanTimestamp: " & $error) return ok() diff --git a/waku/waku_archive/driver/postgres_driver/postgres_driver_legacy.nim b/waku/waku_archive/driver/postgres_driver/postgres_driver_legacy.nim new file mode 100644 index 0000000000..416b6a0585 --- /dev/null +++ b/waku/waku_archive/driver/postgres_driver/postgres_driver_legacy.nim @@ -0,0 +1,1168 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[nre, options, sequtils, strutils, strformat, times], + stew/[results, byteutils, arrayops], + db_postgres, + postgres, + chronos, + chronicles +import + ../../../common/error_handling, + ../../../waku_core, + ../../common, + ../../driver, + ../../../common/databases/db_postgres as waku_postgres, + ./postgres_healthcheck, + ./partitions_manager + +type LegacyPostgresDriver* = ref object of ArchiveDriver + ## Establish a separate pools for read/write operations + writeConnPool: PgAsyncPool + readConnPool: PgAsyncPool + + ## Partition container + partitionMngr: PartitionManager + futLoopPartitionFactory: Future[void] + +const InsertRowStmtName = "InsertRow" +const InsertRowStmtDefinition = # TODO: get the sql queries from a file + """INSERT INTO messages (id, messageHash, storedAt, contentTopic, payload, pubsubTopic, + version, timestamp, meta) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, CASE WHEN $9 = '' THEN NULL ELSE $9 END) ON CONFLICT DO NOTHING;""" + +const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc" +const SelectNoCursorAscStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + storedAt >= $4 AND + storedAt <= $5 + ORDER BY storedAt ASC, messageHash ASC LIMIT $6;""" + +const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc" +const SelectNoCursorDescStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + storedAt >= $4 AND + storedAt <= $5 + ORDER BY storedAt DESC, messageHash DESC LIMIT $6;""" + +const SelectWithCursorDescStmtName = "SelectWithCursorDesc" +const SelectWithCursorDescStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (storedAt, messageHash) < ($4,$5) AND + storedAt >= $6 AND + storedAt <= $7 + ORDER BY storedAt DESC, messageHash DESC LIMIT $8;""" + +const SelectWithCursorAscStmtName = "SelectWithCursorAsc" +const SelectWithCursorAscStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (storedAt, messageHash) > ($4,$5) AND + storedAt >= $6 AND + storedAt <= $7 + ORDER BY storedAt ASC, messageHash ASC LIMIT $8;""" + +const 
SelectMessageByHashName = "SelectMessageByHash" +const SelectMessageByHashDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages WHERE messageHash = $1""" + +const SelectNoCursorV2AscStmtName = "SelectWithoutCursorV2Asc" +const SelectNoCursorV2AscStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages + WHERE contentTopic IN ($1) AND + pubsubTopic = $2 AND + storedAt >= $3 AND + storedAt <= $4 + ORDER BY storedAt ASC LIMIT $5;""" + +const SelectNoCursorV2DescStmtName = "SelectWithoutCursorV2Desc" +const SelectNoCursorV2DescStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + pubsubTopic = $2 AND + storedAt >= $3 AND + storedAt <= $4 + ORDER BY storedAt DESC LIMIT $5;""" + +const SelectWithCursorV2DescStmtName = "SelectWithCursorV2Desc" +const SelectWithCursorV2DescStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + pubsubTopic = $2 AND + (storedAt, id) < ($3,$4) AND + storedAt >= $5 AND + storedAt <= $6 + ORDER BY storedAt DESC LIMIT $7;""" + +const SelectWithCursorV2AscStmtName = "SelectWithCursorV2Asc" +const SelectWithCursorV2AscStmtDef = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + pubsubTopic = $2 AND + (storedAt, id) > ($3,$4) AND + storedAt >= $5 AND + storedAt <= $6 + ORDER BY storedAt ASC LIMIT $7;""" + +const DefaultMaxNumConns = 50 + +proc new*( + T: type LegacyPostgresDriver, + dbUrl: string, + maxConnections = DefaultMaxNumConns, + onFatalErrorAction: OnFatalErrorHandler = nil, +): ArchiveDriverResult[T] = + ## Very simplistic split of max connections + let maxNumConnOnEachPool = int(maxConnections / 2) + + let readConnPool = PgAsyncPool.new(dbUrl, maxNumConnOnEachPool).valueOr: + return err("error creating read conn pool PgAsyncPool") + + let writeConnPool = PgAsyncPool.new(dbUrl, maxNumConnOnEachPool).valueOr: + return err("error creating write conn pool PgAsyncPool") + + if not isNil(onFatalErrorAction): + asyncSpawn checkConnectivity(readConnPool, onFatalErrorAction) + + if not isNil(onFatalErrorAction): + asyncSpawn checkConnectivity(writeConnPool, onFatalErrorAction) + + let driver = LegacyPostgresDriver( + writeConnPool: writeConnPool, + readConnPool: readConnPool, + partitionMngr: PartitionManager.new(), + ) + return ok(driver) + +proc reset*(s: LegacyPostgresDriver): Future[ArchiveDriverResult[void]] {.async.} = + ## Clear the database partitions + let targetSize = 0 + let forceRemoval = true + let ret = await s.decreaseDatabaseSize(targetSize, forceRemoval) + return ret + +proc rowCallbackImpl( + pqResult: ptr PGresult, + outRows: var seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)], +) = + ## Proc aimed to contain the logic of the callback passed to the `psasyncpool`. + ## That callback is used in "SELECT" queries. + ## + ## pqResult - contains the query results + ## outRows - seq of Store-rows. 
This is populated from the info contained in pqResult + + let numFields = pqResult.pqnfields() + if numFields != 9: + error "Wrong number of fields" + return + + for iRow in 0 ..< pqResult.pqNtuples(): + var wakuMessage: WakuMessage + var timestamp: Timestamp + var version: uint + var pubSubTopic: string + var contentTopic: string + var storedAt: int64 + var digest: string + var payload: string + var hashHex: string + var msgHash: WakuMessageHash + var meta: string + + try: + storedAt = parseInt($(pqgetvalue(pqResult, iRow, 0))) + contentTopic = $(pqgetvalue(pqResult, iRow, 1)) + payload = parseHexStr($(pqgetvalue(pqResult, iRow, 2))) + pubSubTopic = $(pqgetvalue(pqResult, iRow, 3)) + version = parseUInt($(pqgetvalue(pqResult, iRow, 4))) + timestamp = parseInt($(pqgetvalue(pqResult, iRow, 5))) + digest = parseHexStr($(pqgetvalue(pqResult, iRow, 6))) + hashHex = parseHexStr($(pqgetvalue(pqResult, iRow, 7))) + meta = parseHexStr($(pqgetvalue(pqResult, iRow, 8))) + msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31)) + except ValueError: + error "could not parse correctly", error = getCurrentExceptionMsg() + + wakuMessage.timestamp = timestamp + wakuMessage.version = uint32(version) + wakuMessage.contentTopic = contentTopic + wakuMessage.payload = @(payload.toOpenArrayByte(0, payload.high)) + wakuMessage.meta = @(meta.toOpenArrayByte(0, meta.high)) + + outRows.add( + ( + pubSubTopic, + wakuMessage, + @(digest.toOpenArrayByte(0, digest.high)), + storedAt, + msgHash, + ) + ) + +method put*( + s: LegacyPostgresDriver, + pubsubTopic: PubsubTopic, + message: WakuMessage, + digest: MessageDigest, + messageHash: WakuMessageHash, + receivedTime: Timestamp, +): Future[ArchiveDriverResult[void]] {.async.} = + let digest = toHex(digest.data) + let messageHash = toHex(messageHash) + let rxTime = $receivedTime + let contentTopic = message.contentTopic + let payload = toHex(message.payload) + let version = $message.version + let timestamp = $message.timestamp + let meta = toHex(message.meta) + + trace "put LegacyPostgresDriver", timestamp = timestamp + + return await s.writeConnPool.runStmt( + InsertRowStmtName, + InsertRowStmtDefinition, + @[ + digest, messageHash, rxTime, contentTopic, payload, pubsubTopic, version, + timestamp, meta, + ], + @[ + int32(digest.len), + int32(messageHash.len), + int32(rxTime.len), + int32(contentTopic.len), + int32(payload.len), + int32(pubsubTopic.len), + int32(version.len), + int32(timestamp.len), + int32(meta.len), + ], + @[ + int32(0), + int32(0), + int32(0), + int32(0), + int32(0), + int32(0), + int32(0), + int32(0), + int32(0), + ], + ) + +method getAllMessages*( + s: LegacyPostgresDriver +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async.} = + ## Retrieve all messages from the store. + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + ( + await s.readConnPool.pgQuery( + """SELECT storedAt, contentTopic, + payload, pubsubTopic, version, timestamp, + id, messageHash, meta FROM messages ORDER BY storedAt ASC""", + newSeq[string](0), + rowCallback, + ) + ).isOkOr: + return err("failed in query: " & $error) + + return ok(rows) + +proc getPartitionsList( + s: LegacyPostgresDriver +): Future[ArchiveDriverResult[seq[string]]] {.async.} = + ## Retrieves the seq of partition table names. 
+ ## e.g: @["messages_1708534333_1708534393", "messages_1708534273_1708534333"] + + var partitions: seq[string] + proc rowCallback(pqResult: ptr PGresult) = + for iRow in 0 ..< pqResult.pqNtuples(): + let partitionName = $(pqgetvalue(pqResult, iRow, 0)) + partitions.add(partitionName) + + ( + await s.readConnPool.pgQuery( + """ + SELECT child.relname AS partition_name + FROM pg_inherits + JOIN pg_class parent ON pg_inherits.inhparent = parent.oid + JOIN pg_class child ON pg_inherits.inhrelid = child.oid + JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace + JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace + WHERE parent.relname='messages' + ORDER BY partition_name ASC + """, + newSeq[string](0), + rowCallback, + ) + ).isOkOr: + return err("getPartitionsList failed in query: " & $error) + + return ok(partitions) + +proc getMessagesArbitraryQuery( + s: LegacyPostgresDriver, + contentTopic: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursorV2), + startTime = none(Timestamp), + endTime = none(Timestamp), + hexHashes: seq[string] = @[], + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async.} = + ## This proc allows to handle atypical queries. We don't use prepared statements for those. + + var query = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages""" + var statements: seq[string] + var args: seq[string] + + if contentTopic.len > 0: + let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")" + statements.add(cstmt) + for t in contentTopic: + args.add(t) + + if hexHashes.len > 0: + let cstmt = "messageHash IN (" & "?".repeat(hexHashes.len).join(",") & ")" + statements.add(cstmt) + for t in hexHashes: + args.add(t) + + if pubsubTopic.isSome(): + statements.add("pubsubTopic = ?") + args.add(pubsubTopic.get()) + + if cursor.isSome(): + let hashHex = toHex(cursor.get().hash) + + var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc entreeCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, entree) + + ( + await s.readConnPool.runStmt( + SelectMessageByHashName, + SelectMessageByHashDef, + @[hashHex], + @[int32(hashHex.len)], + @[int32(0)], + entreeCallback, + ) + ).isOkOr: + return err("failed to run query with cursor: " & $error) + + if entree.len == 0: + return ok(entree) + + let storetime = entree[0][3] + + let comp = if ascendingOrder: ">" else: "<" + statements.add("(storedAt, messageHash) " & comp & " (?,?)") + args.add($storetime) + args.add(hashHex) + + if startTime.isSome(): + statements.add("storedAt >= ?") + args.add($startTime.get()) + + if endTime.isSome(): + statements.add("storedAt <= ?") + args.add($endTime.get()) + + if statements.len > 0: + query &= " WHERE " & statements.join(" AND ") + + var direction: string + if ascendingOrder: + direction = "ASC" + else: + direction = "DESC" + + query &= " ORDER BY storedAt " & direction & ", messageHash " & direction + + query &= " LIMIT ?" 
+ args.add($maxPageSize) + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + (await s.readConnPool.pgQuery(query, args, rowCallback)).isOkOr: + return err("failed to run query: " & $error) + + return ok(rows) + +proc getMessagesV2ArbitraryQuery( + s: LegacyPostgresDriver, + contentTopic: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursorV2), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async, deprecated.} = + ## This proc allows to handle atypical queries. We don't use prepared statements for those. + + var query = + """SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash FROM messages""" + var statements: seq[string] + var args: seq[string] + + if contentTopic.len > 0: + let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")" + statements.add(cstmt) + for t in contentTopic: + args.add(t) + + if pubsubTopic.isSome(): + statements.add("pubsubTopic = ?") + args.add(pubsubTopic.get()) + + if cursor.isSome(): + let comp = if ascendingOrder: ">" else: "<" + statements.add("(storedAt, id) " & comp & " (?,?)") + args.add($cursor.get().storeTime) + args.add(toHex(cursor.get().digest.data)) + + if startTime.isSome(): + statements.add("storedAt >= ?") + args.add($startTime.get()) + + if endTime.isSome(): + statements.add("storedAt <= ?") + args.add($endTime.get()) + + if statements.len > 0: + query &= " WHERE " & statements.join(" AND ") + + var direction: string + if ascendingOrder: + direction = "ASC" + else: + direction = "DESC" + + query &= " ORDER BY storedAt " & direction & ", id " & direction + + query &= " LIMIT ?" + args.add($maxPageSize) + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + (await s.readConnPool.pgQuery(query, args, rowCallback)).isOkOr: + return err("failed to run query: " & $error) + + return ok(rows) + +proc getMessagesPreparedStmt( + s: LegacyPostgresDriver, + contentTopic: string, + pubsubTopic: PubsubTopic, + cursor = none(ArchiveCursorV2), + startTime: Timestamp, + endTime: Timestamp, + hashes: string, + maxPageSize = DefaultPageSize, + ascOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async.} = + ## This proc aims to run the most typical queries in a more performant way, i.e. by means of + ## prepared statements. + ## + ## contentTopic - string with list of conten topics. 
e.g: "'ctopic1','ctopic2','ctopic3'" + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + let startTimeStr = $startTime + let endTimeStr = $endTime + let limit = $maxPageSize + + if cursor.isSome(): + let hash = toHex(cursor.get().hash) + + var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + + proc entreeCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, entree) + + ( + await s.readConnPool.runStmt( + SelectMessageByHashName, + SelectMessageByHashDef, + @[hash], + @[int32(hash.len)], + @[int32(0)], + entreeCallback, + ) + ).isOkOr: + return err("failed to run query with cursor: " & $error) + + if entree.len == 0: + return ok(entree) + + let storeTime = $entree[0][3] + + var stmtName = + if ascOrder: SelectWithCursorAscStmtName else: SelectWithCursorDescStmtName + var stmtDef = + if ascOrder: SelectWithCursorAscStmtDef else: SelectWithCursorDescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[ + contentTopic, hashes, pubsubTopic, storeTime, hash, startTimeStr, endTimeStr, + limit, + ], + @[ + int32(contentTopic.len), + int32(pubsubTopic.len), + int32(storeTime.len), + int32(hash.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + ) + ).isOkOr: + return err("failed to run query with cursor: " & $error) + else: + var stmtName = + if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName + var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit], + @[ + int32(contentTopic.len), + int32(pubsubTopic.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + ) + ).isOkOr: + return err("failed to run query without cursor: " & $error) + + return ok(rows) + +proc getMessagesV2PreparedStmt( + s: LegacyPostgresDriver, + contentTopic: string, + pubsubTopic: PubsubTopic, + cursor = none(ArchiveCursorV2), + startTime: Timestamp, + endTime: Timestamp, + maxPageSize = DefaultPageSize, + ascOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async, deprecated.} = + ## This proc aims to run the most typical queries in a more performant way, i.e. by means of + ## prepared statements. + ## + ## contentTopic - string with list of conten topics. 
e.g: "'ctopic1','ctopic2','ctopic3'" + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + let startTimeStr = $startTime + let endTimeStr = $endTime + let limit = $maxPageSize + + if cursor.isSome(): + var stmtName = + if ascOrder: SelectWithCursorV2AscStmtName else: SelectWithCursorV2DescStmtName + var stmtDef = + if ascOrder: SelectWithCursorV2AscStmtDef else: SelectWithCursorV2DescStmtDef + + let digest = toHex(cursor.get().digest.data) + let storeTime = $cursor.get().storeTime + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[contentTopic, pubsubTopic, storeTime, digest, startTimeStr, endTimeStr, limit], + @[ + int32(contentTopic.len), + int32(pubsubTopic.len), + int32(storeTime.len), + int32(digest.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + ) + ).isOkOr: + return err("failed to run query with cursor: " & $error) + else: + var stmtName = + if ascOrder: SelectNoCursorV2AscStmtName else: SelectNoCursorV2DescStmtName + var stmtDef = + if ascOrder: SelectNoCursorV2AscStmtDef else: SelectNoCursorV2DescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[contentTopic, pubsubTopic, startTimeStr, endTimeStr, limit], + @[ + int32(contentTopic.len), + int32(pubsubTopic.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + ) + ).isOkOr: + return err("failed to run query without cursor: " & $error) + + return ok(rows) + +method getMessages*( + s: LegacyPostgresDriver, + includeData = false, + contentTopicSeq = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursorV2), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async.} = + let hexHashes = hashes.mapIt(toHex(it)) + + if contentTopicSeq.len == 1 and hexHashes.len == 1 and pubsubTopic.isSome() and + startTime.isSome() and endTime.isSome(): + ## Considered the most common query. Therefore, we use prepared statements to optimize it. + return await s.getMessagesPreparedStmt( + contentTopicSeq.join(","), + PubsubTopic(pubsubTopic.get()), + cursor, + startTime.get(), + endTime.get(), + hexHashes.join(","), + maxPageSize, + ascendingOrder, + ) + else: + ## We will run atypical query. In this case we don't use prepared statemets + return await s.getMessagesArbitraryQuery( + contentTopicSeq, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize, + ascendingOrder, + ) + +method getMessagesV2*( + s: LegacyPostgresDriver, + contentTopicSeq = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursorV2), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async, deprecated.} = + if contentTopicSeq.len == 1 and pubsubTopic.isSome() and startTime.isSome() and + endTime.isSome(): + ## Considered the most common query. Therefore, we use prepared statements to optimize it. 
+ return await s.getMessagesV2PreparedStmt( + contentTopicSeq.join(","), + PubsubTopic(pubsubTopic.get()), + cursor, + startTime.get(), + endTime.get(), + maxPageSize, + ascendingOrder, + ) + else: + ## We will run atypical query. In this case we don't use prepared statemets + return await s.getMessagesV2ArbitraryQuery( + contentTopicSeq, pubsubTopic, cursor, startTime, endTime, maxPageSize, + ascendingOrder, + ) + +proc getStr( + s: LegacyPostgresDriver, query: string +): Future[ArchiveDriverResult[string]] {.async.} = + # Performs a query that is expected to return a single string + + var ret: string + proc rowCallback(pqResult: ptr PGresult) = + if pqResult.pqnfields() != 1: + error "Wrong number of fields in getStr" + return + + if pqResult.pqNtuples() != 1: + error "Wrong number of rows in getStr" + return + + ret = $(pqgetvalue(pqResult, 0, 0)) + + (await s.readConnPool.pgQuery(query, newSeq[string](0), rowCallback)).isOkOr: + return err("failed in getRow: " & $error) + + return ok(ret) + +proc getInt( + s: LegacyPostgresDriver, query: string +): Future[ArchiveDriverResult[int64]] {.async.} = + # Performs a query that is expected to return a single numeric value (int64) + + var retInt = 0'i64 + let str = (await s.getStr(query)).valueOr: + return err("could not get str in getInt: " & $error) + + try: + retInt = parseInt(str) + except ValueError: + return err( + "exception in getInt, parseInt, str: " & str & " query: " & query & " exception: " & + getCurrentExceptionMsg() + ) + + return ok(retInt) + +method getDatabaseSize*( + s: LegacyPostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + let intRes = (await s.getInt("SELECT pg_database_size(current_database())")).valueOr: + return err("error in getDatabaseSize: " & error) + + let databaseSize: int64 = int64(intRes) + return ok(databaseSize) + +method getMessagesCount*( + s: LegacyPostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + let intRes = await s.getInt("SELECT COUNT(1) FROM messages") + if intRes.isErr(): + return err("error in getMessagesCount: " & intRes.error) + + return ok(intRes.get()) + +method getOldestMessageTimestamp*( + s: LegacyPostgresDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + ## In some cases it could happen that we have + ## empty partitions which are older than the current stored rows. + ## In those cases we want to consider those older partitions as the oldest considered timestamp. + let oldestPartition = s.partitionMngr.getOldestPartition().valueOr: + return err("could not get oldest partition: " & $error) + + let oldestPartitionTimeNanoSec = oldestPartition.getPartitionStartTimeInNanosec() + + let intRes = await s.getInt("SELECT MIN(storedAt) FROM messages") + if intRes.isErr(): + ## Just return the oldest partition time considering the partitions set + return ok(Timestamp(oldestPartitionTimeNanoSec)) + + return ok(Timestamp(min(intRes.get(), oldestPartitionTimeNanoSec))) + +method getNewestMessageTimestamp*( + s: LegacyPostgresDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + let intRes = await s.getInt("SELECT MAX(storedAt) FROM messages") + if intRes.isErr(): + return err("error in getNewestMessageTimestamp: " & intRes.error) + + return ok(Timestamp(intRes.get())) + +method deleteOldestMessagesNotWithinLimit*( + s: LegacyPostgresDriver, limit: int +): Future[ArchiveDriverResult[void]] {.async.} = + let execRes = await s.writeConnPool.pgQuery( + """DELETE FROM messages WHERE id NOT IN + ( + SELECT id FROM messages ORDER BY storedAt DESC LIMIT ? 
+ );""", + @[$limit], + ) + if execRes.isErr(): + return err("error in deleteOldestMessagesNotWithinLimit: " & execRes.error) + + return ok() + +method close*(s: LegacyPostgresDriver): Future[ArchiveDriverResult[void]] {.async.} = + ## Cancel the partition factory loop + s.futLoopPartitionFactory.cancel() + + ## Close the database connection + let writeCloseRes = await s.writeConnPool.close() + let readCloseRes = await s.readConnPool.close() + + writeCloseRes.isOkOr: + return err("error closing write pool: " & $error) + + readCloseRes.isOkOr: + return err("error closing read pool: " & $error) + + return ok() + +proc sleep*( + s: LegacyPostgresDriver, seconds: int +): Future[ArchiveDriverResult[void]] {.async.} = + # This is for testing purposes only. It is aimed to test the proper + # implementation of asynchronous requests. It merely triggers a sleep in the + # database for the amount of seconds given as a parameter. + + proc rowCallback(result: ptr PGresult) = + ## We are not interested in any value in this case + discard + + try: + let params = @[$seconds] + (await s.writeConnPool.pgQuery("SELECT pg_sleep(?)", params, rowCallback)).isOkOr: + return err("error in postgres_driver sleep: " & $error) + except DbError: + # This always raises an exception although the sleep works + return err("exception sleeping: " & getCurrentExceptionMsg()) + + return ok() + +proc performWriteQuery*( + s: LegacyPostgresDriver, query: string +): Future[ArchiveDriverResult[void]] {.async.} = + ## Performs a query that somehow changes the state of the database + + (await s.writeConnPool.pgQuery(query)).isOkOr: + return err("error in performWriteQuery: " & $error) + + return ok() + +proc addPartition( + self: LegacyPostgresDriver, startTime: Timestamp, duration: timer.Duration +): Future[ArchiveDriverResult[void]] {.async.} = + ## Creates a partition table that will store the messages that fall in the range + ## `startTime` <= storedAt < `startTime + duration`. 
+
+  let beginning = startTime
+  let `end` = (startTime + duration.seconds)
+
+  let fromInSec: string = $beginning
+  let untilInSec: string = $`end`
+
+  let fromInNanoSec: string = fromInSec & "000000000"
+  let untilInNanoSec: string = untilInSec & "000000000"
+
+  let partitionName = "messages_" & fromInSec & "_" & untilInSec
+
+  let createPartitionQuery =
+    "CREATE TABLE IF NOT EXISTS " & partitionName & " PARTITION OF " &
+    "messages FOR VALUES FROM ('" & fromInNanoSec & "') TO ('" & untilInNanoSec & "');"
+
+  (await self.performWriteQuery(createPartitionQuery)).isOkOr:
+    return err(fmt"error adding partition [{partitionName}]: " & $error)
+
+  debug "new partition added", query = createPartitionQuery
+
+  self.partitionMngr.addPartitionInfo(partitionName, beginning, `end`)
+  return ok()
+
+proc initializePartitionsInfo(
+    self: LegacyPostgresDriver
+): Future[ArchiveDriverResult[void]] {.async.} =
+  let partitionNamesRes = await self.getPartitionsList()
+  if not partitionNamesRes.isOk():
+    return err("Could not retrieve partitions list: " & $partitionNamesRes.error)
+  else:
+    let partitionNames = partitionNamesRes.get()
+    for partitionName in partitionNames:
+      ## partitionName contains something like 'messages_1708449815_1708449875'
+      let bothTimes = partitionName.replace("messages_", "")
+      let times = bothTimes.split("_")
+      if times.len != 2:
+        return err(fmt"initializePartitionsInfo: wrong partition name {partitionName}")
+
+      var beginning: int64
+      try:
+        beginning = parseInt(times[0])
+      except ValueError:
+        return err("Could not parse beginning time: " & getCurrentExceptionMsg())
+
+      var `end`: int64
+      try:
+        `end` = parseInt(times[1])
+      except ValueError:
+        return err("Could not parse end time: " & getCurrentExceptionMsg())
+
+      self.partitionMngr.addPartitionInfo(partitionName, beginning, `end`)
+
+    return ok()
+
+const DefaultDatabasePartitionCheckTimeInterval = timer.minutes(10)
+const PartitionsRangeInterval = timer.hours(1) ## Time range covered by each partition
+
+proc loopPartitionFactory(
+    self: LegacyPostgresDriver, onFatalError: OnFatalErrorHandler
+) {.async.} =
+  ## Loop proc that continuously checks whether we need to create a new partition.
+  ## Notice that the deletion of partitions is handled by the retention policy modules.
+
+  debug "starting loopPartitionFactory"
+
+  if PartitionsRangeInterval < DefaultDatabasePartitionCheckTimeInterval:
+    onFatalError(
+      "the partition range interval must be greater than the partition check interval"
+    )
+
+  ## First of all, let's make the 'partition_manager' aware of the current partitions
+  (await self.initializePartitionsInfo()).isOkOr:
+    onFatalError("issue in loopPartitionFactory: " & $error)
+
+  while true:
+    trace "Check if we need to create a new partition"
+
+    let now = times.now().toTime().toUnix()
+
+    if self.partitionMngr.isEmpty():
+      debug "adding partition because no partitions exist yet"
+      (await self.addPartition(now, PartitionsRangeInterval)).isOkOr:
+        onFatalError("error when creating a new partition from empty state: " & $error)
+    else:
+      let newestPartitionRes = self.partitionMngr.getNewestPartition()
+      if newestPartitionRes.isErr():
+        onFatalError("could not get newest partition: " & $newestPartitionRes.error)
+
+      let newestPartition = newestPartitionRes.get()
+      if newestPartition.containsMoment(now):
+        debug "creating a new partition for the future"
+        ## The currently used partition is the last one that was created.
+        ## Thus, let's create another partition for the future.
+
+        (
+          await self.addPartition(
+            newestPartition.getLastMoment(), PartitionsRangeInterval
+          )
+        ).isOkOr:
+          onFatalError("could not add the next partition for 'now': " & $error)
+      elif now >= newestPartition.getLastMoment():
+        debug "creating a new partition to contain current messages"
+        ## There is no partition to contain the current time.
+        ## This happens if the node has been stopped for quite a long time.
+        ## Then, let's create the needed partition to contain 'now'.
+        (await self.addPartition(now, PartitionsRangeInterval)).isOkOr:
+          onFatalError("could not add the next partition: " & $error)
+
+    await sleepAsync(DefaultDatabasePartitionCheckTimeInterval)
+
+proc startPartitionFactory*(
+    self: LegacyPostgresDriver, onFatalError: OnFatalErrorHandler
+) {.async.} =
+  self.futLoopPartitionFactory = self.loopPartitionFactory(onFatalError)
+
+proc getTableSize*(
+    self: LegacyPostgresDriver, tableName: string
+): Future[ArchiveDriverResult[string]] {.async.} =
+  ## Returns a human-readable representation of the size for the requested table.
+  ## tableName - table of interest.
+
+  let tableSize = (
+    await self.getStr(
+      fmt"""
+      SELECT pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
+      FROM pg_class C
+      where relname = '{tableName}'"""
+    )
+  ).valueOr:
+    return err("error in getTableSize: " & error)
+
+  return ok(tableSize)
+
+proc removePartition(
+    self: LegacyPostgresDriver, partitionName: string
+): Future[ArchiveDriverResult[void]] {.async.} =
+  var partSize = ""
+  let partSizeRes = await self.getTableSize(partitionName)
+  if partSizeRes.isOk():
+    partSize = partSizeRes.get()
+
+  ## Detach and remove the partition concurrently to not block the parent table (messages)
+  let detachPartitionQuery =
+    "ALTER TABLE messages DETACH PARTITION " & partitionName & " CONCURRENTLY;"
+  debug "removeOldestPartition", query = detachPartitionQuery
+  (await self.performWriteQuery(detachPartitionQuery)).isOkOr:
+    return err(fmt"error in {detachPartitionQuery}: " & $error)
+
+  ## Drop the partition
+  let dropPartitionQuery = "DROP TABLE " & partitionName
+  debug "removeOldestPartition drop partition", query = dropPartitionQuery
+  (await self.performWriteQuery(dropPartitionQuery)).isOkOr:
+    return err(fmt"error in {dropPartitionQuery}: " & $error)
+
+  debug "removed partition", partition_name = partitionName, partition_size = partSize
+  self.partitionMngr.removeOldestPartitionName()
+
+  return ok()
+
+proc removePartitionsOlderThan(
+    self: LegacyPostgresDriver, tsInNanoSec: Timestamp
+): Future[ArchiveDriverResult[void]] {.async.} =
+  ## Removes old partitions that don't contain the specified timestamp
+
+  let tsInSec = Timestamp(float(tsInNanoSec) / 1_000_000_000)
+
+  var oldestPartition = self.partitionMngr.getOldestPartition().valueOr:
+    return err("could not get oldest partition in removePartitionsOlderThan: " & $error)
+
+  while not oldestPartition.containsMoment(tsInSec):
+    (await self.removePartition(oldestPartition.getName())).isOkOr:
+      return err("issue in removePartitionsOlderThan: " & $error)
+
+    oldestPartition = self.partitionMngr.getOldestPartition().valueOr:
+      return err(
+        "could not get partition in removePartitionsOlderThan in while loop: " & $error
+      )
+
+  ## We reached the partition that contains the target timestamp, and we don't want to remove it
+  return ok()
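+
+## A hypothetical time-based sweep using removePartitionsOlderThan (values
+## assumed for illustration): drop every partition that ended before a 30-day
+## cutoff, expressed in nanoseconds since epoch.
+##
+##   let cutoffNanoSec =
+##     Timestamp((times.now().toTime().toUnix() - 30 * 24 * 3600) * 1_000_000_000)
+##   (waitFor self.removePartitionsOlderThan(cutoffNanoSec)).isOkOr:
+##     error "partition sweep failed", error = $error
+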
+proc removeOldestPartition(
+    self: LegacyPostgresDriver, forceRemoval: bool = false, ## To allow cleanup in tests
+): Future[ArchiveDriverResult[void]] {.async.} =
+  ## Indirectly called from the retention policy
+
+  let oldestPartition = self.partitionMngr.getOldestPartition().valueOr:
+    return err("could not remove oldest partition: " & $error)
+
+  if not forceRemoval:
+    let now = times.now().toTime().toUnix()
+    let currentPartitionRes = self.partitionMngr.getPartitionFromDateTime(now)
+    if currentPartitionRes.isOk():
+      ## The database contains a partition that would store current messages.
+
+      if currentPartitionRes.get() == oldestPartition:
+        debug "Skipping to remove the current partition"
+        return ok()
+
+  return await self.removePartition(oldestPartition.getName())
+
+proc containsAnyPartition*(self: LegacyPostgresDriver): bool =
+  return not self.partitionMngr.isEmpty()
+
+method decreaseDatabaseSize*(
+    driver: LegacyPostgresDriver, targetSizeInBytes: int64, forceRemoval: bool = false
+): Future[ArchiveDriverResult[void]] {.async.} =
+  var dbSize = (await driver.getDatabaseSize()).valueOr:
+    return err("decreaseDatabaseSize failed to get database size: " & $error)
+
+  ## database size in bytes
+  var totalSizeOfDB: int64 = int64(dbSize)
+
+  if totalSizeOfDB <= targetSizeInBytes:
+    return ok()
+
+  debug "start reducing database size",
+    targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB
+
+  while totalSizeOfDB > targetSizeInBytes and driver.containsAnyPartition():
+    (await driver.removeOldestPartition(forceRemoval)).isOkOr:
+      return err(
+        "decreaseDatabaseSize inside loop failed to remove oldest partition: " & $error
+      )
+
+    dbSize = (await driver.getDatabaseSize()).valueOr:
+      return
+        err("decreaseDatabaseSize inside loop failed to get database size: " & $error)
+
+    let newCurrentSize = int64(dbSize)
+    if newCurrentSize == totalSizeOfDB:
+      return err("the previous partition removal didn't reduce the database size")
+
+    totalSizeOfDB = newCurrentSize
+
+    debug "reducing database size",
+      targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB
+
+  return ok()
+
+method existsTable*(
+    s: LegacyPostgresDriver, tableName: string
+): Future[ArchiveDriverResult[bool]] {.async.} =
+  let query: string =
+    fmt"""
+    SELECT EXISTS (
+      SELECT FROM
+        pg_tables
+      WHERE
+        tablename = '{tableName}'
+    );
+    """
+
+  var exists: string
+  proc rowCallback(pqResult: ptr PGresult) =
+    if pqResult.pqnfields() != 1:
+      error "Wrong number of fields in existsTable"
+      return
+
+    if pqResult.pqNtuples() != 1:
+      error "Wrong number of rows in existsTable"
+      return
+
+    exists = $(pqgetvalue(pqResult, 0, 0))
+
+  (await s.readConnPool.pgQuery(query, newSeq[string](0), rowCallback)).isOkOr:
+    return err("existsTable failed in pgQuery: " & $error)
+
+  return ok(exists == "t")
+
+proc getCurrentVersion*(
+    s: LegacyPostgresDriver
+): Future[ArchiveDriverResult[int64]] {.async.} =
+  let existsVersionTable = (await s.existsTable("version")).valueOr:
+    return err("error in getCurrentVersion-existsTable: " & $error)
+
+  if not existsVersionTable:
+    return ok(0)
+
+  let res = (await s.getInt("SELECT version FROM version")).valueOr:
+    return err("error in getCurrentVersion: " & $error)
+
+  return ok(res)
+
+method deleteMessagesOlderThanTimestamp*(
+    s: LegacyPostgresDriver, tsNanoSec: Timestamp
+): Future[ArchiveDriverResult[void]] {.async.} =
+  ## First of all, let's remove the older partitions so that we can reduce
+  ## the database size.
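+  ##
+  ## The cutoff is expressed in nanoseconds since epoch; a hypothetical caller
+  ## (helper name and retention period assumed for illustration) could compute it as:
+  ##   let sevenDaysNs = 7'i64 * 24 * 3600 * 1_000_000_000
+  ##   let cutoff = Timestamp(getNowInNanosecondTime() - sevenDaysNs)
+  ##   (await driver.deleteMessagesOlderThanTimestamp(cutoff)).isOkOr: ...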
+ (await s.removePartitionsOlderThan(tsNanoSec)).isOkOr: + return err("error while removing older partitions: " & $error) + + (await s.writeConnPool.pgQuery("DELETE FROM messages WHERE storedAt < " & $tsNanoSec)).isOkOr: + return err("error in deleteMessagesOlderThanTimestamp: " & $error) + + return ok() diff --git a/waku/waku_archive/driver/queue_driver.nim b/waku/waku_archive/driver/queue_driver.nim index 1ea8a29d3a..aeb74b0034 100644 --- a/waku/waku_archive/driver/queue_driver.nim +++ b/waku/waku_archive/driver/queue_driver.nim @@ -3,6 +3,10 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import ./queue_driver/queue_driver, ./queue_driver/index +import + ./queue_driver/queue_driver, + ./queue_driver/index, + ./queue_driver/queue_driver_legacy, + ./queue_driver/index_legacy -export queue_driver, index +export queue_driver, index, queue_driver_legacy, index_legacy diff --git a/waku/waku_archive/driver/queue_driver/index.nim b/waku/waku_archive/driver/queue_driver/index.nim index d34b550c85..d59445dc02 100644 --- a/waku/waku_archive/driver/queue_driver/index.nim +++ b/waku/waku_archive/driver/queue_driver/index.nim @@ -3,59 +3,17 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import stew/byteutils, nimcrypto/sha2 -import ../../../waku_core, ../../common +import stew/byteutils +import ../../../waku_core type Index* = object ## This type contains the description of an Index used in the pagination of WakuMessages - pubsubTopic*: string - senderTime*: Timestamp # the time at which the message is generated - receiverTime*: Timestamp - digest*: MessageDigest # calculated over payload and content topic + time*: Timestamp # the time at which the message is generated hash*: WakuMessageHash - -proc compute*( - T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic -): T = - ## Takes a WakuMessage with received timestamp and returns its Index. - let - digest = computeDigest(msg) - senderTime = msg.timestamp - hash = computeMessageHash(pubsubTopic, msg) - - return Index( - pubsubTopic: pubsubTopic, - senderTime: senderTime, - receiverTime: receivedTime, - digest: digest, - hash: hash, - ) - -proc tohistoryCursor*(index: Index): ArchiveCursor = - return ArchiveCursor( - pubsubTopic: index.pubsubTopic, - senderTime: index.senderTime, - storeTime: index.receiverTime, - digest: index.digest, - hash: index.hash, - ) - -proc toIndex*(index: ArchiveCursor): Index = - return Index( - pubsubTopic: index.pubsubTopic, - senderTime: index.senderTime, - receiverTime: index.storeTime, - digest: index.digest, - hash: index.hash, - ) + topic*: PubsubTopic proc `==`*(x, y: Index): bool = - ## receiverTime plays no role in index equality - return - ( - (x.senderTime == y.senderTime) and (x.digest == y.digest) and - (x.pubsubTopic == y.pubsubTopic) - ) or (x.hash == y.hash) # this applies to store v3 queries only + return x.hash == y.hash proc cmp*(x, y: Index): int = ## compares x and y @@ -64,28 +22,11 @@ proc cmp*(x, y: Index): int = ## returns 1 if x > y ## ## Default sorting order priority is: - ## 1. senderTimestamp - ## 2. receiverTimestamp (a fallback only if senderTimestamp unset on either side, and all other fields unequal) - ## 3. message digest - ## 4. 
pubsubTopic - - if x == y: - # Quick exit ensures receiver time does not affect index equality - return 0 - - # Timestamp has a higher priority for comparison - let - # Use receiverTime where senderTime is unset - xTimestamp = if x.senderTime == 0: x.receiverTime else: x.senderTime - yTimestamp = if y.senderTime == 0: y.receiverTime else: y.senderTime - - let timecmp = cmp(xTimestamp, yTimestamp) - if timecmp != 0: - return timecmp + ## 1. time + ## 2. hash - # Continue only when timestamps are equal - let digestcmp = cmp(x.digest.data, y.digest.data) - if digestcmp != 0: - return digestcmp + let timeCMP = cmp(x.time, y.time) + if timeCMP != 0: + return timeCMP - return cmp(x.pubsubTopic, y.pubsubTopic) + return cmp(x.hash, y.hash) diff --git a/waku/waku_archive/driver/queue_driver/index_legacy.nim b/waku/waku_archive/driver/queue_driver/index_legacy.nim new file mode 100644 index 0000000000..815d0abb42 --- /dev/null +++ b/waku/waku_archive/driver/queue_driver/index_legacy.nim @@ -0,0 +1,91 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import stew/byteutils, nimcrypto/sha2 +import ../../../waku_core, ../../common + +type Index* = object + ## This type contains the description of an Index used in the pagination of WakuMessages + pubsubTopic*: string + senderTime*: Timestamp # the time at which the message is generated + receiverTime*: Timestamp + digest*: MessageDigest # calculated over payload and content topic + hash*: WakuMessageHash + +proc compute*( + T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic +): T = + ## Takes a WakuMessage with received timestamp and returns its Index. + let + digest = computeDigest(msg) + senderTime = msg.timestamp + hash = computeMessageHash(pubsubTopic, msg) + + return Index( + pubsubTopic: pubsubTopic, + senderTime: senderTime, + receiverTime: receivedTime, + digest: digest, + hash: hash, + ) + +proc tohistoryCursor*(index: Index): ArchiveCursorV2 = + return ArchiveCursorV2( + pubsubTopic: index.pubsubTopic, + senderTime: index.senderTime, + storeTime: index.receiverTime, + digest: index.digest, + hash: index.hash, + ) + +proc toIndex*(index: ArchiveCursorV2): Index = + return Index( + pubsubTopic: index.pubsubTopic, + senderTime: index.senderTime, + receiverTime: index.storeTime, + digest: index.digest, + hash: index.hash, + ) + +proc `==`*(x, y: Index): bool = + ## receiverTime plays no role in index equality + return + ( + (x.senderTime == y.senderTime) and (x.digest == y.digest) and + (x.pubsubTopic == y.pubsubTopic) + ) or (x.hash == y.hash) # this applies to store v3 queries only + +proc cmp*(x, y: Index): int = + ## compares x and y + ## returns 0 if they are equal + ## returns -1 if x < y + ## returns 1 if x > y + ## + ## Default sorting order priority is: + ## 1. senderTimestamp + ## 2. receiverTimestamp (a fallback only if senderTimestamp unset on either side, and all other fields unequal) + ## 3. message digest + ## 4. 
pubsubTopic + + if x == y: + # Quick exit ensures receiver time does not affect index equality + return 0 + + # Timestamp has a higher priority for comparison + let + # Use receiverTime where senderTime is unset + xTimestamp = if x.senderTime == 0: x.receiverTime else: x.senderTime + yTimestamp = if y.senderTime == 0: y.receiverTime else: y.senderTime + + let timecmp = cmp(xTimestamp, yTimestamp) + if timecmp != 0: + return timecmp + + # Continue only when timestamps are equal + let digestcmp = cmp(x.digest.data, y.digest.data) + if digestcmp != 0: + return digestcmp + + return cmp(x.pubsubTopic, y.pubsubTopic) diff --git a/waku/waku_archive/driver/queue_driver/queue_driver.nim b/waku/waku_archive/driver/queue_driver/queue_driver.nim index dcc45f9700..b6fda13ab1 100644 --- a/waku/waku_archive/driver/queue_driver/queue_driver.nim +++ b/waku/waku_archive/driver/queue_driver/queue_driver.nim @@ -12,7 +12,8 @@ logScope: const QueueDriverDefaultMaxCapacity* = 25_000 type - QueryFilterMatcher = proc(index: Index, msg: WakuMessage): bool {.gcsafe, closure.} + QueryFilterMatcher = + proc(index: Index, msg: WakuMessage): bool {.gcsafe, raises: [], closure.} QueueDriver* = ref object of ArchiveDriver ## Bounded repository for indexed messages @@ -135,9 +136,7 @@ proc getPage( if predicate.isNil() or predicate(key, data): numberOfItems += 1 - outSeq.add( - (key.pubsubTopic, data, @(key.digest.data), key.receiverTime, key.hash) - ) + outSeq.add((key.hash, key.topic, data)) currentEntry = if forward: @@ -229,19 +228,11 @@ proc add*( method put*( driver: QueueDriver, + messageHash: WakuMessageHash, pubsubTopic: PubsubTopic, message: WakuMessage, - digest: MessageDigest, - messageHash: WakuMessageHash, - receivedTime: Timestamp, ): Future[ArchiveDriverResult[void]] {.async.} = - let index = Index( - pubsubTopic: pubsubTopic, - senderTime: message.timestamp, - receiverTime: receivedTime, - digest: digest, - hash: messageHash, - ) + let index = Index(time: message.timestamp, hash: messageHash, topic: pubsubTopic) return driver.add(index, message) @@ -258,8 +249,8 @@ method existsTable*( method getMessages*( driver: QueueDriver, - includeData = false, - contentTopic: seq[ContentTopic] = @[], + includeData = true, + contentTopics: seq[ContentTopic] = @[], pubsubTopic = none(PubsubTopic), cursor = none(ArchiveCursor), startTime = none(Timestamp), @@ -268,14 +259,17 @@ method getMessages*( maxPageSize = DefaultPageSize, ascendingOrder = true, ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = - let cursor = cursor.map(toIndex) + var index = none(Index) + + if cursor.isSome(): + index = some(Index(hash: cursor.get())) let matchesQuery: QueryFilterMatcher = func (index: Index, msg: WakuMessage): bool = - if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get(): + if pubsubTopic.isSome() and index.topic != pubsubTopic.get(): return false - if contentTopic.len > 0 and msg.contentTopic notin contentTopic: + if contentTopics.len > 0 and msg.contentTopic notin contentTopics: return false if startTime.isSome() and msg.timestamp < startTime.get(): @@ -289,11 +283,14 @@ method getMessages*( return true - var pageRes: QueueDriverGetPageResult - try: - pageRes = driver.getPage(maxPageSize, ascendingOrder, cursor, matchesQuery) - except CatchableError, Exception: - return err(getCurrentExceptionMsg()) + let catchable = catch: + driver.getPage(maxPageSize, ascendingOrder, index, matchesQuery) + + let pageRes: QueueDriverGetPageResult = + if catchable.isErr(): + return err(catchable.error.msg) + else: + 
catchable.get()
 
   if pageRes.isErr():
     return err($pageRes.error)
@@ -330,7 +327,7 @@ method getOldestMessageTimestamp*(
 ): Future[ArchiveDriverResult[Timestamp]] {.async.} =
   return driver.first().map(
     proc(index: Index): Timestamp =
-      index.receiverTime
+      index.time
   )
 
 method getNewestMessageTimestamp*(
@@ -338,7 +335,7 @@ method getNewestMessageTimestamp*(
 ): Future[ArchiveDriverResult[Timestamp]] {.async.} =
   return driver.last().map(
     proc(index: Index): Timestamp =
-      index.receiverTime
+      index.time
   )
 
 method deleteMessagesOlderThanTimestamp*(
diff --git a/waku/waku_archive/driver/queue_driver/queue_driver_legacy.nim b/waku/waku_archive/driver/queue_driver/queue_driver_legacy.nim
new file mode 100644
index 0000000000..4b8bcb1456
--- /dev/null
+++ b/waku/waku_archive/driver/queue_driver/queue_driver_legacy.nim
@@ -0,0 +1,362 @@
+when (NimMajor, NimMinor) < (1, 4):
+  {.push raises: [Defect].}
+else:
+  {.push raises: [].}
+
+import std/options, stew/results, stew/sorted_set, chronicles, chronos
+import ../../../waku_core, ../../common, ../../driver, ./index_legacy
+
+logScope:
+  topics = "waku archive queue_store"
+
+const QueueDriverDefaultMaxCapacity* = 25_000
+
+type
+  QueryFilterMatcher = proc(index: Index, msg: WakuMessage): bool {.gcsafe, closure.}
+
+  LegacyQueueDriver* {.deprecated.} = ref object of ArchiveDriver
+    ## Bounded repository for indexed messages
+    ##
+    ## The store queue will keep messages up to its
+    ## configured capacity. As soon as this capacity
+    ## is reached and a new message is added, the oldest
+    ## item will be removed to make space for the new one.
+    ## This implies both a `delete` and `add` operation
+    ## for new items.
+
+    # TODO: a circular/ring buffer may be a more efficient implementation
+    items: SortedSet[Index, WakuMessage] # sorted set of stored messages
+    capacity: int # Maximum amount of messages to keep
+
+  QueueDriverErrorKind {.pure.} = enum
+    INVALID_CURSOR
+
+  QueueDriverGetPageResult = Result[seq[ArchiveRowV2], QueueDriverErrorKind]
+
+proc `$`(error: QueueDriverErrorKind): string =
+  case error
+  of INVALID_CURSOR: "invalid_cursor"
+
+### Helpers
+
+proc walkToCursor(
+    w: SortedSetWalkRef[Index, WakuMessage], startCursor: Index, forward: bool
+): SortedSetResult[Index, WakuMessage] =
+  ## Walk until we find the cursor
+  ## TODO: Improve performance here with a binary/tree search
+
+  var nextItem =
+    if forward:
+      w.first()
+    else:
+      w.last()
+
+  ## Fast forward until we reach the startCursor
+  while nextItem.isOk():
+    if nextItem.value.key == startCursor:
+      break
+
+    # Not yet at cursor. Continue advancing
+    nextItem =
+      if forward:
+        w.next()
+      else:
+        w.prev()
+
+  return nextItem
+
+#### API
+
+proc new*(T: type LegacyQueueDriver, capacity: int = QueueDriverDefaultMaxCapacity): T =
+  var items = SortedSet[Index, WakuMessage].init()
+  return LegacyQueueDriver(items: items, capacity: capacity)
+
+proc contains*(driver: LegacyQueueDriver, index: Index): bool =
+  ## Return `true` if the store queue already contains the `index`, `false` otherwise.
+  return driver.items.eq(index).isOk()
+
+proc len*(driver: LegacyQueueDriver): int {.noSideEffect.} =
+  return driver.items.len
+
+proc getPage(
+    driver: LegacyQueueDriver,
+    pageSize: uint = 0,
+    forward: bool = true,
+    cursor: Option[Index] = none(Index),
+    predicate: QueryFilterMatcher = nil,
+): QueueDriverGetPageResult =
+  ## Populate a single page in forward direction
+  ## Start at the `startCursor` (exclusive), or first entry (inclusive) if not defined.
+  ## Page size must not exceed `maxPageSize`
+  ## Each entry must match the `pred`
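+  ##
+  ## A minimal paging sketch (the cursor value is hypothetical, for illustration only):
+  ##   let pageRes = driver.getPage(pageSize = 20, forward = true,
+  ##                                cursor = some(lastSeenIndex))
+  ##   if pageRes.isOk():
+  ##     for (topic, msg, digest, storedAt, hash) in pageRes.get():
+  ##       discard # each entry is an ArchiveRowV2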
+  var outSeq: seq[ArchiveRowV2]
+
+  var w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
+  defer:
+    w.destroy()
+
+  var currentEntry: SortedSetResult[Index, WakuMessage]
+
+  # Find starting entry
+  if cursor.isSome():
+    let cursorEntry = w.walkToCursor(cursor.get(), forward)
+    if cursorEntry.isErr():
+      return err(QueueDriverErrorKind.INVALID_CURSOR)
+
+    # Advance walker once more
+    currentEntry =
+      if forward:
+        w.next()
+      else:
+        w.prev()
+  else:
+    # Start from the beginning of the queue
+    currentEntry =
+      if forward:
+        w.first()
+      else:
+        w.last()
+
+  trace "Starting page query", currentEntry = currentEntry
+
+  ## This loop walks forward over the queue:
+  ## 1. from the given cursor (or first/last entry, if not provided)
+  ## 2. adds entries matching the predicate function to output page
+  ## 3. until either the end of the queue or maxPageSize is reached
+  var numberOfItems: uint = 0
+  while currentEntry.isOk() and numberOfItems < pageSize:
+    trace "Continuing page query",
+      currentEntry = currentEntry, numberOfItems = numberOfItems
+
+    let
+      key = currentEntry.value.key
+      data = currentEntry.value.data
+
+    if predicate.isNil() or predicate(key, data):
+      numberOfItems += 1
+
+      outSeq.add(
+        (key.pubsubTopic, data, @(key.digest.data), key.receiverTime, key.hash)
+      )
+
+    currentEntry =
+      if forward:
+        w.next()
+      else:
+        w.prev()
+
+  trace "Successfully retrieved page", len = outSeq.len
+
+  return ok(outSeq)
+
+## --- SortedSet accessors ---
+
+iterator fwdIterator*(driver: LegacyQueueDriver): (Index, WakuMessage) =
+  ## Forward iterator over the entire store queue
+  var
+    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
+    res = w.first()
+
+  while res.isOk():
+    yield (res.value.key, res.value.data)
+    res = w.next()
+
+  w.destroy()
+
+iterator bwdIterator*(driver: LegacyQueueDriver): (Index, WakuMessage) =
+  ## Backwards iterator over the entire store queue
+  var
+    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
+    res = w.last()
+
+  while res.isOk():
+    yield (res.value.key, res.value.data)
+    res = w.prev()
+
+  w.destroy()
+
+proc first*(driver: LegacyQueueDriver): ArchiveDriverResult[Index] =
+  var
+    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
+    res = w.first()
+  w.destroy()
+
+  if res.isErr():
+    return err("Not found")
+
+  return ok(res.value.key)
+
+proc last*(driver: LegacyQueueDriver): ArchiveDriverResult[Index] =
+  var
+    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
+    res = w.last()
+  w.destroy()
+
+  if res.isErr():
+    return err("Not found")
+
+  return ok(res.value.key)
+
+## --- Queue API ---
+
+proc add*(
+    driver: LegacyQueueDriver, index: Index, msg: WakuMessage
+): ArchiveDriverResult[void] =
+  ## Add a message to the queue
+  ##
+  ## If we're at capacity, we will remove the oldest (first) item
+  if driver.contains(index):
+    trace "could not add item to store queue. 
Index already exists", index = index + return err("duplicate") + + # TODO: the below delete block can be removed if we convert to circular buffer + if driver.items.len >= driver.capacity: + var + w = SortedSetWalkRef[Index, WakuMessage].init(driver.items) + firstItem = w.first + + if cmp(index, firstItem.value.key) < 0: + # When at capacity, we won't add if message index is smaller (older) than our oldest item + w.destroy # Clean up walker + return err("too_old") + + discard driver.items.delete(firstItem.value.key) + w.destroy # better to destroy walker after a delete operation + + driver.items.insert(index).value.data = msg + + return ok() + +method put*( + driver: LegacyQueueDriver, + pubsubTopic: PubsubTopic, + message: WakuMessage, + digest: MessageDigest, + messageHash: WakuMessageHash, + receivedTime: Timestamp, +): Future[ArchiveDriverResult[void]] {.async.} = + let index = Index( + pubsubTopic: pubsubTopic, + senderTime: message.timestamp, + receiverTime: receivedTime, + digest: digest, + hash: messageHash, + ) + + return driver.add(index, message) + +method getAllMessages*( + driver: LegacyQueueDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + # TODO: Implement this message_store method + return err("interface method not implemented") + +method existsTable*( + driver: LegacyQueueDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.async.} = + return err("interface method not implemented") + +method getMessages*( + driver: LegacyQueueDriver, + includeData = false, + contentTopic: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursorV2), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes: seq[WakuMessageHash] = @[], + maxPageSize = DefaultPageSize, + ascendingOrder = true, +): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async.} = + let cursor = cursor.map(toIndex) + + let matchesQuery: QueryFilterMatcher = + func (index: Index, msg: WakuMessage): bool = + if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get(): + return false + + if contentTopic.len > 0 and msg.contentTopic notin contentTopic: + return false + + if startTime.isSome() and msg.timestamp < startTime.get(): + return false + + if endTime.isSome() and msg.timestamp > endTime.get(): + return false + + if hashes.len > 0 and index.hash notin hashes: + return false + + return true + + var pageRes: QueueDriverGetPageResult + try: + pageRes = driver.getPage(maxPageSize, ascendingOrder, cursor, matchesQuery) + except CatchableError, Exception: + return err(getCurrentExceptionMsg()) + + if pageRes.isErr(): + return err($pageRes.error) + + return ok(pageRes.value) + +method getMessagesCount*( + driver: LegacyQueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return ok(int64(driver.len())) + +method getPagesCount*( + driver: LegacyQueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return ok(int64(driver.len())) + +method getPagesSize*( + driver: LegacyQueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return ok(int64(driver.len())) + +method getDatabaseSize*( + driver: LegacyQueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return ok(int64(driver.len())) + +method performVacuum*( + driver: LegacyQueueDriver +): Future[ArchiveDriverResult[void]] {.async.} = + return err("interface method not implemented") + +method getOldestMessageTimestamp*( + driver: LegacyQueueDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return driver.first().map( + proc(index: Index): 
Timestamp = + index.receiverTime + ) + +method getNewestMessageTimestamp*( + driver: LegacyQueueDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return driver.last().map( + proc(index: Index): Timestamp = + index.receiverTime + ) + +method deleteMessagesOlderThanTimestamp*( + driver: LegacyQueueDriver, ts: Timestamp +): Future[ArchiveDriverResult[void]] {.async.} = + # TODO: Implement this message_store method + return err("interface method not implemented") + +method deleteOldestMessagesNotWithinLimit*( + driver: LegacyQueueDriver, limit: int +): Future[ArchiveDriverResult[void]] {.async.} = + # TODO: Implement this message_store method + return err("interface method not implemented") + +method decreaseDatabaseSize*( + driver: LegacyQueueDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.async.} = + return err("interface method not implemented") + +method close*(driver: LegacyQueueDriver): Future[ArchiveDriverResult[void]] {.async.} = + return ok() diff --git a/waku/waku_archive/driver/sqlite_driver.nim b/waku/waku_archive/driver/sqlite_driver.nim index 027e00488c..4537b7db83 100644 --- a/waku/waku_archive/driver/sqlite_driver.nim +++ b/waku/waku_archive/driver/sqlite_driver.nim @@ -3,6 +3,6 @@ when (NimMajor, NimMinor) < (1, 4): else: {.push raises: [].} -import ./sqlite_driver/sqlite_driver +import ./sqlite_driver/sqlite_driver, ./sqlite_driver/sqlite_driver_legacy -export sqlite_driver +export sqlite_driver, sqlite_driver_legacy diff --git a/waku/waku_archive/driver/sqlite_driver/cursor.nim b/waku/waku_archive/driver/sqlite_driver/cursor.nim index 9729f0ff79..bd9d50081a 100644 --- a/waku/waku_archive/driver/sqlite_driver/cursor.nim +++ b/waku/waku_archive/driver/sqlite_driver/cursor.nim @@ -5,7 +5,7 @@ else: import ../../../waku_core, ../../common -type DbCursor* = (Timestamp, seq[byte], PubsubTopic) +type DbCursor* {.deprecated.} = (Timestamp, seq[byte], PubsubTopic) -proc toDbCursor*(c: ArchiveCursor): DbCursor = +proc toDbCursor*(c: ArchiveCursorV2): DbCursor {.deprecated.} = (c.storeTime, @(c.digest.data), c.pubsubTopic) diff --git a/waku/waku_archive/driver/sqlite_driver/queries.nim b/waku/waku_archive/driver/sqlite_driver/queries.nim index 94f323b2de..902f5e1da2 100644 --- a/waku/waku_archive/driver/sqlite_driver/queries.nim +++ b/waku/waku_archive/driver/sqlite_driver/queries.nim @@ -7,8 +7,7 @@ import std/[options, sequtils], stew/[results, byteutils], sqlite3_abi import ../../../common/databases/db_sqlite, ../../../common/databases/common, - ../../../waku_core, - ./cursor + ../../../waku_core const DbTable = "Message" @@ -18,7 +17,7 @@ type SqlQueryStr = string proc queryRowWakuMessageCallback( s: ptr sqlite3_stmt, - contentTopicCol, payloadCol, versionCol, senderTimestampCol, metaCol: cint, + contentTopicCol, payloadCol, versionCol, timestampCol, metaCol: cint, ): WakuMessage = let topic = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, contentTopicCol)) @@ -32,22 +31,20 @@ proc queryRowWakuMessageCallback( metaLength = sqlite3_column_bytes(s, metaCol) payload = @(toOpenArray(p, 0, payloadLength - 1)) version = sqlite3_column_int64(s, versionCol) - senderTimestamp = sqlite3_column_int64(s, senderTimestampCol) + timestamp = sqlite3_column_int64(s, timestampCol) meta = @(toOpenArray(m, 0, metaLength - 1)) return WakuMessage( contentTopic: ContentTopic(contentTopic), payload: payload, version: uint32(version), - timestamp: Timestamp(senderTimestamp), + timestamp: Timestamp(timestamp), meta: meta, ) -proc 
queryRowReceiverTimestampCallback( - s: ptr sqlite3_stmt, storedAtCol: cint -): Timestamp = - let storedAt = sqlite3_column_int64(s, storedAtCol) - return Timestamp(storedAt) +proc queryRowTimestampCallback(s: ptr sqlite3_stmt, timestampCol: cint): Timestamp = + let timestamp = sqlite3_column_int64(s, timestampCol) + return Timestamp(timestamp) proc queryRowPubsubTopicCallback( s: ptr sqlite3_stmt, pubsubTopicCol: cint @@ -61,14 +58,6 @@ proc queryRowPubsubTopicCallback( return pubsubTopic -proc queryRowDigestCallback(s: ptr sqlite3_stmt, digestCol: cint): seq[byte] = - let - digestPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, digestCol)) - digestLength = sqlite3_column_bytes(s, digestCol) - digest = @(toOpenArray(digestPointer, 0, digestLength - 1)) - - return digest - proc queryRowWakuMessageHashCallback( s: ptr sqlite3_stmt, hashCol: cint ): WakuMessageHash = @@ -84,11 +73,10 @@ proc queryRowWakuMessageHashCallback( ## Create table proc createTableQuery(table: string): SqlQueryStr = - "CREATE TABLE IF NOT EXISTS " & table & " (" & " pubsubTopic BLOB NOT NULL," & + "CREATE TABLE IF NOT EXISTS " & table & " (" & + " messageHash BLOB NOT NULL PRIMARY KEY," & " pubsubTopic BLOB NOT NULL," & " contentTopic BLOB NOT NULL," & " payload BLOB," & " version INTEGER NOT NULL," & - " timestamp INTEGER NOT NULL," & " id BLOB," & " messageHash BLOB," & - " storedAt INTEGER NOT NULL," & " meta BLOB," & - " CONSTRAINT messageIndex PRIMARY KEY (messageHash)" & ") WITHOUT ROWID;" + " timestamp INTEGER NOT NULL," & " meta BLOB" & ") WITHOUT ROWID;" proc createTable*(db: SqliteDatabase): DatabaseResult[void] = let query = createTableQuery(DbTable) @@ -104,7 +92,7 @@ proc createTable*(db: SqliteDatabase): DatabaseResult[void] = ## Create indices proc createOldestMessageTimestampIndexQuery(table: string): SqlQueryStr = - "CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (storedAt);" + "CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (timestamp);" proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void] = let query = createOldestMessageTimestampIndexQuery(DbTable) @@ -117,39 +105,15 @@ proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void ) return ok() -proc createHistoryQueryIndexQuery(table: string): SqlQueryStr = - "CREATE INDEX IF NOT EXISTS i_query ON " & table & - " (contentTopic, pubsubTopic, storedAt, id);" - -proc createHistoryQueryIndex*(db: SqliteDatabase): DatabaseResult[void] = - let query = createHistoryQueryIndexQuery(DbTable) - discard - ?db.query( - query, - proc(s: ptr sqlite3_stmt) = - discard - , - ) - return ok() - ## Insert message -type InsertMessageParams* = ( - seq[byte], - seq[byte], - Timestamp, - seq[byte], - seq[byte], - seq[byte], - int64, - Timestamp, - seq[byte], -) +type InsertMessageParams* = + (seq[byte], seq[byte], seq[byte], seq[byte], int64, Timestamp, seq[byte]) proc insertMessageQuery(table: string): SqlQueryStr = return "INSERT INTO " & table & - "(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp, meta)" & - " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);" + "(messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta)" & + " VALUES (?, ?, ?, ?, ?, ?, ?);" proc prepareInsertMessageStmt*( db: SqliteDatabase @@ -178,14 +142,12 @@ proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] = ## Get oldest message receiver timestamp proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr = - return "SELECT MIN(storedAt) FROM " & table + return 
"SELECT MIN(timestamp) FROM " & table -proc selectOldestReceiverTimestamp*( - db: SqliteDatabase -): DatabaseResult[Timestamp] {.inline.} = +proc selectOldestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inline.} = var timestamp: Timestamp proc queryRowCallback(s: ptr sqlite3_stmt) = - timestamp = queryRowReceiverTimestampCallback(s, 0) + timestamp = queryRowTimestampCallback(s, 0) let query = selectOldestMessageTimestampQuery(DbTable) let res = db.query(query, queryRowCallback) @@ -197,14 +159,12 @@ proc selectOldestReceiverTimestamp*( ## Get newest message receiver timestamp proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr = - return "SELECT MAX(storedAt) FROM " & table + return "SELECT MAX(timestamp) FROM " & table -proc selectNewestReceiverTimestamp*( - db: SqliteDatabase -): DatabaseResult[Timestamp] {.inline.} = +proc selectNewestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inline.} = var timestamp: Timestamp proc queryRowCallback(s: ptr sqlite3_stmt) = - timestamp = queryRowReceiverTimestampCallback(s, 0) + timestamp = queryRowTimestampCallback(s, 0) let query = selectNewestMessageTimestampQuery(DbTable) let res = db.query(query, queryRowCallback) @@ -216,7 +176,7 @@ proc selectNewestReceiverTimestamp*( ## Delete messages older than timestamp proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr = - return "DELETE FROM " & table & " WHERE storedAt < " & $ts + return "DELETE FROM " & table & " WHERE timestamp < " & $ts proc deleteMessagesOlderThanTimestamp*( db: SqliteDatabase, ts: int64 @@ -231,58 +191,32 @@ proc deleteMessagesOlderThanTimestamp*( ) return ok() -## Delete oldest messages not within limit - -proc deleteOldestMessagesNotWithinLimitQuery(table: string, limit: int): SqlQueryStr = - return - "DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" & - " SELECT storedAt, id, pubsubTopic FROM " & table & - " ORDER BY storedAt DESC, id DESC" & " LIMIT " & $limit & ");" - -proc deleteOldestMessagesNotWithinLimit*( - db: SqliteDatabase, limit: int -): DatabaseResult[void] = - # NOTE: The word `limit` here refers the store capacity/maximum number-of-messages allowed limit - let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit = limit) - discard - ?db.query( - query, - proc(s: ptr sqlite3_stmt) = - discard - , - ) - return ok() - ## Select all messages proc selectAllMessagesQuery(table: string): SqlQueryStr = return - "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" & - " FROM " & table & " ORDER BY storedAt ASC" + "SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta" & + " FROM " & table & " ORDER BY timestamp ASC" proc selectAllMessages*( db: SqliteDatabase -): DatabaseResult[ - seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] -] = +): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] = ## Retrieve all messages from the store. 
-  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
+  var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)]
   proc queryRowCallback(s: ptr sqlite3_stmt) =
     let
-      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
+      hash = queryRowWakuMessageHashCallback(s, hashCol = 0)
+      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 1)
       wakuMessage = queryRowWakuMessageCallback(
         s,
-        contentTopicCol = 1,
-        payloadCol = 2,
+        contentTopicCol = 2,
+        payloadCol = 3,
         versionCol = 4,
-        senderTimestampCol = 5,
-        metaCol = 8,
+        timestampCol = 5,
+        metaCol = 6,
       )
-      digest = queryRowDigestCallback(s, digestCol = 6)
-      storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
-      hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
 
-    rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash))
+    rows.add((hash, pubsubTopic, wakuMessage))
 
   let query = selectAllMessagesQuery(DbTable)
   let res = db.query(query, queryRowCallback)
@@ -291,6 +225,25 @@ proc selectAllMessages*(
 
   return ok(rows)
 
+## Select all messages without data
+
+proc selectAllMessageHashesQuery(table: string): SqlQueryStr =
+  return "SELECT messageHash" & " FROM " & table & " ORDER BY timestamp ASC"
+
+proc selectAllMessageHashes*(db: SqliteDatabase): DatabaseResult[seq[WakuMessageHash]] =
+  ## Retrieve all message hashes from the store.
+  var rows: seq[WakuMessageHash]
+  proc queryRowCallback(s: ptr sqlite3_stmt) =
+    let hash = queryRowWakuMessageHashCallback(s, hashCol = 0)
+    rows.add(hash)
+
+  let query = selectAllMessageHashesQuery(DbTable)
+  let res = db.query(query, queryRowCallback)
+  if res.isErr():
+    return err(res.error())
+
+  return ok(rows)
+
 ## Select messages by history query with limit
 
 proc combineClauses(clauses: varargs[Option[string]]): Option[string] =
@@ -303,75 +256,6 @@ proc combineClauses(clauses: varargs[Option[string]]): Option[string] =
       where &= " AND " & clause
   return some(where)
 
-proc whereClausev2(
-    cursor: bool,
-    pubsubTopic: Option[PubsubTopic],
-    contentTopic: seq[ContentTopic],
-    startTime: Option[Timestamp],
-    endTime: Option[Timestamp],
-    ascending: bool,
-): Option[string] {.deprecated.} =
-  let cursorClause =
-    if cursor:
-      let comp = if ascending: ">" else: "<"
-
-      some("(storedAt, id) " & comp & " (?, ?)")
-    else:
-      none(string)
-
-  let pubsubTopicClause =
-    if pubsubTopic.isNone():
-      none(string)
-    else:
-      some("pubsubTopic = (?)")
-
-  let contentTopicClause =
-    if contentTopic.len <= 0:
-      none(string)
-    else:
-      var where = "contentTopic IN ("
-      where &= "?"
-      for _ in 1 ..< contentTopic.len:
-        where &= ", ?"
- where &= ")" - some(where) - - let startTimeClause = - if startTime.isNone(): - none(string) - else: - some("storedAt >= (?)") - - let endTimeClause = - if endTime.isNone(): - none(string) - else: - some("storedAt <= (?)") - - return combineClauses( - cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause - ) - -proc selectMessagesWithLimitQueryv2( - table: string, where: Option[string], limit: uint, ascending = true, v3 = false -): SqlQueryStr {.deprecated.} = - let order = if ascending: "ASC" else: "DESC" - - var query: string - - query = - "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" - query &= " FROM " & table - - if where.isSome(): - query &= " WHERE " & where.get() - - query &= " ORDER BY storedAt " & order & ", id " & order - - query &= " LIMIT " & $limit & ";" - - return query - proc prepareStmt( db: SqliteDatabase, stmt: string ): DatabaseResult[SqliteStmt[void, void]] = @@ -379,110 +263,6 @@ proc prepareStmt( checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil) return ok(SqliteStmt[void, void](s)) -proc execSelectMessagesV2WithLimitStmt( - s: SqliteStmt, - cursor: Option[DbCursor], - pubsubTopic: Option[PubsubTopic], - contentTopic: seq[ContentTopic], - startTime: Option[Timestamp], - endTime: Option[Timestamp], - onRowCallback: DataProc, -): DatabaseResult[void] {.deprecated.} = - let s = RawStmtPtr(s) - - # Bind params - var paramIndex = 1 - - if cursor.isSome(): - let (storedAt, id, _) = cursor.get() - checkErr bindParam(s, paramIndex, storedAt) - paramIndex += 1 - checkErr bindParam(s, paramIndex, id) - paramIndex += 1 - - if pubsubTopic.isSome(): - let pubsubTopic = toBytes(pubsubTopic.get()) - checkErr bindParam(s, paramIndex, pubsubTopic) - paramIndex += 1 - - for topic in contentTopic: - checkErr bindParam(s, paramIndex, topic.toBytes()) - paramIndex += 1 - - if startTime.isSome(): - let time = startTime.get() - checkErr bindParam(s, paramIndex, time) - paramIndex += 1 - - if endTime.isSome(): - let time = endTime.get() - checkErr bindParam(s, paramIndex, time) - paramIndex += 1 - - try: - while true: - let v = sqlite3_step(s) - case v - of SQLITE_ROW: - onRowCallback(s) - of SQLITE_DONE: - return ok() - else: - return err($sqlite3_errstr(v)) - finally: - # release implicit transaction - discard sqlite3_reset(s) # same return information as step - discard sqlite3_clear_bindings(s) # no errors possible - -proc selectMessagesByHistoryQueryWithLimit*( - db: SqliteDatabase, - contentTopic: seq[ContentTopic], - pubsubTopic: Option[PubsubTopic], - cursor: Option[DbCursor], - startTime: Option[Timestamp], - endTime: Option[Timestamp], - limit: uint, - ascending: bool, -): DatabaseResult[ - seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] -] {.deprecated.} = - var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] = - @[] - - proc queryRowCallback(s: ptr sqlite3_stmt) = - let - pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3) - message = queryRowWakuMessageCallback( - s, - contentTopicCol = 1, - payloadCol = 2, - versionCol = 4, - senderTimestampCol = 5, - metaCol = 8, - ) - digest = queryRowDigestCallback(s, digestCol = 6) - storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0) - hash = queryRowWakuMessageHashCallback(s, hashCol = 7) - - messages.add((pubsubTopic, message, digest, storedAt, hash)) - - let query = block: - let where = whereClausev2( - cursor.isSome(), pubsubTopic, contentTopic, 
startTime, endTime, ascending - ) - - selectMessagesWithLimitQueryv2(DbTable, where, limit, ascending) - - let dbStmt = ?db.prepareStmt(query) - ?dbStmt.execSelectMessagesV2WithLimitStmt( - cursor, pubsubTopic, contentTopic, startTime, endTime, queryRowCallback - ) - dbStmt.dispose() - - return ok(messages) - -### Store v3 ### - proc execSelectMessageByHash( s: SqliteStmt, hash: WakuMessageHash, onRowCallback: DataProc ): DatabaseResult[void] = @@ -505,14 +285,23 @@ proc execSelectMessageByHash( discard sqlite3_reset(s) # same return information as step discard sqlite3_clear_bindings(s) # no errors possible -proc selectMessageByHashQuery(): SqlQueryStr = - var query: string +proc selectTimestampByHashQuery(table: string): SqlQueryStr = + return "SELECT timestamp FROM " & table & " WHERE messageHash = (?)" - query = "SELECT contentTopic, payload, version, timestamp, meta, messageHash" - query &= " FROM " & DbTable - query &= " WHERE messageHash = (?)" +proc getCursorTimestamp( + db: SqliteDatabase, hash: WakuMessageHash +): DatabaseResult[Option[Timestamp]] = + var timestamp = none(Timestamp) - return query + proc queryRowCallback(s: ptr sqlite3_stmt) = + timestamp = some(queryRowTimestampCallback(s, 0)) + + let query = selectTimestampByHashQuery(DbTable) + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessageByHash(hash, queryRowCallback) + dbStmt.dispose() + + return ok(timestamp) proc whereClause( cursor: bool, @@ -552,13 +341,13 @@ proc whereClause( if startTime.isNone(): none(string) else: - some("storedAt >= (?)") + some("timestamp >= (?)") let endTimeClause = if endTime.isNone(): none(string) else: - some("storedAt <= (?)") + some("timestamp <= (?)") let hashesClause = if hashes.len <= 0: @@ -637,20 +426,36 @@ proc execSelectMessagesWithLimitStmt( discard sqlite3_clear_bindings(s) # no errors possible proc selectMessagesWithLimitQuery( - table: string, where: Option[string], limit: uint, ascending = true, v3 = false + table: string, where: Option[string], limit: uint, ascending = true ): SqlQueryStr = let order = if ascending: "ASC" else: "DESC" var query: string query = - "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" + "SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta" query &= " FROM " & table if where.isSome(): query &= " WHERE " & where.get() - query &= " ORDER BY storedAt " & order & ", messageHash " & order + query &= " ORDER BY timestamp " & order & ", messageHash " & order + + query &= " LIMIT " & $limit & ";" + + return query + +proc selectMessageHashesWithLimitQuery( + table: string, where: Option[string], limit: uint, ascending = true +): SqlQueryStr = + let order = if ascending: "ASC" else: "DESC" + + var query = "SELECT messageHash FROM " & table + + if where.isSome(): + query &= " WHERE " & where.get() + + query &= " ORDER BY timestamp " & order & ", messageHash " & order query &= " LIMIT " & $limit & ";" @@ -666,79 +471,101 @@ proc selectMessagesByStoreQueryWithLimit*( hashes: seq[WakuMessageHash], limit: uint, ascending: bool, -): DatabaseResult[ - seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] -] = - # Must first get the message timestamp before paginating by time - let newCursor = - if cursor.isSome() and cursor.get() != EmptyWakuMessageHash: - let hash: WakuMessageHash = cursor.get() - - var wakuMessage: Option[WakuMessage] - - proc queryRowCallback(s: ptr sqlite3_stmt) = - wakuMessage = some( - queryRowWakuMessageCallback( - s, - contentTopicCol = 
0,
-          payloadCol = 1,
-          versionCol = 2,
-          senderTimestampCol = 3,
-          metaCol = 4,
-        )
-      )
-
-    let query = selectMessageByHashQuery()
-    let dbStmt = ?db.prepareStmt(query)
-    ?dbStmt.execSelectMessageByHash(hash, queryRowCallback)
-    dbStmt.dispose()
-
-    if wakuMessage.isSome():
-      let time = wakuMessage.get().timestamp
-
-      some((time, hash))
-    else:
-      return err("cursor not found")
-  else:
-    none((Timestamp, WakuMessageHash))
+): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] =
+  var timeCursor = none((Timestamp, WakuMessageHash))
+
+  if cursor.isSome():
+    let hash: WakuMessageHash = cursor.get()
 
-  var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
-    @[]
+    let timeOpt = ?getCursorTimestamp(db, hash)
+
+    if timeOpt.isNone():
+      return err("cursor not found")
+
+    timeCursor = some((timeOpt.get(), hash))
+
+  var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] = @[]
 
   proc queryRowCallback(s: ptr sqlite3_stmt) =
     let
-      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
+      hash = queryRowWakuMessageHashCallback(s, hashCol = 0)
+      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 1)
       message = queryRowWakuMessageCallback(
         s,
-        contentTopicCol = 1,
-        payloadCol = 2,
+        contentTopicCol = 2,
+        payloadCol = 3,
         versionCol = 4,
-        senderTimestampCol = 5,
-        metaCol = 8,
+        timestampCol = 5,
+        metaCol = 6,
       )
-      digest = queryRowDigestCallback(s, digestCol = 6)
-      storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
-      hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
-
-    messages.add((pubsubTopic, message, digest, storedAt, hash))
-
-  let query = block:
-    let where = whereClause(
-      newCursor.isSome(),
-      pubsubTopic,
-      contentTopic,
-      startTime,
-      endTime,
-      hashes,
-      ascending,
-    )
-    selectMessagesWithLimitQuery(DbTable, where, limit, ascending, true)
+    rows.add((hash, pubsubTopic, message))
+
+  let where = whereClause(
+    timeCursor.isSome(),
+    pubsubTopic,
+    contentTopic,
+    startTime,
+    endTime,
+    hashes,
+    ascending,
+  )
+
+  let query = selectMessagesWithLimitQuery(DbTable, where, limit, ascending)
 
   let dbStmt = ?db.prepareStmt(query)
   ?dbStmt.execSelectMessagesWithLimitStmt(
-    newCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
+    timeCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
   )
   dbStmt.dispose()
 
-  return ok(messages)
+  return ok(rows)
+
+proc selectMessageHashesByStoreQueryWithLimit*(
+    db: SqliteDatabase,
+    contentTopic: seq[ContentTopic],
+    pubsubTopic: Option[PubsubTopic],
+    cursor: Option[WakuMessageHash],
+    startTime: Option[Timestamp],
+    endTime: Option[Timestamp],
+    hashes: seq[WakuMessageHash],
+    limit: uint,
+    ascending: bool,
+): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] =
+  var timeCursor = none((Timestamp, WakuMessageHash))
+
+  if cursor.isSome():
+    let hash: WakuMessageHash = cursor.get()
+
+    let timeOpt = ?getCursorTimestamp(db, hash)
+
+    if timeOpt.isNone():
+      return err("cursor not found")
+
+    timeCursor = some((timeOpt.get(), hash))
+
+  var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] = @[]
+
+  proc queryRowCallback(s: ptr sqlite3_stmt) =
+    let hash = queryRowWakuMessageHashCallback(s, hashCol = 0)
+    rows.add((hash, "", WakuMessage()))
+
+  let where = whereClause(
+    timeCursor.isSome(),
+    pubsubTopic,
+    contentTopic,
+    startTime,
+    endTime,
+    hashes,
+    ascending,
+  )
+
+  let query = selectMessageHashesWithLimitQuery(DbTable, where, limit, ascending)
+
+  let dbStmt = ?db.prepareStmt(query)
+  ?dbStmt.execSelectMessagesWithLimitStmt(
+    timeCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
+  )
+  dbStmt.dispose()
+
+  return ok(rows)
diff --git a/waku/waku_archive/driver/sqlite_driver/queries_legacy.nim b/waku/waku_archive/driver/sqlite_driver/queries_legacy.nim
new file mode 100644
index 0000000000..659cbb8095
--- /dev/null
+++ b/waku/waku_archive/driver/sqlite_driver/queries_legacy.nim
@@ -0,0 +1,482 @@
+when (NimMajor, NimMinor) < (1, 4):
+  {.push raises: [Defect].}
+else:
+  {.push raises: [].}
+
+import std/[options, sequtils], stew/[results, byteutils], sqlite3_abi
+import
+  ../../../common/databases/db_sqlite,
+  ../../../common/databases/common,
+  ../../../waku_core,
+  ./cursor
+
+const DbTable = "Message"
+
+type SqlQueryStr = string
+
+### SQLite column helper methods
+
+proc queryRowWakuMessageCallback(
+    s: ptr sqlite3_stmt,
+    contentTopicCol, payloadCol, versionCol, senderTimestampCol, metaCol: cint,
+): WakuMessage =
+  let
+    topic = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, contentTopicCol))
+    topicLength = sqlite3_column_bytes(s, contentTopicCol)
+    contentTopic = string.fromBytes(@(toOpenArray(topic, 0, topicLength - 1)))
+
+    p = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, payloadCol))
+    m = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, metaCol))
+
+    payloadLength = sqlite3_column_bytes(s, payloadCol)
+    metaLength = sqlite3_column_bytes(s, metaCol)
+    payload = @(toOpenArray(p, 0, payloadLength - 1))
+    version = sqlite3_column_int64(s, versionCol)
+    senderTimestamp = sqlite3_column_int64(s, senderTimestampCol)
+    meta = @(toOpenArray(m, 0, metaLength - 1))
+
+  return WakuMessage(
+    contentTopic: ContentTopic(contentTopic),
+    payload: payload,
+    version: uint32(version),
+    timestamp: Timestamp(senderTimestamp),
+    meta: meta,
+  )
+
+proc queryRowReceiverTimestampCallback(
+    s: ptr sqlite3_stmt, storedAtCol: cint
+): Timestamp =
+  let storedAt = sqlite3_column_int64(s, storedAtCol)
+  return Timestamp(storedAt)
+
+proc queryRowPubsubTopicCallback(
+    s: ptr sqlite3_stmt, pubsubTopicCol: cint
+): PubsubTopic =
+  let
+    pubsubTopicPointer =
+      cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, pubsubTopicCol))
+    pubsubTopicLength = sqlite3_column_bytes(s, pubsubTopicCol)
+    pubsubTopic =
+      string.fromBytes(@(toOpenArray(pubsubTopicPointer, 0, pubsubTopicLength - 1)))
+
+  return pubsubTopic
+
+proc queryRowDigestCallback(s: ptr sqlite3_stmt, digestCol: cint): seq[byte] =
+  let
+    digestPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, digestCol))
+    digestLength = sqlite3_column_bytes(s, digestCol)
+    digest = @(toOpenArray(digestPointer, 0, digestLength - 1))
+
+  return digest
+
+proc queryRowWakuMessageHashCallback(
+    s: ptr sqlite3_stmt, hashCol: cint
+): WakuMessageHash =
+  let
+    hashPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, hashCol))
+    hashLength = sqlite3_column_bytes(s, hashCol)
+    hash = fromBytes(toOpenArray(hashPointer, 0, hashLength - 1))
+
+  return hash
+
+### SQLite queries
+
+## Create table
+
+proc createTableQuery(table: string): SqlQueryStr =
+  "CREATE TABLE IF NOT EXISTS " & table & " (" & " pubsubTopic BLOB NOT NULL," &
+    " contentTopic BLOB NOT NULL," & " payload BLOB," & " version INTEGER NOT NULL," &
+    " timestamp INTEGER NOT NULL," & " id BLOB," & " messageHash BLOB," &
+    " storedAt INTEGER NOT NULL," & " meta BLOB," &
+    " CONSTRAINT messageIndex PRIMARY KEY (messageHash)" & ") WITHOUT ROWID;"
+
+proc createTable*(db: SqliteDatabase): DatabaseResult[void] =
+  let query = createTableQuery(DbTable)
+  discard
+    ?db.query(
+      query,
+      proc(s: ptr sqlite3_stmt) =
+        discard
+      ,
+    )
+  return ok()
+
+## Create indices
+
+proc createOldestMessageTimestampIndexQuery(table: string): SqlQueryStr =
+  "CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (storedAt);"
+
+proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void] =
+  let query = createOldestMessageTimestampIndexQuery(DbTable)
+  discard
+    ?db.query(
+      query,
+      proc(s: ptr sqlite3_stmt) =
+        discard
+      ,
+    )
+  return ok()
+
+proc createHistoryQueryIndexQuery(table: string): SqlQueryStr =
+  "CREATE INDEX IF NOT EXISTS i_query ON " & table &
+    " (contentTopic, pubsubTopic, storedAt, id);"
+
+proc createHistoryQueryIndex*(db: SqliteDatabase): DatabaseResult[void] =
+  let query = createHistoryQueryIndexQuery(DbTable)
+  discard
+    ?db.query(
+      query,
+      proc(s: ptr sqlite3_stmt) =
+        discard
+      ,
+    )
+  return ok()
+
+## Insert message
+type InsertMessageParams* = (
+  seq[byte],
+  seq[byte],
+  Timestamp,
+  seq[byte],
+  seq[byte],
+  seq[byte],
+  int64,
+  Timestamp,
+  seq[byte],
+)
+
+proc insertMessageQuery(table: string): SqlQueryStr =
+  return
+    "INSERT INTO " & table &
+    "(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp, meta)" &
+    " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);"
+
+proc prepareInsertMessageStmt*(
+    db: SqliteDatabase
+): SqliteStmt[InsertMessageParams, void] =
+  let query = insertMessageQuery(DbTable)
+  return
+    db.prepareStmt(query, InsertMessageParams, void).expect("this is a valid statement")
+
+## Count table messages
+
+proc countMessagesQuery(table: string): SqlQueryStr =
+  return "SELECT COUNT(*) FROM " & table
+
+proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] =
+  var count: int64
+  proc queryRowCallback(s: ptr sqlite3_stmt) =
+    count = sqlite3_column_int64(s, 0)
+
+  let query = countMessagesQuery(DbTable)
+  let res = db.query(query, queryRowCallback)
+  if res.isErr():
+    return err("failed to count number of messages in the database")
+
+  return ok(count)
+
+## Get oldest message receiver timestamp
+
+proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr =
+  return "SELECT MIN(storedAt) FROM " & table
+
+proc selectOldestReceiverTimestamp*(
+    db: SqliteDatabase
+): DatabaseResult[Timestamp] {.inline.} =
+  var timestamp: Timestamp
+  proc queryRowCallback(s: ptr sqlite3_stmt) =
+    timestamp = queryRowReceiverTimestampCallback(s, 0)
+
+  let query = selectOldestMessageTimestampQuery(DbTable)
+  let res = db.query(query, queryRowCallback)
+  if res.isErr():
+    return err("failed to get the oldest receiver timestamp from the database")
+
+  return ok(timestamp)
+
+## Get newest message receiver timestamp
+
+proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr =
+  return "SELECT MAX(storedAt) FROM " & table
+
+proc selectNewestReceiverTimestamp*(
+    db: SqliteDatabase
+): DatabaseResult[Timestamp] {.inline.} =
+  var timestamp: Timestamp
+  proc queryRowCallback(s: ptr sqlite3_stmt) =
+    timestamp = queryRowReceiverTimestampCallback(s, 0)
+
+  let query = selectNewestMessageTimestampQuery(DbTable)
+  let res = db.query(query, queryRowCallback)
+  if res.isErr():
+    return err("failed to get the newest receiver timestamp from the database")
+
+  return ok(timestamp)
+
+## Delete messages older than timestamp
+
+proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr =
+  return "DELETE FROM " & table & " WHERE storedAt < " & $ts
+
+proc deleteMessagesOlderThanTimestamp*(
+    db: SqliteDatabase, ts: int64
+): DatabaseResult[void] =
+  let query = deleteMessagesOlderThanTimestampQuery(DbTable, ts)
+  discard
+    ?db.query(
+      query,
+      proc(s: ptr sqlite3_stmt) =
+        discard
+      ,
+    )
+  return ok()
+
+## Delete oldest messages not within limit
+
+proc deleteOldestMessagesNotWithinLimitQuery(table: string, limit: int): SqlQueryStr =
+  return
+    "DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" &
+    " SELECT storedAt, id, pubsubTopic FROM " & table &
+    " ORDER BY storedAt DESC, id DESC" & " LIMIT " & $limit & ");"
+
+proc deleteOldestMessagesNotWithinLimit*(
+    db: SqliteDatabase, limit: int
+): DatabaseResult[void] =
+  # NOTE: The word `limit` here refers to the store capacity, i.e. the maximum number of messages allowed
+  let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit = limit)
+  discard
+    ?db.query(
+      query,
+      proc(s: ptr sqlite3_stmt) =
+        discard
+      ,
+    )
+  return ok()
+
+## Select all messages
+
+proc selectAllMessagesQuery(table: string): SqlQueryStr =
+  return
+    "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" &
+    " FROM " & table & " ORDER BY storedAt ASC"
+
+proc selectAllMessages*(
+    db: SqliteDatabase
+): DatabaseResult[
+  seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
+] =
+  ## Retrieve all messages from the store.
+  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
+  proc queryRowCallback(s: ptr sqlite3_stmt) =
+    let
+      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
+      wakuMessage = queryRowWakuMessageCallback(
+        s,
+        contentTopicCol = 1,
+        payloadCol = 2,
+        versionCol = 4,
+        senderTimestampCol = 5,
+        metaCol = 8,
+      )
+      digest = queryRowDigestCallback(s, digestCol = 6)
+      storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
+      hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
+
+    rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash))
+
+  let query = selectAllMessagesQuery(DbTable)
+  let res = db.query(query, queryRowCallback)
+  if res.isErr():
+    return err(res.error())
+
+  return ok(rows)
+
+## Select messages by history query with limit
+
+proc combineClauses(clauses: varargs[Option[string]]): Option[string] =
+  let whereSeq = @clauses.filterIt(it.isSome()).mapIt(it.get())
+  if whereSeq.len <= 0:
+    return none(string)
+
+  var where: string = whereSeq[0]
+  for clause in whereSeq[1 ..^ 1]:
+    where &= " AND " & clause
+  return some(where)
+
+proc whereClause(
+    cursor: bool,
+    pubsubTopic: Option[PubsubTopic],
+    contentTopic: seq[ContentTopic],
+    startTime: Option[Timestamp],
+    endTime: Option[Timestamp],
+    ascending: bool,
+): Option[string] =
+  let cursorClause =
+    if cursor:
+      let comp = if ascending: ">" else: "<"
+
+      some("(storedAt, id) " & comp & " (?, ?)")
+    else:
+      none(string)
+
+  let pubsubTopicClause =
+    if pubsubTopic.isNone():
+      none(string)
+    else:
+      some("pubsubTopic = (?)")
+
+  let contentTopicClause =
+    if contentTopic.len <= 0:
+      none(string)
+    else:
+      var where = "contentTopic IN ("
+      where &= "?"
+      for _ in 1 ..< contentTopic.len:
+        where &= ", ?"
+      where &= ")"
+      some(where)
+
+  let startTimeClause =
+    if startTime.isNone():
+      none(string)
+    else:
+      some("storedAt >= (?)")
+
+  let endTimeClause =
+    if endTime.isNone():
+      none(string)
+    else:
+      some("storedAt <= (?)")
+
+  return combineClauses(
+    cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause
+  )
+
+proc selectMessagesWithLimitQuery(
+    table: string, where: Option[string], limit: uint, ascending = true, v3 = false
+): SqlQueryStr =
+  let order = if ascending: "ASC" else: "DESC"
+
+  var query: string
+
+  query =
+    "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta"
+  query &= " FROM " & table
+
+  if where.isSome():
+    query &= " WHERE " & where.get()
+
+  query &= " ORDER BY storedAt " & order & ", id " & order
+
+  query &= " LIMIT " & $limit & ";"
+
+  return query
+
+proc prepareStmt(
+    db: SqliteDatabase, stmt: string
+): DatabaseResult[SqliteStmt[void, void]] =
+  var s: RawStmtPtr
+  checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil)
+  return ok(SqliteStmt[void, void](s))
+
+proc execSelectMessagesWithLimitStmt(
+    s: SqliteStmt,
+    cursor: Option[DbCursor],
+    pubsubTopic: Option[PubsubTopic],
+    contentTopic: seq[ContentTopic],
+    startTime: Option[Timestamp],
+    endTime: Option[Timestamp],
+    onRowCallback: DataProc,
+): DatabaseResult[void] =
+  let s = RawStmtPtr(s)
+
+  # Bind params
+  var paramIndex = 1
+
+  if cursor.isSome():
+    let (storedAt, id, _) = cursor.get()
+    checkErr bindParam(s, paramIndex, storedAt)
+    paramIndex += 1
+    checkErr bindParam(s, paramIndex, id)
+    paramIndex += 1
+
+  if pubsubTopic.isSome():
+    let pubsubTopic = toBytes(pubsubTopic.get())
+    checkErr bindParam(s, paramIndex, pubsubTopic)
+    paramIndex += 1
+
+  for topic in contentTopic:
+    checkErr bindParam(s, paramIndex, topic.toBytes())
+    paramIndex += 1
+
+  if startTime.isSome():
+    let time = startTime.get()
+    checkErr bindParam(s, paramIndex, time)
+    paramIndex += 1
+
+  if endTime.isSome():
+    let time = endTime.get()
+    checkErr bindParam(s, paramIndex, time)
+    paramIndex += 1
+
+  try:
+    while true:
+      let v = sqlite3_step(s)
+      case v
+      of SQLITE_ROW:
+        onRowCallback(s)
+      of SQLITE_DONE:
+        return ok()
+      else:
+        return err($sqlite3_errstr(v))
+  finally:
+    # release implicit transaction
+    discard sqlite3_reset(s) # same return information as step
+    discard sqlite3_clear_bindings(s) # no errors possible
+
+proc selectMessagesByHistoryQueryWithLimit*(
+    db: SqliteDatabase,
+    contentTopic: seq[ContentTopic],
+    pubsubTopic: Option[PubsubTopic],
+    cursor: Option[DbCursor],
+    startTime: Option[Timestamp],
+    endTime: Option[Timestamp],
+    limit: uint,
+    ascending: bool,
+): DatabaseResult[
+  seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
+] =
+  var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
+    @[]
+
+  proc queryRowCallback(s: ptr sqlite3_stmt) =
+    let
+      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
+      message = queryRowWakuMessageCallback(
+        s,
+        contentTopicCol = 1,
+        payloadCol = 2,
+        versionCol = 4,
+        senderTimestampCol = 5,
+        metaCol = 8,
+      )
+      digest = queryRowDigestCallback(s, digestCol = 6)
+      storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
+      hash = queryRowWakuMessageHashCallback(s, hashCol = 7)
+
+    messages.add((pubsubTopic, message, digest, storedAt, hash))
+
+  let query = block:
+    let where = whereClause(
+      cursor.isSome(), pubsubTopic, contentTopic, startTime, endTime, ascending
+    )
+
+    selectMessagesWithLimitQuery(DbTable, where, limit, ascending)
+
+  let dbStmt = ?db.prepareStmt(query)
+  ?dbStmt.execSelectMessagesWithLimitStmt(
+    cursor, pubsubTopic, contentTopic, startTime, endTime, queryRowCallback
+  )
+  dbStmt.dispose()
+
+  return ok(messages)
diff --git a/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim b/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim
index b817282f51..99d28bec04 100644
--- a/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim
+++ b/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim
@@ -12,7 +12,6 @@ import
   ../../../waku_core/message/digest,
   ../../common,
   ../../driver,
-  ./cursor,
   ./queries
 
 logScope:
@@ -31,11 +30,7 @@ proc init(db: SqliteDatabase): ArchiveDriverResult[void] =
   # Create indices, if don't exist
   let resRtIndex = createOldestMessageTimestampIndex(db)
   if resRtIndex.isErr():
-    return err("failed to create i_rt index: " & resRtIndex.error())
-
-  let resMsgIndex = createHistoryQueryIndex(db)
-  if resMsgIndex.isErr():
-    return err("failed to create i_query index: " & resMsgIndex.error())
+    return err("failed to create i_ts index: " & resRtIndex.error())
 
   return ok()
 
@@ -55,24 +50,20 @@ proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] =
 
 method put*(
     s: SqliteDriver,
+    messageHash: WakuMessageHash,
     pubsubTopic: PubsubTopic,
     message: WakuMessage,
-    digest: MessageDigest,
-    messageHash: WakuMessageHash,
-    receivedTime: Timestamp,
 ): Future[ArchiveDriverResult[void]] {.async.} =
   ## Inserts a message into the store
   let res = s.insertStmt.exec(
     (
-      @(digest.data), # id
-      @(messageHash), # messageHash
-      receivedTime, # storedAt
-      toBytes(message.contentTopic), # contentTopic
-      message.payload, # payload
-      toBytes(pubsubTopic), # pubsubTopic
-      int64(message.version), # version
-      message.timestamp, # senderTimestamp
-      message.meta, # meta
+      @(messageHash),
+      toBytes(pubsubTopic),
+      toBytes(message.contentTopic),
+      message.payload,
+      int64(message.version),
+      message.timestamp,
+      message.meta,
     )
   )
 
@@ -84,35 +75,9 @@ method getAllMessages*(
   ## Retrieve all messages from the store.
   return s.db.selectAllMessages()
 
-method getMessagesV2*(
-    s: SqliteDriver,
-    contentTopic = newSeq[ContentTopic](0),
-    pubsubTopic = none(PubsubTopic),
-    cursor = none(ArchiveCursor),
-    startTime = none(Timestamp),
-    endTime = none(Timestamp),
-    maxPageSize = DefaultPageSize,
-    ascendingOrder = true,
-): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
-  echo "here"
-
-  let cursor = cursor.map(toDbCursor)
-
-  let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit(
-    contentTopic,
-    pubsubTopic,
-    cursor,
-    startTime,
-    endTime,
-    limit = maxPageSize,
-    ascending = ascendingOrder,
-  )
-
-  return rowsRes
-
 method getMessages*(
     s: SqliteDriver,
-    includeData = false,
+    includeData = true,
     contentTopic = newSeq[ContentTopic](0),
     pubsubTopic = none(PubsubTopic),
     cursor = none(ArchiveCursor),
@@ -122,13 +87,19 @@ method getMessages*(
     maxPageSize = DefaultPageSize,
     ascendingOrder = true,
 ): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
-  let cursor =
-    if cursor.isSome():
-      some(cursor.get().hash)
-    else:
-      none(WakuMessageHash)
+  if not includeData:
+    return s.db.selectMessageHashesByStoreQueryWithLimit(
+      contentTopic,
+      pubsubTopic,
+      cursor,
+      startTime,
+      endTime,
+      hashes,
+      limit = maxPageSize,
+      ascending = ascendingOrder,
+    )
 
-  let rowsRes = s.db.selectMessagesByStoreQueryWithLimit(
+  return s.db.selectMessagesByStoreQueryWithLimit(
     contentTopic,
     pubsubTopic,
     cursor,
@@ -139,8 +110,6 @@ method getMessages*(
     ascending = ascendingOrder,
   )
 
-  return rowsRes
-
 method getMessagesCount*(
     s: SqliteDriver
 ): Future[ArchiveDriverResult[int64]] {.async.} =
@@ -161,23 +130,18 @@ method performVacuum*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.asyn
 method getOldestMessageTimestamp*(
     s: SqliteDriver
 ): Future[ArchiveDriverResult[Timestamp]] {.async.} =
-  return s.db.selectOldestReceiverTimestamp()
+  return s.db.selectOldestTimestamp()
 
 method getNewestMessageTimestamp*(
     s: SqliteDriver
 ): Future[ArchiveDriverResult[Timestamp]] {.async.} =
-  return s.db.selectnewestReceiverTimestamp()
+  return s.db.selectNewestTimestamp()
 
 method deleteMessagesOlderThanTimestamp*(
    s: SqliteDriver, ts: Timestamp
 ): Future[ArchiveDriverResult[void]] {.async.} =
   return s.db.deleteMessagesOlderThanTimestamp(ts)
 
-method deleteOldestMessagesNotWithinLimit*(
-    s: SqliteDriver, limit: int
-): Future[ArchiveDriverResult[void]] {.async.} =
-  return s.db.deleteOldestMessagesNotWithinLimit(limit)
-
 method decreaseDatabaseSize*(
     driver: SqliteDriver, targetSizeInBytes: int64, forceRemoval: bool = false
 ): Future[ArchiveDriverResult[void]] {.async.} =
diff --git a/waku/waku_archive/driver/sqlite_driver/sqlite_driver_legacy.nim b/waku/waku_archive/driver/sqlite_driver/sqlite_driver_legacy.nim
new file mode 100644
index 0000000000..14deeea160
--- /dev/null
+++ b/waku/waku_archive/driver/sqlite_driver/sqlite_driver_legacy.nim
@@ -0,0 +1,204 @@
+# The code in this file is an adaptation of the Sqlite KV Store found in nim-eth.
+# https://github.com/status-im/nim-eth/blob/master/eth/db/kvstore_sqlite3.nim
+when (NimMajor, NimMinor) < (1, 4):
+  {.push raises: [Defect].}
+else:
+  {.push raises: [].}
+
+import std/options, stew/[byteutils, results], chronicles, chronos
+import
+  ../../../common/databases/db_sqlite,
+  ../../../waku_core,
+  ../../../waku_core/message/digest,
+  ../../common,
+  ../../driver,
+  ./cursor,
+  ./queries_legacy
+
+logScope:
+  topics = "waku archive sqlite"
+
+proc init(db: SqliteDatabase): ArchiveDriverResult[void] {.deprecated.} =
+  ## Misconfiguration can lead to nil DB
+  if db.isNil():
+    return err("db not initialized")
+
+  # Create table, if it doesn't exist
+  let resCreate = createTable(db)
+  if resCreate.isErr():
+    return err("failed to create table: " & resCreate.error())
+
+  # Create indices, if they don't exist
+  let resRtIndex = createOldestMessageTimestampIndex(db)
+  if resRtIndex.isErr():
+    return err("failed to create i_rt index: " & resRtIndex.error())
+
+  let resMsgIndex = createHistoryQueryIndex(db)
+  if resMsgIndex.isErr():
+    return err("failed to create i_query index: " & resMsgIndex.error())
+
+  return ok()
+
+type LegacySqliteDriver* {.deprecated.} = ref object of ArchiveDriver
+  db: SqliteDatabase
+  insertStmt: SqliteStmt[InsertMessageParams, void]
+
+proc new*(
+    T: type LegacySqliteDriver, db: SqliteDatabase
+): ArchiveDriverResult[T] {.deprecated.} =
+  # Database initialization
+  let resInit = init(db)
+  if resInit.isErr():
+    return err(resInit.error())
+
+  # General initialization
+  let insertStmt = db.prepareInsertMessageStmt()
+  return ok(LegacySqliteDriver(db: db, insertStmt: insertStmt))
+
+method putV2*(
+    s: LegacySqliteDriver,
+    pubsubTopic: PubsubTopic,
+    message: WakuMessage,
+    digest: MessageDigest,
+    messageHash: WakuMessageHash,
+    receivedTime: Timestamp,
+): Future[ArchiveDriverResult[void]] {.async, deprecated.} =
+  ## Inserts a message into the store
+  let res = s.insertStmt.exec(
+    (
+      @(digest.data), # id
+      @(messageHash), # messageHash
+      receivedTime, # storedAt
+      toBytes(message.contentTopic), # contentTopic
+      message.payload, # payload
+      toBytes(pubsubTopic), # pubsubTopic
+      int64(message.version), # version
+      message.timestamp, # senderTimestamp
+      message.meta, # meta
+    )
+  )
+
+  return res
+
+method getAllMessages*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async, deprecated.} =
+  ## Retrieve all messages from the store.
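+  ## Each legacy row is a (PubsubTopic, WakuMessage, digest, storedAt, hash) tuple,
+  ## matching the ArchiveRowV2 shape returned by queries_legacy.selectAllMessages.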
+  return s.db.selectAllMessages()
+
+method getMessagesV2*(
+    s: LegacySqliteDriver,
+    contentTopic = newSeq[ContentTopic](0),
+    pubsubTopic = none(PubsubTopic),
+    cursor = none(ArchiveCursorV2),
+    startTime = none(Timestamp),
+    endTime = none(Timestamp),
+    maxPageSize = DefaultPageSize,
+    ascendingOrder = true,
+): Future[ArchiveDriverResult[seq[ArchiveRowV2]]] {.async, deprecated.} =
+  let cursor = cursor.map(toDbCursor)
+
+  let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit(
+    contentTopic,
+    pubsubTopic,
+    cursor,
+    startTime,
+    endTime,
+    limit = maxPageSize,
+    ascending = ascendingOrder,
+  )
+
+  return rowsRes
+
+method getMessagesCount*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[int64]] {.async, deprecated.} =
+  return s.db.getMessageCount()
+
+method getPagesCount*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[int64]] {.async, deprecated.} =
+  return s.db.getPageCount()
+
+method getPagesSize*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[int64]] {.async, deprecated.} =
+  return s.db.getPageSize()
+
+method getDatabaseSize*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[int64]] {.async, deprecated.} =
+  return s.db.getDatabaseSize()
+
+method performVacuum*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[void]] {.async, deprecated.} =
+  return s.db.performSqliteVacuum()
+
+method getOldestMessageTimestamp*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[Timestamp]] {.async, deprecated.} =
+  return s.db.selectOldestReceiverTimestamp()
+
+method getNewestMessageTimestamp*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[Timestamp]] {.async, deprecated.} =
+  return s.db.selectNewestReceiverTimestamp()
+
+method deleteMessagesOlderThanTimestamp*(
+    s: LegacySqliteDriver, ts: Timestamp
+): Future[ArchiveDriverResult[void]] {.async, deprecated.} =
+  return s.db.deleteMessagesOlderThanTimestamp(ts)
+
+method deleteOldestMessagesNotWithinLimit*(
+    s: LegacySqliteDriver, limit: int
+): Future[ArchiveDriverResult[void]] {.async, deprecated.} =
+  return s.db.deleteOldestMessagesNotWithinLimit(limit)
+
+method decreaseDatabaseSize*(
+    driver: LegacySqliteDriver, targetSizeInBytes: int64, forceRemoval: bool = false
+): Future[ArchiveDriverResult[void]] {.async, deprecated.} =
+  ## Remove 20% of the outdated data from the database
+  const DeleteLimit = 0.80
+
+  ## when the DB size overshoots the limit, shred the 20% oldest messages
+  ## get size of database
+  let dbSize = (await driver.getDatabaseSize()).valueOr:
+    return err("failed to get database size: " & $error)
+
+  ## database size in bytes
+  let totalSizeOfDB: int64 = int64(dbSize)
+
+  if totalSizeOfDB < targetSizeInBytes:
+    return ok()
+
+  ## to shred/delete messages, get the total row/message count
+  let numMessages = (await driver.getMessagesCount()).valueOr:
+    return err("failed to get messages count: " & error)
+
+  ## NOTE: SQLite vacuuming is done manually; we delete a percentage of rows.
+  ## If vacuuming were done automatically, we would aim to check the DB size
+  ## periodically for an efficient retention policy implementation.
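+  ##
+  ## Illustrative example (not part of the original code): with numMessages = 10_000
+  ## and DeleteLimit = 0.80, pageDeleteWindow = int(10_000.0 * 0.80) = 8_000, so the
+  ## 8_000 newest rows are kept and the ~2_000 oldest rows are deleted.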
+
+  ## 80% of the total messages are to be kept; delete the rest
+  let pageDeleteWindow = int(float(numMessages) * DeleteLimit)
+
+  (await driver.deleteOldestMessagesNotWithinLimit(limit = pageDeleteWindow)).isOkOr:
+    return err("deleting oldest messages failed: " & error)
+
+  return ok()
+
+method close*(
+    s: LegacySqliteDriver
+): Future[ArchiveDriverResult[void]] {.async, deprecated.} =
+  ## Close the database connection
+  # Dispose statements
+  s.insertStmt.dispose()
+  # Close connection
+  s.db.close()
+  return ok()
+
+method existsTable*(
+    s: LegacySqliteDriver, tableName: string
+): Future[ArchiveDriverResult[bool]] {.async, deprecated.} =
+  return err("existsTable method not implemented in sqlite_driver_legacy")
diff --git a/waku/waku_core/message/digest.nim b/waku/waku_core/message/digest.nim
index 67e8d81c28..46e76d0c9a 100644
--- a/waku/waku_core/message/digest.nim
+++ b/waku/waku_core/message/digest.nim
@@ -21,10 +21,10 @@ converter fromBytes*(array: openArray[byte]): WakuMessageHash =
   discard copyFrom(hash, array)
   hash
 
-converter toBytesArray*(digest: MDigest[256]): WakuMessageHash =
+converter toBytesArray*(digest: MDigest[256]): WakuMessageHash {.deprecated.} =
   digest.data
 
-converter toBytes*(digest: MDigest[256]): seq[byte] =
+converter toBytes*(digest: MDigest[256]): seq[byte] {.deprecated.} =
   toSeq(digest.data)
 
 proc computeMessageHash*(pubsubTopic: PubsubTopic, msg: WakuMessage): WakuMessageHash =
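Taken together, these diffs reduce archive storage to a hash-keyed API: `put` now takes the message hash first and drops the digest and receive-time arguments, and cursors are plain message hashes. A minimal usage sketch assembled from the signatures above (the `driver`, `pubsubTopic`, and `msg` values are assumed to exist, e.g. built with the test helpers; `cursor` is treated as a `WakuMessageHash` per the `getMessages` change, and parameter names follow the SqliteDriver methods):

  # Sketch only: mirrors the reworked put/getMessages call shape.
  let hash = computeMessageHash(pubsubTopic, msg)

  # New argument order: hash first, then topic and message.
  discard waitFor driver.put(hash, pubsubTopic, msg)

  # Hash-based paging: pass the last returned hash as the cursor.
  let pageRes = waitFor driver.getMessages(
    contentTopic = @[msg.contentTopic], cursor = some(hash), maxPageSize = 25
  )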