From ffb07feb15b6d5a66aabc87b2127cffc5b624cac Mon Sep 17 00:00:00 2001 From: wangshao1 <30471730+wangshao1@users.noreply.github.com> Date: Wed, 31 Jan 2024 14:09:06 +0800 Subject: [PATCH 1/4] feat: add floyd (#2347) * floyd: refactor storage to support multi rocksdb instance && todis key format * fix compile error after rebase * fix compile error after rebase * fix by review comments * fix by review comments * fix by review comments * fix by lqx and panlei's review comments * fix by review comments * fix by review comments * fix by review comments * fix by review comments * rebase upstream code * rebase upstream code * fix storage construct error * change uint32_t ttl to uint64_t * change int() to static_cast * fix by xiaoshuai's review comments * fix compile error in debug mode * fix slot_indexer by review comments * fix by xiaoshuai's review comments * fix dynamic change ttl and periodic_seconds * define CRCU32 type * change uint64_t to int64_t to support negative time ttl * fix cache list error in floyd * fix by yuge's review comments * change shared_ptr to unique_ptr to be consistent with pikiwidb --------- Co-authored-by: wangshaoyi --- CMakeLists.txt | 2 - conf/pika.conf | 4 + include/pika_admin.h | 2 +- include/pika_binlog.h | 2 - include/pika_cache.h | 2 +- include/pika_cache_load_thread.h | 2 +- include/pika_conf.h | 27 +- include/pika_consensus.h | 4 +- include/pika_define.h | 8 +- include/pika_meta.h | 33 - include/pika_monitor_thread.h | 47 + include/pika_repl_client.h | 2 +- include/pika_rsync_service.h | 2 +- include/pika_slot_command.h | 17 +- src/cache/include/cache.h | 4 +- src/cache/src/hash.cc | 2 +- src/net/include/thread_pool.h | 1 + src/pika.cc | 18 +- src/pika_admin.cc | 39 +- src/pika_auxiliary_thread.cc | 3 +- src/pika_bit.cc | 2 + src/pika_cache.cc | 6 +- src/pika_client_conn.cc | 1 + src/pika_conf.cc | 24 +- src/pika_db.cc | 39 +- src/pika_inner_message.proto | 1 + src/pika_kv.cc | 32 +- src/pika_meta.cc | 106 - src/pika_migrate_thread.cc 
| 14 +- src/pika_repl_bgworker.cc | 7 +- src/pika_repl_client_conn.cc | 2 +- src/pika_repl_server_conn.cc | 1 + src/pika_server.cc | 11 +- src/pika_set.cc | 5 +- src/pika_slave_node.cc | 3 +- src/pika_slot_command.cc | 110 +- src/pika_stable_log.cc | 10 +- src/pika_transaction.cc | 1 - src/pstd/include/pika_codis_slot.h | 22 + src/pstd/src/pika_codis_slot.cc | 52 + src/pstd/src/pstd_string.cc | 2 +- src/rsync_client.cc | 9 +- src/storage/CMakeLists.txt | 5 +- src/storage/include/storage/backupable.h | 26 +- src/storage/include/storage/slot_indexer.h | 28 + src/storage/include/storage/storage.h | 98 +- src/storage/include/storage/storage_define.h | 132 + src/storage/include/storage/util.h | 3 +- src/storage/src/backupable.cc | 29 +- src/storage/src/base_data_key_format.h | 123 +- src/storage/src/base_data_value_format.h | 107 + src/storage/src/base_filter.h | 77 +- src/storage/src/base_key_format.h | 99 + src/storage/src/base_meta_value_format.h | 101 +- src/storage/src/base_value_format.h | 97 +- src/storage/src/custom_comparator.h | 204 +- src/storage/src/debug.h | 15 + src/storage/src/lists_data_key_format.h | 101 +- src/storage/src/lists_filter.h | 75 +- src/storage/src/lists_meta_value_format.h | 157 +- src/storage/src/murmurhash.h | 1 + src/storage/src/options_helper.h | 5 +- src/storage/src/redis.cc | 280 +- src/storage/src/redis.h | 287 +- src/storage/src/redis_hashes.cc | 793 ++--- src/storage/src/redis_hashes.h | 76 - src/storage/src/redis_lists.cc | 827 ++--- src/storage/src/redis_lists.h | 70 - src/storage/src/redis_sets.cc | 822 ++--- src/storage/src/redis_sets.h | 72 - src/storage/src/redis_strings.cc | 634 ++-- src/storage/src/redis_strings.h | 83 - src/storage/src/redis_zsets.cc | 946 +++--- src/storage/src/redis_zsets.h | 86 - src/storage/src/storage.cc | 2061 +++++++----- src/storage/src/strings_filter.h | 21 +- src/storage/src/strings_value_format.h | 47 +- src/storage/src/type_iterator.h | 404 +++ src/storage/src/util.cc | 58 +- 
src/storage/src/zsets_data_key_format.h | 94 +- src/storage/src/zsets_filter.h | 61 +- src/storage/tests/CMakeLists.txt | 6 +- src/storage/tests/hashes_test.cc | 17 +- src/storage/tests/keys_test.cc | 3072 ++---------------- src/storage/tests/kv_format_test.cc | 120 + src/storage/tests/lists_filter_test.cc | 41 +- src/storage/tests/lists_test.cc | 20 +- src/storage/tests/sets_test.cc | 25 +- src/storage/tests/strings_filter_test.cc | 2 +- src/storage/tests/strings_test.cc | 17 +- src/storage/tests/zsets_test.cc | 18 +- tests/assets/default.conf | 4 + tests/conf/pika.conf | 2 + tests/integration/start_master_and_slave.sh | 7 +- tools/CMakeLists.txt | 6 +- 95 files changed, 5602 insertions(+), 7641 deletions(-) delete mode 100644 include/pika_meta.h create mode 100644 include/pika_monitor_thread.h delete mode 100644 src/pika_meta.cc create mode 100644 src/pstd/include/pika_codis_slot.h create mode 100644 src/pstd/src/pika_codis_slot.cc create mode 100644 src/storage/include/storage/slot_indexer.h create mode 100644 src/storage/include/storage/storage_define.h create mode 100644 src/storage/src/base_data_value_format.h create mode 100644 src/storage/src/base_key_format.h delete mode 100644 src/storage/src/redis_hashes.h delete mode 100644 src/storage/src/redis_lists.h delete mode 100644 src/storage/src/redis_sets.h delete mode 100644 src/storage/src/redis_strings.h delete mode 100644 src/storage/src/redis_zsets.h create mode 100644 src/storage/src/type_iterator.h create mode 100644 src/storage/tests/kv_format_test.cc diff --git a/CMakeLists.txt b/CMakeLists.txt index d422ba8e92..485ff0fe25 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -52,7 +52,6 @@ elseif(${BUILD_TYPE} STREQUAL RELWITHDEBINFO) else() set(LIB_BUILD_TYPE RELEASE) set(CMAKE_CXX_FLAGS_RELEASE "-O2 -g -DNDEBUG") - endif() if(CMAKE_SYSTEM_NAME MATCHES "Darwin") @@ -172,7 +171,6 @@ set(GTEST_INCLUDE_DIR ${INSTALL_INCLUDEDIR}) set(GTEST_MAIN_INCLUDE_DIR ${INSTALL_INCLUDEDIR}) set(GMOCK_INCLUDE_DIR 
${INSTALL_INCLUDEDIR}) - ExternalProject_Add(gflags URL https://github.com/gflags/gflags/archive/refs/tags/v2.2.2.tar.gz diff --git a/conf/pika.conf b/conf/pika.conf index 09c48018c0..2f6990b959 100644 --- a/conf/pika.conf +++ b/conf/pika.conf @@ -7,6 +7,10 @@ # Port 10221 is used for Rsync, and port 11221 is used for Replication, while the listening port is 9221. port : 9221 +db-instance-num : 3 +rocksdb-ttl-second : 604800 +rocksdb-periodic-second : 259200 + # Random value identifying the Pika server, its string length must be 40. # If not set, Pika will generate a random string with a length of 40 random characters. # run-id : diff --git a/include/pika_admin.h b/include/pika_admin.h index a3e6f3217a..7693f0329d 100644 --- a/include/pika_admin.h +++ b/include/pika_admin.h @@ -536,7 +536,7 @@ class DiskRecoveryCmd : public Cmd { private: void DoInitial() override; - std::map background_errors_; + std::map background_errors_; }; class ClearReplicationIDCmd : public Cmd { diff --git a/include/pika_binlog.h b/include/pika_binlog.h index 6a1e8aa1ca..84127fb535 100644 --- a/include/pika_binlog.h +++ b/include/pika_binlog.h @@ -12,10 +12,8 @@ #include "pstd/include/pstd_mutex.h" #include "pstd/include/pstd_status.h" #include "pstd/include/noncopyable.h" - #include "include/pika_define.h" - std::string NewFileName(const std::string& name, uint32_t current); class Version final : public pstd::noncopyable { diff --git a/include/pika_cache.h b/include/pika_cache.h index eb16dac0d3..c7d9125e9d 100644 --- a/include/pika_cache.h +++ b/include/pika_cache.h @@ -223,4 +223,4 @@ class PikaCache : public pstd::noncopyable, public std::enable_shared_from_this< std::vector> cache_mutexs_; }; -#endif \ No newline at end of file +#endif diff --git a/include/pika_cache_load_thread.h b/include/pika_cache_load_thread.h index a6bf35ce09..fa949e8d81 100644 --- a/include/pika_cache_load_thread.h +++ b/include/pika_cache_load_thread.h @@ -38,7 +38,7 @@ class PikaCacheLoadThread : 
public net::Thread { private: std::atomic_bool should_exit_; std::deque>> loadkeys_queue_; - + pstd::CondVar loadkeys_cond_; pstd::Mutex loadkeys_mutex_; diff --git a/include/pika_conf.h b/include/pika_conf.h index 32df043bca..f1b4dbe75b 100644 --- a/include/pika_conf.h +++ b/include/pika_conf.h @@ -11,13 +11,14 @@ #include #include +#include "rocksdb/compression_type.h" + #include "pstd/include/base_conf.h" #include "pstd/include/pstd_mutex.h" #include "pstd/include/pstd_string.h" #include "acl.h" #include "include/pika_define.h" -#include "include/pika_meta.h" #include "rocksdb/compression_type.h" #define kBinlogReadWinDefaultSize 9000 @@ -76,6 +77,15 @@ class PikaConf : public pstd::BaseConf { std::shared_lock l(rwlock_); return db_path_; } + int db_instance_num() { + return db_instance_num_; + } + uint64_t rocksdb_ttl_second() { + return rocksdb_ttl_second_.load(); + } + uint64_t rocksdb_periodic_compaction_second() { + return rocksdb_periodic_second_.load(); + } std::string db_sync_path() { std::shared_lock l(rwlock_); return db_sync_path_; @@ -376,7 +386,6 @@ class PikaConf : public pstd::BaseConf { bool daemonize() { return daemonize_; } std::string pidfile() { return pidfile_; } int binlog_file_size() { return binlog_file_size_; } - PikaMeta* local_meta() { return local_meta_.get(); } std::vector compression_per_level(); std::string compression_all_levels() const { return compression_per_level_; }; static rocksdb::CompressionType GetCompression(const std::string& value); @@ -416,6 +425,15 @@ class PikaConf : public pstd::BaseConf { TryPushDiffCommands("slaveof", value); slaveof_ = value; } + + void SetRocksdbTTLSecond(uint64_t ttl) { + rocksdb_ttl_second_.store(ttl); + } + + void SetRocksdbPeriodicSecond(uint64_t value) { + rocksdb_periodic_second_.store(value); + } + void SetReplicationID(const std::string& value) { std::lock_guard l(rwlock_); TryPushDiffCommands("replication-id", value); @@ -655,6 +673,7 @@ class PikaConf : public pstd::BaseConf { int 
ConfigRewriteReplicationID(); private: + // TODO: replace mutex with atomic value int port_ = 0; int slave_priority_ = 0; int thread_num_ = 0; @@ -668,6 +687,7 @@ class PikaConf : public pstd::BaseConf { std::string log_path_; std::string log_level_; std::string db_path_; + int db_instance_num_ = 0; std::string db_sync_path_; std::string compact_cron_; std::string compact_interval_; @@ -719,6 +739,8 @@ class PikaConf : public pstd::BaseConf { int max_background_compactions_ = 0; int max_background_jobs_ = 0; int max_cache_files_ = 0; + std::atomic rocksdb_ttl_second_ = 0; + std::atomic rocksdb_periodic_second_ = 0; int max_bytes_for_level_multiplier_ = 0; int64_t block_size_ = 0; int64_t block_cache_ = 0; @@ -787,7 +809,6 @@ class PikaConf : public pstd::BaseConf { int64_t blob_file_size_ = 256 * 1024 * 1024; // 256M std::string blob_compression_type_ = "none"; - std::unique_ptr local_meta_; std::shared_mutex rwlock_; // Rsync Rate limiting configuration diff --git a/include/pika_consensus.h b/include/pika_consensus.h index d40e4efec4..b289b425b7 100644 --- a/include/pika_consensus.h +++ b/include/pika_consensus.h @@ -7,12 +7,12 @@ #include +#include "include/pika_define.h" +#include "pstd/include/env.h" #include "include/pika_binlog_transverter.h" #include "include/pika_client_conn.h" -#include "include/pika_define.h" #include "include/pika_slave_node.h" #include "include/pika_stable_log.h" -#include "pstd/include/env.h" class Context : public pstd::noncopyable { public: diff --git a/include/pika_define.h b/include/pika_define.h index 216b8407cd..176b371111 100644 --- a/include/pika_define.h +++ b/include/pika_define.h @@ -43,16 +43,16 @@ const std::string kDefaultRsyncAuth = "default"; const int kMaxRsyncParallelNum = 4; struct DBStruct { - DBStruct(std::string tn) - : db_name(std::move(tn)) {} + DBStruct(std::string tn, int32_t inst_num) + : db_name(std::move(tn)), db_instance_num(inst_num) {} bool operator==(const DBStruct& db_struct) const { - return db_name 
== db_struct.db_name; + return db_name == db_struct.db_name && db_instance_num == db_struct.db_instance_num; } std::string db_name; + int32_t db_instance_num = 0; }; -// slave item struct SlaveItem { std::string ip_port; std::string ip; diff --git a/include/pika_meta.h b/include/pika_meta.h deleted file mode 100644 index 25fe22bb60..0000000000 --- a/include/pika_meta.h +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_META -#define PIKA_META - -#include - -#include "pstd/include/env.h" -#include "pstd/include/pstd_mutex.h" - -#include "include/pika_define.h" - - -class PikaMeta : public pstd::noncopyable { - public: - PikaMeta() = default; - ~PikaMeta() = default; - - void SetPath(const std::string& path); - - pstd::Status StableSave(const std::vector& db_structs); - pstd::Status ParseMeta(std::vector* db_structs); - - private: - std::shared_mutex rwlock_; - std::string local_meta_path_; - -}; - -#endif diff --git a/include/pika_monitor_thread.h b/include/pika_monitor_thread.h new file mode 100644 index 0000000000..27bfa24050 --- /dev/null +++ b/include/pika_monitor_thread.h @@ -0,0 +1,47 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#ifndef PIKA_MONITOR_THREAD_H_ +#define PIKA_MONITOR_THREAD_H_ + +#include +#include +#include +#include + +#include "net/include/net_thread.h" +#include "pstd/include/pstd_mutex.h" +#include "include/pika_define.h" +#include "include/pika_client_conn.h" + +class PikaMonitorThread : public net::Thread { + public: + PikaMonitorThread(); + ~PikaMonitorThread() override; + + void AddMonitorClient(const std::shared_ptr& client_ptr); + void AddMonitorMessage(const std::string& monitor_message); + int32_t ThreadClientList(std::vector* client = nullptr); + bool ThreadClientKill(const std::string& ip_port = "all"); + bool HasMonitorClients(); + + private: + void AddCronTask(const MonitorCronTask& task); + bool FindClient(const std::string& ip_port); + net::WriteStatus SendMessage(int32_t fd, std::string& message); + void RemoveMonitorClient(const std::string& ip_port); + + std::atomic has_monitor_clients_; + pstd::Mutex monitor_mutex_protector_; + pstd::CondVar monitor_cond_; + + std::list monitor_clients_; + std::deque monitor_messages_; + std::queue cron_tasks_; + + void* ThreadMain() override; + void RemoveMonitorClient(int32_t client_fd); +}; +#endif diff --git a/include/pika_repl_client.h b/include/pika_repl_client.h index 2389f35978..aa20b83c78 100644 --- a/include/pika_repl_client.h +++ b/include/pika_repl_client.h @@ -14,9 +14,9 @@ #include "net/include/net_conn.h" #include "net/include/thread_pool.h" #include "pstd/include/pstd_status.h" +#include "include/pika_define.h" #include "include/pika_binlog_reader.h" -#include "include/pika_define.h" #include "include/pika_repl_bgworker.h" #include "include/pika_repl_client_thread.h" diff --git a/include/pika_rsync_service.h b/include/pika_rsync_service.h index a5c2bdf1e7..ccd4605a15 100644 --- a/include/pika_rsync_service.h +++ b/include/pika_rsync_service.h @@ -6,7 +6,7 @@ #ifndef PIKA_RSYNC_SERVICE_H_ #define PIKA_RSYNC_SERVICE_H_ -#include "iostream" +#include class PikaRsyncService { public: diff --git 
a/include/pika_slot_command.h b/include/pika_slot_command.h index 644483b932..6e5083e3b6 100644 --- a/include/pika_slot_command.h +++ b/include/pika_slot_command.h @@ -11,23 +11,14 @@ const std::string SlotKeyPrefix = "_internal:slotkey:4migrate:"; const std::string SlotTagPrefix = "_internal:slottag:4migrate:"; -extern uint32_t crc32tab[256]; +const size_t MaxKeySendSize = 10 * 1024; -void CRC32TableInit(uint32_t poly); - -extern void InitCRC32Table(); - -extern uint32_t CRC32Update(uint32_t crc, const char* buf, int len); -extern uint32_t CRC32CheckSum(const char* buf, int len); - -int GetSlotID(const std::string &str); -int GetKeyType(const std::string& key, std::string& key_type, const std::shared_ptr& db); -int DeleteKey(const std::string& key, const char key_type, const std::shared_ptr& db); -int GetSlotsID(const std::string& str, uint32_t* pcrc, int* phastag); +int GetKeyType(const std::string& key, std::string &key_type, const std::shared_ptr& db); void AddSlotKey(const std::string& type, const std::string& key, const std::shared_ptr& db); void RemSlotKey(const std::string& key, const std::shared_ptr& db); +int DeleteKey(const std::string& key, const char key_type, const std::shared_ptr& db); void RemSlotKeyByType(const std::string& type, const std::string& key, const std::shared_ptr& db); -std::string GetSlotKey(int slot); +std::string GetSlotKey(uint32_t slot); std::string GetSlotsTagKey(uint32_t crc); class PikaMigrate { diff --git a/src/cache/include/cache.h b/src/cache/include/cache.h index 869cb5aa1b..41c8da2db0 100644 --- a/src/cache/include/cache.h +++ b/src/cache/include/cache.h @@ -40,7 +40,7 @@ class RedisCache { static void ResetHitAndMissNum(void); Status Open(void); int32_t ActiveExpireCycle(void); - + // Normal Commands bool Exists(std::string& key); int64_t DbSize(void); @@ -163,7 +163,7 @@ class RedisCache { void FreeHitemList(hitem *items, uint32_t size); void FreeZitemList(zitem *items, uint32_t size); void ConvertObjectToString(robj 
*obj, std::string *value); - + private: RedisCache(const RedisCache&); RedisCache& operator=(const RedisCache&); diff --git a/src/cache/src/hash.cc b/src/cache/src/hash.cc index 3b7b019a06..1e55c0004d 100644 --- a/src/cache/src/hash.cc +++ b/src/cache/src/hash.cc @@ -309,4 +309,4 @@ Status RedisCache::HStrlen(std::string& key, std::string &field, uint64_t *len) } // namespace cache -/* EOF */ \ No newline at end of file +/* EOF */ diff --git a/src/net/include/thread_pool.h b/src/net/include/thread_pool.h index c3b3999914..bdced7d3bd 100644 --- a/src/net/include/thread_pool.h +++ b/src/net/include/thread_pool.h @@ -19,6 +19,7 @@ namespace net { using TaskFunc = void (*)(void *); struct Task { + Task() = default; TaskFunc func; void* arg; Task(TaskFunc _func, void* _arg) : func(_func), arg(_arg) {} diff --git a/src/pika.cc b/src/pika.cc index 2d62d3d6b9..89f344cb76 100644 --- a/src/pika.cc +++ b/src/pika.cc @@ -8,18 +8,19 @@ #include #include -#include "include/build_version.h" +#include "net/include/net_stats.h" +#include "pstd/include/pika_codis_slot.h" +#include "include/pika_define.h" +#include "pstd/include/pstd_defer.h" +#include "include/pika_conf.h" +#include "pstd/include/env.h" #include "include/pika_cmd_table_manager.h" +#include "include/pika_slot_command.h" +#include "include/build_version.h" #include "include/pika_command.h" -#include "include/pika_conf.h" -#include "include/pika_define.h" -#include "include/pika_rm.h" #include "include/pika_server.h" -#include "include/pika_slot_command.h" #include "include/pika_version.h" -#include "net/include/net_stats.h" -#include "pstd/include/env.h" -#include "pstd/include/pstd_defer.h" +#include "include/pika_rm.h" std::unique_ptr g_pika_conf; // todo : change to unique_ptr will coredump @@ -202,7 +203,6 @@ int main(int argc, char* argv[]) { PikaGlogInit(); PikaSignalSetup(); - InitCRC32Table(); LOG(INFO) << "Server at: " << path; g_pika_cmd_table_manager = std::make_unique(); diff --git a/src/pika_admin.cc 
b/src/pika_admin.cc index 772f61feab..f0f5692b37 100644 --- a/src/pika_admin.cc +++ b/src/pika_admin.cc @@ -15,10 +15,11 @@ #include #include "include/build_version.h" -#include "include/pika_conf.h" +#include "include/pika_cmd_table_manager.h" #include "include/pika_rm.h" #include "include/pika_server.h" #include "include/pika_version.h" +#include "include/pika_conf.h" #include "pstd/include/rsync.h" using pstd::Status; @@ -653,7 +654,9 @@ void FlushdbCmd::Do() { if (db_name_ == "all") { db_->FlushDB(); } else { - db_->FlushSubDB(db_name_); + //Floyd does not support flushdb by type + LOG(ERROR) << "cannot flushdb by type in floyd"; + // db_->FlushSubDB(db_name_); } } } @@ -1334,7 +1337,7 @@ void InfoCmd::InfoData(std::string& info) { tmp_stream << "compression:" << g_pika_conf->compression() << "\r\n"; // rocksdb related memory usage - std::map background_errors; + std::map background_errors; uint64_t total_background_errors = 0; uint64_t total_memtable_usage = 0; uint64_t total_table_reader_usage = 0; @@ -2307,7 +2310,7 @@ void ConfigCmd::ConfigSet(std::shared_ptr db) { if (value != "true" && value != "false") { res_.AppendStringRaw("-ERR invalid disable_auto_compactions (true or false)\r\n"); return; - } + } std::unordered_map options_map{{"disable_auto_compactions", value}}; storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); if (!s.ok()) { @@ -2447,6 +2450,32 @@ void ConfigCmd::ConfigSet(std::shared_ptr db) { } g_pika_conf->SetMaxBackgroudCompactions(static_cast(ival)); res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "rocksdb-periodic-second") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rocksdb-periodic-second'\r\n"); + return; + } + std::unordered_map options_map{{"periodic_compaction_seconds", value}}; + storage::Status s = 
g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set rocksdb-periodic-second wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetRocksdbPeriodicSecond(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "rocksdb-ttl-second") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rocksdb-ttl-second'\r\n"); + return; + } + std::unordered_map options_map{{"ttl", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set rocksdb-ttl-second wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetRocksdbTTLSecond(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); } else if (set_item == "max-background-jobs") { if (pstd::string2int(value.data(), value.size(), &ival) == 0) { res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-background-jobs'\r\n"); @@ -3081,7 +3110,7 @@ void DiskRecoveryCmd::Do() { db_item.second->DbRWUnLock(); for (const auto &item: background_errors_) { if (item.second != 0) { - rocksdb::Status s = db_item.second->storage()->GetDBByType(item.first)->Resume(); + rocksdb::Status s = db_item.second->storage()->GetDBByIndex(item.first)->Resume(); if (!s.ok()) { res_.SetRes(CmdRes::kErrOther, "The restore operation failed."); } diff --git a/src/pika_auxiliary_thread.cc b/src/pika_auxiliary_thread.cc index e94104b442..003a43c93b 100644 --- a/src/pika_auxiliary_thread.cc +++ b/src/pika_auxiliary_thread.cc @@ -3,9 +3,8 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
-#include "include/pika_auxiliary_thread.h" - #include "include/pika_define.h" +#include "include/pika_auxiliary_thread.h" #include "include/pika_rm.h" #include "include/pika_server.h" diff --git a/src/pika_bit.cc b/src/pika_bit.cc index 1b6455dab2..4e2f8dd637 100644 --- a/src/pika_bit.cc +++ b/src/pika_bit.cc @@ -12,6 +12,8 @@ #include "include/pika_define.h" #include "include/pika_slot_command.h" #include "include/pika_cache.h" +#include "pstd/include/pstd_string.h" +#include "include/pika_define.h" void BitSetCmd::DoInitial() { if (!CheckArg(argv_.size())) { diff --git a/src/pika_cache.cc b/src/pika_cache.cc index 9866a9f74a..7204bf64b4 100644 --- a/src/pika_cache.cc +++ b/src/pika_cache.cc @@ -7,11 +7,13 @@ #include #include #include +#include #include "include/pika_cache.h" #include "include/pika_cache_load_thread.h" #include "include/pika_server.h" #include "include/pika_slot_command.h" +#include "pstd/include/pika_codis_slot.h" #include "cache/include/cache.h" #include "cache/include/config.h" @@ -1607,7 +1609,7 @@ void PikaCache::DestroyWithoutLock(void) } int PikaCache::CacheIndex(const std::string& key) { - uint32_t crc = CRC32Update(0, key.data(), (int)key.size()); + auto crc = crc32(0L, (const Bytef*)key.data(), (int)key.size()); return (int)(crc % caches_.size()); } @@ -1683,4 +1685,4 @@ void PikaCache::PushKeyToAsyncLoadQueue(const char key_type, std::string& key, c void PikaCache::ClearHitRatio(void) { std::unique_lock l(rwlock_); cache::RedisCache::ResetHitAndMissNum(); -} \ No newline at end of file +} diff --git a/src/pika_client_conn.cc b/src/pika_client_conn.cc index ea5244067e..1c4e5a182b 100644 --- a/src/pika_client_conn.cc +++ b/src/pika_client_conn.cc @@ -12,6 +12,7 @@ #include +#include "include/pika_client_conn.h" #include "include/pika_admin.h" #include "include/pika_cmd_table_manager.h" #include "include/pika_command.h" diff --git a/src/pika_conf.cc b/src/pika_conf.cc index c7234e145f..b00ce94c93 100644 --- a/src/pika_conf.cc +++ 
b/src/pika_conf.cc @@ -3,24 +3,20 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "include/pika_conf.h" - -#include - #include #include -#include "pstd/include/env.h" -#include "pstd/include/pstd_string.h" +#include #include "cache/include/config.h" #include "include/acl.h" #include "include/pika_define.h" +#include "include/pika_conf.h" using pstd::Status; PikaConf::PikaConf(const std::string& path) - : pstd::BaseConf(path), conf_path_(path), local_meta_(std::make_unique()) {} + : pstd::BaseConf(path), conf_path_(path) {} int PikaConf::Load() { int ret = LoadConf(); @@ -122,11 +118,17 @@ int PikaConf::Load() { } GetConfStr("loglevel", &log_level_); GetConfStr("db-path", &db_path_); + GetConfInt("db-instance-num", &db_instance_num_); + int64_t t_val = 0; + GetConfInt64("rocksdb-ttl-second", &t_val); + rocksdb_ttl_second_.store(uint64_t(t_val)); + t_val = 0; + GetConfInt64("rocksdb-periodic-second", &t_val); + rocksdb_periodic_second_.store(uint64_t(t_val)); db_path_ = db_path_.empty() ? 
"./db/" : db_path_; if (db_path_[db_path_.length() - 1] != '/') { db_path_ += "/"; } - local_meta_->SetPath(db_path_); GetConfInt("thread-num", &thread_num_); if (thread_num_ <= 0) { @@ -171,7 +173,7 @@ int PikaConf::Load() { LOG(FATAL) << "config databases error, limit [1 ~ 8], the actual is: " << databases_; } for (int idx = 0; idx < databases_; ++idx) { - db_structs_.push_back({"db" + std::to_string(idx)}); + db_structs_.push_back({"db" + std::to_string(idx), db_instance_num_}); } } default_db_ = db_structs_[0].db_name; @@ -339,7 +341,7 @@ int PikaConf::Load() { if (max_cache_statistic_keys_ <= 0) { max_cache_statistic_keys_ = 0; } - + // disable_auto_compactions GetConfBool("disable_auto_compactions", &disable_auto_compactions_); @@ -482,7 +484,7 @@ int PikaConf::Load() { // slaveof slaveof_ = ""; GetConfStr("slaveof", &slaveof_); - + int cache_num = 16; GetConfInt("cache-num", &cache_num); cache_num_ = (0 >= cache_num || 48 < cache_num) ? 16 : cache_num; diff --git a/src/pika_db.cc b/src/pika_db.cc index ac036efda8..ce51132499 100644 --- a/src/pika_db.cc +++ b/src/pika_db.cc @@ -37,7 +37,8 @@ DB::DB(std::string db_name, const std::string& db_path, bgsave_sub_path_ = db_name; dbsync_path_ = DbSyncPath(g_pika_conf->db_sync_path(), db_name); log_path_ = DBPath(log_path, "log_" + db_name_); - storage_ = std::make_shared(); + storage_ = std::make_shared(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), db_path_); pstd::CreatePath(db_path_); pstd::CreatePath(log_path_); @@ -218,7 +219,8 @@ bool DB::FlushDBWithoutLock() { dbpath.append("_deleting/"); pstd::RenameFile(db_path_, dbpath); - storage_ = std::make_shared(); + storage_ = std::make_shared(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), db_path_); assert(storage_); 
assert(s.ok()); @@ -245,7 +247,8 @@ bool DB::FlushSubDBWithoutLock(const std::string& db_name) { std::string del_dbpath = dbpath + db_name + "_deleting"; pstd::RenameFile(sub_dbpath, del_dbpath); - storage_ = std::make_shared(); + storage_ = std::make_shared(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), db_path_); assert(storage_); assert(s.ok()); @@ -343,7 +346,7 @@ bool DB::InitBgsaveEnv() { // Prepare bgsave env, need bgsave_protector protect bool DB::InitBgsaveEngine() { bgsave_engine_.reset(); - rocksdb::Status s = storage::BackupEngine::Open(storage().get(), bgsave_engine_); + rocksdb::Status s = storage::BackupEngine::Open(storage().get(), bgsave_engine_, g_pika_conf->db_instance_num()); if (!s.ok()) { LOG(WARNING) << db_name_ << " open backup engine failed " << s.ToString(); return false; @@ -385,22 +388,22 @@ void DB::Init() { void DB::GetBgSaveMetaData(std::vector* fileNames, std::string* snapshot_uuid) { const std::string dbPath = bgsave_info().path; - std::string types[] = {storage::STRINGS_DB, storage::HASHES_DB, storage::LISTS_DB, storage::ZSETS_DB, storage::SETS_DB}; - for (const auto& type : types) { - std::string typePath = dbPath + ((dbPath.back() != '/') ? "/" : "") + type; - if (!pstd::FileExists(typePath)) { + int db_instance_num = g_pika_conf->db_instance_num(); + for (int index = 0; index < db_instance_num; index++) { + std::string instPath = dbPath + ((dbPath.back() != '/') ? 
"/" : "") + std::to_string(index); + if (!pstd::FileExists(instPath)) { continue ; } std::vector tmpFileNames; - int ret = pstd::GetChildren(typePath, tmpFileNames); + int ret = pstd::GetChildren(instPath, tmpFileNames); if (ret) { - LOG(WARNING) << dbPath << " read dump meta files failed, path " << typePath; + LOG(WARNING) << dbPath << " read dump meta files failed, path " << instPath; return; } for (const std::string fileName : tmpFileNames) { - fileNames -> push_back(type + "/" + fileName); + fileNames -> push_back(std::to_string(index) + "/" + fileName); } } fileNames->push_back(kBgsaveInfoFile); @@ -521,11 +524,10 @@ bool DB::TryUpdateMasterOffset() { void DB::PrepareRsync() { pstd::DeleteDirIfExist(dbsync_path_); - pstd::CreatePath(dbsync_path_ + "strings"); - pstd::CreatePath(dbsync_path_ + "hashes"); - pstd::CreatePath(dbsync_path_ + "lists"); - pstd::CreatePath(dbsync_path_ + "sets"); - pstd::CreatePath(dbsync_path_ + "zsets"); + int db_instance_num = g_pika_conf->db_instance_num(); + for (int index = 0; index < db_instance_num; index++) { + pstd::CreatePath(dbsync_path_ + std::to_string(index)); + } } bool DB::IsBgSaving() { @@ -562,7 +564,8 @@ bool DB::ChangeDb(const std::string& new_path) { return false; } - storage_ = std::make_shared(); + storage_ = std::make_shared(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), db_path_); assert(storage_); assert(s.ok()); @@ -634,4 +637,4 @@ bool DB::FlushDB() { std::lock_guard rwl(db_rwlock_); std::lock_guard l(bgsave_protector_); return FlushDBWithoutLock(); -} \ No newline at end of file +} diff --git a/src/pika_inner_message.proto b/src/pika_inner_message.proto index 537d0bf613..9e2a3ef04c 100644 --- a/src/pika_inner_message.proto +++ b/src/pika_inner_message.proto @@ -113,6 +113,7 @@ message InnerResponse { message DBInfo { required string db_name = 1; required int32 slot_num = 2; + required 
int32 db_instance_num = 3; } required bool classic_mode = 1; repeated DBInfo dbs_info = 2; diff --git a/src/pika_kv.cc b/src/pika_kv.cc index 6a04dd726b..e1e9315306 100644 --- a/src/pika_kv.cc +++ b/src/pika_kv.cc @@ -7,11 +7,11 @@ #include #include "include/pika_command.h" -#include "pstd/include/pstd_string.h" - +#include "include/pika_slot_command.h" #include "include/pika_cache.h" +#include "include/pika_stream_base.h" #include "include/pika_conf.h" -#include "include/pika_slot_command.h" +#include "pstd/include/pstd_string.h" extern std::unique_ptr g_pika_conf; /* SET key value [NX] [XX] [EX ] [PX ] */ @@ -68,16 +68,16 @@ void SetCmd::Do() { int32_t res = 1; switch (condition_) { case SetCmd::kXX: - s_ = db_->storage()->Setxx(key_, value_, &res, static_cast(sec_)); + s_ = db_->storage()->Setxx(key_, value_, &res, sec_); break; case SetCmd::kNX: - s_ = db_->storage()->Setnx(key_, value_, &res, static_cast(sec_)); + s_ = db_->storage()->Setnx(key_, value_, &res, sec_); break; case SetCmd::kVX: - s_ = db_->storage()->Setvx(key_, target_, value_, &success_, static_cast(sec_)); + s_ = db_->storage()->Setvx(key_, target_, value_, &success_, sec_); break; case SetCmd::kEXORPX: - s_ = db_->storage()->Setex(key_, value_, static_cast(sec_)); + s_ = db_->storage()->Setex(key_, value_, sec_); break; default: s_ = db_->storage()->Set(key_, value_); @@ -133,7 +133,7 @@ std::string SetCmd::ToRedisProtocol() { RedisAppendContent(content, key_); // time_stamp char buf[100]; - auto time_stamp = static_cast(time(nullptr) + sec_); + auto time_stamp = time(nullptr) + sec_; pstd::ll2string(buf, 100, time_stamp); std::string at(buf); RedisAppendLenUint64(content, at.size(), "$"); @@ -698,7 +698,7 @@ void SetexCmd::DoInitial() { } void SetexCmd::Do() { - s_ = db_->storage()->Setex(key_, value_, static_cast(sec_)); + s_ = db_->storage()->Setex(key_, value_, sec_); if (s_.ok()) { res_.SetRes(CmdRes::kOk); AddSlotKey("k", key_, db_); @@ -732,7 +732,7 @@ std::string 
SetexCmd::ToRedisProtocol() { RedisAppendContent(content, key_); // time_stamp char buf[100]; - auto time_stamp = static_cast(time(nullptr) + sec_); + auto time_stamp = time(nullptr) + sec_; pstd::ll2string(buf, 100, time_stamp); std::string at(buf); RedisAppendLenUint64(content, at.size(), "$"); @@ -757,7 +757,7 @@ void PsetexCmd::DoInitial() { } void PsetexCmd::Do() { - s_ = db_->storage()->Setex(key_, value_, static_cast(usec_ / 1000)); + s_ = db_->storage()->Setex(key_, value_, usec_ / 1000); if (s_.ok()) { res_.SetRes(CmdRes::kOk); } else { @@ -772,7 +772,7 @@ void PsetexCmd::DoThroughDB() { void PsetexCmd::DoUpdateCache() { if (s_.ok()) { std::string CachePrefixKeyK = PCacheKeyPrefixK + key_; - db_->cache()->Setxx(CachePrefixKeyK, value_, static_cast(usec_ / 1000)); + db_->cache()->Setxx(CachePrefixKeyK, value_, usec_ / 1000); } } @@ -790,7 +790,7 @@ std::string PsetexCmd::ToRedisProtocol() { RedisAppendContent(content, key_); // time_stamp char buf[100]; - auto time_stamp = static_cast(time(nullptr) + usec_ / 1000); + auto time_stamp = time(nullptr) + usec_ / 1000; pstd::ll2string(buf, 100, time_stamp); std::string at(buf); RedisAppendLenUint64(content, at.size(), "$"); @@ -1170,7 +1170,7 @@ void ExpireCmd::DoInitial() { void ExpireCmd::Do() { std::map type_status; - int64_t res = db_->storage()->Expire(key_, static_cast(sec_), &type_status); + int64_t res = db_->storage()->Expire(key_, sec_, &type_status); if (res != -1) { res_.AppendInteger(res); s_ = rocksdb::Status::OK(); @@ -1234,7 +1234,7 @@ void PexpireCmd::DoInitial() { void PexpireCmd::Do() { std::map type_status; - int64_t res = db_->storage()->Expire(key_, static_cast(msec_ / 1000), &type_status); + int64_t res = db_->storage()->Expire(key_, msec_ / 1000, &type_status); if (res != -1) { res_.AppendInteger(res); s_ = rocksdb::Status::OK(); @@ -1298,7 +1298,7 @@ void ExpireatCmd::DoInitial() { void ExpireatCmd::Do() { std::map type_status; - int32_t res = db_->storage()->Expireat(key_, 
static_cast(time_stamp_), &type_status); + int32_t res = db_->storage()->Expireat(key_, time_stamp_, &type_status); if (res != -1) { res_.AppendInteger(res); s_ = rocksdb::Status::OK(); diff --git a/src/pika_meta.cc b/src/pika_meta.cc deleted file mode 100644 index 87e11fbe7d..0000000000 --- a/src/pika_meta.cc +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_meta.h" -#include "pika_inner_message.pb.h" - -using pstd::Status; - -const uint32_t VERSION = 1; - -void PikaMeta::SetPath(const std::string& path) { local_meta_path_ = path; } - -/* - * ******************* Meta File Format ****************** - * | | | | - * 4 Bytes 4 Bytes meta size Bytes - */ -Status PikaMeta::StableSave(const std::vector& db_structs) { - std::lock_guard l(rwlock_); - if (local_meta_path_.empty()) { - LOG(WARNING) << "Local meta file path empty"; - return Status::Corruption("local meta file path empty"); - } - std::string local_meta_file = local_meta_path_ + kPikaMeta; - std::string tmp_file = local_meta_file; - tmp_file.append("_tmp"); - - std::unique_ptr saver; - pstd::CreatePath(local_meta_path_); - Status s = pstd::NewRWFile(tmp_file, saver); - if (!s.ok()) { - LOG(WARNING) << "Open local meta file failed"; - return Status::Corruption("open local meta file failed"); - } - - InnerMessage::PikaMeta meta; - for (const auto& ts : db_structs) { - InnerMessage::DBInfo* db_info = meta.add_db_infos(); - db_info->set_db_name(ts.db_name); - } - - std::string meta_str; - if (!meta.SerializeToString(&meta_str)) { - LOG(WARNING) << "Serialize meta string failed"; - return Status::Corruption("serialize meta string failed"); - } - uint32_t meta_str_size = meta_str.size(); - - char* p = saver->GetData(); 
- memcpy(p, &VERSION, sizeof(uint32_t)); - p += sizeof(uint32_t); - memcpy(p, &meta_str_size, sizeof(uint32_t)); - p += sizeof(uint32_t); - strncpy(p, meta_str.data(), meta_str.size()); - - pstd::DeleteFile(local_meta_file); - if (pstd::RenameFile(tmp_file, local_meta_file) != 0) { - LOG(WARNING) << "Failed to rename file, error: " << strerror(errno); - return Status::Corruption("faild to rename file"); - } - return Status::OK(); -} - -Status PikaMeta::ParseMeta(std::vector* const db_structs) { - std::shared_lock l(rwlock_); - std::string local_meta_file = local_meta_path_ + kPikaMeta; - if (!pstd::FileExists(local_meta_file)) { - LOG(WARNING) << "Local meta file not found, path: " << local_meta_file; - return Status::Corruption("meta file not found"); - } - - std::unique_ptr reader; - Status s = pstd::NewRWFile(local_meta_file, reader); - if (!s.ok()) { - LOG(WARNING) << "Open local meta file failed"; - return Status::Corruption("open local meta file failed"); - } - - if (!reader->GetData()) { - LOG(WARNING) << "Meta file init error"; - return Status::Corruption("meta file init error"); - } - - uint32_t version = 0; - uint32_t meta_size = 0; - memcpy(reinterpret_cast(&version), reader->GetData(), sizeof(uint32_t)); - memcpy(reinterpret_cast(&meta_size), reader->GetData() + sizeof(uint32_t), sizeof(uint32_t)); - auto const buf_ptr = std::make_unique(meta_size); - char* const buf = buf_ptr.get(); - memcpy(buf, reader->GetData() + 2 * sizeof(uint32_t), meta_size); - - InnerMessage::PikaMeta meta; - if (!meta.ParseFromArray(buf, static_cast(meta_size))) { - LOG(WARNING) << "Parse meta string failed"; - return Status::Corruption("parse meta string failed"); - } - - db_structs->clear(); - for (int idx = 0; idx < meta.db_infos_size(); ++idx) { - const InnerMessage::DBInfo& ti = meta.db_infos(idx); - db_structs->emplace_back(ti.db_name()); - } - return Status::OK(); -} diff --git a/src/pika_migrate_thread.cc b/src/pika_migrate_thread.cc index fce5c6886c..b88364d73a 100644 
--- a/src/pika_migrate_thread.cc +++ b/src/pika_migrate_thread.cc @@ -1,17 +1,17 @@ -#include - #include +#include + #include "include/pika_command.h" -#include "include/pika_conf.h" -#include "include/pika_define.h" #include "include/pika_migrate_thread.h" #include "include/pika_server.h" #include "include/pika_slot_command.h" - #include "include/pika_admin.h" #include "include/pika_cmd_table_manager.h" #include "include/pika_rm.h" +#include "pstd/include/pika_codis_slot.h" +#include "include/pika_conf.h" +#include "include/pika_define.h" #define min(a, b) (((a) > (b)) ? (b) : (a)) @@ -534,7 +534,7 @@ PikaMigrateThread::PikaMigrateThread() send_num_(0), response_num_(0), moved_num_(0), - + workers_num_(8), working_thread_num_(0) {} @@ -597,7 +597,7 @@ bool PikaMigrateThread::ReqMigrateBatch(const std::string &ip, int64_t port, int int PikaMigrateThread::ReqMigrateOne(const std::string& key, const std::shared_ptr& db) { std::unique_lock lm(migrator_mutex_); - int slot_id = GetSlotID(key); + int slot_id = GetSlotID(g_pika_conf->default_slot_num(), key); std::vector type_str(1); char key_type; rocksdb::Status s = db->storage()->GetType(key, true, type_str); diff --git a/src/pika_repl_bgworker.cc b/src/pika_repl_bgworker.cc index 308e3e14fa..213576f1f5 100644 --- a/src/pika_repl_bgworker.cc +++ b/src/pika_repl_bgworker.cc @@ -3,15 +3,14 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
-#include "include/pika_repl_bgworker.h" - #include +#include "include/pika_repl_bgworker.h" #include "include/pika_cmd_table_manager.h" -#include "include/pika_conf.h" #include "include/pika_rm.h" #include "include/pika_server.h" #include "pstd/include/pstd_defer.h" +#include "include/pika_conf.h" extern PikaServer* g_pika_server; extern std::unique_ptr g_pika_rm; @@ -49,7 +48,7 @@ void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { PikaReplBgWorker* worker = task_arg->worker; worker->ip_port_ = conn->ip_port(); - DEFER { + DEFER { delete index; delete task_arg; }; diff --git a/src/pika_repl_client_conn.cc b/src/pika_repl_client_conn.cc index 6436dc88f5..b911134e9a 100644 --- a/src/pika_repl_client_conn.cc +++ b/src/pika_repl_client_conn.cc @@ -109,7 +109,7 @@ void PikaReplClientConn::HandleMetaSyncResponse(void* arg) { std::vector master_db_structs; for (int idx = 0; idx < meta_sync.dbs_info_size(); ++idx) { const InnerMessage::InnerResponse_MetaSync_DBInfo& db_info = meta_sync.dbs_info(idx); - master_db_structs.push_back({db_info.db_name()}); + master_db_structs.push_back({db_info.db_name(), db_info.db_instance_num()}); } std::vector self_db_structs = g_pika_conf->db_structs(); diff --git a/src/pika_repl_server_conn.cc b/src/pika_repl_server_conn.cc index 022e8f31d8..21847db3cd 100644 --- a/src/pika_repl_server_conn.cc +++ b/src/pika_repl_server_conn.cc @@ -64,6 +64,7 @@ void PikaReplServerConn::HandleMetaSyncRequest(void* arg) { * with older versions, but slot_num is not used */ db_info->set_slot_num(1); + db_info->set_db_instance_num(db_struct.db_instance_num); } } } diff --git a/src/pika_server.cc b/src/pika_server.cc index fc4944248a..57224c5c09 100644 --- a/src/pika_server.cc +++ b/src/pika_server.cc @@ -17,6 +17,7 @@ #include "net/include/redis_cli.h" #include "pstd/include/env.h" #include "pstd/include/rsync.h" +#include "pstd/include/pika_codis_slot.h" #include "include/pika_cmd_table_manager.h" #include "include/pika_dispatch_thread.h" @@ 
-1372,6 +1373,10 @@ void PikaServer::InitStorageOptions() { rocksdb::NewLRUCache(g_pika_conf->blob_cache(), static_cast(g_pika_conf->blob_num_shard_bits())); } } + + // for column-family options + storage_options_.options.ttl = g_pika_conf->rocksdb_ttl_second(); + storage_options_.options.periodic_compaction_seconds = g_pika_conf->rocksdb_periodic_compaction_second(); } storage::Status PikaServer::RewriteStorageOptions(const storage::OptionType& option_type, @@ -1583,13 +1588,13 @@ void DoBgslotscleanup(void* arg) { if ((*iter).find(SlotKeyPrefix) != std::string::npos || (*iter).find(SlotTagPrefix) != std::string::npos) { continue; } - if (std::find(cleanupSlots.begin(), cleanupSlots.end(), GetSlotID(*iter)) != cleanupSlots.end()){ + if (std::find(cleanupSlots.begin(), cleanupSlots.end(), GetSlotID(g_pika_conf->default_slot_num(), *iter)) != cleanupSlots.end()){ if (GetKeyType(*iter, key_type, g_pika_server->bgslots_cleanup_.db) <= 0) { - LOG(WARNING) << "slots clean get key type for slot " << GetSlotID(*iter) << " key " << *iter << " error"; + LOG(WARNING) << "slots clean get key type for slot " << GetSlotID(g_pika_conf->default_slot_num(), *iter) << " key " << *iter << " error"; continue; } if (DeleteKey(*iter, key_type[0], g_pika_server->bgslots_cleanup_.db) <= 0){ - LOG(WARNING) << "slots clean del for slot " << GetSlotID(*iter) << " key "<< *iter << " error"; + LOG(WARNING) << "slots clean del for slot " << GetSlotID(g_pika_conf->default_slot_num(), *iter) << " key "<< *iter << " error"; } } } diff --git a/src/pika_set.cc b/src/pika_set.cc index 0c16624b76..6f44478c9d 100644 --- a/src/pika_set.cc +++ b/src/pika_set.cc @@ -4,11 +4,10 @@ // of patent rights can be found in the PATENTS file in the same directory. 
#include "include/pika_set.h" - -#include "include/pika_slot_command.h" -#include "pstd/include/pstd_string.h" #include "include/pika_cache.h" #include "include/pika_conf.h" +#include "pstd/include/pstd_string.h" +#include "include/pika_slot_command.h" void SAddCmd::DoInitial() { if (!CheckArg(argv_.size())) { diff --git a/src/pika_slave_node.cc b/src/pika_slave_node.cc index dc36ac38e4..a9adbd89b8 100644 --- a/src/pika_slave_node.cc +++ b/src/pika_slave_node.cc @@ -4,7 +4,6 @@ // of patent rights can be found in the PATENTS file in the same directory. #include "include/pika_slave_node.h" - #include "include/pika_conf.h" using pstd::Status; @@ -61,7 +60,7 @@ int SyncWindow::Remaining() { SlaveNode::SlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id) : RmNode(ip, port, db_name, session_id) - + {} SlaveNode::~SlaveNode() = default; diff --git a/src/pika_slot_command.cc b/src/pika_slot_command.cc index 8f8e6e5830..57361a731a 100644 --- a/src/pika_slot_command.cc +++ b/src/pika_slot_command.cc @@ -3,24 +3,26 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
-#include "include/pika_slot_command.h" #include #include #include #include + +#include "include/pika_slot_command.h" #include "include/pika_command.h" -#include "include/pika_conf.h" #include "include/pika_data_distribution.h" -#include "include/pika_define.h" #include "include/pika_migrate_thread.h" #include "include/pika_server.h" +#include "include/pika_admin.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_rm.h" #include "pstd/include/pstd_status.h" #include "pstd/include/pstd_string.h" +#include "include/pika_conf.h" +#include "pstd/include/pika_codis_slot.h" +#include "include/pika_define.h" #include "storage/include/storage/storage.h" -#include "include/pika_admin.h" -#include "include/pika_cmd_table_manager.h" -#include "include/pika_rm.h" #define min(a, b) (((a) > (b)) ? (b) : (a)) #define MAX_MEMBERS_NUM 512 @@ -30,35 +32,6 @@ extern std::unique_ptr g_pika_conf; extern std::unique_ptr g_pika_rm; extern std::unique_ptr g_pika_cmd_table_manager; -uint32_t crc32tab[256]; -void CRC32TableInit(uint32_t poly) { - int i, j; - for (i = 0; i < 256; i++) { - uint32_t crc = i; - for (j = 0; j < 8; j++) { - if (crc & 1) { - crc = (crc >> 1) ^ poly; - } else { - crc = (crc >> 1); - } - } - crc32tab[i] = crc; - } -} - -void InitCRC32Table() { - CRC32TableInit(IEEE_POLY); -} - -uint32_t CRC32Update(uint32_t crc, const char *buf, int len) { - int i; - crc = ~crc; - for (i = 0; i < len; i++) { - crc = crc32tab[static_cast(static_cast(crc) ^ buf[i])] ^ (crc >> 8); - } - return ~crc; -} - PikaMigrate::PikaMigrate() { migrate_clients_.clear(); } PikaMigrate::~PikaMigrate() { @@ -639,7 +612,7 @@ static int SlotsMgrtOne(const std::string &host, const int port, int timeout, co void RemSlotKeyByType(const std::string& type, const std::string& key, const std::shared_ptr& db) { uint32_t crc; int hastag; - int slotNum = GetSlotsID(key, &crc, &hastag); + uint32_t slotNum = GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); std::string slot_key = 
GetSlotKey(slotNum); int32_t res = 0; @@ -673,7 +646,7 @@ static int SlotsMgrtTag(const std::string& host, const int port, int timeout, co int count = 0; uint32_t crc; int hastag; - GetSlotsID(key, &crc, &hastag); + GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); if (!hastag) { if (type == 0) { return 0; @@ -718,57 +691,10 @@ static int SlotsMgrtTag(const std::string& host, const int port, int timeout, co return count; } -// get slot tag -static const char *GetSlotsTag(const std::string& str, int* plen) { - const char *s = str.data(); - int i, j, n = static_cast(str.length()); - for (i = 0; i < n && s[i] != '{'; i++) { - } - if (i == n) { - return nullptr; - } - i++; - for (j = i; j < n && s[j] != '}'; j++) { - } - if (j == n) { - return nullptr; - } - if (plen != nullptr) { - *plen = j - i; - } - return s + i; -} - -std::string GetSlotKey(int db) { - return SlotKeyPrefix + std::to_string(db); +std::string GetSlotKey(uint32_t slot) { + return SlotKeyPrefix + std::to_string(slot); } -// get slot number of the key -int GetSlotID(const std::string& str) { return GetSlotsID(str, nullptr, nullptr); } - -// get the slot number by key -int GetSlotsID(const std::string &str, uint32_t *pcrc, int *phastag) { - const char *s = str.data(); - int taglen; - int hastag = 0; - const char *tag = GetSlotsTag(str, &taglen); - if (tag == nullptr) { - tag = s, taglen = static_cast(str.length()); - } else { - hastag = 1; - } - uint32_t crc = CRC32CheckSum(tag, taglen); - if (pcrc != nullptr) { - *pcrc = crc; - } - if (phastag != nullptr) { - *phastag = hastag; - } - return crc % g_pika_conf->default_slot_num(); -} - -uint32_t CRC32CheckSum(const char* buf, int len) { return CRC32Update(0, buf, len); } - // add key to slotkey void AddSlotKey(const std::string& type, const std::string& key, const std::shared_ptr& db) { if (g_pika_conf->slotmigrate() != true) { @@ -779,7 +705,7 @@ void AddSlotKey(const std::string& type, const std::string& key, const std::shar int32_t res 
= -1; uint32_t crc; int hastag; - int slotID = GetSlotsID(key, &crc, &hastag); + uint32_t slotID = GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); std::string slot_key = GetSlotKey(slotID); std::vector members; members.emplace_back(type + key); @@ -811,7 +737,7 @@ void RemSlotKey(const std::string& key, const std::shared_ptr& db) { LOG(WARNING) << "SRem key: " << key << " from slotKey error"; return; } - std::string slotKey = GetSlotKey(GetSlotID(key)); + std::string slotKey = GetSlotKey(GetSlotID(g_pika_conf->default_slot_num(), key)); int32_t count = 0; std::vector members(1, type + key); rocksdb::Status s = db->storage()->SRem(slotKey, members, &count); @@ -856,7 +782,7 @@ std::string GetSlotsTagKey(uint32_t crc) { int DeleteKey(const std::string& key, const char key_type, const std::shared_ptr& db) { LOG(INFO) << "Del key Srem key " << key; int32_t res = 0; - std::string slotKey = GetSlotKey(GetSlotID(key)); + std::string slotKey = GetSlotKey(GetSlotID(g_pika_conf->default_slot_num(), key)); LOG(INFO) << "Del key Srem key " << key; // delete key from slot @@ -879,7 +805,7 @@ int DeleteKey(const std::string& key, const char key_type, const std::shared_ptr std::map type_status; int64_t del_nums = db->storage()->Del(members, &type_status); if (0 > del_nums) { - LOG(WARNING) << "Del key: " << key << " at slot " << GetSlotID(key) << " error"; + LOG(WARNING) << "Del key: " << key << " at slot " << GetSlotID(g_pika_conf->default_slot_num(), key) << " error"; return -1; } WriteDelKeyToBinlog(key, db); @@ -1085,7 +1011,7 @@ void SlotsMgrtTagOneCmd::Do() { std::map type_status; // if you need migrates key, if the key is not existed, return - GetSlotsID(key_, &crc, &hastag); + GetSlotsID(g_pika_conf->default_slot_num(), key_, &crc, &hastag); if (!hastag) { std::vector keys; keys.emplace_back(key_); @@ -1428,7 +1354,7 @@ void SlotsHashKeyCmd::Do() { res_.AppendArrayLenUint64(keys_.size()); for (keys_it = keys_.begin(); keys_it != keys_.end(); ++keys_it) { 
- res_.AppendInteger(GetSlotsID(*keys_it, nullptr, nullptr)); + res_.AppendInteger(GetSlotsID(g_pika_conf->default_slot_num(), *keys_it, nullptr, nullptr)); } return; diff --git a/src/pika_stable_log.cc b/src/pika_stable_log.cc index b1e9fc278a..ba51d9171c 100644 --- a/src/pika_stable_log.cc +++ b/src/pika_stable_log.cc @@ -3,18 +3,16 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "include/pika_stable_log.h" - -#include - #include #include -#include "include/pika_conf.h" +#include + #include "include/pika_rm.h" #include "include/pika_server.h" - +#include "include/pika_stable_log.h" #include "pstd/include/env.h" +#include "include/pika_conf.h" using pstd::Status; diff --git a/src/pika_transaction.cc b/src/pika_transaction.cc index 7e4f0194ed..fa77be3e86 100644 --- a/src/pika_transaction.cc +++ b/src/pika_transaction.cc @@ -8,7 +8,6 @@ #include "include/pika_transaction.h" #include "include/pika_admin.h" #include "include/pika_client_conn.h" -#include "include/pika_define.h" #include "include/pika_list.h" #include "include/pika_rm.h" #include "include/pika_server.h" diff --git a/src/pstd/include/pika_codis_slot.h b/src/pstd/include/pika_codis_slot.h new file mode 100644 index 0000000000..cb21fd0968 --- /dev/null +++ b/src/pstd/include/pika_codis_slot.h @@ -0,0 +1,22 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#ifndef PIKA_CODIS_SLOT_H_ +#define PIKA_CODIS_SLOT_H_ + +#include +#include +#include + +using CRCU32 = uint32_t; + +// get the slot number by key +CRCU32 GetSlotsID(int slot_num, const std::string& str, CRCU32* pcrc, int* phastag); + +// get slot number of the key +CRCU32 GetSlotID(int slot_num, const std::string& str); + +#endif + diff --git a/src/pstd/src/pika_codis_slot.cc b/src/pstd/src/pika_codis_slot.cc new file mode 100644 index 0000000000..731cf480b3 --- /dev/null +++ b/src/pstd/src/pika_codis_slot.cc @@ -0,0 +1,52 @@ +// Copyright (c) 2023-present The storage Authors. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include "pstd/include/pika_codis_slot.h" + +// get slot tag +static const char *GetSlotsTag(const std::string &str, int *plen) { + const char *s = str.data(); + int i, j, n = static_cast(str.length()); + for (i = 0; i < n && s[i] != '{'; i++) { + } + if (i == n) { + return nullptr; + } + i++; + for (j = i; j < n && s[j] != '}'; j++) { + } + if (j == n) { + return nullptr; + } + if (plen != nullptr) { + *plen = j - i; + } + return s + i; +} + +// get slot number of the key +CRCU32 GetSlotID(int slot_num, const std::string &str) { return GetSlotsID(slot_num, str, nullptr, nullptr); } + +// get the slot number by key +CRCU32 GetSlotsID(int slot_num, const std::string &str, CRCU32 *pcrc, int *phastag) { + const char *s = str.data(); + int taglen; int hastag = 0; + const char *tag = GetSlotsTag(str, &taglen); + if (tag == nullptr) { + tag = s, taglen = static_cast(str.length()); + } else { + hastag = 1; + } + auto crc = crc32(0L, (const Bytef*)tag, taglen); + if (pcrc != nullptr) { + *pcrc = CRCU32(crc); + } + if (phastag != nullptr) { + *phastag = hastag; + } + return static_cast(crc) % slot_num; +} diff --git 
a/src/pstd/src/pstd_string.cc b/src/pstd/src/pstd_string.cc index b8c1e14a3c..15c7f865c4 100644 --- a/src/pstd/src/pstd_string.cc +++ b/src/pstd/src/pstd_string.cc @@ -184,7 +184,7 @@ int stringmatchlen(const char* pattern, int patternLen, const char* string, int } int stringmatch(const char* pattern, const char* string, int nocase) { - return stringmatchlen(pattern, static_cast(strlen(pattern)), + return stringmatchlen(pattern, static_cast(strlen(pattern)), string, static_cast(strlen(string)), nocase); } diff --git a/src/rsync_client.cc b/src/rsync_client.cc index 4d8421e135..f8ee7aeae8 100644 --- a/src/rsync_client.cc +++ b/src/rsync_client.cc @@ -439,11 +439,10 @@ Status RsyncClient::CleanUpExpiredFiles(bool need_reset_path, const std::setdb_instance_num(); + for (int idx = 0; idx < db_instance_num; idx++) { + pstd::CreatePath(db_path + std::to_string(idx)); + } return Status::OK(); } diff --git a/src/storage/CMakeLists.txt b/src/storage/CMakeLists.txt index 7143682ce6..e12cae9b7d 100644 --- a/src/storage/CMakeLists.txt +++ b/src/storage/CMakeLists.txt @@ -5,8 +5,8 @@ project (storage) # Other CMake modules add_subdirectory(tests) -add_subdirectory(examples) -add_subdirectory(benchmark) +# add_subdirectory(examples) +# add_subdirectory(benchmark) add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX) add_compile_options("-fno-builtin-memcmp") @@ -23,6 +23,7 @@ add_library(storage STATIC ${DIR_SRCS} ) add_dependencies(storage rocksdb gtest glog gflags fmt ${LIBUNWIND_NAME} pstd) # TODO fix rocksdb include path target_include_directories(storage + PUBLIC ${CMAKE_SOURCE_DIR} PUBLIC ${PROJECT_SOURCE_DIR} PUBLIC ${PROJECT_SOURCE_DIR}/include ${INSTALL_INCLUDEDIR} diff --git a/src/storage/include/storage/backupable.h b/src/storage/include/storage/backupable.h index c5462a14b1..e190993c29 100644 --- a/src/storage/include/storage/backupable.h +++ b/src/storage/include/storage/backupable.h @@ -22,15 +22,15 @@ inline const std::string DEFAULT_RS_PATH = "db"; // 
Default restore root dir // Arguments which will used by BackupSave Thread // p_engine for BackupEngine handler // backup_dir -// key_type kv, hash, list, set or zset struct BackupSaveArgs { - void* p_engine; + void* p_engine = nullptr; const std::string backup_dir; - const std::string key_type; + // rocksdb instance number, consistent with instance index in storage. + int index_ = 0; Status res; - BackupSaveArgs(void* _p_engine, std::string _backup_dir, std::string _key_type) - : p_engine(_p_engine), backup_dir(std::move(_backup_dir)), key_type(std::move(_key_type)) {} + BackupSaveArgs(void* _p_engine, std::string _backup_dir, int index) + : p_engine(_p_engine), backup_dir(std::move(_backup_dir)), index_(index) {} }; struct BackupContent { @@ -43,7 +43,7 @@ struct BackupContent { class BackupEngine { public: ~BackupEngine(); - static Status Open(Storage* db, std::shared_ptr& backup_engine_ret); + static Status Open(Storage* db, std::shared_ptr& backup_engine_ret, int inst_count); Status SetBackupContent(); @@ -51,19 +51,19 @@ class BackupEngine { void StopBackup(); - Status CreateNewBackupSpecify(const std::string& dir, const std::string& type); + Status CreateNewBackupSpecify(const std::string& dir, int index); private: BackupEngine() = default; - std::map> engines_; - std::map backup_content_; - std::map backup_pthread_ts_; + std::map> engines_; + std::map backup_content_; + std::map backup_pthread_ts_; - Status NewCheckpoint(rocksdb::DB* rocksdb_db, const std::string& type); - std::string GetSaveDirByType(const std::string& _dir, const std::string& _type) const { + Status NewCheckpoint(rocksdb::DB* rocksdb_db, int index); + std::string GetSaveDirByIndex(const std::string& _dir, int index) const { std::string backup_dir = _dir.empty() ? DEFAULT_BK_PATH : _dir; - return backup_dir + ((backup_dir.back() != '/') ? 
"/" : "") + std::to_string(index); } Status WaitBackupPthread(); }; diff --git a/src/storage/include/storage/slot_indexer.h b/src/storage/include/storage/slot_indexer.h new file mode 100644 index 0000000000..92a49aeda2 --- /dev/null +++ b/src/storage/include/storage/slot_indexer.h @@ -0,0 +1,28 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef __SLOT_INDEXER_H__ +#define __SLOT_INDEXER_H__ + +#include +#include + +namespace storage { +// Manage slots to rocksdb indexes +// TODO(wangshaoyi): temporarily mock return +class SlotIndexer { +public: + SlotIndexer() = delete; + SlotIndexer(uint32_t inst_num) : inst_num_(inst_num) {} + ~SlotIndexer() {} + uint32_t GetInstanceID(uint32_t slot_id) {return slot_id % inst_num_; } + void ReshardSlots(const std::vector& slots) {} + +private: + uint32_t inst_num_ = 3; +}; +} // namespace storage end + +#endif diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 351c7b2263..8855c06476 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -22,6 +22,7 @@ #include "rocksdb/status.h" #include "rocksdb/table.h" +#include "slot_indexer.h" #include "pstd/include/pstd_mutex.h" namespace storage { @@ -49,13 +50,7 @@ using BlockBasedTableOptions = rocksdb::BlockBasedTableOptions; using Status = rocksdb::Status; using Slice = rocksdb::Slice; -class RedisStrings; -class RedisHashes; -class RedisSets; -class RedisLists; -class RedisZSets; -class RedisStreams; -class HyperLogLog; +class Redis; enum class OptionType; struct StreamAddTrimArgs; @@ -86,10 +81,23 @@ struct KeyValue { }; struct KeyInfo { - uint64_t keys; - uint64_t expires; - uint64_t avg_ttl; - uint64_t invaild_keys; + uint64_t keys = 0; + 
uint64_t expires = 0; + uint64_t avg_ttl = 0; + uint64_t invaild_keys = 0; + + KeyInfo() : keys(0), expires(0), avg_ttl(0), invaild_keys(0) {} + + KeyInfo(uint64_t k, uint64_t e, uint64_t a, uint64_t i) : keys(k), expires(e), avg_ttl(a), invaild_keys(i) {} + + KeyInfo operator + (const KeyInfo& info) { + KeyInfo res; + res.keys = keys + info.keys; + res.expires = expires + info.expires; + res.avg_ttl = avg_ttl + info.avg_ttl; + res.invaild_keys = invaild_keys + info.invaild_keys; + return res; + } }; struct ValueStatus { @@ -102,6 +110,9 @@ struct ValueStatus { struct FieldValue { std::string field; std::string value; + FieldValue() = default; + FieldValue(const std::string& k, const std::string& v) : field(k), value(v) {} + FieldValue(std::string&& k, std::string&& v) : field(std::move(k)), value(std::move(v)) {} bool operator==(const FieldValue& fv) const { return (fv.field == field && fv.value == value); } }; @@ -113,11 +124,13 @@ struct IdMessage { struct KeyVersion { std::string key; - int32_t version; + uint64_t version = 0; bool operator==(const KeyVersion& kv) const { return (kv.key == key && kv.version == version); } }; struct ScoreMember { + ScoreMember() : score(0.0), member("") {} + ScoreMember(double t_score, const std::string& t_member) : score(t_score), member(t_member) {} double score; std::string member; bool operator==(const ScoreMember& sm) const { return (sm.score == score && sm.member == member); } @@ -164,14 +177,19 @@ struct BGTask { class Storage { public: - Storage(); + Storage(); // for unit test only + Storage(int db_instance_num, int slot_num, bool is_classic_mode); ~Storage(); Status Open(const StorageOptions& storage_options, const std::string& db_path); - Status GetStartKey(const DataType& dtype, int64_t cursor, std::string* start_key); + Status LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key); - Status StoreCursorStartKey(const DataType& dtype, int64_t cursor, const std::string& next_key); 
+ Status StoreCursorStartKey(const DataType& dtype, int64_t cursor, char type, const std::string& next_key); + + std::unique_ptr& GetDBInstance(const Slice& key); + + std::unique_ptr& GetDBInstance(const std::string& key); // Strings Commands @@ -180,7 +198,7 @@ class Storage { Status Set(const Slice& key, const Slice& value); // Set key to hold the string value. if key exist - Status Setxx(const Slice& key, const Slice& value, int32_t* ret, int32_t ttl = 0); + Status Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl = 0); // Get the value of key. If the key does not exist // the special value nil is returned @@ -217,7 +235,7 @@ class Storage { // Set key to hold string value if key does not exist // return 1 if the key was set // return 0 if the key was not set - Status Setnx(const Slice& key, const Slice& value, int32_t* ret, int32_t ttl = 0); + Status Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl = 0); // Sets the given keys to their respective values. // MSETNX will not perform any operation at all even @@ -228,7 +246,7 @@ class Storage { // return 1 if the key currently hold the give value And override success // return 0 if the key doesn't exist And override fail // return -1 if the key currently does not hold the given value And override fail - Status Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, int32_t ttl = 0); + Status Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, int64_t ttl = 0); // delete the key that holds a given value // return 1 if the key currently hold the give value And delete success @@ -283,7 +301,7 @@ class Storage { // Set key to hold the string value and set key to timeout after a given // number of seconds - Status Setex(const Slice& key, const Slice& value, int32_t ttl); + Status Setex(const Slice& key, const Slice& value, int64_t ttl); // Returns the length of the string value stored at key. 
An error // is returned when key holds a non-string value. @@ -293,7 +311,7 @@ class Storage { // specifying the number of seconds representing the TTL (time to live), it // takes an absolute Unix timestamp (seconds since January 1, 1970). A // timestamp in the past will delete the key immediately. - Status PKSetexAt(const Slice& key, const Slice& value, int32_t timestamp); + Status PKSetexAt(const Slice& key, const Slice& value, int64_t timestamp); // Hashes Commands @@ -950,7 +968,7 @@ class Storage { // Set a timeout on key // return -1 operation exception errors happen in database // return >=0 success - int32_t Expire(const Slice& key, int32_t ttl, std::map* type_status); + int32_t Expire(const Slice& key, int64_t ttl, std::map* type_status); // Removes the specified keys // return -1 operation exception errors happen in database @@ -968,13 +986,6 @@ class Storage { int64_t Scan(const DataType& dtype, int64_t cursor, const std::string& pattern, int64_t count, std::vector* keys); - // Iterate over a collection of elements, obtaining the item which timeout - // conforms to the inequality (min_ttl < item_ttl < max_ttl) - // return an updated cursor that the user need to use as the cursor argument - // in the next call - int64_t PKExpireScan(const DataType& dtype, int64_t cursor, int32_t min_ttl, int32_t max_ttl, int64_t count, - std::vector* keys); - // Iterate over a collection of elements by specified range // return a next_key that the user need to use as the key_start argument // in the next call @@ -1011,7 +1022,7 @@ class Storage { // return -1 operation exception errors happen in database // return 0 if key does not exist // return >=1 if the timueout was set - int32_t Expireat(const Slice& key, int32_t timestamp, std::map* type_status); + int32_t Expireat(const Slice& key, int64_t timestamp, std::map* type_status); // Remove the existing timeout on key, turning the key from volatile (a key // with an expire set) to persistent (a key that will never expire 
as no @@ -1069,8 +1080,8 @@ class Storage { Status Compact(const DataType& type, bool sync = false); Status CompactRange(const DataType& type, const std::string& start, const std::string& end, bool sync = false); - Status DoCompact(const DataType& type); Status DoCompactRange(const DataType& type, const std::string& start, const std::string& end); + Status DoCompactSpecificKey(const DataType& type, const std::string& key); Status SetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys); Status SetSmallCompactionThreshold(uint32_t small_compaction_threshold); @@ -1078,31 +1089,30 @@ class Storage { std::string GetCurrentTaskType(); Status GetUsage(const std::string& property, uint64_t* result); - Status GetUsage(const std::string& property, std::map* type_result); - uint64_t GetProperty(const std::string& db_type, const std::string& property); + Status GetUsage(const std::string& property, std::map* type_result); + uint64_t GetProperty(const std::string& property); Status GetKeyNum(std::vector* key_infos); Status StopScanKeyNum(); - rocksdb::DB* GetDBByType(const std::string& type); + rocksdb::DB* GetDBByIndex(int index); Status SetOptions(const OptionType& option_type, const std::string& db_type, const std::unordered_map& options); void SetCompactRangeOptions(const bool is_canceled); Status EnableDymayticOptions(const OptionType& option_type, const std::string& db_type, const std::unordered_map& options); - Status EnableAutoCompaction(const OptionType& option_type, + Status EnableAutoCompaction(const OptionType& option_type, const std::string& db_type, const std::unordered_map& options); void GetRocksDBInfo(std::string& info); private: - std::unique_ptr strings_db_; - std::unique_ptr hashes_db_; - std::unique_ptr sets_db_; - std::unique_ptr zsets_db_; - std::unique_ptr lists_db_; - std::unique_ptr streams_db_; - std::atomic is_opened_ = false; + std::vector> insts_; + std::unique_ptr slot_indexer_; + std::atomic is_opened_ = {false}; + int db_instance_num_ = 3; 
+ int slot_num_ = 1024; + bool is_classic_mode_ = true; std::unique_ptr> cursors_store_; @@ -1112,11 +1122,11 @@ class Storage { pstd::CondVar bg_tasks_cond_var_; std::queue bg_tasks_queue_; - std::atomic current_task_type_ = kNone; - std::atomic bg_tasks_should_exit_ = false; + std::atomic current_task_type_ = {kNone}; + std::atomic bg_tasks_should_exit_ = {false}; // For scan keys in data base - std::atomic scan_keynum_exit_ = false; + std::atomic scan_keynum_exit_ = {false}; }; } // namespace storage diff --git a/src/storage/include/storage/storage_define.h b/src/storage/include/storage/storage_define.h new file mode 100644 index 0000000000..367687d92b --- /dev/null +++ b/src/storage/include/storage/storage_define.h @@ -0,0 +1,132 @@ +// Copyright (c) 2023-present The storage Authors. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#ifndef STORAGE_DEFINE_H_ +#define STORAGE_DEFINE_H_ + +#include +#include +#include "stdint.h" + +#include "rocksdb/slice.h" + +namespace storage { +using Slice = rocksdb::Slice; + +// remove 'unused parameter' warning +#define UNUSED(expr) \ + do { \ + (void)(expr); \ + } while (0) + +const int kPrefixReserveLength = 8; +const int kVersionLength = 8; +const int kScoreLength = 8; +const int kSuffixReserveLength = 16; +const int kListValueIndexLength = 16; + +const int kTimestampLength = 8; + +enum ColumnFamilyIndex { + kStringsCF = 0, + kHashesMetaCF = 1, + kHashesDataCF = 2, + kSetsMetaCF = 3, + kSetsDataCF = 4, + kListsMetaCF = 5, + kListsDataCF = 6, + kZsetsMetaCF = 7, + kZsetsDataCF = 8, + kZsetsScoreCF = 9, + kStreamsCF = 10, +}; + +const static char kNeedTransformCharacter = '\u0000'; +const static char* kEncodedTransformCharacter = "\u0000\u0001"; +const static char* kEncodedKeyDelim = "\u0000\u0000"; +const static int kEncodedKeyDelimSize = 2; + +inline char* EncodeUserKey(const Slice& user_key, char* dst_ptr, size_t nzero) { + // no \u0000 exists in user_key, memcopy user_key directly. + if (nzero == 0) { + memcpy(dst_ptr, user_key.data(), user_key.size()); + dst_ptr += user_key.size(); + memcpy(dst_ptr, kEncodedKeyDelim, 2); + dst_ptr += 2; + return dst_ptr; + } + + // \u0000 exists in user_key, iterate and replace. 
+ size_t pos = 0; + const char* user_data = user_key.data(); + for (size_t i = 0; i < user_key.size(); i++) { + if (user_data[i] == kNeedTransformCharacter) { + size_t sub_len = i - pos; + if (sub_len != 0) { + memcpy(dst_ptr, user_data + pos, sub_len); + dst_ptr += sub_len; + } + memcpy(dst_ptr, kEncodedTransformCharacter, 2); + dst_ptr += 2; + pos = i + 1; + } + } + if (pos != user_key.size()) { + memcpy(dst_ptr, user_data + pos, user_key.size() - pos); + } + + memcpy(dst_ptr, kEncodedKeyDelim, 2); + dst_ptr += 2; + return dst_ptr; +} + +inline const char* DecodeUserKey(const char* ptr, int length, std::string* user_key) { + const char* ret_ptr = ptr; + user_key->resize(length - kEncodedKeyDelimSize); + bool zero_ahead = false; + bool delim_found = false; + int output_idx = 0; + + for (int idx = 0; idx < length; idx++) { + switch (ptr[idx]) { + case '\u0000': { + delim_found = zero_ahead ? true : false; + zero_ahead = true; + break; + } + case '\u0001': { + (*user_key)[output_idx++] = zero_ahead ? 
'\u0000' : ptr[idx]; + zero_ahead = false; + break; + } + default: { + (*user_key)[output_idx++] = ptr[idx]; + zero_ahead = false; + break; + } + } + if (delim_found) { + user_key->resize(output_idx); + ret_ptr = ptr + idx + 1; + break; + } + } + return ret_ptr; +} + +inline const char* SeekUserkeyDelim(const char* ptr, int length) { + bool zero_ahead = false; + for (int i = 0; i < length; i++) { + if (ptr[i] == kNeedTransformCharacter && zero_ahead) { + return ptr + i + 1; + } + zero_ahead = ptr[i] == kNeedTransformCharacter; + } + //TODO: handle invalid format + return ptr; +} + +} // end namespace storage +#endif diff --git a/src/storage/include/storage/util.h b/src/storage/include/storage/util.h index 379cc241df..d50f0ea081 100644 --- a/src/storage/include/storage/util.h +++ b/src/storage/include/storage/util.h @@ -24,8 +24,7 @@ int do_mkdir(const char* path, mode_t mode); int mkpath(const char* path, mode_t mode); int delete_dir(const char* dirname); int is_dir(const char* filename); -int CalculateMetaStartAndEndKey(const std::string& key, std::string* meta_start_key, std::string* meta_end_key); -int CalculateDataStartAndEndKey(const std::string& key, std::string* data_start_key, std::string* data_end_key); +int CalculateStartAndEndKey(const std::string& key, std::string* start_key, std::string* end_key); bool isTailWildcard(const std::string& pattern); void GetFilepath(const char* path, const char* filename, char* filepath); bool DeleteFiles(const char* path); diff --git a/src/storage/src/backupable.cc b/src/storage/src/backupable.cc index 9ff3ef6e07..4acd8dee72 100644 --- a/src/storage/src/backupable.cc +++ b/src/storage/src/backupable.cc @@ -17,34 +17,33 @@ BackupEngine::~BackupEngine() { WaitBackupPthread(); } -Status BackupEngine::NewCheckpoint(rocksdb::DB* rocksdb_db, const std::string& type) { +Status BackupEngine::NewCheckpoint(rocksdb::DB* rocksdb_db, int index) { rocksdb::DBCheckpoint* checkpoint; Status s = rocksdb::DBCheckpoint::Create(rocksdb_db, 
&checkpoint); if (!s.ok()) { return s; } - engines_.insert(std::make_pair(type, std::unique_ptr(checkpoint))); + engines_.insert(std::make_pair(index, std::unique_ptr(checkpoint))); return s; } -Status BackupEngine::Open(storage::Storage* storage, std::shared_ptr& backup_engine_ret) { +Status BackupEngine::Open(storage::Storage* storage, std::shared_ptr& backup_engine_ret, int inst_count) { // BackupEngine() is private, can't use make_shared backup_engine_ret = std::shared_ptr(new BackupEngine()); if (!backup_engine_ret) { return Status::Corruption("New BackupEngine failed!"); } - // Create BackupEngine for each db type + // Create BackupEngine for each rocksdb instance rocksdb::Status s; rocksdb::DB* rocksdb_db; - std::string types[] = {STRINGS_DB, HASHES_DB, LISTS_DB, ZSETS_DB, SETS_DB, STREAMS_DB}; - for (const auto& type : types) { - if (!(rocksdb_db = storage->GetDBByType(type))) { - s = Status::Corruption("Error db type"); + for (int index = 0; index < inst_count; index++) { + if (!(rocksdb_db = storage->GetDBByIndex(index))) { + s = Status::Corruption("Invalid db index"); } if (s.ok()) { - s = backup_engine_ret->NewCheckpoint(rocksdb_db, type); + s = backup_engine_ret->NewCheckpoint(rocksdb_db, index); } if (!s.ok()) { @@ -70,10 +69,10 @@ Status BackupEngine::SetBackupContent() { return s; } -Status BackupEngine::CreateNewBackupSpecify(const std::string& backup_dir, const std::string& type) { - auto it_engine = engines_.find(type); - auto it_content = backup_content_.find(type); - std::string dir = GetSaveDirByType(backup_dir, type); +Status BackupEngine::CreateNewBackupSpecify(const std::string& backup_dir, int index) { + auto it_engine = engines_.find(index); + auto it_content = backup_content_.find(index); + std::string dir = GetSaveDirByIndex(backup_dir, index); delete_dir(dir.c_str()); if (it_content != backup_content_.end() && it_engine != engines_.end()) { @@ -86,7 +85,7 @@ Status BackupEngine::CreateNewBackupSpecify(const std::string& backup_dir, 
const } } else { - return Status::Corruption("invalid db type"); + return Status::Corruption("Invalid db index"); } return Status::OK(); } @@ -94,7 +93,7 @@ Status BackupEngine::CreateNewBackupSpecify(const std::string& backup_dir, const void* ThreadFuncSaveSpecify(void* arg) { auto arg_ptr = static_cast(arg); auto p = static_cast(arg_ptr->p_engine); - arg_ptr->res = p->CreateNewBackupSpecify(arg_ptr->backup_dir, arg_ptr->key_type); + arg_ptr->res = p->CreateNewBackupSpecify(arg_ptr->backup_dir, arg_ptr->index_); pthread_exit(&(arg_ptr->res)); } diff --git a/src/storage/src/base_data_key_format.h b/src/storage/src/base_data_key_format.h index ffac531046..32be63a909 100644 --- a/src/storage/src/base_data_key_format.h +++ b/src/storage/src/base_data_key_format.h @@ -6,13 +6,22 @@ #ifndef SRC_BASE_DATA_KEY_FORMAT_H_ #define SRC_BASE_DATA_KEY_FORMAT_H_ -#include "pstd/include/pstd_coding.h" +#include "src/coding.h" +#include "storage/storage_define.h" namespace storage { + +using Slice = rocksdb::Slice; +/* +* used for Hash/Set/Zset's member data key. 
format: +* | reserve1 | key | version | data | reserve2 | +* | 8B | | 8B | | 16B | +*/ class BaseDataKey { public: - BaseDataKey(const Slice& key, int32_t version, const Slice& data) - : key_(key), version_(version), data_(data) {} + BaseDataKey(const Slice& key, + uint64_t version, const Slice& data) + : key_(key), version_(version), data_(data) {} ~BaseDataKey() { if (start_ != space_) { @@ -20,9 +29,45 @@ class BaseDataKey { } } + Slice EncodeSeekKey() { + size_t meta_size = sizeof(reserve1_) + sizeof(version_); + size_t usize = key_.size() + data_.size() + kEncodedKeyDelimSize; + size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter); + usize += nzero; + size_t needed = meta_size + usize; + char* dst; + if (needed <= sizeof(space_)) { + dst = space_; + } else { + dst = new char[needed]; + + // Need to allocate space, delete previous space + if (start_ != space_) { + delete[] start_; + } + } + + start_ = dst; + // reserve1: 8 byte + memcpy(dst, reserve1_, sizeof(reserve1_)); + dst += sizeof(reserve1_); + // key + dst = EncodeUserKey(key_, dst, nzero); + // version 8 byte + EncodeFixed64(dst, version_); + dst += sizeof(version_); + // data + memcpy(dst, data_.data(), data_.size()); + dst += data_.size(); + return Slice(start_, needed); + } + Slice Encode() { - size_t usize = key_.size() + data_.size(); - size_t needed = usize + sizeof(int32_t) * 2; + size_t meta_size = sizeof(reserve1_) + sizeof(version_) + sizeof(reserve2_); + size_t usize = key_.size() + data_.size() + kEncodedKeyDelimSize; + size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter); + usize += nzero; + size_t needed = meta_size + usize; char* dst; if (needed <= sizeof(space_)) { dst = space_; @@ -36,59 +81,73 @@ class BaseDataKey { } start_ = dst; - pstd::EncodeFixed32(dst, key_.size()); - dst += sizeof(int32_t); - memcpy(dst, key_.data(), key_.size()); - dst += key_.size(); - pstd::EncodeFixed32(dst, version_); - dst += 
sizeof(int32_t); + // reserve1: 8 byte + memcpy(dst, reserve1_, sizeof(reserve1_)); + dst += sizeof(reserve1_); + // key + dst = EncodeUserKey(key_, dst, nzero); + // version 8 byte + EncodeFixed64(dst, version_); + dst += sizeof(version_); + // data memcpy(dst, data_.data(), data_.size()); + dst += data_.size(); + // TODO(wangshaoyi): too much for reserve + // reserve2: 16 byte + memcpy(dst, reserve2_, sizeof(reserve2_)); return Slice(start_, needed); } private: - char space_[200]; char* start_ = nullptr; + char space_[200]; + char reserve1_[8] = {0}; Slice key_; - int32_t version_ = -1; + uint64_t version_ = uint64_t(-1); Slice data_; + char reserve2_[16] = {0}; }; class ParsedBaseDataKey { public: explicit ParsedBaseDataKey(const std::string* key) { const char* ptr = key->data(); - int32_t key_len = pstd::DecodeFixed32(ptr); - ptr += sizeof(int32_t); - key_ = Slice(ptr, key_len); - ptr += key_len; - version_ = pstd::DecodeFixed32(ptr); - ptr += sizeof(int32_t); - data_ = Slice(ptr, key->size() - key_len - sizeof(int32_t) * 2); + const char* end_ptr = key->data() + key->size(); + decode(ptr, end_ptr); } explicit ParsedBaseDataKey(const Slice& key) { const char* ptr = key.data(); - int32_t key_len = pstd::DecodeFixed32(ptr); - ptr += sizeof(int32_t); - key_ = Slice(ptr, key_len); - ptr += key_len; - version_ = pstd::DecodeFixed32(ptr); - ptr += sizeof(int32_t); - data_ = Slice(ptr, key.size() - key_len - sizeof(int32_t) * 2); + const char* end_ptr = key.data() + key.size(); + decode(ptr, end_ptr); + } + + void decode(const char* ptr, const char* end_ptr) { + const char* start = ptr; + // skip head reserve1_ + ptr += sizeof(reserve1_); + // skip tail reserve2_ + end_ptr -= kSuffixReserveLength; + // user key + ptr = DecodeUserKey(ptr, std::distance(ptr, end_ptr), &key_str_); + + version_ = DecodeFixed64(ptr); + ptr += sizeof(version_); + data_ = Slice(ptr, std::distance(ptr, end_ptr)); } virtual ~ParsedBaseDataKey() = default; - Slice key() { return key_; } + Slice 
Key() { return Slice(key_str_); } - int32_t version() { return version_; } + uint64_t Version() { return version_; } - Slice data() { return data_; } + Slice Data() { return data_; } protected: - Slice key_; - int32_t version_ = -1; + std::string key_str_; + char reserve1_[8] = {0}; + uint64_t version_ = (uint64_t)(-1); Slice data_; }; diff --git a/src/storage/src/base_data_value_format.h b/src/storage/src/base_data_value_format.h new file mode 100644 index 0000000000..5c11cc6cbc --- /dev/null +++ b/src/storage/src/base_data_value_format.h @@ -0,0 +1,107 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_BASE_DATA_VALUE_FORMAT_H_ +#define SRC_BASE_DATA_VALUE_FORMAT_H_ + +#include + +#include "rocksdb/env.h" +#include "rocksdb/slice.h" + +#include "src/coding.h" +#include "src/mutex.h" +#include "src/base_value_format.h" +#include "storage/storage_define.h" + +namespace storage { +/* +* hash/set/zset/list data value format +* | value | reserve | ctime | +* | | 16B | 8B | +*/ +class BaseDataValue : public InternalValue { +public: + explicit BaseDataValue(const rocksdb::Slice& user_value) + : InternalValue(user_value) {} + virtual ~BaseDataValue() {} + + virtual rocksdb::Slice Encode() { + size_t usize = user_value_.size(); + size_t needed = usize + kSuffixReserveLength + kTimestampLength; + char* dst = ReAllocIfNeeded(needed); + char* start_pos = dst; + + memcpy(dst, user_value_.data(), user_value_.size()); + dst += user_value_.size(); + memcpy(dst, reserve_, kSuffixReserveLength); + dst += kSuffixReserveLength; + EncodeFixed64(dst, ctime_); + dst += kTimestampLength; + return rocksdb::Slice(start_pos, needed); + } + +private: + const size_t kDefaultValueSuffixLength = kSuffixReserveLength + kTimestampLength; 
+}; + +class ParsedBaseDataValue : public ParsedInternalValue { +public: + // Use this constructor after rocksdb::DB::Get(), since we use this in + // the implement of user interfaces and may need to modify the + // original value suffix, so the value_ must point to the string + explicit ParsedBaseDataValue(std::string* value) : ParsedInternalValue(value) { + if (value_->size() >= kBaseDataValueSuffixLength) { + user_value_ = rocksdb::Slice(value_->data(), value_->size() - kBaseDataValueSuffixLength); + memcpy(reserve_, value_->data() + user_value_.size(), kSuffixReserveLength); + ctime_ = DecodeFixed64(value_->data() + user_value_.size() + kSuffixReserveLength); + } + } + + // Use this constructor in rocksdb::CompactionFilter::Filter(), + // since we use this in Compaction process, all we need to do is parsing + // the rocksdb::Slice, so don't need to modify the original value, value_ can be + // set to nullptr + explicit ParsedBaseDataValue(const rocksdb::Slice& value) : ParsedInternalValue(value) { + if (value.size() >= kBaseDataValueSuffixLength) { + user_value_ = rocksdb::Slice(value.data(), value.size() - kBaseDataValueSuffixLength); + memcpy(reserve_, value.data() + user_value_.size(), kSuffixReserveLength); + ctime_ = DecodeFixed64(value.data() + user_value_.size() + kSuffixReserveLength); + } + } + + virtual ~ParsedBaseDataValue() = default; + + void SetEtimeToValue() override {} + + void SetCtimeToValue() override { + if (value_) { + char* dst = const_cast(value_->data()) + value_->size() - kTimestampLength; + EncodeFixed64(dst, ctime_); + } + } + + void SetReserveToValue() { + if (value_) { + char* dst = const_cast(value_->data()) + value_->size() - kBaseDataValueSuffixLength; + memcpy(dst, reserve_, kSuffixReserveLength); + } + } + + virtual void StripSuffix() override { + if (value_) { + value_->erase(value_->size() - kBaseDataValueSuffixLength, kBaseDataValueSuffixLength); + } + } + +protected: + virtual void SetVersionToValue() override {}; + 
+private: + const size_t kBaseDataValueSuffixLength = kSuffixReserveLength + kTimestampLength; +}; + +} // namespace storage +#endif // SRC_BASE_VALUE_FORMAT_H_ diff --git a/src/storage/src/base_filter.h b/src/storage/src/base_filter.h index 1bbae2f8ca..093f3f4761 100644 --- a/src/storage/src/base_filter.h +++ b/src/storage/src/base_filter.h @@ -10,6 +10,7 @@ #include #include +#include "glog/logging.h" #include "rocksdb/compaction_filter.h" #include "src/base_data_key_format.h" #include "src/base_meta_value_format.h" @@ -24,19 +25,19 @@ class BaseMetaFilter : public rocksdb::CompactionFilter { bool* value_changed) const override { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - auto cur_time = static_cast(unix_time); + auto cur_time = static_cast(unix_time); ParsedBaseMetaValue parsed_base_meta_value(value); TRACE("==========================START=========================="); - TRACE("[MetaFilter], key: %s, count = %d, timestamp: %d, cur_time: %d, version: %d", key.ToString().c_str(), - parsed_base_meta_value.count(), parsed_base_meta_value.timestamp(), cur_time, - parsed_base_meta_value.version()); + TRACE("[MetaFilter], key: %s, count = %d, timestamp: %llu, cur_time: %llu, version: %llu", key.ToString().c_str(), + parsed_base_meta_value.Count(), parsed_base_meta_value.Etime(), cur_time, + parsed_base_meta_value.Version()); - if (parsed_base_meta_value.timestamp() != 0 && parsed_base_meta_value.timestamp() < cur_time && - parsed_base_meta_value.version() < cur_time) { + if (parsed_base_meta_value.Etime() != 0 && parsed_base_meta_value.Etime() < cur_time && + parsed_base_meta_value.Version() < cur_time) { TRACE("Drop[Stale & version < cur_time]"); return true; } - if (parsed_base_meta_value.count() == 0 && parsed_base_meta_value.version() < cur_time) { + if (parsed_base_meta_value.Count() == 0 && parsed_base_meta_value.Version() < cur_time) { TRACE("Drop[Empty & version < cur_time]"); return true; } @@ -59,31 +60,42 @@ class 
BaseMetaFilterFactory : public rocksdb::CompactionFilterFactory { class BaseDataFilter : public rocksdb::CompactionFilter { public: - BaseDataFilter(rocksdb::DB* db, std::vector* cf_handles_ptr) + BaseDataFilter(rocksdb::DB* db, std::vector* cf_handles_ptr, int meta_cf_index) : db_(db), - cf_handles_ptr_(cf_handles_ptr) + cf_handles_ptr_(cf_handles_ptr), + meta_cf_index_(meta_cf_index) {} bool Filter(int level, const Slice& key, const rocksdb::Slice& value, std::string* new_value, bool* value_changed) const override { + UNUSED(level); + UNUSED(value); + UNUSED(new_value); + UNUSED(value_changed); ParsedBaseDataKey parsed_base_data_key(key); TRACE("==========================START=========================="); - TRACE("[DataFilter], key: %s, data = %s, version = %d", parsed_base_data_key.key().ToString().c_str(), - parsed_base_data_key.data().ToString().c_str(), parsed_base_data_key.version()); + TRACE("[DataFilter], key: %s, data = %s, version = %llu", parsed_base_data_key.Key().ToString().c_str(), + parsed_base_data_key.Data().ToString().c_str(), parsed_base_data_key.Version()); - if (parsed_base_data_key.key().ToString() != cur_key_) { - cur_key_ = parsed_base_data_key.key().ToString(); + const char* ptr = key.data(); + int key_size = key.size(); + ptr = SeekUserkeyDelim(ptr + kPrefixReserveLength, key_size - kPrefixReserveLength); + std::string meta_key_enc(key.data(), std::distance(key.data(), ptr)); + meta_key_enc.append(kSuffixReserveLength, kNeedTransformCharacter); + + if (meta_key_enc != cur_key_) { + cur_key_ = meta_key_enc; std::string meta_value; // destroyed when close the database, Reserve Current key value if (cf_handles_ptr_->empty()) { return false; } - Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[0], cur_key_, &meta_value); + Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[meta_cf_index_], cur_key_, &meta_value); if (s.ok()) { meta_not_found_ = false; ParsedBaseMetaValue parsed_base_meta_value(&meta_value); - 
cur_meta_version_ = parsed_base_meta_value.version(); - cur_meta_timestamp_ = parsed_base_meta_value.timestamp(); + cur_meta_version_ = parsed_base_meta_value.Version(); + cur_meta_etime_ = parsed_base_meta_value.Etime(); } else if (s.IsNotFound()) { meta_not_found_ = true; } else { @@ -100,12 +112,12 @@ class BaseDataFilter : public rocksdb::CompactionFilter { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - if (cur_meta_timestamp_ != 0 && cur_meta_timestamp_ < static_cast(unix_time)) { + if (cur_meta_etime_ != 0 && cur_meta_etime_ < static_cast(unix_time)) { TRACE("Drop[Timeout]"); return true; } - if (cur_meta_version_ > parsed_base_data_key.version()) { + if (cur_meta_version_ > parsed_base_data_key.Version()) { TRACE("Drop[data_key_version < cur_meta_version]"); return true; } else { @@ -114,6 +126,23 @@ class BaseDataFilter : public rocksdb::CompactionFilter { } } + /* + // Only judge by meta value ttl + virtual rocksdb::CompactionFilter::Decision FilterBlobByKey(int level, const Slice& key, + uint64_t expire_time, std::string* new_value, std::string* skip_until) const override { + UNUSED(level); + UNUSED(expire_time); + UNUSED(new_value); + UNUSED(skip_until); + bool unused_value_changed; + bool should_remove = Filter(level, key, Slice{}, new_value, &unused_value_changed); + if (should_remove) { + return CompactionFilter::Decision::kRemove; + } + return CompactionFilter::Decision::kKeep; + } + */ + const char* Name() const override { return "BaseDataFilter"; } private: @@ -122,23 +151,25 @@ class BaseDataFilter : public rocksdb::CompactionFilter { rocksdb::ReadOptions default_read_options_; mutable std::string cur_key_; mutable bool meta_not_found_ = false; - mutable int32_t cur_meta_version_ = 0; - mutable int32_t cur_meta_timestamp_ = 0; + mutable uint64_t cur_meta_version_ = 0; + mutable uint64_t cur_meta_etime_ = 0; + int meta_cf_index_ = 0; }; class BaseDataFilterFactory : public rocksdb::CompactionFilterFactory { public: - 
BaseDataFilterFactory(rocksdb::DB** db_ptr, std::vector* handles_ptr) - : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr) {} + BaseDataFilterFactory(rocksdb::DB** db_ptr, std::vector* handles_ptr, int meta_cf_index) + : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr), meta_cf_index_(meta_cf_index) {} std::unique_ptr CreateCompactionFilter( const rocksdb::CompactionFilter::Context& context) override { - return std::unique_ptr(new BaseDataFilter(*db_ptr_, cf_handles_ptr_)); + return std::make_unique(BaseDataFilter(*db_ptr_, cf_handles_ptr_, meta_cf_index_)); } const char* Name() const override { return "BaseDataFilterFactory"; } private: rocksdb::DB** db_ptr_ = nullptr; std::vector* cf_handles_ptr_ = nullptr; + int meta_cf_index_ = 0; }; using HashesMetaFilter = BaseMetaFilter; diff --git a/src/storage/src/base_key_format.h b/src/storage/src/base_key_format.h new file mode 100644 index 0000000000..75d4d156fe --- /dev/null +++ b/src/storage/src/base_key_format.h @@ -0,0 +1,99 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_BASE_KEY_FORMAT_H_ +#define SRC_BASE_KEY_FORMAT_H_ + +#include "storage/storage_define.h" + +namespace storage { +/* +* used for string data key or hash/zset/set/list's meta key. 
format: +* | reserve1 | key | reserve2 | +* | 8B | | 16B | +*/ + +class BaseKey { + public: + BaseKey(const Slice& key) : key_(key) {} + + ~BaseKey() { + if (start_ != space_) { + delete[] start_; + } + } + + Slice Encode() { + size_t meta_size = sizeof(reserve1_) + sizeof(reserve2_); + size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter); + size_t usize = nzero + kEncodedKeyDelimSize + key_.size(); + size_t needed = meta_size + usize; + char* dst; + if (needed <= sizeof(space_)) { + dst = space_; + } else { + dst = new char[needed]; + + // Need to allocate space, delete previous space + if (start_ != space_) { + delete[] start_; + } + } + + start_ = dst; + // reserve1: 8 byte + memcpy(dst, reserve1_, sizeof(reserve1_)); + dst += sizeof(reserve1_); + // key + dst = EncodeUserKey(key_, dst, nzero); + // TODO(wangshaoyi): no need to reserve tailing, + // since we already set delimiter + memcpy(dst, reserve2_, sizeof(reserve2_)); + return Slice(start_, needed); + } + + private: + char* start_ = nullptr; + char space_[200]; + char reserve1_[8] = {0}; + Slice key_; + char reserve2_[16] = {0}; +}; + +class ParsedBaseKey { + public: + explicit ParsedBaseKey(const std::string* key) { + const char* ptr = key->data(); + const char* end_ptr = key->data() + key->size(); + decode(ptr, end_ptr); + } + + explicit ParsedBaseKey(const Slice& key) { + const char* ptr = key.data(); + const char* end_ptr = key.data() + key.size(); + decode(ptr, end_ptr); + } + + void decode(const char* ptr, const char* end_ptr) { + // skip head reserve + ptr += kPrefixReserveLength; + // skip tail reserve2_ + end_ptr -= kSuffixReserveLength; + DecodeUserKey(ptr, std::distance(ptr, end_ptr), &key_str_); + } + + virtual ~ParsedBaseKey() = default; + + Slice Key() { return Slice(key_str_); } + +protected: + std::string key_str_; +}; + +using ParsedBaseMetaKey = ParsedBaseKey; +using BaseMetaKey = BaseKey; + +} // namespace storage +#endif // SRC_BASE_KEY_FORMAT_H_ 
diff --git a/src/storage/src/base_meta_value_format.h b/src/storage/src/base_meta_value_format.h index 10c200f8d1..77c73126fd 100644 --- a/src/storage/src/base_meta_value_format.h +++ b/src/storage/src/base_meta_value_format.h @@ -8,31 +8,46 @@ #include +#include "pstd/include/env.h" +#include "storage/storage_define.h" #include "src/base_value_format.h" namespace storage { +/* +* | value | version | reserve | cdate | timestamp | +* | | 8B | 16B | 8B | 8B | +*/ +// TODO(wangshaoyi): reformat encode, AppendTimestampAndVersion class BaseMetaValue : public InternalValue { public: explicit BaseMetaValue(const Slice& user_value) : InternalValue(user_value) {} - size_t AppendTimestampAndVersion() override { + + rocksdb::Slice Encode() override { size_t usize = user_value_.size(); - char* dst = start_; - memcpy(dst, user_value_.data(), usize); - dst += usize; - EncodeFixed32(dst, version_); - dst += sizeof(int32_t); - EncodeFixed32(dst, timestamp_); - return usize + 2 * sizeof(int32_t); + size_t needed = usize + kVersionLength + kSuffixReserveLength + 2 * kTimestampLength; + char* dst = ReAllocIfNeeded(needed); + char* start_pos = dst; + + memcpy(dst, user_value_.data(), user_value_.size()); + dst += user_value_.size(); + EncodeFixed64(dst, version_); + dst += sizeof(version_); + memcpy(dst, reserve_, sizeof(reserve_)); + dst += sizeof(reserve_); + EncodeFixed64(dst, ctime_); + dst += sizeof(ctime_); + EncodeFixed64(dst, etime_); + dst += sizeof(etime_); + return rocksdb::Slice(start_, needed); } - int32_t UpdateVersion() { - int64_t unix_time; - rocksdb::Env::Default()->GetCurrentTime(&unix_time); - if (version_ >= static_cast(unix_time)) { + uint64_t UpdateVersion() { + int64_t unix_time = pstd::NowMicros() / 1000000; + if (version_ >= unix_time) { version_++; } else { - version_ = static_cast(unix_time); + version_ = uint64_t(unix_time); } return version_; } @@ -43,9 +58,16 @@ class ParsedBaseMetaValue : public ParsedInternalValue { // Use this constructor after 
rocksdb::DB::Get(); explicit ParsedBaseMetaValue(std::string* internal_value_str) : ParsedInternalValue(internal_value_str) { if (internal_value_str->size() >= kBaseMetaValueSuffixLength) { + int offset = 0; user_value_ = Slice(internal_value_str->data(), internal_value_str->size() - kBaseMetaValueSuffixLength); - version_ = DecodeFixed32(internal_value_str->data() + internal_value_str->size() - sizeof(int32_t) * 2); - timestamp_ = DecodeFixed32(internal_value_str->data() + internal_value_str->size() - sizeof(int32_t)); + offset += user_value_.size(); + version_ = DecodeFixed64(internal_value_str->data() + offset); + offset += sizeof(version_); + memcpy(reserve_, internal_value_str->data() + offset, sizeof(reserve_)); + offset += sizeof(reserve_); + ctime_ = DecodeFixed64(internal_value_str->data() + offset); + offset += sizeof(ctime_); + etime_ = DecodeFixed64(internal_value_str->data() + offset); } count_ = DecodeFixed32(internal_value_str->data()); } @@ -53,9 +75,16 @@ class ParsedBaseMetaValue : public ParsedInternalValue { // Use this constructor in rocksdb::CompactionFilter::Filter(); explicit ParsedBaseMetaValue(const Slice& internal_value_slice) : ParsedInternalValue(internal_value_slice) { if (internal_value_slice.size() >= kBaseMetaValueSuffixLength) { + int offset = 0; user_value_ = Slice(internal_value_slice.data(), internal_value_slice.size() - kBaseMetaValueSuffixLength); - version_ = DecodeFixed32(internal_value_slice.data() + internal_value_slice.size() - sizeof(int32_t) * 2); - timestamp_ = DecodeFixed32(internal_value_slice.data() + internal_value_slice.size() - sizeof(int32_t)); + offset += user_value_.size(); + version_ = DecodeFixed64(internal_value_slice.data() + offset); + offset += sizeof(uint64_t); + memcpy(reserve_, internal_value_slice.data() + offset, sizeof(reserve_)); + offset += sizeof(reserve_); + ctime_ = DecodeFixed64(internal_value_slice.data() + offset); + offset += sizeof(ctime_); + etime_ = 
DecodeFixed64(internal_value_slice.data() + offset); } count_ = DecodeFixed32(internal_value_slice.data()); } @@ -69,25 +98,34 @@ class ParsedBaseMetaValue : public ParsedInternalValue { void SetVersionToValue() override { if (value_) { char* dst = const_cast(value_->data()) + value_->size() - kBaseMetaValueSuffixLength; - EncodeFixed32(dst, version_); + EncodeFixed64(dst, version_); } } - void SetTimestampToValue() override { + void SetCtimeToValue() override { if (value_) { - char* dst = const_cast(value_->data()) + value_->size() - sizeof(int32_t); - EncodeFixed32(dst, timestamp_); + char* dst = const_cast(value_->data()) + value_->size() - 2 * kTimestampLength; + EncodeFixed64(dst, ctime_); } } - static const size_t kBaseMetaValueSuffixLength = 2 * sizeof(int32_t); - int32_t InitialMetaValue() { - this->set_count(0); - this->set_timestamp(0); + void SetEtimeToValue() override { + if (value_) { + char* dst = const_cast(value_->data()) + value_->size() - kTimestampLength; + EncodeFixed64(dst, etime_); + } + } + + uint64_t InitialMetaValue() { + this->SetCount(0); + this->SetEtime(0); + this->SetCtime(0); return this->UpdateVersion(); } - int32_t count() { return count_; } + bool IsValid() override { + return !IsStale() && Count() != 0; + } bool check_set_count(size_t count) { if (count > INT32_MAX) { @@ -96,7 +134,9 @@ class ParsedBaseMetaValue : public ParsedInternalValue { return true; } - void set_count(int32_t count) { + int32_t Count() { return count_; } + + void SetCount(int32_t count) { count_ = count; if (value_) { char* dst = const_cast(value_->data()); @@ -121,19 +161,20 @@ class ParsedBaseMetaValue : public ParsedInternalValue { } } - int32_t UpdateVersion() { + uint64_t UpdateVersion() { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - if (version_ >= static_cast(unix_time)) { + if (version_ >= static_cast(unix_time)) { version_++; } else { - version_ = static_cast(unix_time); + version_ = static_cast(unix_time); } 
SetVersionToValue(); return version_; } private: + static const size_t kBaseMetaValueSuffixLength = kVersionLength + kSuffixReserveLength + 2 * kTimestampLength; int32_t count_ = 0; }; diff --git a/src/storage/src/base_value_format.h b/src/storage/src/base_value_format.h index 35b200c2ea..da9ad6384e 100644 --- a/src/storage/src/base_value_format.h +++ b/src/storage/src/base_value_format.h @@ -10,61 +10,62 @@ #include "rocksdb/env.h" #include "rocksdb/slice.h" + #include "src/coding.h" -#include "src/redis.h" +#include "src/mutex.h" -namespace storage { +#include "pstd/include/env.h" +namespace storage { class InternalValue { - public: +public: explicit InternalValue(const rocksdb::Slice& user_value) - : user_value_(user_value) {} + : user_value_(user_value) { + ctime_ = pstd::NowMicros() / 1000000; + } virtual ~InternalValue() { if (start_ != space_) { delete[] start_; } } - void set_timestamp(int32_t timestamp = 0) { timestamp_ = timestamp; } - Status SetRelativeTimestamp(int32_t ttl) { + void SetEtime(uint64_t etime = 0) { etime_ = etime; } + void setCtime(uint64_t ctime) { ctime_ = ctime; } + rocksdb::Status SetRelativeTimestamp(int64_t ttl) { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - timestamp_ = static_cast(unix_time) + ttl; - if (timestamp_ != unix_time + static_cast(ttl)) { - return Status::InvalidArgument("invalid expire time"); - } - return Status::OK(); + etime_ = uint64_t(unix_time + ttl); + return rocksdb::Status::OK(); } - static const size_t kDefaultValueSuffixLength = sizeof(int32_t) * 2; - virtual rocksdb::Slice Encode() { - size_t usize = user_value_.size(); - size_t needed = usize + kDefaultValueSuffixLength; + void SetVersion(uint64_t version = 0) { version_ = version; } + + char* ReAllocIfNeeded(size_t needed) { char* dst; if (needed <= sizeof(space_)) { dst = space_; } else { dst = new char[needed]; - - // Need to allocate space, delete previous space if (start_ != space_) { delete[] start_; } } start_ = dst; - 
size_t len = AppendTimestampAndVersion(); - return rocksdb::Slice(start_, len); + return dst; } - virtual size_t AppendTimestampAndVersion() = 0; - protected: + virtual rocksdb::Slice Encode() = 0; + +protected: char space_[200]; char* start_ = nullptr; rocksdb::Slice user_value_; - int32_t version_ = 0; - int32_t timestamp_ = 0; + uint64_t version_ = 0; + uint64_t etime_ = 0; + uint64_t ctime_ = 0; + char reserve_[16] = {0}; }; class ParsedInternalValue { - public: +public: // Use this constructor after rocksdb::DB::Get(), since we use this in // the implement of user interfaces and may need to modify the // original value suffix, so the value_ must point to the string @@ -74,53 +75,65 @@ class ParsedInternalValue { // since we use this in Compaction process, all we need to do is parsing // the rocksdb::Slice, so don't need to modify the original value, value_ can be // set to nullptr - explicit ParsedInternalValue(const rocksdb::Slice& value) {} + explicit ParsedInternalValue(const rocksdb::Slice& value) {} virtual ~ParsedInternalValue() = default; - rocksdb::Slice user_value() { return user_value_; } + rocksdb::Slice UserValue() { return user_value_; } - int32_t version() { return version_; } + uint64_t Version() { return version_; } - void set_version(int32_t version) { + void SetVersion(uint64_t version) { version_ = version; SetVersionToValue(); } - int32_t timestamp() { return timestamp_; } + uint64_t Etime() { return etime_; } + + void SetEtime(uint64_t etime) { + etime_ = etime; + SetEtimeToValue(); + } - void set_timestamp(int32_t timestamp) { - timestamp_ = timestamp; - SetTimestampToValue(); + void SetCtime(uint64_t ctime) { + ctime_ = ctime; + SetCtimeToValue(); } - void SetRelativeTimestamp(int32_t ttl) { + void SetRelativeTimestamp(int64_t ttl) { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - timestamp_ = static_cast(unix_time) + ttl; - SetTimestampToValue(); + etime_ = unix_time + ttl; + SetEtimeToValue(); } - bool 
IsPermanentSurvival() { return timestamp_ == 0; } + bool IsPermanentSurvival() { return etime_ == 0; } bool IsStale() { - if (timestamp_ == 0) { + if (etime_ == 0) { return false; } int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - return timestamp_ < unix_time; + return etime_ < unix_time; + } + + virtual bool IsValid() { + return !IsStale(); } virtual void StripSuffix() = 0; - protected: +protected: virtual void SetVersionToValue() = 0; - virtual void SetTimestampToValue() = 0; + virtual void SetEtimeToValue() = 0; + virtual void SetCtimeToValue() = 0; std::string* value_ = nullptr; rocksdb::Slice user_value_; - int32_t version_ = 0 ; - int32_t timestamp_ = 0; + uint64_t version_ = 0 ; + uint64_t ctime_ = 0; + uint64_t etime_ = 0; + char reserve_[16] = {0}; //unused }; } // namespace storage diff --git a/src/storage/src/custom_comparator.h b/src/storage/src/custom_comparator.h index f0ea9dc045..185fc1d678 100644 --- a/src/storage/src/custom_comparator.h +++ b/src/storage/src/custom_comparator.h @@ -5,21 +5,25 @@ #ifndef INCLUDE_CUSTOM_COMPARATOR_H_ #define INCLUDE_CUSTOM_COMPARATOR_H_ -#include "string" -#include +#include "rocksdb/comparator.h" +#include "glog/logging.h" +#include "storage/storage_define.h" +#include "src/debug.h" #include "src/coding.h" -#include "rocksdb/comparator.h" namespace storage { - +/* list data key pattern +* | reserve1 | key | version | index | reserve2 | +* | 8B | | 8B | 8B | 16B | +*/ class ListsDataKeyComparatorImpl : public rocksdb::Comparator { public: ListsDataKeyComparatorImpl() = default; - // keep compatible with blackwidow - const char* Name() const override { return "blackwidow.ListsDataKeyComparator"; } + // keep compatible with floyd + const char* Name() const override { return "floyd.ListsDataKeyComparator"; } int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override { assert(!a.empty() && !b.empty()); @@ -27,17 +31,18 @@ class ListsDataKeyComparatorImpl : public 
rocksdb::Comparator { const char* ptr_b = b.data(); auto a_size = static_cast(a.size()); auto b_size = static_cast(b.size()); - int32_t key_a_len = DecodeFixed32(ptr_a); - int32_t key_b_len = DecodeFixed32(ptr_b); - ptr_a += sizeof(int32_t); - ptr_b += sizeof(int32_t); - rocksdb::Slice sets_key_a(ptr_a, key_a_len); - rocksdb::Slice sets_key_b(ptr_b, key_b_len); - ptr_a += key_a_len; - ptr_b += key_b_len; - if (sets_key_a != sets_key_b) { - return sets_key_a.compare(sets_key_b); + + ptr_a += kPrefixReserveLength; + ptr_b += kPrefixReserveLength; + ptr_a = SeekUserkeyDelim(ptr_a, a_size - kPrefixReserveLength); + ptr_b = SeekUserkeyDelim(ptr_b, b_size - kPrefixReserveLength); + + rocksdb::Slice a_prefix(a.data(), std::distance(a.data(), ptr_a)); + rocksdb::Slice b_prefix(b.data(), std::distance(b.data(), ptr_b)); + if (a_prefix != b_prefix) { + return a_prefix.compare(b_prefix); } + if (ptr_a - a.data() == a_size && ptr_b - b.data() == b_size) { return 0; } else if (ptr_a - a.data() == a_size) { @@ -46,10 +51,10 @@ class ListsDataKeyComparatorImpl : public rocksdb::Comparator { return 1; } - int32_t version_a = DecodeFixed32(ptr_a); - int32_t version_b = DecodeFixed32(ptr_b); - ptr_a += sizeof(int32_t); - ptr_b += sizeof(int32_t); + uint64_t version_a = DecodeFixed64(ptr_a); + uint64_t version_b = DecodeFixed64(ptr_b); + ptr_a += sizeof(uint64_t); + ptr_b += sizeof(uint64_t); if (version_a != version_b) { return version_a < version_b ? 
-1 : 1; } @@ -79,116 +84,95 @@ class ListsDataKeyComparatorImpl : public rocksdb::Comparator { void FindShortSuccessor(std::string* key) const override {} }; -/* - * | | | | | | - * 4 Bytes Key Size Bytes 4 Bytes 8 Bytes +/* zset score key pattern + * | | | | | | | + * | 8 Bytes | Key Size Bytes | 8 Bytes | 8 Bytes | | 16B | */ class ZSetsScoreKeyComparatorImpl : public rocksdb::Comparator { public: - // keep compatible with blackwidow - const char* Name() const override { return "blackwidow.ZSetsScoreKeyComparator"; } + // keep compatible with floyd + const char* Name() const override { return "floyd.ZSetsScoreKeyComparator"; } int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override { - assert(a.size() > sizeof(int32_t)); - assert(a.size() >= DecodeFixed32(a.data()) + 2 * sizeof(int32_t) + sizeof(uint64_t)); - assert(b.size() > sizeof(int32_t)); - assert(b.size() >= DecodeFixed32(b.data()) + 2 * sizeof(int32_t) + sizeof(uint64_t)); + assert(a.size() > kPrefixReserveLength); + assert(b.size() > kPrefixReserveLength); const char* ptr_a = a.data(); const char* ptr_b = b.data(); auto a_size = static_cast(a.size()); auto b_size = static_cast(b.size()); - int32_t key_a_len = DecodeFixed32(ptr_a); - int32_t key_b_len = DecodeFixed32(ptr_b); - rocksdb::Slice key_a_prefix(ptr_a, key_a_len + sizeof(int32_t)); - rocksdb::Slice key_b_prefix(ptr_b, key_b_len + sizeof(int32_t)); - ptr_a += key_a_len + sizeof(int32_t); - ptr_b += key_b_len + sizeof(int32_t); - int ret = key_a_prefix.compare(key_b_prefix); - if (ret) { + + ptr_a += kPrefixReserveLength; + ptr_b += kPrefixReserveLength; + const char* p_a = SeekUserkeyDelim(ptr_a, a_size - kPrefixReserveLength); + const char* p_b = SeekUserkeyDelim(ptr_b, b_size - kPrefixReserveLength); + rocksdb::Slice p_a_prefix = Slice(ptr_a, std::distance(ptr_a, p_a)); + rocksdb::Slice p_b_prefix = Slice(ptr_b, std::distance(ptr_b, p_b)); + int ret = p_a_prefix.compare(p_b_prefix); + if (ret != 0) { return ret; } - int32_t 
version_a = DecodeFixed32(ptr_a); - int32_t version_b = DecodeFixed32(ptr_b); + ptr_a = p_a; + ptr_b = p_b; + // compare version + uint64_t version_a = DecodeFixed64(ptr_a); + uint64_t version_b = DecodeFixed64(ptr_b); if (version_a != version_b) { return version_a < version_b ? -1 : 1; } - ptr_a += sizeof(int32_t); - ptr_b += sizeof(int32_t); + ptr_a += kVersionLength; + ptr_b += kVersionLength; + // compare score uint64_t a_i = DecodeFixed64(ptr_a); uint64_t b_i = DecodeFixed64(ptr_b); + const void* ptr_a_score = reinterpret_cast(&a_i); const void* ptr_b_score = reinterpret_cast(&b_i); double a_score = *reinterpret_cast(ptr_a_score); double b_score = *reinterpret_cast(ptr_b_score); - ptr_a += sizeof(uint64_t); - ptr_b += sizeof(uint64_t); if (a_score != b_score) { return a_score < b_score ? -1 : 1; - } else { - if (ptr_a - a.data() == a_size && ptr_b - b.data() == b_size) { - return 0; - } else if (ptr_a - a.data() == a_size) { - return -1; - } else if (ptr_b - b.data() == b_size) { - return 1; - } else { - rocksdb::Slice key_a_member(ptr_a, a_size - (ptr_a - a.data())); - rocksdb::Slice key_b_member(ptr_b, b_size - (ptr_b - b.data())); - ret = key_a_member.compare(key_b_member); - if (ret) { - return ret; - } - } } - return 0; + + // compare rest of the key, including: member and reserve + ptr_a += kScoreLength; + ptr_b += kScoreLength; + rocksdb::Slice rest_a(ptr_a, a_size - std::distance(a.data(), ptr_a)); + rocksdb::Slice rest_b(ptr_b, b_size - std::distance(b.data(), ptr_b)); + return rest_a.compare(rest_b); } bool Equal(const rocksdb::Slice& a, const rocksdb::Slice& b) const override { return Compare(a, b) == 0; } - void ParseAndPrintZSetsScoreKey(const std::string& from, const std::string& str) { - const char* ptr = str.data(); - - int32_t key_len = DecodeFixed32(ptr); - ptr += sizeof(int32_t); - - std::string key(ptr, key_len); - ptr += key_len; - - int32_t version = DecodeFixed32(ptr); - ptr += sizeof(int32_t); - - uint64_t key_score_i = 
DecodeFixed64(ptr); - const void* ptr_key_score = reinterpret_cast(&key_score_i); - double score = *reinterpret_cast(ptr_key_score); - ptr += sizeof(uint64_t); - - std::string member(ptr, str.size() - (key_len + 2 * sizeof(int32_t) + sizeof(uint64_t))); - LOG(INFO) << from.data() << ": total_len[" << str.size() << "], key_len[" << key_len << "], key[" << key.data() << "], " - << "version[ " << version << "], score[" << score << "], member[" << member.data() << "]"; - } - // Advanced functions: these are used to reduce the space requirements // for internal data structures like index blocks. // If *start < limit, changes *start to a short string in [start,limit). // Simple comparator implementations may return with *start unchanged, // i.e., an implementation of this method that does nothing is correct. + // TODO(wangshaoyi): need reformat, if pkey differs, why return limit directly? void FindShortestSeparator(std::string* start, const rocksdb::Slice& limit) const override { - assert(start->size() > sizeof(int32_t)); - assert(start->size() >= DecodeFixed32(start->data()) + 2 * sizeof(int32_t) + sizeof(uint64_t)); - assert(limit.size() > sizeof(int32_t)); - assert(limit.size() >= DecodeFixed32(limit.data()) + 2 * sizeof(int32_t) + sizeof(uint64_t)); + assert(start->size() > kPrefixReserveLength); + assert(limit.size() > kPrefixReserveLength); + const char* head_start = start->data(); + const char* head_limit = limit.data(); const char* ptr_start = start->data(); const char* ptr_limit = limit.data(); - int32_t key_start_len = DecodeFixed32(ptr_start); - int32_t key_limit_len = DecodeFixed32(ptr_limit); - rocksdb::Slice key_start_prefix(ptr_start, key_start_len + 2 * sizeof(int32_t)); - rocksdb::Slice key_limit_prefix(ptr_limit, key_limit_len + 2 * sizeof(int32_t)); - ptr_start += key_start_len + 2 * sizeof(int32_t); - ptr_limit += key_limit_len + 2 * sizeof(int32_t); + ptr_start += kPrefixReserveLength; + ptr_limit += kPrefixReserveLength; + ptr_start = 
SeekUserkeyDelim(ptr_start, start->size() - std::distance(head_start, ptr_start)); + ptr_limit = SeekUserkeyDelim(ptr_limit, limit.size() - std::distance(head_limit, ptr_limit)); + + ptr_start += kVersionLength; + ptr_limit += kVersionLength; + + size_t start_head_to_version_length = std::distance(head_start, ptr_start); + size_t limit_head_to_version_length = std::distance(head_limit, ptr_limit); + + rocksdb::Slice key_start_prefix(start->data(), start_head_to_version_length); + rocksdb::Slice key_limit_prefix(start->data(), limit_head_to_version_length); if (key_start_prefix.compare(key_limit_prefix) != 0) { return; } @@ -203,7 +187,7 @@ class ZSetsScoreKeyComparatorImpl : public rocksdb::Comparator { ptr_limit += sizeof(uint64_t); if (start_score < limit_score) { if (start_score + 1 < limit_score) { - start->resize(key_start_len + 2 * sizeof(int32_t)); + start->resize(start_head_to_version_length); start_score += 1; const void* addr_start_score = reinterpret_cast(&start_score); char dst[sizeof(uint64_t)]; @@ -213,20 +197,22 @@ class ZSetsScoreKeyComparatorImpl : public rocksdb::Comparator { return; } - std::string key_start_member(ptr_start, start->size() - (key_start_len + 2 * sizeof(int32_t) + sizeof(uint64_t))); - std::string key_limit_member(ptr_limit, limit.size() - (key_limit_len + 2 * sizeof(int32_t) + sizeof(uint64_t))); + size_t head_to_score_length = start_head_to_version_length + kScoreLength; + + std::string start_rest(ptr_start, start->size() - head_to_score_length); + std::string limit_rest(ptr_limit, limit.size() - head_to_score_length); // Find length of common prefix - size_t min_length = std::min(key_start_member.size(), key_limit_member.size()); + size_t min_length = std::min(start_rest.size(), limit_rest.size()); size_t diff_index = 0; - while ((diff_index < min_length) && (key_start_member[diff_index] == key_limit_member[diff_index])) { + while ((diff_index < min_length) && (start_rest[diff_index] == limit_rest[diff_index])) { diff_index++; 
} if (diff_index >= min_length) { // Do not shorten if one string is a prefix of the other } else { - auto key_start_member_byte = static_cast(key_start_member[diff_index]); - auto key_limit_member_byte = static_cast(key_limit_member[diff_index]); + auto key_start_member_byte = static_cast(start_rest[diff_index]); + auto key_limit_member_byte = static_cast(limit_rest[diff_index]); if (key_start_member_byte >= key_limit_member_byte) { // Cannot shorten since limit is smaller than start or start is // already the shortest possible. @@ -234,11 +220,11 @@ class ZSetsScoreKeyComparatorImpl : public rocksdb::Comparator { } assert(key_start_member_byte < key_limit_member_byte); - if (diff_index < key_limit_member.size() - 1 || key_start_member_byte + 1 < key_limit_member_byte) { - key_start_member[diff_index]++; - key_start_member.resize(diff_index + 1); - start->resize(key_start_len + 2 * sizeof(int32_t) + sizeof(uint64_t)); - start->append(key_start_member); + if (diff_index < limit_rest.size() - 1 || key_start_member_byte + 1 < key_limit_member_byte) { + start_rest[diff_index]++; + start_rest.resize(diff_index + 1); + start->resize(head_to_score_length); + start->append(start_rest); } else { // v // A A 1 A A A @@ -249,14 +235,14 @@ class ZSetsScoreKeyComparatorImpl : public rocksdb::Comparator { // increment it. 
diff_index++; - while (diff_index < key_start_member.size()) { + while (diff_index < start_rest.size()) { // Keep moving until we find the first non 0xFF byte to // increment it - if (static_cast(key_start_member[diff_index]) < static_cast(0xff)) { - key_start_member[diff_index]++; - key_start_member.resize(diff_index + 1); - start->resize(key_start_len + 2 * sizeof(int32_t) + sizeof(uint64_t)); - start->append(key_start_member); + if (static_cast(start_rest[diff_index]) < static_cast(0xff)) { + start_rest[diff_index]++; + start_rest.resize(diff_index + 1); + start->resize(head_to_score_length); + start->append(start_rest); break; } diff_index++; diff --git a/src/storage/src/debug.h b/src/storage/src/debug.h index fe78c14695..94c32c70b1 100644 --- a/src/storage/src/debug.h +++ b/src/storage/src/debug.h @@ -14,4 +14,19 @@ # define DEBUG(M, ...) {} #endif // NDEBUG +static std::string get_printable_key(const std::string& key) { + std::string res; + for (int i = 0; i < key.size(); i++) { + if (std::isprint(key[i])) { + res.append(1, key[i]); + } else { + char tmp[3]; + snprintf(tmp, 2, "%02x", key[i] & 0xFF); + res.append(tmp, 2); + } + } + return res; +} + + #endif // SRC_DEBUG_H_ diff --git a/src/storage/src/lists_data_key_format.h b/src/storage/src/lists_data_key_format.h index b25a70a2a8..1c5ab5ec1b 100644 --- a/src/storage/src/lists_data_key_format.h +++ b/src/storage/src/lists_data_key_format.h @@ -6,15 +6,19 @@ #ifndef SRC_LISTS_DATA_KEY_FORMAT_H_ #define SRC_LISTS_DATA_KEY_FORMAT_H_ -#include "pstd/include/pstd_coding.h" - -#include +#include "src/coding.h" +#include "storage/storage_define.h" namespace storage { +/* +* used for List data key. 
format: +* | reserve1 | key | version | index | reserve2 | +* | 8B | | 8B | 8B | 16B | +*/ class ListsDataKey { - public: - ListsDataKey(const rocksdb::Slice& key, int32_t version, uint64_t index) - : key_(key), version_(version), index_(index) {} +public: + ListsDataKey(const Slice& key, uint64_t version, uint64_t index) + : key_(key), version_(version), index_(index) {} ~ListsDataKey() { if (start_ != space_) { @@ -22,9 +26,12 @@ class ListsDataKey { } } - rocksdb::Slice Encode() { - size_t usize = key_.size(); - size_t needed = usize + sizeof(int32_t) * 2 + sizeof(uint64_t); + Slice Encode() { + size_t meta_size = sizeof(reserve1_) + sizeof(version_) + sizeof(reserve2_); + size_t usize = key_.size() + sizeof(index_) + kEncodedKeyDelimSize; + size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter); + usize += nzero; + size_t needed = meta_size + usize; char* dst; if (needed <= sizeof(space_)) { dst = space_; @@ -36,61 +43,75 @@ class ListsDataKey { delete[] start_; } } + start_ = dst; - pstd::EncodeFixed32(dst, key_.size()); - dst += sizeof(int32_t); - memcpy(dst, key_.data(), key_.size()); - dst += key_.size(); - pstd::EncodeFixed32(dst, version_); - dst += sizeof(int32_t); - pstd::EncodeFixed64(dst, index_); - return rocksdb::Slice(start_, needed); + // reserve1: 8 byte + memcpy(dst, reserve1_, sizeof(reserve1_)); + dst += sizeof(reserve1_); + dst = EncodeUserKey(key_, dst, nzero); + // version 8 byte + EncodeFixed64(dst, version_); + dst += sizeof(version_); + // index + EncodeFixed64(dst, index_); + dst += sizeof(index_); + // TODO(wangshaoyi): too much for reserve + // reserve2: 16 byte + memcpy(dst, reserve2_, sizeof(reserve2_)); + return Slice(start_, needed); } - private: - char space_[200]; +private: char* start_ = nullptr; - rocksdb::Slice key_; - int32_t version_ = -1; + char space_[200]; + char reserve1_[8] = {0}; + Slice key_; + uint64_t version_ = uint64_t(-1); uint64_t index_ = 0; + char reserve2_[16] = {0}; }; 
class ParsedListsDataKey { public: explicit ParsedListsDataKey(const std::string* key) { const char* ptr = key->data(); - int32_t key_len = pstd::DecodeFixed32(ptr); - ptr += sizeof(int32_t); - key_ = rocksdb::Slice(ptr, key_len); - ptr += key_len; - version_ = pstd::DecodeFixed32(ptr); - ptr += sizeof(int32_t); - index_ = pstd::DecodeFixed64(ptr); + const char* end_ptr = key->data() + key->size(); + decode(ptr, end_ptr); } - explicit ParsedListsDataKey(const rocksdb::Slice& key) { + explicit ParsedListsDataKey(const Slice& key) { const char* ptr = key.data(); - int32_t key_len = pstd::DecodeFixed32(ptr); - ptr += sizeof(int32_t); - key_ = rocksdb::Slice(ptr, key_len); - ptr += key_len; - version_ = pstd::DecodeFixed32(ptr); - ptr += sizeof(int32_t); - index_ = pstd::DecodeFixed64(ptr); + const char* end_ptr = key.data() + key.size(); + decode(ptr, end_ptr); + } + + void decode(const char* ptr, const char* end_ptr) { + const char* start = ptr; + // skip head reserve1_ + ptr += sizeof(reserve1_); + // skip tail reserve2_ + end_ptr -= sizeof(reserve2_); + + ptr = DecodeUserKey(ptr, std::distance(ptr, end_ptr), &key_str_); + version_ = DecodeFixed64(ptr); + ptr += sizeof(version_); + index_ = DecodeFixed64(ptr); } virtual ~ParsedListsDataKey() = default; - rocksdb::Slice key() { return key_; } + Slice key() { return Slice(key_str_); } - int32_t version() { return version_; } + uint64_t Version() { return version_; } uint64_t index() { return index_; } private: - rocksdb::Slice key_; - int32_t version_ = -1; + std::string key_str_; + char reserve1_[8] = {0}; + uint64_t version_ = (uint64_t)(-1); uint64_t index_ = 0; + char reserve2_[16] = {0}; }; } // namespace storage diff --git a/src/storage/src/lists_filter.h b/src/storage/src/lists_filter.h index 77ec977776..b31b01c441 100644 --- a/src/storage/src/lists_filter.h +++ b/src/storage/src/lists_filter.h @@ -25,19 +25,19 @@ class ListsMetaFilter : public rocksdb::CompactionFilter { bool* value_changed) const override { 
int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - auto cur_time = static_cast(unix_time); + auto cur_time = static_cast(unix_time); ParsedListsMetaValue parsed_lists_meta_value(value); TRACE("==========================START=========================="); - TRACE("[ListMetaFilter], key: %s, count = %llu, timestamp: %d, cur_time: %d, version: %d", key.ToString().c_str(), - parsed_lists_meta_value.count(), parsed_lists_meta_value.timestamp(), cur_time, - parsed_lists_meta_value.version()); + TRACE("[ListMetaFilter], key: %s, count = %llu, timestamp: %llu, cur_time: %llu, version: %llu", key.ToString().c_str(), + parsed_lists_meta_value.Count(), parsed_lists_meta_value.Etime(), cur_time, + parsed_lists_meta_value.Version()); - if (parsed_lists_meta_value.timestamp() != 0 && parsed_lists_meta_value.timestamp() < cur_time && - parsed_lists_meta_value.version() < cur_time) { + if (parsed_lists_meta_value.Etime() != 0 && parsed_lists_meta_value.Etime() < cur_time && + parsed_lists_meta_value.Version() < cur_time) { TRACE("Drop[Stale & version < cur_time]"); return true; } - if (parsed_lists_meta_value.count() == 0 && parsed_lists_meta_value.version() < cur_time) { + if (parsed_lists_meta_value.Count() == 0 && parsed_lists_meta_value.Version() < cur_time) { TRACE("Drop[Empty & version < cur_time]"); return true; } @@ -60,31 +60,42 @@ class ListsMetaFilterFactory : public rocksdb::CompactionFilterFactory { class ListsDataFilter : public rocksdb::CompactionFilter { public: - ListsDataFilter(rocksdb::DB* db, std::vector* cf_handles_ptr) + ListsDataFilter(rocksdb::DB* db, std::vector* cf_handles_ptr, int meta_cf_index) : db_(db), - cf_handles_ptr_(cf_handles_ptr) + cf_handles_ptr_(cf_handles_ptr), + meta_cf_index_(meta_cf_index) {} bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value, std::string* new_value, bool* value_changed) const override { + UNUSED(level); + UNUSED(value); + UNUSED(new_value); + UNUSED(value_changed); 
ParsedListsDataKey parsed_lists_data_key(key); TRACE("==========================START=========================="); - TRACE("[DataFilter], key: %s, index = %llu, data = %s, version = %d", parsed_lists_data_key.key().ToString().c_str(), - parsed_lists_data_key.index(), value.ToString().c_str(), parsed_lists_data_key.version()); + TRACE("[DataFilter], key: %s, index = %llu, data = %s, version = %llu", parsed_lists_data_key.key().ToString().c_str(), + parsed_lists_data_key.index(), value.ToString().c_str(), parsed_lists_data_key.Version()); - if (parsed_lists_data_key.key().ToString() != cur_key_) { - cur_key_ = parsed_lists_data_key.key().ToString(); + const char* ptr = key.data(); + int key_size = key.size(); + ptr = SeekUserkeyDelim(ptr + kPrefixReserveLength, key_size - kPrefixReserveLength); + std::string meta_key_enc(key.data(), std::distance(key.data(), ptr)); + meta_key_enc.append(kSuffixReserveLength, kNeedTransformCharacter); + + if (meta_key_enc != cur_key_) { + cur_key_ = meta_key_enc; std::string meta_value; // destroyed when close the database, Reserve Current key value if (cf_handles_ptr_->empty()) { return false; } - rocksdb::Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[0], cur_key_, &meta_value); + rocksdb::Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[meta_cf_index_], cur_key_, &meta_value); if (s.ok()) { meta_not_found_ = false; ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - cur_meta_version_ = parsed_lists_meta_value.version(); - cur_meta_timestamp_ = parsed_lists_meta_value.timestamp(); + cur_meta_version_ = parsed_lists_meta_value.Version(); + cur_meta_etime_ = parsed_lists_meta_value.Etime(); } else if (s.IsNotFound()) { meta_not_found_ = true; } else { @@ -101,12 +112,12 @@ class ListsDataFilter : public rocksdb::CompactionFilter { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - if (cur_meta_timestamp_ != 0 && cur_meta_timestamp_ < static_cast(unix_time)) { + if 
(cur_meta_etime_ != 0 && cur_meta_etime_ < static_cast(unix_time)) { TRACE("Drop[Timeout]"); return true; } - if (cur_meta_version_ > parsed_lists_data_key.version()) { + if (cur_meta_version_ > parsed_lists_data_key.Version()) { TRACE("Drop[list_data_key_version < cur_meta_version]"); return true; } else { @@ -115,6 +126,22 @@ class ListsDataFilter : public rocksdb::CompactionFilter { } } + /* + // Only judge by meta value ttl + virtual rocksdb::CompactionFilter::Decision FilterBlobByKey(int level, const Slice& key, + std::string* new_value, std::string* skip_until) const { + UNUSED(level); + UNUSED(new_value); + UNUSED(skip_until); + bool unused_value_changed; + bool should_remove = Filter(level, key, Slice{}, new_value, &unused_value_changed); + if (should_remove) { + return CompactionFilter::Decision::kRemove; + } + return CompactionFilter::Decision::kKeep; + } + */ + const char* Name() const override { return "ListsDataFilter"; } private: @@ -123,24 +150,26 @@ class ListsDataFilter : public rocksdb::CompactionFilter { rocksdb::ReadOptions default_read_options_; mutable std::string cur_key_; mutable bool meta_not_found_ = false; - mutable int32_t cur_meta_version_ = 0; - mutable int32_t cur_meta_timestamp_ = 0; + mutable uint64_t cur_meta_version_ = 0; + mutable uint64_t cur_meta_etime_ = 0; + int meta_cf_index_ = 0; }; class ListsDataFilterFactory : public rocksdb::CompactionFilterFactory { public: - ListsDataFilterFactory(rocksdb::DB** db_ptr, std::vector* handles_ptr) - : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr) {} + ListsDataFilterFactory(rocksdb::DB** db_ptr, std::vector* handles_ptr, int meta_cf_index) + : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr), meta_cf_index_(meta_cf_index) {} std::unique_ptr CreateCompactionFilter( const rocksdb::CompactionFilter::Context& context) override { - return std::unique_ptr(new ListsDataFilter(*db_ptr_, cf_handles_ptr_)); + return std::unique_ptr(new ListsDataFilter(*db_ptr_, cf_handles_ptr_, meta_cf_index_)); } 
const char* Name() const override { return "ListsDataFilterFactory"; } private: rocksdb::DB** db_ptr_ = nullptr; std::vector* cf_handles_ptr_ = nullptr; + int meta_cf_index_ = 0; }; } // namespace storage diff --git a/src/storage/src/lists_meta_value_format.h b/src/storage/src/lists_meta_value_format.h index 3ef993cbb7..7b9579746b 100644 --- a/src/storage/src/lists_meta_value_format.h +++ b/src/storage/src/lists_meta_value_format.h @@ -9,69 +9,61 @@ #include #include "src/base_value_format.h" +#include "storage/storage_define.h" namespace storage { const uint64_t InitalLeftIndex = 9223372036854775807; const uint64_t InitalRightIndex = 9223372036854775808U; +/* +*| list_size | version | left index | right index | reserve | cdate | timestamp | +*| 8B | 8B | 8B | 8B | 16B | 8B | 8B | +*/ class ListsMetaValue : public InternalValue { public: explicit ListsMetaValue(const rocksdb::Slice& user_value) : InternalValue(user_value), left_index_(InitalLeftIndex), right_index_(InitalRightIndex) {} - size_t AppendTimestampAndVersion() override { + rocksdb::Slice Encode() override { size_t usize = user_value_.size(); - char* dst = start_; + size_t needed = usize + kVersionLength + 2 * kListValueIndexLength + + kSuffixReserveLength + 2 * kTimestampLength; + char* dst = ReAllocIfNeeded(needed); + char* start_pos = dst; + memcpy(dst, user_value_.data(), usize); dst += usize; - EncodeFixed32(dst, version_); - dst += sizeof(int32_t); - EncodeFixed32(dst, timestamp_); - return usize + 2 * sizeof(int32_t); - } - - virtual size_t AppendIndex() { - char* dst = start_; - dst += user_value_.size() + 2 * sizeof(int32_t); + EncodeFixed64(dst, version_); + dst += kVersionLength; EncodeFixed64(dst, left_index_); - dst += sizeof(int64_t); + dst += kListValueIndexLength; EncodeFixed64(dst, right_index_); - return 2 * sizeof(int64_t); + dst += kListValueIndexLength; + memcpy(dst, reserve_, sizeof(reserve_)); + dst += kSuffixReserveLength; + EncodeFixed64(dst, ctime_); + dst += kTimestampLength; + 
EncodeFixed64(dst, etime_); + return rocksdb::Slice(start_pos, needed); } - static const size_t kDefaultValueSuffixLength = sizeof(int32_t) * 2 + sizeof(int64_t) * 2; - - rocksdb::Slice Encode() override { - size_t usize = user_value_.size(); - size_t needed = usize + kDefaultValueSuffixLength; - char* dst; - if (needed <= sizeof(space_)) { - dst = space_; - } else { - dst = new char[needed]; - } - start_ = dst; - size_t len = AppendTimestampAndVersion() + AppendIndex(); - return rocksdb::Slice(start_, len); - } - - int32_t UpdateVersion() { + uint64_t UpdateVersion() { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - if (version_ >= static_cast(unix_time)) { + if (version_ >= static_cast(unix_time)) { version_++; } else { - version_ = static_cast(unix_time); + version_ = static_cast(unix_time); } return version_; } - uint64_t left_index() { return left_index_; } + uint64_t LeftIndex() { return left_index_; } void ModifyLeftIndex(uint64_t index) { left_index_ -= index; } - uint64_t right_index() { return right_index_; } + uint64_t RightIndex() { return right_index_; } void ModifyRightIndex(uint64_t index) { right_index_ += index; } @@ -87,13 +79,21 @@ class ParsedListsMetaValue : public ParsedInternalValue { : ParsedInternalValue(internal_value_str) { assert(internal_value_str->size() >= kListsMetaValueSuffixLength); if (internal_value_str->size() >= kListsMetaValueSuffixLength) { + int offset = 0; user_value_ = rocksdb::Slice(internal_value_str->data(), internal_value_str->size() - kListsMetaValueSuffixLength); - version_ = DecodeFixed32(internal_value_str->data() + internal_value_str->size() - sizeof(int32_t) * 2 - - sizeof(int64_t) * 2); - timestamp_ = DecodeFixed32(internal_value_str->data() + internal_value_str->size() - sizeof(int32_t) - - sizeof(int64_t) * 2); - left_index_ = DecodeFixed64(internal_value_str->data() + internal_value_str->size() - sizeof(int64_t) * 2); - right_index_ = DecodeFixed64(internal_value_str->data() + 
internal_value_str->size() - sizeof(int64_t)); + offset += user_value_.size(); + version_ = DecodeFixed64(internal_value_str->data() + offset); + offset += kVersionLength; + left_index_ = DecodeFixed64(internal_value_str->data() + offset); + offset += kListValueIndexLength; + right_index_ = DecodeFixed64(internal_value_str->data() + offset); + offset += kListValueIndexLength; + memcpy(reserve_, internal_value_str->data() + offset, sizeof(reserve_)); + offset += kSuffixReserveLength; + ctime_ = DecodeFixed64(internal_value_str->data() + offset); + offset += kTimestampLength; + etime_ = DecodeFixed64(internal_value_str->data() + offset); + offset += kTimestampLength; } count_ = DecodeFixed64(internal_value_str->data()); } @@ -103,13 +103,21 @@ class ParsedListsMetaValue : public ParsedInternalValue { : ParsedInternalValue(internal_value_slice) { assert(internal_value_slice.size() >= kListsMetaValueSuffixLength); if (internal_value_slice.size() >= kListsMetaValueSuffixLength) { + int offset = 0; user_value_ = rocksdb::Slice(internal_value_slice.data(), internal_value_slice.size() - kListsMetaValueSuffixLength); - version_ = DecodeFixed32(internal_value_slice.data() + internal_value_slice.size() - sizeof(int32_t) * 2 - - sizeof(int64_t) * 2); - timestamp_ = DecodeFixed32(internal_value_slice.data() + internal_value_slice.size() - sizeof(int32_t) - - sizeof(int64_t) * 2); - left_index_ = DecodeFixed64(internal_value_slice.data() + internal_value_slice.size() - sizeof(int64_t) * 2); - right_index_ = DecodeFixed64(internal_value_slice.data() + internal_value_slice.size() - sizeof(int64_t)); + offset += user_value_.size(); + version_ = DecodeFixed64(internal_value_slice.data() + offset); + offset += kVersionLength; + left_index_ = DecodeFixed64(internal_value_slice.data() + offset); + offset += kListValueIndexLength; + right_index_ = DecodeFixed64(internal_value_slice.data() + offset); + offset += kListValueIndexLength; + memcpy(reserve_, internal_value_slice.data() + 
offset, sizeof(reserve_)); + offset += kSuffixReserveLength; + ctime_ = DecodeFixed64(internal_value_slice.data() + offset); + offset += kTimestampLength; + etime_ = DecodeFixed64(internal_value_slice.data() + offset); + offset += kTimestampLength; } count_ = DecodeFixed64(internal_value_slice.data()); } @@ -123,39 +131,49 @@ class ParsedListsMetaValue : public ParsedInternalValue { void SetVersionToValue() override { if (value_) { char* dst = const_cast(value_->data()) + value_->size() - kListsMetaValueSuffixLength; - EncodeFixed32(dst, version_); + EncodeFixed64(dst, version_); + } + } + + void SetCtimeToValue() override { + if (value_) { + char* dst = const_cast(value_->data()) + value_->size() - 2 * kTimestampLength; + EncodeFixed64(dst, ctime_); } } - void SetTimestampToValue() override { + void SetEtimeToValue() override { if (value_) { - char* dst = const_cast(value_->data()) + value_->size() - sizeof(int32_t) - 2 * sizeof(int64_t); - EncodeFixed32(dst, timestamp_); + char* dst = const_cast(value_->data()) + value_->size() - kTimestampLength; + EncodeFixed64(dst, etime_); } } void SetIndexToValue() { if (value_) { - char* dst = const_cast(value_->data()) + value_->size() - 2 * sizeof(int64_t); + char* dst = const_cast(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength; EncodeFixed64(dst, left_index_); - dst += sizeof(int64_t); + dst += sizeof(left_index_); EncodeFixed64(dst, right_index_); } } - static const size_t kListsMetaValueSuffixLength = 2 * sizeof(int32_t) + 2 * sizeof(int64_t); - - int32_t InitialMetaValue() { - this->set_count(0); + uint64_t InitialMetaValue() { + this->SetCount(0); this->set_left_index(InitalLeftIndex); this->set_right_index(InitalRightIndex); - this->set_timestamp(0); + this->SetEtime(0); + this->SetCtime(0); return this->UpdateVersion(); } - uint64_t count() { return count_; } + bool IsValid() override { + return !IsStale() && Count() != 0; + } + + uint64_t Count() { return count_; } - void 
set_count(uint64_t count) { + void SetCount(uint64_t count) { count_ = count; if (value_) { char* dst = const_cast(value_->data()); @@ -171,24 +189,24 @@ class ParsedListsMetaValue : public ParsedInternalValue { } } - int32_t UpdateVersion() { + uint64_t UpdateVersion() { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - if (version_ >= static_cast(unix_time)) { + if (version_ >= static_cast(unix_time)) { version_++; } else { - version_ = static_cast(unix_time); + version_ = static_cast(unix_time); } SetVersionToValue(); return version_; } - uint64_t left_index() { return left_index_; } + uint64_t LeftIndex() { return left_index_; } void set_left_index(uint64_t index) { left_index_ = index; if (value_) { - char* dst = const_cast(value_->data()) + value_->size() - 2 * sizeof(int64_t); + char* dst = const_cast(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength; EncodeFixed64(dst, left_index_); } } @@ -196,17 +214,17 @@ class ParsedListsMetaValue : public ParsedInternalValue { void ModifyLeftIndex(uint64_t index) { left_index_ -= index; if (value_) { - char* dst = const_cast(value_->data()) + value_->size() - 2 * sizeof(int64_t); + char* dst = const_cast(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength; EncodeFixed64(dst, left_index_); } } - uint64_t right_index() { return right_index_; } + uint64_t RightIndex() { return right_index_; } void set_right_index(uint64_t index) { right_index_ = index; if (value_) { - char* dst = const_cast(value_->data()) + value_->size() - sizeof(int64_t); + char* dst = const_cast(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength + kListValueIndexLength; EncodeFixed64(dst, right_index_); } } @@ -214,11 +232,14 @@ class ParsedListsMetaValue : public ParsedInternalValue { void ModifyRightIndex(uint64_t index) { right_index_ += index; if (value_) { - char* dst = const_cast(value_->data()) + value_->size() - sizeof(int64_t); + 
char* dst = const_cast(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength + kListValueIndexLength; EncodeFixed64(dst, right_index_); } } +private: + const size_t kListsMetaValueSuffixLength = kVersionLength + 2 * kListValueIndexLength + kSuffixReserveLength + 2 * kTimestampLength; + private: uint64_t count_ = 0; uint64_t left_index_ = 0; diff --git a/src/storage/src/murmurhash.h b/src/storage/src/murmurhash.h index 3b33d69017..6692033a24 100644 --- a/src/storage/src/murmurhash.h +++ b/src/storage/src/murmurhash.h @@ -42,3 +42,4 @@ struct murmur_hash { }; } // namespace storage #endif // SRC_MURMURHASH_H_ + diff --git a/src/storage/src/options_helper.h b/src/storage/src/options_helper.h index 2e81202d9c..5907e2116f 100644 --- a/src/storage/src/options_helper.h +++ b/src/storage/src/options_helper.h @@ -68,9 +68,12 @@ static std::unordered_map mutable_cf_options_member {offset_of(&rocksdb::ColumnFamilyOptions::hard_pending_compaction_bytes_limit), MemberType::kUint64T}}, {"disable_auto_compactions", {offset_of(&rocksdb::ColumnFamilyOptions::disable_auto_compactions), MemberType::kBool}}, + {"ttl", {offset_of(&rocksdb::AdvancedColumnFamilyOptions::ttl), MemberType::kUint64T}}, + {"periodic_compaction_seconds", + {offset_of(&rocksdb::AdvancedColumnFamilyOptions::periodic_compaction_seconds), MemberType::kUint64T}}, }; extern bool ParseOptionMember(const MemberType& member_type, const std::string& value, char* member_address); } // namespace storage -#endif // SRC_OPTIONS_HELPER_H \ No newline at end of file +#endif // SRC_OPTIONS_HELPER_H diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 3066a62759..9ac40cdcea 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -3,31 +3,51 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
-#include "src/redis.h" #include +#include "rocksdb/env.h" + +#include "src/redis.h" +#include "src/strings_filter.h" +#include "src/lists_filter.h" +#include "src/base_filter.h" +#include "src/zsets_filter.h" + namespace storage { +const rocksdb::Comparator* ListsDataKeyComparator() { + static ListsDataKeyComparatorImpl ldkc; + return &ldkc; +} + +rocksdb::Comparator* ZSetsScoreKeyComparator() { + static ZSetsScoreKeyComparatorImpl zsets_score_key_compare; + return &zsets_score_key_compare; +} -Redis::Redis(Storage* const s, const DataType& type) - : storage_(s), - type_(type), +Redis::Redis(Storage* const s, int32_t index) + : storage_(s), index_(index), lock_mgr_(std::make_shared(1000, 0, std::make_shared())), small_compaction_threshold_(5000), small_compaction_duration_threshold_(10000) { statistics_store_ = std::make_unique>(); scan_cursors_store_ = std::make_unique>(); - scan_cursors_store_->SetCapacity(5000); + spop_counts_store_ = std::make_unique>(); default_compact_range_options_.exclusive_manual_compaction = false; default_compact_range_options_.change_level = true; + spop_counts_store_->SetCapacity(1000); + scan_cursors_store_->SetCapacity(5000); + //env_ = rocksdb::Env::Instance(); handles_.clear(); } Redis::~Redis() { + rocksdb::CancelAllBackgroundWork(db_, true); std::vector tmp_handles = handles_; handles_.clear(); for (auto handle : tmp_handles) { delete handle; } + // delete env_; delete db_; if (default_compact_range_options_.canceled) { @@ -35,14 +55,135 @@ Redis::~Redis() { } } -Status Redis::GetScanStartPoint(const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point) { - std::string index_key = key.ToString() + "_" + pattern.ToString() + "_" + std::to_string(cursor); +Status Redis::Open(const StorageOptions& storage_options, const std::string& db_path) { + statistics_store_->SetCapacity(storage_options.statistics_max_size); + small_compaction_threshold_ = storage_options.small_compaction_threshold; + + 
rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); + table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); + + rocksdb::DBOptions db_ops(storage_options.options); + db_ops.create_missing_column_families = true; + // db_ops.env = env_; + + // string column-family options + rocksdb::ColumnFamilyOptions string_cf_ops(storage_options.options); + string_cf_ops.compaction_filter_factory = std::make_shared(); + + rocksdb::BlockBasedTableOptions string_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + string_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + string_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(string_table_ops)); + + + // hash column-family options + rocksdb::ColumnFamilyOptions hash_meta_cf_ops(storage_options.options); + rocksdb::ColumnFamilyOptions hash_data_cf_ops(storage_options.options); + hash_meta_cf_ops.compaction_filter_factory = std::make_shared(); + hash_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kHashesMetaCF); + + rocksdb::BlockBasedTableOptions hash_meta_cf_table_ops(table_ops); + rocksdb::BlockBasedTableOptions hash_data_cf_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + hash_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + hash_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + hash_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(hash_meta_cf_table_ops)); + hash_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(hash_data_cf_table_ops)); + + // list column-family options + rocksdb::ColumnFamilyOptions list_meta_cf_ops(storage_options.options); + rocksdb::ColumnFamilyOptions list_data_cf_ops(storage_options.options); + list_meta_cf_ops.compaction_filter_factory = std::make_shared(); + 
list_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kListsMetaCF); + list_data_cf_ops.comparator = ListsDataKeyComparator(); + + rocksdb::BlockBasedTableOptions list_meta_cf_table_ops(table_ops); + rocksdb::BlockBasedTableOptions list_data_cf_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + list_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + list_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + list_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(list_meta_cf_table_ops)); + list_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(list_data_cf_table_ops)); + + // set column-family options + rocksdb::ColumnFamilyOptions set_meta_cf_ops(storage_options.options); + rocksdb::ColumnFamilyOptions set_data_cf_ops(storage_options.options); + set_meta_cf_ops.compaction_filter_factory = std::make_shared(); + set_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kSetsMetaCF); + + rocksdb::BlockBasedTableOptions set_meta_cf_table_ops(table_ops); + rocksdb::BlockBasedTableOptions set_data_cf_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + set_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + set_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + set_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(set_meta_cf_table_ops)); + set_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(set_data_cf_table_ops)); + + // zset column-family options + rocksdb::ColumnFamilyOptions zset_meta_cf_ops(storage_options.options); + rocksdb::ColumnFamilyOptions zset_data_cf_ops(storage_options.options); + rocksdb::ColumnFamilyOptions zset_score_cf_ops(storage_options.options); + 
zset_meta_cf_ops.compaction_filter_factory = std::make_shared(); + zset_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kZsetsMetaCF); + zset_score_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, kZsetsMetaCF); + zset_score_cf_ops.comparator = ZSetsScoreKeyComparator(); + + rocksdb::BlockBasedTableOptions zset_meta_cf_table_ops(table_ops); + rocksdb::BlockBasedTableOptions zset_data_cf_table_ops(table_ops); + rocksdb::BlockBasedTableOptions zset_score_cf_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + zset_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + zset_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + zset_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + zset_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_meta_cf_table_ops)); + zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops)); + zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops)); + + std::vector column_families; + column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops); + // hash CF + column_families.emplace_back("hash_meta_cf", hash_meta_cf_ops); + column_families.emplace_back("hash_data_cf", hash_data_cf_ops); + // set CF + column_families.emplace_back("set_meta_cf", set_meta_cf_ops); + column_families.emplace_back("set_data_cf", set_data_cf_ops); + // list CF + column_families.emplace_back("list_meta_cf", list_meta_cf_ops); + column_families.emplace_back("list_data_cf", list_data_cf_ops); + // zset CF + column_families.emplace_back("zset_meta_cf", zset_meta_cf_ops); + column_families.emplace_back("zset_data_cf", zset_data_cf_ops); + column_families.emplace_back("zset_score_cf", zset_score_cf_ops); + return rocksdb::DB::Open(db_ops, 
db_path, column_families, &handles_, &db_); +} + +Status Redis::GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point) { + std::string index_key; + index_key.append(1, DataTypeTag[type]); + index_key.append("_"); + index_key.append(key.ToString()); + index_key.append("_"); + index_key.append(pattern.ToString()); + index_key.append("_"); + index_key.append(std::to_string(cursor)); return scan_cursors_store_->Lookup(index_key, start_point); } -Status Redis::StoreScanNextPoint(const Slice& key, const Slice& pattern, int64_t cursor, +Status Redis::StoreScanNextPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, const std::string& next_point) { - std::string index_key = key.ToString() + "_" + pattern.ToString() + "_" + std::to_string(cursor); + std::string index_key; + index_key.append(1, DataTypeTag[type]); + index_key.append("_"); + index_key.append(key.ToString()); + index_key.append("_"); + index_key.append(pattern.ToString()); + index_key.append("_"); + index_key.append(std::to_string(cursor)); return scan_cursors_store_->Insert(index_key, next_point); } @@ -51,6 +192,51 @@ Status Redis::SetMaxCacheStatisticKeys(size_t max_cache_statistic_keys) { return Status::OK(); } +Status Redis::CompactRange(const DataType& dtype, const rocksdb::Slice* begin, const rocksdb::Slice* end, const ColumnFamilyType& type) { + Status s; + switch (dtype) { + case DataType::kStrings: + s = db_->CompactRange(default_compact_range_options_, begin, end); + break; + case DataType::kHashes: + if (type == kMeta || type == kMetaAndData) { + s = db_->CompactRange(default_compact_range_options_, handles_[kHashesMetaCF], begin, end); + } + if (s.ok() && (type == kData || type == kMetaAndData)) { + s = db_->CompactRange(default_compact_range_options_, handles_[kHashesDataCF], begin, end); + } + break; + case DataType::kSets: + if (type == kMeta || type == kMetaAndData) { + 
db_->CompactRange(default_compact_range_options_, handles_[kSetsMetaCF], begin, end); + } + if (s.ok() && (type == kData || type == kMetaAndData)) { + db_->CompactRange(default_compact_range_options_, handles_[kSetsDataCF], begin, end); + } + break; + case DataType::kLists: + if (type == kMeta || type == kMetaAndData) { + s = db_->CompactRange(default_compact_range_options_, handles_[kListsMetaCF], begin, end); + } + if (s.ok() && (type == kData || type == kMetaAndData)) { + s = db_->CompactRange(default_compact_range_options_, handles_[kListsDataCF], begin, end); + } + break; + case DataType::kZSets: + if (type == kMeta || type == kMetaAndData) { + db_->CompactRange(default_compact_range_options_, handles_[kZsetsMetaCF], begin, end); + } + if (s.ok() && (type == kData || type == kMetaAndData)) { + db_->CompactRange(default_compact_range_options_, handles_[kZsetsDataCF], begin, end); + db_->CompactRange(default_compact_range_options_, handles_[kZsetsScoreCF], begin, end); + } + break; + default: + return Status::Corruption("Invalid data type"); + } + return s; +} + Status Redis::SetSmallCompactionThreshold(uint64_t small_compaction_threshold) { small_compaction_threshold_ = small_compaction_threshold; return Status::OK(); @@ -61,34 +247,42 @@ Status Redis::SetSmallCompactionDurationThreshold(uint64_t small_compaction_dura return Status::OK(); } -Status Redis::UpdateSpecificKeyStatistics(const std::string& key, uint64_t count) { +Status Redis::UpdateSpecificKeyStatistics(const DataType& dtype, const std::string& key, uint64_t count) { if ((statistics_store_->Capacity() != 0U) && (count != 0U) && (small_compaction_threshold_ != 0U)) { KeyStatistics data; - statistics_store_->Lookup(key, &data); + std::string lkp_key; + lkp_key.append(1, DataTypeTag[dtype]); + lkp_key.append(key); + statistics_store_->Lookup(lkp_key, &data); data.AddModifyCount(count); - statistics_store_->Insert(key, data); - AddCompactKeyTaskIfNeeded(key, data.ModifyCount(), data.AvgDuration()); + 
statistics_store_->Insert(lkp_key, data); + AddCompactKeyTaskIfNeeded(dtype, key, data.ModifyCount(), data.AvgDuration()); } return Status::OK(); } -Status Redis::UpdateSpecificKeyDuration(const std::string& key, uint64_t duration) { +Status Redis::UpdateSpecificKeyDuration(const DataType& dtype, const std::string& key, uint64_t duration) { if ((statistics_store_->Capacity() != 0U) && (duration != 0U) && (small_compaction_duration_threshold_ != 0U)) { KeyStatistics data; - statistics_store_->Lookup(key, &data); + std::string lkp_key; + lkp_key.append(1, DataTypeTag[dtype]); + lkp_key.append(key); + statistics_store_->Lookup(lkp_key, &data); data.AddDuration(duration); - statistics_store_->Insert(key, data); - AddCompactKeyTaskIfNeeded(key, data.ModifyCount(), data.AvgDuration()); + statistics_store_->Insert(lkp_key, data); + AddCompactKeyTaskIfNeeded(dtype, key, data.ModifyCount(), data.AvgDuration()); } return Status::OK(); } -Status Redis::AddCompactKeyTaskIfNeeded(const std::string& key, uint64_t count, uint64_t duration) { - if (count < small_compaction_threshold_ || duration < small_compaction_duration_threshold_) { +Status Redis::AddCompactKeyTaskIfNeeded(const DataType& dtype, const std::string& key, uint64_t total, uint64_t duration) { + if (total < small_compaction_threshold_ || duration < small_compaction_duration_threshold_) { return Status::OK(); } else { - storage_->AddBGTask({type_, kCompactRange, {key, key}}); - statistics_store_->Remove(key); + std::string lkp_key(1, DataTypeTag[dtype]); + lkp_key.append(key); + storage_->AddBGTask({dtype, kCompactRange, {key}}); + statistics_store_->Remove(lkp_key); } return Status::OK(); } @@ -110,7 +304,7 @@ Status Redis::SetOptions(const OptionType& option_type, const std::unordered_map return s; } -void Redis::GetRocksDBInfo(std::string &info, const char *prefix) { +void Redis::GetRocksDBInfo(std::string& info, const char* prefix) { std::ostringstream string_stream; string_stream << "#" << prefix << "RocksDB" 
<< "\r\n"; @@ -179,7 +373,7 @@ void Redis::GetRocksDBInfo(std::string &info, const char *prefix) { write_stream_key_value(rocksdb::DB::Properties::kBlobStats, "blob_stats"); write_stream_key_value(rocksdb::DB::Properties::kTotalBlobFileSize, "total_blob_file_size"); write_stream_key_value(rocksdb::DB::Properties::kLiveBlobFileSize, "live_blob_file_size"); - + // column family stats std::map mapvalues; db_->rocksdb::DB::GetMapProperty(rocksdb::DB::Properties::kCFStats,&mapvalues); @@ -197,6 +391,46 @@ void Redis::SetCompactRangeOptions(const bool is_canceled) { } else { default_compact_range_options_.canceled->store(is_canceled); } +Status Redis::GetProperty(const std::string& property, uint64_t* out) { + std::string value; + db_->GetProperty(property, &value); + *out = std::strtoull(value.c_str(), nullptr, 10); + return Status::OK(); +} + +Status Redis::ScanKeyNum(std::vector* key_infos) { + key_infos->resize(5); + rocksdb::Status s; + s = ScanStringsKeyNum(&((*key_infos)[0])); + if (!s.ok()) { + return s; + } + s = ScanHashesKeyNum(&((*key_infos)[1])); + if (!s.ok()) { + return s; + } + s = ScanListsKeyNum(&((*key_infos)[2])); + if (!s.ok()) { + return s; + } + s = ScanZsetsKeyNum(&((*key_infos)[3])); + if (!s.ok()) { + return s; + } + s = ScanSetsKeyNum(&((*key_infos)[4])); + if (!s.ok()) { + return s; + } + + return Status::OK(); +} + +void Redis::ScanDatabase() { + ScanStrings(); + ScanHashes(); + ScanLists(); + ScanZsets(); + ScanSets(); } } // namespace storage diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index 24880ac4a3..180cc7fa9c 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -14,12 +14,19 @@ #include "rocksdb/slice.h" #include "rocksdb/status.h" -#include "pstd/include/env.h" - +#include "src/debug.h" #include "src/lock_mgr.h" #include "src/lru_cache.h" #include "src/mutex_impl.h" +#include "src/type_iterator.h" +#include "src/custom_comparator.h" #include "storage/storage.h" +#include "storage/storage_define.h" 
+#include "pstd/include/env.h" +#include "pstd/include/pika_codis_slot.h" + +#define SPOP_COMPACT_THRESHOLD_COUNT 500 +#define SPOP_COMPACT_THRESHOLD_DURATION (1000 * 1000) // 1000ms namespace storage { using Status = rocksdb::Status; @@ -27,7 +34,7 @@ using Slice = rocksdb::Slice; class Redis { public: - Redis(Storage* storage, const DataType& type); + Redis(Storage* storage, int32_t index); virtual ~Redis(); rocksdb::DB* GetDB() { return db_; } @@ -78,50 +85,271 @@ class Redis { Redis* ctx; std::string key; uint64_t start_us; - KeyStatisticsDurationGuard(Redis* that, const std::string& key): ctx(that), key(key), start_us(pstd::NowMicros()) { + DataType dtype; + KeyStatisticsDurationGuard(Redis* that, const DataType type, const std::string& key): ctx(that), key(key), start_us(pstd::NowMicros()), dtype(type) { } ~KeyStatisticsDurationGuard() { uint64_t end_us = pstd::NowMicros(); uint64_t duration = end_us > start_us ? end_us - start_us : 0; - ctx->UpdateSpecificKeyDuration(key, duration); + ctx->UpdateSpecificKeyDuration(dtype, key, duration); } }; + int GetIndex() const {return index_;} Status SetOptions(const OptionType& option_type, const std::unordered_map& options); void SetWriteWalOptions(const bool is_wal_disable); void SetCompactRangeOptions(const bool is_canceled); // Common Commands - virtual Status Open(const StorageOptions& storage_options, const std::string& db_path) = 0; - virtual Status CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type = kMetaAndData) = 0; - virtual Status GetProperty(const std::string& property, uint64_t* out) = 0; - virtual Status ScanKeyNum(KeyInfo* key_info) = 0; - virtual Status ScanKeys(const std::string& pattern, std::vector* keys) = 0; - virtual Status PKPatternMatchDel(const std::string& pattern, int32_t* ret) = 0; + Status Open(const StorageOptions& storage_options, const std::string& db_path); + + virtual Status CompactRange(const DataType& option_type, const 
rocksdb::Slice* begin, const rocksdb::Slice* end, + const ColumnFamilyType& type = kMetaAndData); + + virtual Status GetProperty(const std::string& property, uint64_t* out); + + Status ScanKeyNum(std::vector* key_info); + Status ScanStringsKeyNum(KeyInfo* key_info); + Status ScanHashesKeyNum(KeyInfo* key_info); + Status ScanListsKeyNum(KeyInfo* key_info); + Status ScanZsetsKeyNum(KeyInfo* key_info); + Status ScanSetsKeyNum(KeyInfo* key_info); + + virtual Status StringsPKPatternMatchDel(const std::string& pattern, int32_t* ret); + virtual Status ListsPKPatternMatchDel(const std::string& pattern, int32_t* ret); + virtual Status HashesPKPatternMatchDel(const std::string& pattern, int32_t* ret); + virtual Status ZsetsPKPatternMatchDel(const std::string& pattern, int32_t* ret); + virtual Status SetsPKPatternMatchDel(const std::string& pattern, int32_t* ret); // Keys Commands - virtual Status Expire(const Slice& key, int32_t ttl) = 0; - virtual Status Del(const Slice& key) = 0; - virtual bool Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, - int64_t* count, std::string* next_key) = 0; - virtual bool PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) = 0; - virtual Status Expireat(const Slice& key, int32_t timestamp) = 0; - virtual Status Persist(const Slice& key) = 0; - virtual Status TTL(const Slice& key, int64_t* timestamp) = 0; + virtual Status StringsExpire(const Slice& key, int64_t ttl); + virtual Status HashesExpire(const Slice& key, int64_t ttl); + virtual Status ListsExpire(const Slice& key, int64_t ttl); + virtual Status ZsetsExpire(const Slice& key, int64_t ttl); + virtual Status SetsExpire(const Slice& key, int64_t ttl); + + virtual Status StringsDel(const Slice& key); + virtual Status HashesDel(const Slice& key); + virtual Status ListsDel(const Slice& key); + virtual Status ZsetsDel(const Slice& key); + virtual Status 
SetsDel(const Slice& key); + + virtual Status StringsExpireat(const Slice& key, int64_t timestamp); + virtual Status HashesExpireat(const Slice& key, int64_t timestamp); + virtual Status ListsExpireat(const Slice& key, int64_t timestamp); + virtual Status SetsExpireat(const Slice& key, int64_t timestamp); + virtual Status ZsetsExpireat(const Slice& key, int64_t timestamp); + + virtual Status StringsPersist(const Slice& key); + virtual Status HashesPersist(const Slice& key); + virtual Status ListsPersist(const Slice& key); + virtual Status ZsetsPersist(const Slice& key); + virtual Status SetsPersist(const Slice& key); + + virtual Status StringsTTL(const Slice& key, int64_t* timestamp); + virtual Status HashesTTL(const Slice& key, int64_t* timestamp); + virtual Status ListsTTL(const Slice& key, int64_t* timestamp); + virtual Status ZsetsTTL(const Slice& key, int64_t* timestamp); + virtual Status SetsTTL(const Slice& key, int64_t* timestamp); + + // Strings Commands + Status Append(const Slice& key, const Slice& value, int32_t* ret); + Status BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, bool have_range); + Status BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, std::string &value_to_dest, int64_t* ret); + Status Decrby(const Slice& key, int64_t value, int64_t* ret); + Status Get(const Slice& key, std::string* value); + Status GetWithTTL(const Slice& key, std::string* value, int64_t* ttl); + Status GetBit(const Slice& key, int64_t offset, int32_t* ret); + Status Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret); + Status GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, + std::string* ret, std::string* value, int64_t* ttl); + Status GetSet(const Slice& key, const Slice& value, std::string* old_value); + Status Incrby(const Slice& key, int64_t value, int64_t* ret); + Status Incrbyfloat(const Slice& key, const Slice& value, std::string* 
ret); + Status MSet(const std::vector& kvs); + Status MSetnx(const std::vector& kvs, int32_t* ret); + Status Set(const Slice& key, const Slice& value); + Status Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl = 0); + Status SetBit(const Slice& key, int64_t offset, int32_t value, int32_t* ret); + Status Setex(const Slice& key, const Slice& value, int64_t ttl); + Status Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl = 0); + Status Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, int64_t ttl = 0); + Status Delvx(const Slice& key, const Slice& value, int32_t* ret); + Status Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret); + Status Strlen(const Slice& key, int32_t* len); + + Status BitPos(const Slice& key, int32_t bit, int64_t* ret); + Status BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret); + Status BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret); + Status PKSetexAt(const Slice& key, const Slice& value, int64_t timestamp); + + // Hash Commands + Status HDel(const Slice& key, const std::vector& fields, int32_t* ret); + Status HExists(const Slice& key, const Slice& field); + Status HGet(const Slice& key, const Slice& field, std::string* value); + Status HGetall(const Slice& key, std::vector* fvs); + Status HGetallWithTTL(const Slice& key, std::vector* fvs, int64_t* ttl); + Status HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret); + Status HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value); + Status HKeys(const Slice& key, std::vector* fields); + Status HLen(const Slice& key, int32_t* ret); + Status HMGet(const Slice& key, const std::vector& fields, std::vector* vss); + Status HMSet(const Slice& key, const std::vector& fvs); + Status HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res); + Status 
HSetnx(const Slice& key, const Slice& field, const Slice& value, int32_t* ret); + Status HVals(const Slice& key, std::vector* values); + Status HStrlen(const Slice& key, const Slice& field, int32_t* len); + Status HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* field_values, int64_t* next_cursor); + Status HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count, + std::vector* field_values, std::string* next_field); + Status PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, + int32_t limit, std::vector* field_values, std::string* next_field); + Status PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, + int32_t limit, std::vector* field_values, std::string* next_field); Status SetMaxCacheStatisticKeys(size_t max_cache_statistic_keys); Status SetSmallCompactionThreshold(uint64_t small_compaction_threshold); Status SetSmallCompactionDurationThreshold(uint64_t small_compaction_duration_threshold); - std::vector GetHandles(){ return handles_;}; + + std::vector GetStringCFHandles() { return {handles_[0]}; } + + std::vector GetHashCFHandles() { + return {handles_.begin() + kHashesMetaCF, handles_.begin() + kHashesDataCF + 1}; + } + + std::vector GetListCFHandles() { + return {handles_.begin() + kListsMetaCF, handles_.begin() + kListsDataCF + 1}; + } + + std::vector GetSetCFHandles() { + return {handles_.begin() + kSetsMetaCF, handles_.begin() + kSetsDataCF + 1}; + } + + std::vector GetZsetCFHandles() { + return {handles_.begin() + kZsetsMetaCF, handles_.end()}; + } void GetRocksDBInfo(std::string &info, const char *prefix); - protected: + // Sets Commands + Status SAdd(const Slice& key, const std::vector& members, int32_t* ret); + Status SCard(const Slice& key, int32_t* ret); + Status SDiff(const std::vector& keys, std::vector* members); + Status SDiffstore(const 
Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SInter(const std::vector& keys, std::vector* members); + Status SInterstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SIsmember(const Slice& key, const Slice& member, int32_t* ret); + Status SMembers(const Slice& key, std::vector* members); + Status SMembersWithTTL(const Slice& key, std::vector* members, int64_t* ttl); + Status SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret); + Status SPop(const Slice& key, std::vector* members, int64_t cnt); + Status SRandmember(const Slice& key, int32_t count, std::vector* members); + Status SRem(const Slice& key, const std::vector& members, int32_t* ret); + Status SUnion(const std::vector& keys, std::vector* members); + Status SUnionstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); + Status SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* members, int64_t* next_cursor); + Status AddAndGetSpopCount(const std::string& key, uint64_t* count); + Status ResetSpopCount(const std::string& key); + + // Lists commands + Status LIndex(const Slice& key, int64_t index, std::string* element); + Status LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot, + const std::string& value, int64_t* ret); + Status LLen(const Slice& key, uint64_t* len); + Status LPop(const Slice& key, int64_t count, std::vector* elements); + Status LPush(const Slice& key, const std::vector& values, uint64_t* ret); + Status LPushx(const Slice& key, const std::vector& values, uint64_t* len); + Status LRange(const Slice& key, int64_t start, int64_t stop, std::vector* ret); + Status LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector* ret, int64_t* ttl); + Status LRem(const Slice& key, int64_t count, const Slice& value, 
uint64_t* ret); + Status LSet(const Slice& key, int64_t index, const Slice& value); + Status LTrim(const Slice& key, int64_t start, int64_t stop); + Status RPop(const Slice& key, int64_t count, std::vector* elements); + Status RPoplpush(const Slice& source, const Slice& destination, std::string* element); + Status RPush(const Slice& key, const std::vector& values, uint64_t* ret); + Status RPushx(const Slice& key, const std::vector& values, uint64_t* len); + + // Zsets Commands + Status ZAdd(const Slice& key, const std::vector& score_members, int32_t* ret); + Status ZCard(const Slice& key, int32_t* card); + Status ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret); + Status ZIncrby(const Slice& key, const Slice& member, double increment, double* ret); + Status ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members); + Status ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector* score_members, int64_t* ttl); + Status ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count, + int64_t offset, std::vector* score_members); + Status ZRank(const Slice& key, const Slice& member, int32_t* rank); + Status ZRem(const Slice& key, const std::vector& members, int32_t* ret); + Status ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret); + Status ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret); + Status ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members); + Status ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count, + int64_t offset, std::vector* score_members); + Status ZRevrank(const Slice& key, const Slice& member, int32_t* rank); + Status ZScore(const Slice& key, const Slice& member, double* score); + Status ZGetAll(const Slice& key, double weight, std::map* 
value_to_dest); + Status ZUnionstore(const Slice& destination, const std::vector& keys, const std::vector& weights, + AGGREGATE agg, std::map& value_to_dest, int32_t* ret); + Status ZInterstore(const Slice& destination, const std::vector& keys, const std::vector& weights, + AGGREGATE agg, std::vector& value_to_dest, int32_t* ret); + Status ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, + std::vector* members); + Status ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, + int32_t* ret); + Status ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, + int32_t* ret); + Status ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* score_members, int64_t* next_cursor); + Status ZPopMax(const Slice& key, int64_t count, std::vector* score_members); + Status ZPopMin(const Slice& key, int64_t count, std::vector* score_members); + + void ScanDatabase(); + void ScanStrings(); + void ScanHashes(); + void ScanLists(); + void ScanZsets(); + void ScanSets(); + + TypeIterator* CreateIterator(const DataType& type, const std::string& pattern, const Slice* lower_bound, const Slice* upper_bound) { + return CreateIterator(DataTypeTag[type], pattern, lower_bound, upper_bound); + } + + TypeIterator* CreateIterator(const char& type, const std::string& pattern, const Slice* lower_bound, const Slice* upper_bound) { + rocksdb::ReadOptions options; + options.fill_cache = false; + options.iterate_lower_bound = lower_bound; + options.iterate_upper_bound = upper_bound; + switch (type) { + case 'k': + return new StringsIterator(options, db_, handles_[kStringsCF], pattern); + break; + case 'h': + return new HashesIterator(options, db_, handles_[kHashesMetaCF], pattern); + break; + case 's': + return new SetsIterator(options, db_, handles_[kSetsMetaCF], pattern); + break; + case 'l': + return new 
ListsIterator(options, db_, handles_[kListsMetaCF], pattern); + break; + case 'z': + return new ZsetsIterator(options, db_, handles_[kZsetsMetaCF], pattern); + break; + default: + LOG(WARNING) << "Invalid datatype to create iterator"; + return nullptr; + } + return nullptr; + } + +private: + int32_t index_ = 0; Storage* const storage_; - DataType type_; std::shared_ptr lock_mgr_; rocksdb::DB* db_ = nullptr; + //TODO(wangshaoyi): seperate env for each rocksdb instance + // rocksdb::Env* env_ = nullptr; std::vector handles_; rocksdb::WriteOptions default_write_options_; @@ -130,18 +358,19 @@ class Redis { // For Scan std::unique_ptr> scan_cursors_store_; + std::unique_ptr> spop_counts_store_; - Status GetScanStartPoint(const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point); - Status StoreScanNextPoint(const Slice& key, const Slice& pattern, int64_t cursor, const std::string& next_point); + Status GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point); + Status StoreScanNextPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, const std::string& next_point); // For Statistics std::atomic_uint64_t small_compaction_threshold_; std::atomic_uint64_t small_compaction_duration_threshold_; std::unique_ptr> statistics_store_; - Status UpdateSpecificKeyStatistics(const std::string& key, uint64_t count); - Status UpdateSpecificKeyDuration(const std::string& key, uint64_t duration); - Status AddCompactKeyTaskIfNeeded(const std::string& key, uint64_t count, uint64_t duration); + Status UpdateSpecificKeyStatistics(const DataType& dtype, const std::string& key, uint64_t count); + Status UpdateSpecificKeyDuration(const DataType& dtype, const std::string& key, uint64_t duration); + Status AddCompactKeyTaskIfNeeded(const DataType& dtype, const std::string& key, uint64_t count, uint64_t duration); }; } // namespace storage diff --git 
a/src/storage/src/redis_hashes.cc b/src/storage/src/redis_hashes.cc index 4d1c9bf6b7..27eeff7e69 100644 --- a/src/storage/src/redis_hashes.cc +++ b/src/storage/src/redis_hashes.cc @@ -3,88 +3,23 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "src/redis_hashes.h" +#include "src/redis.h" #include #include #include +#include "pstd/include/pika_codis_slot.h" #include "src/base_filter.h" #include "src/scope_record_lock.h" #include "src/scope_snapshot.h" +#include "src/base_data_key_format.h" +#include "src/base_data_value_format.h" #include "storage/util.h" namespace storage { - -RedisHashes::RedisHashes(Storage* const s, const DataType& type) : Redis(s, type) {} - -Status RedisHashes::Open(const StorageOptions& storage_options, const std::string& db_path) { - statistics_store_->SetCapacity(storage_options.statistics_max_size); - small_compaction_threshold_ = storage_options.small_compaction_threshold; - small_compaction_duration_threshold_ = storage_options.small_compaction_duration_threshold; - - rocksdb::Options ops(storage_options.options); - Status s = rocksdb::DB::Open(ops, db_path, &db_); - if (s.ok()) { - // create column family - rocksdb::ColumnFamilyHandle* cf; - s = db_->CreateColumnFamily(rocksdb::ColumnFamilyOptions(), "data_cf", &cf); - if (!s.ok()) { - return s; - } - // close DB - delete cf; - delete db_; - } - - // Open - rocksdb::DBOptions db_ops(storage_options.options); - rocksdb::ColumnFamilyOptions meta_cf_ops(storage_options.options); - rocksdb::ColumnFamilyOptions data_cf_ops(storage_options.options); - meta_cf_ops.compaction_filter_factory = std::make_shared(); - data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_); - - // use the bloom filter policy to reduce disk reads - rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); - 
table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); - rocksdb::BlockBasedTableOptions meta_cf_table_ops(table_ops); - rocksdb::BlockBasedTableOptions data_cf_table_ops(table_ops); - if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { - meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - } - meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(meta_cf_table_ops)); - data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(data_cf_table_ops)); - - std::vector column_families; - // Meta CF - column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, meta_cf_ops); - // Data CF - column_families.emplace_back("data_cf", data_cf_ops); - return rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); -} - -Status RedisHashes::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, const ColumnFamilyType& type) { - if (type == kMeta || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[0], begin, end); - } - if (type == kData || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[1], begin, end); - } - return Status::OK(); -} - -Status RedisHashes::GetProperty(const std::string& property, uint64_t* out) { - std::string value; - db_->GetProperty(handles_[0], property, &value); - *out = std::strtoull(value.c_str(), nullptr, 10); - db_->GetProperty(handles_[1], property, &value); - *out += std::strtoull(value.c_str(), nullptr, 10); - return Status::OK(); -} - -Status RedisHashes::ScanKeyNum(KeyInfo* key_info) { +Status Redis::ScanHashesKeyNum(KeyInfo* key_info) { uint64_t keys = 0; uint64_t expires = 0; uint64_t ttl_sum = 0; @@ -99,16 +34,16 @@ Status RedisHashes::ScanKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* 
iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kHashesDataCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedHashesMetaValue parsed_hashes_meta_value(iter->value()); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { invaild_keys++; } else { keys++; if (!parsed_hashes_meta_value.IsPermanentSurvival()) { expires++; - ttl_sum += parsed_hashes_meta_value.timestamp() - curtime; + ttl_sum += parsed_hashes_meta_value.Etime() - curtime; } } } @@ -121,29 +56,7 @@ Status RedisHashes::ScanKeyNum(KeyInfo* key_info) { return Status::OK(); } -Status RedisHashes::ScanKeys(const std::string& pattern, std::vector* keys) { - std::string key; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); - for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - ParsedHashesMetaValue parsed_hashes_meta_value(iter->value()); - if (!parsed_hashes_meta_value.IsStale() && parsed_hashes_meta_value.count() != 0) { - key = iter->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - } - } - delete iter; - return Status::OK(); -} - -Status RedisHashes::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { +Status Redis::HashesPKPatternMatchDel(const std::string& pattern, int32_t* ret) { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -155,16 +68,16 @@ Status RedisHashes::PKPatternMatchDel(const std::string& pattern, int32_t* ret) int32_t total_delete = 0; Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = 
db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kHashesMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { key = iter->key().ToString(); meta_value = iter->value().ToString(); ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (!parsed_hashes_meta_value.IsStale() && (parsed_hashes_meta_value.count() != 0) && + if (!parsed_hashes_meta_value.IsStale() && (parsed_hashes_meta_value.Count() != 0) && (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0)) { parsed_hashes_meta_value.InitialMetaValue(); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kHashesMetaCF], key, meta_value); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -190,7 +103,7 @@ Status RedisHashes::PKPatternMatchDel(const std::string& pattern, int32_t* ret) return s; } -Status RedisHashes::HDel(const Slice& key, const std::vector& fields, int32_t* ret) { +Status Redis::HDel(const Slice& key, const std::vector& fields, int32_t* ret) { uint32_t statistic = 0; std::vector filtered_fields; std::unordered_set field_set; @@ -208,26 +121,28 @@ Status RedisHashes::HDel(const Slice& key, const std::vector& field std::string meta_value; int32_t del_cnt = 0; - int32_t version = 0; + uint64_t version = 0; ScopeRecordLock l(lock_mgr_, key); ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { *ret = 0; return Status::OK(); } else { std::string data_value; - version = 
parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); for (const auto& field : filtered_fields) { HashesDataKey hashes_data_key(key, version, field); - s = db_->Get(read_options, handles_[1], hashes_data_key.Encode(), &data_value); + s = db_->Get(read_options, handles_[kHashesDataCF], hashes_data_key.Encode(), &data_value); if (s.ok()) { del_cnt++; statistic++; - batch.Delete(handles_[1], hashes_data_key.Encode()); + batch.Delete(handles_[kHashesDataCF], hashes_data_key.Encode()); } else if (s.IsNotFound()) { continue; } else { @@ -239,7 +154,7 @@ Status RedisHashes::HDel(const Slice& key, const std::vector& field return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); } } else if (s.IsNotFound()) { *ret = 0; @@ -248,62 +163,71 @@ Status RedisHashes::HDel(const Slice& key, const std::vector& field return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); return s; } -Status RedisHashes::HExists(const Slice& key, const Slice& field) { +Status Redis::HExists(const Slice& key, const Slice& field) { std::string value; return HGet(key, field, &value); } -Status RedisHashes::HGet(const Slice& key, const Slice& field, std::string* value) { +Status Redis::HGet(const Slice& key, const Slice& field, std::string* value) { std::string meta_value; - int32_t version = 0; + uint64_t version = 0; rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { 
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); HashesDataKey data_key(key, version, field); - s = db_->Get(read_options, handles_[1], data_key.Encode(), value); + s = db_->Get(read_options, handles_[kHashesDataCF], data_key.Encode(), value); + if (s.ok()) { + ParsedBaseDataValue parsed_internal_value(value); + parsed_internal_value.StripSuffix(); + } } } return s; } -Status RedisHashes::HGetall(const Slice& key, std::vector* fvs) { +Status Redis::HGetall(const Slice& key, std::vector* fvs) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_key(key, version, ""); - Slice prefix = hashes_data_key.Encode(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - auto iter = db_->NewIterator(read_options, handles_[1]); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + auto iter = 
db_->NewIterator(read_options, handles_[kHashesDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedHashesDataKey parsed_hashes_data_key(iter->key()); - fvs->push_back({parsed_hashes_data_key.field().ToString(), iter->value().ToString()}); + ParsedBaseDataValue parsed_internal_value(iter->value()); + fvs->push_back({parsed_hashes_data_key.field().ToString(), parsed_internal_value.UserValue().ToString()}); } delete iter; } @@ -311,24 +235,25 @@ Status RedisHashes::HGetall(const Slice& key, std::vector* fvs) { return s; } -Status RedisHashes::HGetallWithTTL(const Slice& key, std::vector* fvs, int64_t* ttl) { +Status Redis::HGetallWithTTL(const Slice& key, std::vector* fvs, int64_t* ttl) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else if (parsed_hashes_meta_value.IsStale()) { return Status::NotFound("Stale"); } else { // ttl - *ttl = parsed_hashes_meta_value.timestamp(); + *ttl = parsed_hashes_meta_value.Etime(); if (*ttl == 0) { *ttl = -1; } else { @@ -337,16 +262,15 @@ Status RedisHashes::HGetallWithTTL(const Slice& key, std::vector* fv *ttl = *ttl - curtime >= 0 ? 
*ttl - curtime : -2; } - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_key(key, version, ""); - Slice prefix = hashes_data_key.Encode(); - auto iter = db_->NewIterator(read_options, handles_[1]); - for (iter->Seek(prefix); - iter->Valid() && iter->key().starts_with(prefix); - iter->Next()) { + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedHashesDataKey parsed_hashes_data_key(iter->key()); - fvs->push_back({parsed_hashes_data_key.field().ToString(), - iter->value().ToString()}); + ParsedBaseDataValue parsed_internal_value(iter->value()); + fvs->push_back({parsed_hashes_data_key.field().ToString(), parsed_internal_value.UserValue().ToString()}); } delete iter; } @@ -354,35 +278,39 @@ Status RedisHashes::HGetallWithTTL(const Slice& key, std::vector* fv return s; } -Status RedisHashes::HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret) { +Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret) { *ret = 0; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - int32_t version = 0; + uint64_t version = 0; uint32_t statistic = 0; std::string old_value; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); char value_buf[32] = {0}; char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { 
version = parsed_hashes_meta_value.UpdateVersion(); - parsed_hashes_meta_value.set_count(1); - parsed_hashes_meta_value.set_timestamp(0); - batch.Put(handles_[0], key, meta_value); + parsed_hashes_meta_value.SetCount(1); + parsed_hashes_meta_value.SetEtime(0); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); HashesDataKey hashes_data_key(key, version, field); Int64ToStr(value_buf, 32, value); - batch.Put(handles_[1], hashes_data_key.Encode(), value_buf); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), value_buf); *ret = value; } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_key(key, version, field); - s = db_->Get(default_read_options_, handles_[1], hashes_data_key.Encode(), &old_value); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &old_value); if (s.ok()) { + ParsedBaseDataValue parsed_internal_value(&old_value); + parsed_internal_value.StripSuffix(); int64_t ival = 0; if (StrToInt64(old_value.data(), old_value.size(), &ival) == 0) { return Status::Corruption("hash value is not an integer"); @@ -392,16 +320,18 @@ Status RedisHashes::HIncrby(const Slice& key, const Slice& field, int64_t value, } *ret = ival + value; Int64ToStr(value_buf, 32, *ret); - batch.Put(handles_[1], hashes_data_key.Encode(), value_buf); + BaseDataValue internal_value(value_buf); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); statistic++; } else if (s.IsNotFound()) { Int64ToStr(value_buf, 32, value); if (!parsed_hashes_meta_value.CheckModifyCount(1)){ return Status::InvalidArgument("hash size overflow"); } + BaseDataValue internal_value(value_buf); parsed_hashes_meta_value.ModifyCount(1); - batch.Put(handles_[0], key, meta_value); - batch.Put(handles_[1], hashes_data_key.Encode(), value_buf); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + 
batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *ret = value; } else { return s; @@ -411,26 +341,27 @@ Status RedisHashes::HIncrby(const Slice& key, const Slice& field, int64_t value, EncodeFixed32(meta_value_buf, 1); HashesMetaValue hashes_meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = hashes_meta_value.UpdateVersion(); - batch.Put(handles_[0], key, hashes_meta_value.Encode()); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); HashesDataKey hashes_data_key(key, version, field); Int64ToStr(value_buf, 32, value); - batch.Put(handles_[1], hashes_data_key.Encode(), value_buf); + BaseDataValue internal_value(value_buf); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *ret = value; } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); return s; } -Status RedisHashes::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value) { +Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value) { new_value->clear(); rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - int32_t version = 0; + uint64_t version = 0; uint32_t statistic = 0; std::string meta_value; std::string old_value_str; @@ -440,26 +371,31 @@ Status RedisHashes::HIncrbyfloat(const Slice& key, const Slice& field, const Sli return Status::Corruption("value is not a vaild float"); } - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || 
parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { version = parsed_hashes_meta_value.UpdateVersion(); - parsed_hashes_meta_value.set_count(1); - parsed_hashes_meta_value.set_timestamp(0); - batch.Put(handles_[0], key, meta_value); + parsed_hashes_meta_value.SetCount(1); + parsed_hashes_meta_value.SetEtime(0); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); HashesDataKey hashes_data_key(key, version, field); LongDoubleToStr(long_double_by, new_value); - batch.Put(handles_[1], hashes_data_key.Encode(), *new_value); + BaseDataValue inter_value(*new_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_key(key, version, field); - s = db_->Get(default_read_options_, handles_[1], hashes_data_key.Encode(), &old_value_str); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &old_value_str); if (s.ok()) { long double total; long double old_value; + ParsedBaseDataValue parsed_internal_value(&old_value_str); + parsed_internal_value.StripSuffix(); if (StrToLongDouble(old_value_str.data(), old_value_str.size(), &old_value) == -1) { return Status::Corruption("value is not a vaild float"); } @@ -468,7 +404,8 @@ Status RedisHashes::HIncrbyfloat(const Slice& key, const Slice& field, const Sli if (LongDoubleToStr(total, new_value) == -1) { return Status::InvalidArgument("Overflow"); } - batch.Put(handles_[1], hashes_data_key.Encode(), *new_value); + BaseDataValue internal_value(*new_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); statistic++; } else if (s.IsNotFound()) { LongDoubleToStr(long_double_by, new_value); @@ -476,8 +413,9 @@ Status RedisHashes::HIncrbyfloat(const Slice& key, const Slice& field, const Sli return 
Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(1); - batch.Put(handles_[0], key, meta_value); - batch.Put(handles_[1], hashes_data_key.Encode(), *new_value); + BaseDataValue internal_value(*new_value); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); } else { return s; } @@ -486,40 +424,44 @@ Status RedisHashes::HIncrbyfloat(const Slice& key, const Slice& field, const Sli EncodeFixed32(meta_value_buf, 1); HashesMetaValue hashes_meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = hashes_meta_value.UpdateVersion(); - batch.Put(handles_[0], key, hashes_meta_value.Encode()); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); HashesDataKey hashes_data_key(key, version, field); LongDoubleToStr(long_double_by, new_value); - batch.Put(handles_[1], hashes_data_key.Encode(), *new_value); + BaseDataValue internal_value(*new_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); return s; } -Status RedisHashes::HKeys(const Slice& key, std::vector* fields) { +Status Redis::HKeys(const Slice& key, std::vector* fields) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) 
{ return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_key(key, version, ""); - Slice prefix = hashes_data_key.Encode(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - auto iter = db_->NewIterator(read_options, handles_[1]); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedHashesDataKey parsed_hashes_data_key(iter->key()); fields->push_back(parsed_hashes_data_key.field().ToString()); @@ -530,19 +472,21 @@ Status RedisHashes::HKeys(const Slice& key, std::vector* fields) { return s; } -Status RedisHashes::HLen(const Slice& key, int32_t* ret) { +Status Redis::HLen(const Slice& key, int32_t* ret) { *ret = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { *ret = 0; return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - *ret = parsed_hashes_meta_value.count(); + *ret = parsed_hashes_meta_value.Count(); } } else if (s.IsNotFound()) { *ret = 0; @@ -550,10 +494,10 @@ Status RedisHashes::HLen(const Slice& key, int32_t* ret) { return s; } -Status RedisHashes::HMGet(const Slice& key, const std::vector& fields, std::vector* vss) { +Status 
Redis::HMGet(const Slice& key, const std::vector& fields, std::vector* vss) { vss->clear(); - int32_t version = 0; + uint64_t version = 0; bool is_stale = false; std::string value; std::string meta_value; @@ -561,20 +505,23 @@ Status RedisHashes::HMGet(const Slice& key, const std::vector& fiel const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if ((is_stale = parsed_hashes_meta_value.IsStale()) || parsed_hashes_meta_value.count() == 0) { + if ((is_stale = parsed_hashes_meta_value.IsStale()) || parsed_hashes_meta_value.Count() == 0) { for (size_t idx = 0; idx < fields.size(); ++idx) { vss->push_back({std::string(), Status::NotFound()}); } return Status::NotFound(is_stale ? 
"Stale" : ""); } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); for (const auto& field : fields) { HashesDataKey hashes_data_key(key, version, field); - s = db_->Get(read_options, handles_[1], hashes_data_key.Encode(), &value); + s = db_->Get(read_options, handles_[kHashesDataCF], hashes_data_key.Encode(), &value); if (s.ok()) { + ParsedBaseDataValue parsed_internal_value(&value); + parsed_internal_value.StripSuffix(); vss->push_back({value, Status::OK()}); } else if (s.IsNotFound()) { vss->push_back({std::string(), Status::NotFound()}); @@ -593,7 +540,7 @@ Status RedisHashes::HMGet(const Slice& key, const std::vector& fiel return s; } -Status RedisHashes::HMSet(const Slice& key, const std::vector& fvs) { +Status Redis::HMSet(const Slice& key, const std::vector& fvs) { uint32_t statistic = 0; std::unordered_set fields; std::vector filtered_fvs; @@ -608,36 +555,40 @@ Status RedisHashes::HMSet(const Slice& key, const std::vector& fvs) rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - int32_t version = 0; + uint64_t version = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { version = parsed_hashes_meta_value.InitialMetaValue(); if (!parsed_hashes_meta_value.check_set_count(static_cast(filtered_fvs.size()))) { return Status::InvalidArgument("hash size overflow"); } - parsed_hashes_meta_value.set_count(static_cast(filtered_fvs.size())); - batch.Put(handles_[0], key, meta_value); + 
parsed_hashes_meta_value.SetCount(static_cast(filtered_fvs.size())); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); for (const auto& fv : filtered_fvs) { HashesDataKey hashes_data_key(key, version, fv.field); - batch.Put(handles_[1], hashes_data_key.Encode(), fv.value); + BaseDataValue inter_value(fv.value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); } } else { int32_t count = 0; std::string data_value; - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); for (const auto& fv : filtered_fvs) { HashesDataKey hashes_data_key(key, version, fv.field); - s = db_->Get(default_read_options_, handles_[1], hashes_data_key.Encode(), &data_value); + BaseDataValue inter_value(fv.value); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &data_value); if (s.ok()) { statistic++; - batch.Put(handles_[1], hashes_data_key.Encode(), fv.value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); } else if (s.IsNotFound()) { count++; - batch.Put(handles_[1], hashes_data_key.Encode(), fv.value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); } else { return s; } @@ -646,52 +597,57 @@ Status RedisHashes::HMSet(const Slice& key, const std::vector& fvs) return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(count); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); } } else if (s.IsNotFound()) { EncodeFixed32(meta_value_buf, filtered_fvs.size()); HashesMetaValue hashes_meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = hashes_meta_value.UpdateVersion(); - batch.Put(handles_[0], key, hashes_meta_value.Encode()); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); for (const auto& fv : filtered_fvs) { HashesDataKey 
hashes_data_key(key, version, fv.field); - batch.Put(handles_[1], hashes_data_key.Encode(), fv.value); + BaseDataValue inter_value(fv.value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); } } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); return s; } -Status RedisHashes::HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res) { +Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res) { rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - int32_t version = 0; + uint64_t version = 0; uint32_t statistic = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { version = parsed_hashes_meta_value.InitialMetaValue(); - parsed_hashes_meta_value.set_count(1); - batch.Put(handles_[0], key, meta_value); + parsed_hashes_meta_value.SetCount(1); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); HashesDataKey data_key(key, version, field); - batch.Put(handles_[1], data_key.Encode(), value); + BaseDataValue internal_value(value); + batch.Put(handles_[kHashesDataCF], data_key.Encode(), internal_value.Encode()); *res = 1; } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); std::string data_value; HashesDataKey hashes_data_key(key, version, field); - s = db_->Get(default_read_options_, handles_[1], 
hashes_data_key.Encode(), &data_value); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &data_value); if (s.ok()) { *res = 0; if (data_value == value.ToString()) { return Status::OK(); } else { - batch.Put(handles_[1], hashes_data_key.Encode(), value); + BaseDataValue internal_value(value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); statistic++; } } else if (s.IsNotFound()) { @@ -699,8 +655,9 @@ Status RedisHashes::HSet(const Slice& key, const Slice& field, const Slice& valu return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(1); - batch.Put(handles_[0], key, meta_value); - batch.Put(handles_[1], hashes_data_key.Encode(), value); + BaseDataValue internal_value(value); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *res = 1; } else { return s; @@ -710,40 +667,44 @@ Status RedisHashes::HSet(const Slice& key, const Slice& field, const Slice& valu EncodeFixed32(meta_value_buf, 1); HashesMetaValue meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = meta_value.UpdateVersion(); - batch.Put(handles_[0], key, meta_value.Encode()); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value.Encode()); HashesDataKey data_key(key, version, field); - batch.Put(handles_[1], data_key.Encode(), value); + BaseDataValue internal_value(value); + batch.Put(handles_[kHashesDataCF], data_key.Encode(), internal_value.Encode()); *res = 1; } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); return s; } -Status RedisHashes::HSetnx(const Slice& key, const Slice& field, const Slice& value, int32_t* ret) { +Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& 
value, int32_t* ret) { rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - int32_t version = 0; + uint64_t version = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + BaseDataValue internal_value(value); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); char meta_value_buf[4] = {0}; if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { version = parsed_hashes_meta_value.InitialMetaValue(); - parsed_hashes_meta_value.set_count(1); - batch.Put(handles_[0], key, meta_value); + parsed_hashes_meta_value.SetCount(1); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); HashesDataKey hashes_data_key(key, version, field); - batch.Put(handles_[1], hashes_data_key.Encode(), value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *ret = 1; } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_key(key, version, field); std::string data_value; - s = db_->Get(default_read_options_, handles_[1], hashes_data_key.Encode(), &data_value); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &data_value); if (s.ok()) { *ret = 0; } else if (s.IsNotFound()) { @@ -751,8 +712,8 @@ Status RedisHashes::HSetnx(const Slice& key, const Slice& field, const Slice& va return Status::InvalidArgument("hash size overflow"); } parsed_hashes_meta_value.ModifyCount(1); - batch.Put(handles_[0], key, meta_value); - batch.Put(handles_[1], hashes_data_key.Encode(), value); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kHashesDataCF], 
hashes_data_key.Encode(), internal_value.Encode()); *ret = 1; } else { return s; @@ -762,9 +723,9 @@ Status RedisHashes::HSetnx(const Slice& key, const Slice& field, const Slice& va EncodeFixed32(meta_value_buf, 1); HashesMetaValue hashes_meta_value(Slice(meta_value_buf, sizeof(int32_t))); version = hashes_meta_value.UpdateVersion(); - batch.Put(handles_[0], key, hashes_meta_value.Encode()); + batch.Put(handles_[kHashesMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); HashesDataKey hashes_data_key(key, version, field); - batch.Put(handles_[1], hashes_data_key.Encode(), value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); *ret = 1; } else { return s; @@ -772,29 +733,32 @@ Status RedisHashes::HSetnx(const Slice& key, const Slice& field, const Slice& va return db_->Write(default_write_options_, &batch); } -Status RedisHashes::HVals(const Slice& key, std::vector* values) { +Status Redis::HVals(const Slice& key, std::vector* values) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - version = parsed_hashes_meta_value.version(); + version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_key(key, version, ""); - Slice prefix = hashes_data_key.Encode(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - auto iter = db_->NewIterator(read_options, 
handles_[1]); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { - values->push_back(iter->value().ToString()); + ParsedBaseDataValue parsed_internal_value(iter->value()); + values->push_back(parsed_internal_value.UserValue().ToString()); } delete iter; } @@ -802,7 +766,7 @@ Status RedisHashes::HVals(const Slice& key, std::vector* values) { return s; } -Status RedisHashes::HStrlen(const Slice& key, const Slice& field, int32_t* len) { +Status Redis::HStrlen(const Slice& key, const Slice& field, int32_t* len) { std::string value; Status s = HGet(key, field, &value); if (s.ok()) { @@ -813,8 +777,8 @@ Status RedisHashes::HStrlen(const Slice& key, const Slice& field, int32_t* len) return s; } -Status RedisHashes::HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, - std::vector* field_values, int64_t* next_cursor) { +Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* field_values, int64_t* next_cursor) { *next_cursor = 0; field_values->clear(); if (cursor < 0) { @@ -830,17 +794,19 @@ Status RedisHashes::HScan(const Slice& key, int64_t cursor, const std::string& p std::string meta_value; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { *next_cursor = 0; return Status::NotFound(); } else { 
std::string sub_field; std::string start_point; - int32_t version = parsed_hashes_meta_value.version(); - s = GetScanStartPoint(key, pattern, cursor, &start_point); + uint64_t version = parsed_hashes_meta_value.Version(); + s = GetScanStartPoint(DataType::kHashes, key, pattern, cursor, &start_point); if (s.IsNotFound()) { cursor = 0; if (isTailWildcard(pattern)) { @@ -853,15 +819,16 @@ Status RedisHashes::HScan(const Slice& key, int64_t cursor, const std::string& p HashesDataKey hashes_data_prefix(key, version, sub_field); HashesDataKey hashes_start_data_key(key, version, start_point); - std::string prefix = hashes_data_prefix.Encode().ToString(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + std::string prefix = hashes_data_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); for (iter->Seek(hashes_start_data_key.Encode()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); iter->Next()) { ParsedHashesDataKey parsed_hashes_data_key(iter->key()); std::string field = parsed_hashes_data_key.field().ToString(); if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { - field_values->push_back({field, iter->value().ToString()}); + ParsedBaseDataValue parsed_internal_value(iter->value()); + field_values->emplace_back(field, parsed_internal_value.UserValue().ToString()); } rest--; } @@ -870,7 +837,7 @@ Status RedisHashes::HScan(const Slice& key, int64_t cursor, const std::string& p *next_cursor = cursor + step_length; ParsedHashesDataKey parsed_hashes_data_key(iter->key()); std::string next_field = parsed_hashes_data_key.field().ToString(); - StoreScanNextPoint(key, pattern, *next_cursor, next_field); + StoreScanNextPoint(DataType::kHashes, key, pattern, *next_cursor, next_field); } else { *next_cursor = 
0; } @@ -883,7 +850,7 @@ Status RedisHashes::HScan(const Slice& key, int64_t cursor, const std::string& p return Status::OK(); } -Status RedisHashes::HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count, +Status Redis::HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count, std::vector* field_values, std::string* next_field) { next_field->clear(); field_values->clear(); @@ -894,25 +861,28 @@ Status RedisHashes::HScanx(const Slice& key, const std::string& start_field, con const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { *next_field = ""; return Status::NotFound(); } else { - int32_t version = parsed_hashes_meta_value.version(); + uint64_t version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_prefix(key, version, Slice()); HashesDataKey hashes_start_data_key(key, version, start_field); - std::string prefix = hashes_data_prefix.Encode().ToString(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + std::string prefix = hashes_data_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); for (iter->Seek(hashes_start_data_key.Encode()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); iter->Next()) { ParsedHashesDataKey 
parsed_hashes_data_key(iter->key()); std::string field = parsed_hashes_data_key.field().ToString(); if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { - field_values->push_back({field, iter->value().ToString()}); + ParsedBaseDataValue parsed_value(iter->value()); + field_values->emplace_back(field, parsed_value.UserValue().ToString()); } rest--; } @@ -932,7 +902,7 @@ Status RedisHashes::HScanx(const Slice& key, const std::string& start_field, con return Status::OK(); } -Status RedisHashes::PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, +Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, int32_t limit, std::vector* field_values, std::string* next_field) { next_field->clear(); @@ -952,18 +922,20 @@ Status RedisHashes::PKHScanRange(const Slice& key, const Slice& field_start, con return Status::InvalidArgument("error in given range"); } - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_hashes_meta_value.version(); + uint64_t version = parsed_hashes_meta_value.Version(); HashesDataKey hashes_data_prefix(key, version, Slice()); HashesDataKey hashes_start_data_key(key, version, field_start); - std::string prefix = hashes_data_prefix.Encode().ToString(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + std::string prefix = hashes_data_prefix.EncodeSeekKey().ToString(); + 
KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); for (iter->Seek(start_no_limit ? prefix : hashes_start_data_key.Encode()); iter->Valid() && remain > 0 && iter->key().starts_with(prefix); iter->Next()) { ParsedHashesDataKey parsed_hashes_data_key(iter->key()); @@ -972,7 +944,8 @@ Status RedisHashes::PKHScanRange(const Slice& key, const Slice& field_start, con break; } if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { - field_values->push_back({field, iter->value().ToString()}); + ParsedBaseDataValue parsed_internal_value(iter->value()); + field_values->push_back({field, parsed_internal_value.UserValue().ToString()}); } remain--; } @@ -991,7 +964,7 @@ Status RedisHashes::PKHScanRange(const Slice& key, const Slice& field_start, con return Status::OK(); } -Status RedisHashes::PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, +Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, int32_t limit, std::vector* field_values, std::string* next_field) { next_field->clear(); @@ -1011,20 +984,22 @@ Status RedisHashes::PKHRScanRange(const Slice& key, const Slice& field_start, co return Status::InvalidArgument("error in given range"); } - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_hashes_meta_value.version(); + uint64_t version = parsed_hashes_meta_value.Version(); 
int32_t start_key_version = start_no_limit ? version + 1 : version; std::string start_key_field = start_no_limit ? "" : field_start.ToString(); HashesDataKey hashes_data_prefix(key, version, Slice()); HashesDataKey hashes_start_data_key(key, start_key_version, start_key_field); - std::string prefix = hashes_data_prefix.Encode().ToString(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + std::string prefix = hashes_data_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); for (iter->SeekForPrev(hashes_start_data_key.Encode().ToString()); iter->Valid() && remain > 0 && iter->key().starts_with(prefix); iter->Prev()) { ParsedHashesDataKey parsed_hashes_data_key(iter->key()); @@ -1033,7 +1008,8 @@ Status RedisHashes::PKHRScanRange(const Slice& key, const Slice& field_start, co break; } if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { - field_values->push_back({field, iter->value().ToString()}); + ParsedBaseDataValue parsed_value(iter->value()); + field_values->push_back({field, parsed_value.UserValue().ToString()}); } remain--; } @@ -1052,288 +1028,117 @@ Status RedisHashes::PKHRScanRange(const Slice& key, const Slice& field_start, co return Status::OK(); } -Status RedisHashes::PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && 
!end_no_limit && (key_start.compare(key_end) > 0)) { - return Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToFirst(); - } else { - it->Seek(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedHashesMetaValue parsed_hashes_meta_value(it->value()); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { - it->Next(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Next(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedHashesMetaValue parsed_hashes_meta_value(it->value()); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { - it->Next(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); -} - -Status RedisHashes::PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) < 0)) { - return Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToLast(); - } else { - it->SeekForPrev(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) >= 0)) { - 
ParsedHashesMetaValue parsed_hashes_meta_value(it->value()); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { - it->Prev(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Prev(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedHashesMetaValue parsed_hashes_meta_value(it->value()); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { - it->Prev(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); -} - -Status RedisHashes::Expire(const Slice& key, int32_t ttl) { +Status Redis::HashesExpire(const Slice& key, int64_t ttl) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } if (ttl > 0) { parsed_hashes_meta_value.SetRelativeTimestamp(ttl); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); } else { parsed_hashes_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); } } return s; } -Status RedisHashes::Del(const Slice& key) { +Status Redis::HashesDel(const Slice& key) { std::string meta_value; ScopeRecordLock 
l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - uint32_t statistic = parsed_hashes_meta_value.count(); + uint32_t statistic = parsed_hashes_meta_value.Count(); parsed_hashes_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); } } return s; } -bool RedisHashes::Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, - int64_t* count, std::string* next_key) { - std::string meta_key; - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - - it->Seek(start_key); - while (it->Valid() && (*count) > 0) { - ParsedHashesMetaValue parsed_meta_value(it->value()); - if (parsed_meta_value.IsStale() || parsed_meta_value.count() == 0) { - it->Next(); - continue; - } else { - meta_key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), meta_key.data(), meta_key.size(), 0) != 0) { - keys->push_back(meta_key); - } - (*count)--; - it->Next(); - } - } - - std::string prefix = isTailWildcard(pattern) ? 
pattern.substr(0, pattern.size() - 1) : ""; - if (it->Valid() && (it->key().compare(prefix) <= 0 || it->key().starts_with(prefix))) { - *next_key = it->key().ToString(); - is_finish = false; - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -bool RedisHashes::PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) { - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - it->Seek(start_key); - while (it->Valid() && (*leftover_visits) > 0) { - ParsedHashesMetaValue parsed_hashes_meta_value(it->value()); - if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.count() == 0) { - it->Next(); - continue; - } else { - if (min_timestamp < parsed_hashes_meta_value.timestamp() && - parsed_hashes_meta_value.timestamp() < max_timestamp) { - keys->push_back(it->key().ToString()); - } - (*leftover_visits)--; - it->Next(); - } - } - - if (it->Valid()) { - is_finish = false; - *next_key = it->key().ToString(); - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -Status RedisHashes::Expireat(const Slice& key, int32_t timestamp) { +Status Redis::HashesExpireat(const Slice& key, int64_t timestamp) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if 
(parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { if (timestamp > 0) { - parsed_hashes_meta_value.set_timestamp(timestamp); + parsed_hashes_meta_value.SetEtime(static_cast(timestamp)); } else { parsed_hashes_meta_value.InitialMetaValue(); } - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); } } return s; } -Status RedisHashes::Persist(const Slice& key) { +Status Redis::HashesPersist(const Slice& key) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t timestamp = parsed_hashes_meta_value.timestamp(); + uint64_t timestamp = parsed_hashes_meta_value.Etime(); if (timestamp == 0) { return Status::NotFound("Not have an associated timeout"); } else { - parsed_hashes_meta_value.set_timestamp(0); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + parsed_hashes_meta_value.SetEtime(0); + s = db_->Put(default_write_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), meta_value); } } } return s; } -Status RedisHashes::TTL(const Slice& key, int64_t* timestamp) { +Status Redis::HashesTTL(const Slice& key, int64_t* timestamp) { std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kHashesMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { 
ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); if (parsed_hashes_meta_value.IsStale()) { *timestamp = -2; return Status::NotFound("Stale"); - } else if (parsed_hashes_meta_value.count() == 0) { + } else if (parsed_hashes_meta_value.Count() == 0) { *timestamp = -2; return Status::NotFound(); } else { - *timestamp = parsed_hashes_meta_value.timestamp(); + *timestamp = parsed_hashes_meta_value.Etime(); if (*timestamp == 0) { *timestamp = -1; } else { @@ -1348,7 +1153,7 @@ Status RedisHashes::TTL(const Slice& key, int64_t* timestamp) { return s; } -void RedisHashes::ScanDatabase() { +void Redis::ScanHashes() { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -1356,31 +1161,31 @@ void RedisHashes::ScanDatabase() { iterator_options.fill_cache = false; auto current_time = static_cast(time(nullptr)); - LOG(INFO) << "***************Hashes Meta Data***************"; - auto meta_iter = db_->NewIterator(iterator_options, handles_[0]); + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " Hashes Meta Data***************"; + auto meta_iter = db_->NewIterator(iterator_options, handles_[kHashesMetaCF]); for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { ParsedHashesMetaValue parsed_hashes_meta_value(meta_iter->value()); int32_t survival_time = 0; - if (parsed_hashes_meta_value.timestamp() != 0) { - survival_time = parsed_hashes_meta_value.timestamp() - current_time > 0 - ? parsed_hashes_meta_value.timestamp() - current_time - : -1; + if (parsed_hashes_meta_value.Etime() != 0) { + survival_time = parsed_hashes_meta_value.Etime() > current_time ? 
parsed_hashes_meta_value.Etime() - current_time : -1; } + ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); LOG(INFO) << fmt::format("[key : {:<30}] [count : {:<10}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", - meta_iter->key().ToString(), parsed_hashes_meta_value.count(), - parsed_hashes_meta_value.timestamp(), parsed_hashes_meta_value.version(), survival_time); + parsed_meta_key.Key().ToString(), parsed_hashes_meta_value.Count(), + parsed_hashes_meta_value.Etime(), parsed_hashes_meta_value.Version(), survival_time); } delete meta_iter; LOG(INFO) << "***************Hashes Field Data***************"; - auto field_iter = db_->NewIterator(iterator_options, handles_[1]); + auto field_iter = db_->NewIterator(iterator_options, handles_[kHashesDataCF]); for (field_iter->SeekToFirst(); field_iter->Valid(); field_iter->Next()) { ParsedHashesDataKey parsed_hashes_data_key(field_iter->key()); + ParsedBaseDataValue parsed_internal_value(field_iter->value()); LOG(INFO) << fmt::format("[key : {:<30}] [field : {:<20}] [value : {:<20}] [version : {}]", - parsed_hashes_data_key.key().ToString(), parsed_hashes_data_key.field().ToString(), - field_iter->value().ToString(), parsed_hashes_data_key.version()); + parsed_hashes_data_key.Key().ToString(), parsed_hashes_data_key.field().ToString(), + parsed_internal_value.UserValue().ToString(), parsed_hashes_data_key.Version()); } delete field_iter; } diff --git a/src/storage/src/redis_hashes.h b/src/storage/src/redis_hashes.h deleted file mode 100644 index 6733748123..0000000000 --- a/src/storage/src/redis_hashes.h +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
- -#ifndef SRC_REDIS_HASHES_H_ -#define SRC_REDIS_HASHES_H_ - -#include -#include -#include - -#include "src/redis.h" - -namespace storage { - -class RedisHashes : public Redis { - public: - RedisHashes(Storage* s, const DataType& type); - ~RedisHashes() override = default; - - // Common Commands - Status Open(const StorageOptions& storage_options, const std::string& db_path) override; - Status CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type = kMetaAndData) override; - Status GetProperty(const std::string& property, uint64_t* out) override; - Status ScanKeyNum(KeyInfo* key_info) override; - Status ScanKeys(const std::string& pattern, std::vector* keys) override; - Status PKPatternMatchDel(const std::string& pattern, int32_t* ret) override; - - // Hashes Commands - Status HDel(const Slice& key, const std::vector& fields, int32_t* ret); - Status HExists(const Slice& key, const Slice& field); - Status HGet(const Slice& key, const Slice& field, std::string* value); - Status HGetall(const Slice& key, std::vector* fvs); - Status HGetallWithTTL(const Slice& key, std::vector* fvs, int64_t* ttl); - Status HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret); - Status HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value); - Status HKeys(const Slice& key, std::vector* fields); - Status HLen(const Slice& key, int32_t* ret); - Status HMGet(const Slice& key, const std::vector& fields, std::vector* vss); - Status HMSet(const Slice& key, const std::vector& fvs); - Status HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res); - Status HSetnx(const Slice& key, const Slice& field, const Slice& value, int32_t* ret); - Status HVals(const Slice& key, std::vector* values); - Status HStrlen(const Slice& key, const Slice& field, int32_t* len); - Status HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, - std::vector* 
field_values, int64_t* next_cursor); - Status HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count, - std::vector* field_values, std::string* next_field); - Status PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, - int32_t limit, std::vector* field_values, std::string* next_field); - Status PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, - int32_t limit, std::vector* field_values, std::string* next_field); - Status PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - Status PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - - // Keys Commands - Status Expire(const Slice& key, int32_t ttl) override; - Status Del(const Slice& key) override; - bool Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, int64_t* count, - std::string* next_key) override; - bool PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) override; - Status Expireat(const Slice& key, int32_t timestamp) override; - Status Persist(const Slice& key) override; - Status TTL(const Slice& key, int64_t* timestamp) override; - - // Iterate all data - void ScanDatabase(); -}; - -} // namespace storage -#endif // SRC_REDIS_HASHES_H_ diff --git a/src/storage/src/redis_lists.cc b/src/storage/src/redis_lists.cc index e2d484b3e4..f15a11b113 100644 --- a/src/storage/src/redis_lists.cc +++ b/src/storage/src/redis_lists.cc @@ -8,90 +8,17 @@ #include #include +#include "pstd/include/pika_codis_slot.h" +#include "src/base_data_value_format.h" #include "src/lists_filter.h" -#include "src/redis_lists.h" +#include "src/redis.h" #include 
"src/scope_record_lock.h" #include "src/scope_snapshot.h" #include "storage/util.h" +#include "src/debug.h" namespace storage { - -const rocksdb::Comparator* ListsDataKeyComparator() { - static ListsDataKeyComparatorImpl ldkc; - return &ldkc; -} - -RedisLists::RedisLists(Storage* const s, const DataType& type) : Redis(s, type) {} - -Status RedisLists::Open(const StorageOptions& storage_options, const std::string& db_path) { - statistics_store_->SetCapacity(storage_options.statistics_max_size); - small_compaction_threshold_ = storage_options.small_compaction_threshold; - small_compaction_duration_threshold_ = storage_options.small_compaction_duration_threshold; - - rocksdb::Options ops(storage_options.options); - Status s = rocksdb::DB::Open(ops, db_path, &db_); - if (s.ok()) { - // Create column family - rocksdb::ColumnFamilyHandle* cf; - rocksdb::ColumnFamilyOptions cfo; - cfo.comparator = ListsDataKeyComparator(); - s = db_->CreateColumnFamily(cfo, "data_cf", &cf); - if (!s.ok()) { - return s; - } - // Close DB - delete cf; - delete db_; - } - - // Open - rocksdb::DBOptions db_ops(storage_options.options); - rocksdb::ColumnFamilyOptions meta_cf_ops(storage_options.options); - rocksdb::ColumnFamilyOptions data_cf_ops(storage_options.options); - meta_cf_ops.compaction_filter_factory = std::make_shared(); - data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_); - data_cf_ops.comparator = ListsDataKeyComparator(); - - // use the bloom filter policy to reduce disk reads - rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); - table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); - rocksdb::BlockBasedTableOptions meta_cf_table_ops(table_ops); - rocksdb::BlockBasedTableOptions data_cf_table_ops(table_ops); - if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { - meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - data_cf_table_ops.block_cache = 
rocksdb::NewLRUCache(storage_options.block_cache_size); - } - meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(meta_cf_table_ops)); - data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(data_cf_table_ops)); - - std::vector column_families; - // Meta CF - column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, meta_cf_ops); - // Data CF - column_families.emplace_back("data_cf", data_cf_ops); - return rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); -} - -Status RedisLists::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, const ColumnFamilyType& type) { - if (type == kMeta || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[0], begin, end); - } - if (type == kData || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[1], begin, end); - } - return Status::OK(); -} - -Status RedisLists::GetProperty(const std::string& property, uint64_t* out) { - std::string value; - db_->GetProperty(handles_[0], property, &value); - *out = std::strtoull(value.c_str(), nullptr, 10); - db_->GetProperty(handles_[1], property, &value); - *out += std::strtoull(value.c_str(), nullptr, 10); - return Status::OK(); -} - -Status RedisLists::ScanKeyNum(KeyInfo* key_info) { +Status Redis::ScanListsKeyNum(KeyInfo* key_info) { uint64_t keys = 0; uint64_t expires = 0; uint64_t ttl_sum = 0; @@ -106,16 +33,16 @@ Status RedisLists::ScanKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kListsMetaCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedListsMetaValue parsed_lists_meta_value(iter->value()); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { + if (parsed_lists_meta_value.IsStale() || 
parsed_lists_meta_value.Count() == 0) { invaild_keys++; } else { keys++; if (!parsed_lists_meta_value.IsPermanentSurvival()) { expires++; - ttl_sum += parsed_lists_meta_value.timestamp() - curtime; + ttl_sum += parsed_lists_meta_value.Etime() - curtime; } } } @@ -128,29 +55,7 @@ Status RedisLists::ScanKeyNum(KeyInfo* key_info) { return Status::OK(); } -Status RedisLists::ScanKeys(const std::string& pattern, std::vector* keys) { - std::string key; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); - for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - ParsedListsMetaValue parsed_lists_meta_value(iter->value()); - if (!parsed_lists_meta_value.IsStale() && parsed_lists_meta_value.count() != 0) { - key = iter->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - } - } - delete iter; - return Status::OK(); -} - -Status RedisLists::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { +Status Redis::ListsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -162,16 +67,16 @@ Status RedisLists::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { int32_t total_delete = 0; Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kListsMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { - key = iter->key().ToString(); + ParsedBaseMetaKey parsed_meta_key(iter->key().ToString()); meta_value = iter->value().ToString(); ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - if 
(!parsed_lists_meta_value.IsStale() && (parsed_lists_meta_value.count() != 0U) && - (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0)) { + if (!parsed_lists_meta_value.IsStale() && (parsed_lists_meta_value.Count() != 0U) && + (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { parsed_lists_meta_value.InitialMetaValue(); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kListsMetaCF], iter->key(), meta_value); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -197,30 +102,32 @@ Status RedisLists::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { return s; } -Status RedisLists::LIndex(const Slice& key, int64_t index, std::string* element) { +Status Redis::LIndex(const Slice& key, int64_t index, std::string* element) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; std::string meta_value; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - int32_t version = parsed_lists_meta_value.version(); + uint64_t version = parsed_lists_meta_value.Version(); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - std::string tmp_element; uint64_t target_index = - index >= 0 ? parsed_lists_meta_value.left_index() + index + 1 : parsed_lists_meta_value.right_index() + index; - if (parsed_lists_meta_value.left_index() < target_index && target_index < parsed_lists_meta_value.right_index()) { + index >= 0 ? 
parsed_lists_meta_value.LeftIndex() + index + 1 : parsed_lists_meta_value.RightIndex() + index; + if (parsed_lists_meta_value.LeftIndex() < target_index && target_index < parsed_lists_meta_value.RightIndex()) { ListsDataKey lists_data_key(key, version, target_index); - s = db_->Get(read_options, handles_[1], lists_data_key.Encode(), &tmp_element); + s = db_->Get(read_options, handles_[kListsDataCF], lists_data_key.Encode(), element); if (s.ok()) { - *element = tmp_element; + ParsedBaseDataValue parsed_value(element); + parsed_value.StripSuffix(); } } else { return Status::NotFound(); @@ -230,29 +137,32 @@ Status RedisLists::LIndex(const Slice& key, int64_t index, std::string* element) return s; } -Status RedisLists::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot, - const std::string& value, int64_t* ret) { +Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot, + const std::string& value, int64_t* ret) { *ret = 0; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { bool find_pivot = false; uint64_t pivot_index = 0; - int32_t version = parsed_lists_meta_value.version(); - uint64_t current_index = parsed_lists_meta_value.left_index() + 1; - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[1]); + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t current_index = parsed_lists_meta_value.LeftIndex() + 1; + 
rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); ListsDataKey start_data_key(key, version, current_index); - for (iter->Seek(start_data_key.Encode()); iter->Valid() && current_index < parsed_lists_meta_value.right_index(); + for (iter->Seek(start_data_key.Encode()); iter->Valid() && current_index < parsed_lists_meta_value.RightIndex(); iter->Next(), current_index++) { - if (iter->value() == Slice(pivot)) { + ParsedBaseDataValue parsed_value(iter->value()); + if (pivot.compare(parsed_value.UserValue().ToString()) == 0) { find_pivot = true; pivot_index = current_index; break; @@ -265,58 +175,63 @@ Status RedisLists::LInsert(const Slice& key, const BeforeOrAfter& before_or_afte } else { uint64_t target_index; std::vector list_nodes; - uint64_t mid_index = parsed_lists_meta_value.left_index() + - (parsed_lists_meta_value.right_index() - parsed_lists_meta_value.left_index()) / 2; + uint64_t mid_index = parsed_lists_meta_value.LeftIndex() + + (parsed_lists_meta_value.RightIndex() - parsed_lists_meta_value.LeftIndex()) / 2; if (pivot_index <= mid_index) { target_index = (before_or_after == Before) ? 
pivot_index - 1 : pivot_index; - current_index = parsed_lists_meta_value.left_index() + 1; - rocksdb::Iterator* first_half_iter = db_->NewIterator(default_read_options_, handles_[1]); + current_index = parsed_lists_meta_value.LeftIndex() + 1; + rocksdb::Iterator* first_half_iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); ListsDataKey start_data_key(key, version, current_index); for (first_half_iter->Seek(start_data_key.Encode()); first_half_iter->Valid() && current_index <= pivot_index; first_half_iter->Next(), current_index++) { + ParsedBaseDataValue parsed_value(first_half_iter->value()); if (current_index == pivot_index) { if (before_or_after == After) { - list_nodes.push_back(first_half_iter->value().ToString()); + list_nodes.push_back(parsed_value.UserValue().ToString()); } break; } - list_nodes.push_back(first_half_iter->value().ToString()); + list_nodes.push_back(parsed_value.UserValue().ToString()); } delete first_half_iter; - current_index = parsed_lists_meta_value.left_index(); + current_index = parsed_lists_meta_value.LeftIndex(); for (const auto& node : list_nodes) { ListsDataKey lists_data_key(key, version, current_index++); - batch.Put(handles_[1], lists_data_key.Encode(), node); + BaseDataValue i_val(node); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } parsed_lists_meta_value.ModifyLeftIndex(1); } else { target_index = (before_or_after == Before) ? 
pivot_index : pivot_index + 1; current_index = pivot_index; - rocksdb::Iterator* after_half_iter = db_->NewIterator(default_read_options_, handles_[1]); + rocksdb::Iterator* after_half_iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); ListsDataKey start_data_key(key, version, current_index); for (after_half_iter->Seek(start_data_key.Encode()); - after_half_iter->Valid() && current_index < parsed_lists_meta_value.right_index(); + after_half_iter->Valid() && current_index < parsed_lists_meta_value.RightIndex(); after_half_iter->Next(), current_index++) { if (current_index == pivot_index && before_or_after == BeforeOrAfter::After) { continue; } - list_nodes.push_back(after_half_iter->value().ToString()); + ParsedBaseDataValue parsed_value(after_half_iter->value()); + list_nodes.push_back(parsed_value.UserValue().ToString()); } delete after_half_iter; current_index = target_index + 1; for (const auto& node : list_nodes) { ListsDataKey lists_data_key(key, version, current_index++); - batch.Put(handles_[1], lists_data_key.Encode(), node); + BaseDataValue i_val(node); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } parsed_lists_meta_value.ModifyRightIndex(1); } parsed_lists_meta_value.ModifyCount(1); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); ListsDataKey lists_target_key(key, version, target_index); - batch.Put(handles_[1], lists_target_key.Encode(), value); - *ret = static_cast(parsed_lists_meta_value.count()); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_target_key.Encode(), i_val.Encode()); + *ret = static_cast(parsed_lists_meta_value.Count()); return db_->Write(default_write_options_, &batch); } } @@ -326,25 +241,27 @@ Status RedisLists::LInsert(const Slice& key, const BeforeOrAfter& before_or_afte return s; } -Status RedisLists::LLen(const Slice& key, uint64_t* len) { +Status Redis::LLen(const Slice& key, uint64_t* 
len) { *len = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - *len = parsed_lists_meta_value.count(); + *len = parsed_lists_meta_value.Count(); return s; } } return s; } -Status RedisLists::LPop(const Slice& key, int64_t count, std::vector* elements) { +Status Redis::LPop(const Slice& key, int64_t count, std::vector* elements) { uint32_t statistic = 0; elements->clear(); @@ -352,30 +269,33 @@ Status RedisLists::LPop(const Slice& key, int64_t count, std::vectorGet(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - auto size = static_cast(parsed_lists_meta_value.count()); - int32_t version = parsed_lists_meta_value.version(); + auto size = static_cast(parsed_lists_meta_value.Count()); + uint64_t version = parsed_lists_meta_value.Version(); int32_t start_index = 0; auto stop_index = static_cast(count<=size?count-1:size-1); int32_t cur_index = 0; - ListsDataKey lists_data_key(key, version, parsed_lists_meta_value.left_index()+1); - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[1]); + ListsDataKey lists_data_key(key, version, 
parsed_lists_meta_value.LeftIndex()+1); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); for (iter->Seek(lists_data_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { statistic++; - elements->push_back(iter->value().ToString()); - batch.Delete(handles_[1],iter->key()); + ParsedBaseDataValue parsed_base_data_value(iter->value()); + elements->push_back(parsed_base_data_value.UserValue().ToString()); + batch.Delete(handles_[kListsDataCF],iter->key()); parsed_lists_meta_value.ModifyCount(-1); parsed_lists_meta_value.ModifyLeftIndex(-1); } - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); delete iter; } } @@ -384,86 +304,93 @@ Status RedisLists::LPop(const Slice& key, int64_t count, std::vector& values, uint64_t* ret) { +Status Redis::LPush(const Slice& key, const std::vector& values, uint64_t* ret) { *ret = 0; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); uint64_t index = 0; - int32_t version = 0; + uint64_t version = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { + if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { version = parsed_lists_meta_value.InitialMetaValue(); } else { - version = parsed_lists_meta_value.version(); + version = parsed_lists_meta_value.Version(); } for (const auto& value : values) { - index = parsed_lists_meta_value.left_index(); + index = parsed_lists_meta_value.LeftIndex(); parsed_lists_meta_value.ModifyLeftIndex(1); parsed_lists_meta_value.ModifyCount(1); ListsDataKey lists_data_key(key, version, index); - 
batch.Put(handles_[1], lists_data_key.Encode(), value); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[0], key, meta_value); - *ret = parsed_lists_meta_value.count(); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + *ret = parsed_lists_meta_value.Count(); } else if (s.IsNotFound()) { char str[8]; EncodeFixed64(str, values.size()); ListsMetaValue lists_meta_value(Slice(str, sizeof(uint64_t))); version = lists_meta_value.UpdateVersion(); for (const auto& value : values) { - index = lists_meta_value.left_index(); + index = lists_meta_value.LeftIndex(); lists_meta_value.ModifyLeftIndex(1); ListsDataKey lists_data_key(key, version, index); - batch.Put(handles_[1], lists_data_key.Encode(), value); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[0], key, lists_meta_value.Encode()); - *ret = lists_meta_value.right_index() - lists_meta_value.left_index() - 1; + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); + *ret = lists_meta_value.RightIndex() - lists_meta_value.LeftIndex() - 1; } else { return s; } return db_->Write(default_write_options_, &batch); } -Status RedisLists::LPushx(const Slice& key, const std::vector& values, uint64_t* len) { +Status Redis::LPushx(const Slice& key, const std::vector& values, uint64_t* len) { *len = 0; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if 
(parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_lists_meta_value.version(); + uint64_t version = parsed_lists_meta_value.Version(); for (const auto& value : values) { - uint64_t index = parsed_lists_meta_value.left_index(); + uint64_t index = parsed_lists_meta_value.LeftIndex(); parsed_lists_meta_value.ModifyCount(1); parsed_lists_meta_value.ModifyLeftIndex(1); ListsDataKey lists_data_key(key, version, index); - batch.Put(handles_[1], lists_data_key.Encode(), value); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[0], key, meta_value); - *len = parsed_lists_meta_value.count(); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + *len = parsed_lists_meta_value.Count(); return db_->Write(default_write_options_, &batch); } } return s; } -Status RedisLists::LRange(const Slice& key, int64_t start, int64_t stop, std::vector* ret) { +Status Redis::LRange(const Slice& key, int64_t start, int64_t stop, std::vector* ret) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; @@ -471,17 +398,18 @@ Status RedisLists::LRange(const Slice& key, int64_t start, int64_t stop, std::ve read_options.snapshot = snapshot; std::string meta_value; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_lists_meta_value.version(); - uint64_t origin_left_index = parsed_lists_meta_value.left_index() + 1; - uint64_t origin_right_index = 
parsed_lists_meta_value.right_index() - 1; + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t origin_left_index = parsed_lists_meta_value.LeftIndex() + 1; + uint64_t origin_right_index = parsed_lists_meta_value.RightIndex() - 1; uint64_t sublist_left_index = start >= 0 ? origin_left_index + start : origin_right_index + start + 1; uint64_t sublist_right_index = stop >= 0 ? origin_left_index + stop : origin_right_index + stop + 1; @@ -495,12 +423,13 @@ Status RedisLists::LRange(const Slice& key, int64_t start, int64_t stop, std::ve if (sublist_right_index > origin_right_index) { sublist_right_index = origin_right_index; } - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kListsDataCF]); uint64_t current_index = sublist_left_index; ListsDataKey start_data_key(key, version, current_index); for (iter->Seek(start_data_key.Encode()); iter->Valid() && current_index <= sublist_right_index; iter->Next(), current_index++) { - ret->push_back(iter->value().ToString()); + ParsedBaseDataValue parsed_value(iter->value()); + ret->push_back(parsed_value.UserValue().ToString()); } delete iter; return Status::OK(); @@ -511,7 +440,7 @@ Status RedisLists::LRange(const Slice& key, int64_t start, int64_t stop, std::ve } } -Status RedisLists::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector* ret, int64_t* ttl) { +Status Redis::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector* ret, int64_t* ttl) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; @@ -519,16 +448,17 @@ Status RedisLists::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, read_options.snapshot = snapshot; std::string meta_value; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { 
ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - if (parsed_lists_meta_value.count() == 0) { + if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); } else { // ttl - *ttl = parsed_lists_meta_value.timestamp(); + *ttl = parsed_lists_meta_value.Etime(); if (*ttl == 0) { *ttl = -1; } else { @@ -537,15 +467,11 @@ Status RedisLists::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, *ttl = *ttl - curtime >= 0 ? *ttl - curtime : -2; } - int32_t version = parsed_lists_meta_value.version(); - uint64_t origin_left_index = parsed_lists_meta_value.left_index() + 1; - uint64_t origin_right_index = parsed_lists_meta_value.right_index() - 1; - uint64_t sublist_left_index = start >= 0 ? - origin_left_index + start : - origin_right_index + start + 1; - uint64_t sublist_right_index = stop >= 0 ? - origin_left_index + stop : - origin_right_index + stop + 1; + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t origin_left_index = parsed_lists_meta_value.LeftIndex() + 1; + uint64_t origin_right_index = parsed_lists_meta_value.RightIndex() - 1; + uint64_t sublist_left_index = start >= 0 ? origin_left_index + start : origin_right_index + start + 1; + uint64_t sublist_right_index = stop >= 0 ? 
origin_left_index + stop : origin_right_index + stop + 1; if (sublist_left_index > sublist_right_index || sublist_left_index > origin_right_index @@ -558,14 +484,14 @@ Status RedisLists::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, if (sublist_right_index > origin_right_index) { sublist_right_index = origin_right_index; } - rocksdb::Iterator* iter = db_->NewIterator(read_options, - handles_[1]); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kListsDataCF]); uint64_t current_index = sublist_left_index; ListsDataKey start_data_key(key, version, current_index); for (iter->Seek(start_data_key.Encode()); iter->Valid() && current_index <= sublist_right_index; iter->Next(), current_index++) { - ret->push_back(iter->value().ToString()); + ParsedBaseDataValue parsed_value(iter->value()); + ret->push_back(parsed_value.UserValue().ToString()); } delete iter; return Status::OK(); @@ -576,35 +502,38 @@ Status RedisLists::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, } } -Status RedisLists::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t* ret) { +Status Redis::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t* ret) { *ret = 0; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { uint64_t current_index; std::vector target_index; std::vector delete_index; uint64_t rest = (count < 0) ? 
-count : count; - int32_t version = parsed_lists_meta_value.version(); - uint64_t start_index = parsed_lists_meta_value.left_index() + 1; - uint64_t stop_index = parsed_lists_meta_value.right_index() - 1; + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t start_index = parsed_lists_meta_value.LeftIndex() + 1; + uint64_t stop_index = parsed_lists_meta_value.RightIndex() - 1; ListsDataKey start_data_key(key, version, start_index); ListsDataKey stop_data_key(key, version, stop_index); if (count >= 0) { current_index = start_index; - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[1]); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); for (iter->Seek(start_data_key.Encode()); iter->Valid() && current_index <= stop_index && ((count == 0) || rest != 0); iter->Next(), current_index++) { - if (iter->value() == value) { + ParsedBaseDataValue parsed_value(iter->value()); + if (value.compare(parsed_value.UserValue()) == 0) { target_index.push_back(current_index); if (count != 0) { rest--; @@ -614,11 +543,12 @@ Status RedisLists::LRem(const Slice& key, int64_t count, const Slice& value, uin delete iter; } else { current_index = stop_index; - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[1]); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); for (iter->Seek(stop_data_key.Encode()); iter->Valid() && current_index >= start_index && ((count == 0) || rest != 0); iter->Prev(), current_index--) { - if (iter->value() == value) { + ParsedBaseDataValue parsed_value(iter->value()); + if (value.compare(parsed_value.UserValue()) == 0) { target_index.push_back(current_index); if (count != 0) { rest--; @@ -640,18 +570,19 @@ Status RedisLists::LRem(const Slice& key, int64_t count, const Slice& value, uin uint64_t left = sublist_right_index; current_index = sublist_right_index; ListsDataKey sublist_right_key(key, version, 
sublist_right_index); - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[1]); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); for (iter->Seek(sublist_right_key.Encode()); iter->Valid() && current_index >= start_index; iter->Prev(), current_index--) { - if ((iter->value() == value) && rest > 0) { + ParsedBaseDataValue parsed_value(iter->value()); + if (value.compare(parsed_value.UserValue()) == 0 && rest > 0) { rest--; } else { ListsDataKey lists_data_key(key, version, left--); - batch.Put(handles_[1], lists_data_key.Encode(), iter->value()); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), iter->value()); } } delete iter; - uint64_t left_index = parsed_lists_meta_value.left_index(); + uint64_t left_index = parsed_lists_meta_value.LeftIndex(); for (uint64_t idx = 0; idx < target_index.size(); ++idx) { delete_index.push_back(left_index + idx + 1); } @@ -660,28 +591,29 @@ Status RedisLists::LRem(const Slice& key, int64_t count, const Slice& value, uin uint64_t right = sublist_left_index; current_index = sublist_left_index; ListsDataKey sublist_left_key(key, version, sublist_left_index); - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[1]); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); for (iter->Seek(sublist_left_key.Encode()); iter->Valid() && current_index <= stop_index; iter->Next(), current_index++) { - if ((iter->value() == value) && rest > 0) { + ParsedBaseDataValue parsed_value(iter->value()); + if ((value.compare(parsed_value.UserValue()) == 0) && rest > 0) { rest--; } else { ListsDataKey lists_data_key(key, version, right++); - batch.Put(handles_[1], lists_data_key.Encode(), iter->value()); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), iter->value()); } } delete iter; - uint64_t right_index = parsed_lists_meta_value.right_index(); + uint64_t right_index = 
parsed_lists_meta_value.RightIndex(); for (uint64_t idx = 0; idx < target_index.size(); ++idx) { delete_index.push_back(right_index - idx - 1); } parsed_lists_meta_value.ModifyRightIndex(-target_index.size()); } parsed_lists_meta_value.ModifyCount(-target_index.size()); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); for (const auto& idx : delete_index) { ListsDataKey lists_data_key(key, version, idx); - batch.Delete(handles_[1], lists_data_key.Encode()); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); } *ret = target_index.size(); return db_->Write(default_write_options_, &batch); @@ -693,59 +625,64 @@ Status RedisLists::LRem(const Slice& key, int64_t count, const Slice& value, uin return s; } -Status RedisLists::LSet(const Slice& key, int64_t index, const Slice& value) { +Status Redis::LSet(const Slice& key, int64_t index, const Slice& value) { uint32_t statistic = 0; ScopeRecordLock l(lock_mgr_, key); std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_lists_meta_value.version(); + uint64_t version = parsed_lists_meta_value.Version(); uint64_t target_index = - index >= 0 ? parsed_lists_meta_value.left_index() + index + 1 : parsed_lists_meta_value.right_index() + index; - if (target_index <= parsed_lists_meta_value.left_index() || - target_index >= parsed_lists_meta_value.right_index()) { + index >= 0 ? 
parsed_lists_meta_value.LeftIndex() + index + 1 : parsed_lists_meta_value.RightIndex() + index; + if (target_index <= parsed_lists_meta_value.LeftIndex() || + target_index >= parsed_lists_meta_value.RightIndex()) { return Status::Corruption("index out of range"); } ListsDataKey lists_data_key(key, version, target_index); - s = db_->Put(default_write_options_, handles_[1], lists_data_key.Encode(), value); + BaseDataValue i_val(value); + s = db_->Put(default_write_options_, handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); statistic++; - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); return s; } } return s; } -Status RedisLists::LTrim(const Slice& key, int64_t start, int64_t stop) { +Status Redis::LTrim(const Slice& key, int64_t start, int64_t stop) { rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); uint32_t statistic = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - int32_t version = parsed_lists_meta_value.version(); + uint64_t version = parsed_lists_meta_value.Version(); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - uint64_t origin_left_index = parsed_lists_meta_value.left_index() + 1; - uint64_t origin_right_index = parsed_lists_meta_value.right_index() - 1; + uint64_t origin_left_index = parsed_lists_meta_value.LeftIndex() + 1; + uint64_t origin_right_index = parsed_lists_meta_value.RightIndex() - 1; uint64_t sublist_left_index = start >= 0 ? 
origin_left_index + start : origin_right_index + start + 1; uint64_t sublist_right_index = stop >= 0 ? origin_left_index + stop : origin_right_index + stop + 1; if (sublist_left_index > sublist_right_index || sublist_left_index > origin_right_index || sublist_right_index < origin_left_index) { parsed_lists_meta_value.InitialMetaValue(); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); } else { if (sublist_left_index < origin_left_index) { sublist_left_index = origin_left_index; @@ -760,16 +697,16 @@ Status RedisLists::LTrim(const Slice& key, int64_t start, int64_t stop) { parsed_lists_meta_value.ModifyLeftIndex(-(sublist_left_index - origin_left_index)); parsed_lists_meta_value.ModifyRightIndex(-(origin_right_index - sublist_right_index)); parsed_lists_meta_value.ModifyCount(-delete_node_num); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); for (uint64_t idx = origin_left_index; idx < sublist_left_index; ++idx) { statistic++; ListsDataKey lists_data_key(key, version, idx); - batch.Delete(handles_[1], lists_data_key.Encode()); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); } for (uint64_t idx = origin_right_index; idx > sublist_right_index; --idx) { statistic++; ListsDataKey lists_data_key(key, version, idx); - batch.Delete(handles_[1], lists_data_key.Encode()); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); } } } @@ -777,11 +714,11 @@ Status RedisLists::LTrim(const Slice& key, int64_t start, int64_t stop) { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); return s; } -Status RedisLists::RPop(const Slice& key, int64_t count, std::vector* elements) { +Status Redis::RPop(const Slice& key, int64_t count, std::vector* elements) { uint32_t statistic = 0; 
elements->clear(); @@ -789,30 +726,33 @@ Status RedisLists::RPop(const Slice& key, int64_t count, std::vectorGet(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - auto size = static_cast(parsed_lists_meta_value.count()); - int32_t version = parsed_lists_meta_value.version(); + auto size = static_cast(parsed_lists_meta_value.Count()); + uint64_t version = parsed_lists_meta_value.Version(); int32_t start_index = 0; auto stop_index = static_cast(count<=size?count-1:size-1); int32_t cur_index = 0; - ListsDataKey lists_data_key(key, version, parsed_lists_meta_value.right_index()-1); - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[1]); + ListsDataKey lists_data_key(key, version, parsed_lists_meta_value.RightIndex()-1); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); for (iter->SeekForPrev(lists_data_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Prev(), ++cur_index) { statistic++; - elements->push_back(iter->value().ToString()); - batch.Delete(handles_[1],iter->key()); + ParsedBaseDataValue parsed_value(iter->value()); + elements->push_back(parsed_value.UserValue().ToString()); + batch.Delete(handles_[kListsDataCF],iter->key()); parsed_lists_meta_value.ModifyCount(-1); parsed_lists_meta_value.ModifyRightIndex(-1); } - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); delete iter; } } @@ -821,12 +761,12 @@ Status RedisLists::RPop(const Slice& key, int64_t count, std::vectorclear(); 
uint32_t statistic = 0; Status s; @@ -834,34 +774,37 @@ Status RedisLists::RPoplpush(const Slice& source, const Slice& destination, std: MultiScopeRecordLock l(lock_mgr_, {source.ToString(), destination.ToString()}); if (source.compare(destination) == 0) { std::string meta_value; - s = db_->Get(default_read_options_, handles_[0], source, &meta_value); + BaseMetaKey base_source(source); + s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_source.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { std::string target; - int32_t version = parsed_lists_meta_value.version(); - uint64_t last_node_index = parsed_lists_meta_value.right_index() - 1; + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t last_node_index = parsed_lists_meta_value.RightIndex() - 1; ListsDataKey lists_data_key(source, version, last_node_index); - s = db_->Get(default_read_options_, handles_[1], lists_data_key.Encode(), &target); + s = db_->Get(default_read_options_, handles_[kListsDataCF], lists_data_key.Encode(), &target); if (s.ok()) { *element = target; - if (parsed_lists_meta_value.count() == 1) { + ParsedBaseDataValue parsed_value(element); + parsed_value.StripSuffix(); + if (parsed_lists_meta_value.Count() == 1) { return Status::OK(); } else { - uint64_t target_index = parsed_lists_meta_value.left_index(); + uint64_t target_index = parsed_lists_meta_value.LeftIndex(); ListsDataKey lists_target_key(source, version, target_index); - batch.Delete(handles_[1], lists_data_key.Encode()); - batch.Put(handles_[1], lists_target_key.Encode(), target); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); + batch.Put(handles_[kListsDataCF], lists_target_key.Encode(), target); statistic++; 
parsed_lists_meta_value.ModifyRightIndex(-1); parsed_lists_meta_value.ModifyLeftIndex(1); - batch.Put(handles_[0], source, meta_value); + batch.Put(handles_[kListsMetaCF], base_source.Encode(), meta_value); s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(source.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kLists, source.ToString(), statistic); return s; } } else { @@ -873,27 +816,28 @@ Status RedisLists::RPoplpush(const Slice& source, const Slice& destination, std: } } - int32_t version; + uint64_t version; std::string target; std::string source_meta_value; - s = db_->Get(default_read_options_, handles_[0], source, &source_meta_value); + BaseMetaKey base_source(source); + s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_source.Encode(), &source_meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&source_meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - version = parsed_lists_meta_value.version(); - uint64_t last_node_index = parsed_lists_meta_value.right_index() - 1; + version = parsed_lists_meta_value.Version(); + uint64_t last_node_index = parsed_lists_meta_value.RightIndex() - 1; ListsDataKey lists_data_key(source, version, last_node_index); - s = db_->Get(default_read_options_, handles_[1], lists_data_key.Encode(), &target); + s = db_->Get(default_read_options_, handles_[kListsDataCF], lists_data_key.Encode(), &target); if (s.ok()) { - batch.Delete(handles_[1], lists_data_key.Encode()); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); statistic++; parsed_lists_meta_value.ModifyCount(-1); parsed_lists_meta_value.ModifyRightIndex(-1); - batch.Put(handles_[0], source, source_meta_value); + batch.Put(handles_[kListsMetaCF], base_source.Encode(), source_meta_value); } else { return s; } 
@@ -903,396 +847,235 @@ Status RedisLists::RPoplpush(const Slice& source, const Slice& destination, std: } std::string destination_meta_value; - s = db_->Get(default_read_options_, handles_[0], destination, &destination_meta_value); + BaseMetaKey base_destination(destination); + s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_destination.Encode(), &destination_meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&destination_meta_value); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { + if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { version = parsed_lists_meta_value.InitialMetaValue(); } else { - version = parsed_lists_meta_value.version(); + version = parsed_lists_meta_value.Version(); } - uint64_t target_index = parsed_lists_meta_value.left_index(); + uint64_t target_index = parsed_lists_meta_value.LeftIndex(); ListsDataKey lists_data_key(destination, version, target_index); - batch.Put(handles_[1], lists_data_key.Encode(), target); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), target); parsed_lists_meta_value.ModifyCount(1); parsed_lists_meta_value.ModifyLeftIndex(1); - batch.Put(handles_[0], destination, destination_meta_value); + batch.Put(handles_[kListsMetaCF], base_destination.Encode(), destination_meta_value); } else if (s.IsNotFound()) { char str[8]; EncodeFixed64(str, 1); ListsMetaValue lists_meta_value(Slice(str, sizeof(uint64_t))); version = lists_meta_value.UpdateVersion(); - uint64_t target_index = lists_meta_value.left_index(); + uint64_t target_index = lists_meta_value.LeftIndex(); ListsDataKey lists_data_key(destination, version, target_index); - batch.Put(handles_[1], lists_data_key.Encode(), target); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), target); lists_meta_value.ModifyLeftIndex(1); - batch.Put(handles_[0], destination, lists_meta_value.Encode()); + batch.Put(handles_[kListsMetaCF], 
base_destination.Encode(), lists_meta_value.Encode()); } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(source.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kLists, source.ToString(), statistic); if (s.ok()) { + ParsedBaseDataValue parsed_value(&target); + parsed_value.StripSuffix(); *element = target; } return s; } -Status RedisLists::RPush(const Slice& key, const std::vector& values, uint64_t* ret) { +Status Redis::RPush(const Slice& key, const std::vector& values, uint64_t* ret) { *ret = 0; rocksdb::WriteBatch batch; uint64_t index = 0; - int32_t version = 0; + uint64_t version = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { + if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { version = parsed_lists_meta_value.InitialMetaValue(); } else { - version = parsed_lists_meta_value.version(); + version = parsed_lists_meta_value.Version(); } for (const auto& value : values) { - index = parsed_lists_meta_value.right_index(); + index = parsed_lists_meta_value.RightIndex(); parsed_lists_meta_value.ModifyRightIndex(1); parsed_lists_meta_value.ModifyCount(1); ListsDataKey lists_data_key(key, version, index); - batch.Put(handles_[1], lists_data_key.Encode(), value); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[0], key, meta_value); - *ret = parsed_lists_meta_value.count(); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + *ret = parsed_lists_meta_value.Count(); } else if (s.IsNotFound()) { char str[8]; EncodeFixed64(str, 
values.size()); ListsMetaValue lists_meta_value(Slice(str, sizeof(uint64_t))); version = lists_meta_value.UpdateVersion(); for (const auto& value : values) { - index = lists_meta_value.right_index(); + index = lists_meta_value.RightIndex(); lists_meta_value.ModifyRightIndex(1); ListsDataKey lists_data_key(key, version, index); - batch.Put(handles_[1], lists_data_key.Encode(), value); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[0], key, lists_meta_value.Encode()); - *ret = lists_meta_value.right_index() - lists_meta_value.left_index() - 1; + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); + *ret = lists_meta_value.RightIndex() - lists_meta_value.LeftIndex() - 1; } else { return s; } return db_->Write(default_write_options_, &batch); } -Status RedisLists::RPushx(const Slice& key, const std::vector& values, uint64_t* len) { +Status Redis::RPushx(const Slice& key, const std::vector& values, uint64_t* len) { *len = 0; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_lists_meta_value.version(); + uint64_t version = parsed_lists_meta_value.Version(); for (const auto& value : values) { - uint64_t index = parsed_lists_meta_value.right_index(); + uint64_t index = parsed_lists_meta_value.RightIndex(); parsed_lists_meta_value.ModifyCount(1); parsed_lists_meta_value.ModifyRightIndex(1); 
ListsDataKey lists_data_key(key, version, index); - batch.Put(handles_[1], lists_data_key.Encode(), value); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); } - batch.Put(handles_[0], key, meta_value); - *len = parsed_lists_meta_value.count(); + batch.Put(handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + *len = parsed_lists_meta_value.Count(); return db_->Write(default_write_options_, &batch); } } return s; } -Status RedisLists::PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) > 0)) { - return Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToFirst(); - } else { - it->Seek(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedListsMetaValue parsed_lists_meta_value(it->value()); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { - it->Next(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Next(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedListsMetaValue parsed_lists_meta_value(it->value()); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { - it->Next(); - } else { - 
*next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); -} - -Status RedisLists::PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) < 0)) { - return Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToLast(); - } else { - it->SeekForPrev(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedListsMetaValue parsed_lists_meta_value(it->value()); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { - it->Prev(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Prev(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedListsMetaValue parsed_lists_meta_value(it->value()); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { - it->Prev(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); -} - -Status RedisLists::Expire(const Slice& key, int32_t ttl) { +Status Redis::ListsExpire(const Slice& key, int64_t ttl) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + 
Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } if (ttl > 0) { parsed_lists_meta_value.SetRelativeTimestamp(ttl); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + s = db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); } else { parsed_lists_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + s = db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); } } return s; } -Status RedisLists::Del(const Slice& key) { +Status Redis::ListsDel(const Slice& key) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - uint32_t statistic = parsed_lists_meta_value.count(); + uint64_t statistic = parsed_lists_meta_value.Count(); parsed_lists_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + s = db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); } } return s; } 
-bool RedisLists::Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, - int64_t* count, std::string* next_key) { - std::string meta_key; - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - - it->Seek(start_key); - while (it->Valid() && (*count) > 0) { - ParsedListsMetaValue parsed_lists_meta_value(it->value()); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { - it->Next(); - continue; - } else { - meta_key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), meta_key.data(), meta_key.size(), 0) != 0) { - keys->push_back(meta_key); - } - (*count)--; - it->Next(); - } - } - - std::string prefix = isTailWildcard(pattern) ? pattern.substr(0, pattern.size() - 1) : ""; - if (it->Valid() && (it->key().compare(prefix) <= 0 || it->key().starts_with(prefix))) { - *next_key = it->key().ToString(); - is_finish = false; - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -bool RedisLists::PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) { - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - it->Seek(start_key); - while (it->Valid() && (*leftover_visits) > 0) { - ParsedListsMetaValue parsed_lists_meta_value(it->value()); - if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.count() == 0) { - it->Next(); - continue; - } else { - if (min_timestamp < 
parsed_lists_meta_value.timestamp() && parsed_lists_meta_value.timestamp() < max_timestamp) { - keys->push_back(it->key().ToString()); - } - (*leftover_visits)--; - it->Next(); - } - } - - if (it->Valid()) { - is_finish = false; - *next_key = it->key().ToString(); - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -Status RedisLists::Expireat(const Slice& key, int32_t timestamp) { +Status Redis::ListsExpireat(const Slice& key, int64_t timestamp) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { if (timestamp > 0) { - parsed_lists_meta_value.set_timestamp(timestamp); + parsed_lists_meta_value.SetEtime(static_cast(timestamp)); } else { parsed_lists_meta_value.InitialMetaValue(); } - return db_->Put(default_write_options_, handles_[0], key, meta_value); + return db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); } } return s; } -Status RedisLists::Persist(const Slice& key) { +Status Redis::ListsPersist(const Slice& key) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } 
else if (parsed_lists_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t timestamp = parsed_lists_meta_value.timestamp(); + uint64_t timestamp = parsed_lists_meta_value.Etime(); if (timestamp == 0) { return Status::NotFound("Not have an associated timeout"); } else { - parsed_lists_meta_value.set_timestamp(0); - return db_->Put(default_write_options_, handles_[0], key, meta_value); + parsed_lists_meta_value.SetEtime(0); + return db_->Put(default_write_options_, handles_[kListsMetaCF], base_meta_key.Encode(), meta_value); } } } return s; } -Status RedisLists::TTL(const Slice& key, int64_t* timestamp) { +Status Redis::ListsTTL(const Slice& key, int64_t* timestamp) { std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kListsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedListsMetaValue parsed_lists_meta_value(&meta_value); if (parsed_lists_meta_value.IsStale()) { *timestamp = -2; return Status::NotFound("Stale"); - } else if (parsed_lists_meta_value.count() == 0) { + } else if (parsed_lists_meta_value.Count() == 0) { *timestamp = -2; return Status::NotFound(); } else { - *timestamp = parsed_lists_meta_value.timestamp(); + *timestamp = parsed_lists_meta_value.Etime(); if (*timestamp == 0) { *timestamp = -1; } else { @@ -1307,7 +1090,7 @@ Status RedisLists::TTL(const Slice& key, int64_t* timestamp) { return s; } -void RedisLists::ScanDatabase() { +void Redis::ScanLists() { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -1315,34 +1098,36 @@ void RedisLists::ScanDatabase() { iterator_options.fill_cache = false; auto current_time = static_cast(time(nullptr)); - LOG(INFO) << "***************List Meta Data***************"; - auto meta_iter = db_->NewIterator(iterator_options, handles_[0]); + LOG(INFO) << "*************** " << "rocksdb 
instance: " << index_ << " List Meta ***************"; + auto meta_iter = db_->NewIterator(iterator_options, handles_[kListsMetaCF]); for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { ParsedListsMetaValue parsed_lists_meta_value(meta_iter->value()); + ParsedBaseMetaKey parsed_meta_key(meta_iter->value()); int32_t survival_time = 0; - if (parsed_lists_meta_value.timestamp() != 0) { - survival_time = parsed_lists_meta_value.timestamp() - current_time > 0 - ? parsed_lists_meta_value.timestamp() - current_time + if (parsed_lists_meta_value.Etime() != 0) { + survival_time = parsed_lists_meta_value.Etime() - current_time > 0 + ? parsed_lists_meta_value.Etime() - current_time : -1; } LOG(INFO) << fmt::format( "[key : {:<30}] [count : {:<10}] [left index : {:<10}] [right index : {:<10}] [timestamp : {:<10}] [version : " "{}] [survival_time : {}]", - meta_iter->key().ToString(), parsed_lists_meta_value.count(), parsed_lists_meta_value.left_index(), - parsed_lists_meta_value.right_index(), parsed_lists_meta_value.timestamp(), parsed_lists_meta_value.version(), + parsed_meta_key.Key().ToString(), parsed_lists_meta_value.Count(), parsed_lists_meta_value.LeftIndex(), + parsed_lists_meta_value.RightIndex(), parsed_lists_meta_value.Etime(), parsed_lists_meta_value.Version(), survival_time); } delete meta_iter; - LOG(INFO) << "***************List Node Data***************"; - auto data_iter = db_->NewIterator(iterator_options, handles_[1]); + LOG(INFO) << "*************** " << "rocksdb instance: " << index_ << " List Data***************"; + auto data_iter = db_->NewIterator(iterator_options, handles_[kListsDataCF]); for (data_iter->SeekToFirst(); data_iter->Valid(); data_iter->Next()) { ParsedListsDataKey parsed_lists_data_key(data_iter->key()); + ParsedBaseDataValue parsed_value(data_iter->value()); LOG(INFO) << fmt::format("[key : {:<30}] [index : {:<10}] [data : {:<20}] [version : {}]", parsed_lists_data_key.key().ToString(), parsed_lists_data_key.index(), 
- data_iter->value().ToString(), parsed_lists_data_key.version()); + parsed_value.UserValue().ToString(), parsed_lists_data_key.Version()); } delete data_iter; } diff --git a/src/storage/src/redis_lists.h b/src/storage/src/redis_lists.h deleted file mode 100644 index 9f23eee375..0000000000 --- a/src/storage/src/redis_lists.h +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef SRC_REDIS_LISTS_H_ -#define SRC_REDIS_LISTS_H_ - -#include -#include -#include - -#include "src/custom_comparator.h" -#include "src/redis.h" - -namespace storage { - -class RedisLists : public Redis { - public: - RedisLists(Storage* s, const DataType& type); - ~RedisLists() override = default; - - // Common commands - Status Open(const StorageOptions& storage_options, const std::string& db_path) override; - Status CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type = kMetaAndData) override; - Status GetProperty(const std::string& property, uint64_t* out) override; - Status ScanKeyNum(KeyInfo* key_info) override; - Status ScanKeys(const std::string& pattern, std::vector* keys) override; - Status PKPatternMatchDel(const std::string& pattern, int32_t* ret) override; - - // Lists commands; - Status LIndex(const Slice& key, int64_t index, std::string* element); - Status LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot, - const std::string& value, int64_t* ret); - Status LLen(const Slice& key, uint64_t* len); - Status LPop(const Slice& key, int64_t count, std::vector* elements); - Status LPush(const Slice& key, const std::vector& values, uint64_t* ret); - Status LPushx(const Slice& key, const std::vector& values, uint64_t* len); - 
Status LRange(const Slice& key, int64_t start, int64_t stop, std::vector* ret); - Status LRem(const Slice& key, int64_t count, const Slice& value, uint64_t* ret); - Status LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector* ret, int64_t* ttl); - Status LSet(const Slice& key, int64_t index, const Slice& value); - Status LTrim(const Slice& key, int64_t start, int64_t stop); - Status RPop(const Slice& key, int64_t count, std::vector* elements); - Status RPoplpush(const Slice& source, const Slice& destination, std::string* element); - Status RPush(const Slice& key, const std::vector& values, uint64_t* ret); - Status RPushx(const Slice& key, const std::vector& values, uint64_t* len); - Status PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - Status PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - - // Keys Commands - Status Expire(const Slice& key, int32_t ttl) override; - Status Del(const Slice& key) override; - bool Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, int64_t* count, - std::string* next_key) override; - bool PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) override; - Status Expireat(const Slice& key, int32_t timestamp) override; - Status Persist(const Slice& key) override; - Status TTL(const Slice& key, int64_t* timestamp) override; - - // Iterate all data - void ScanDatabase(); -}; - -} // namespace storage -#endif // SRC_REDIS_LISTS_H_ diff --git a/src/storage/src/redis_sets.cc b/src/storage/src/redis_sets.cc index f76217eb32..38c556bec7 100644 --- a/src/storage/src/redis_sets.cc +++ b/src/storage/src/redis_sets.cc @@ -3,7 +3,7 @@ // LICENSE file in the root directory of this source tree. 
An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "src/redis_sets.h" +#include "src/redis.h" #include #include @@ -14,84 +14,15 @@ #include #include "src/base_filter.h" -#include "src/scope_record_lock.h" #include "src/scope_snapshot.h" +#include "src/scope_record_lock.h" +#include "src/base_data_value_format.h" +#include "pstd/include/env.h" +#include "pstd/include/pika_codis_slot.h" #include "storage/util.h" namespace storage { - -RedisSets::RedisSets(Storage* const s, const DataType& type) : Redis(s, type) { -} - -RedisSets::~RedisSets() = default; - -rocksdb::Status RedisSets::Open(const StorageOptions& storage_options, const std::string& db_path) { - statistics_store_->SetCapacity(storage_options.statistics_max_size); - small_compaction_threshold_ = storage_options.small_compaction_threshold; - small_compaction_duration_threshold_ = storage_options.small_compaction_duration_threshold; - - rocksdb::Options ops(storage_options.options); - rocksdb::Status s = rocksdb::DB::Open(ops, db_path, &db_); - if (s.ok()) { - // create column family - rocksdb::ColumnFamilyHandle* cf; - rocksdb::ColumnFamilyOptions cfo; - s = db_->CreateColumnFamily(cfo, "member_cf", &cf); - if (!s.ok()) { - return s; - } - // close DB - delete cf; - delete db_; - } - - // Open - rocksdb::DBOptions db_ops(storage_options.options); - rocksdb::ColumnFamilyOptions meta_cf_ops(storage_options.options); - rocksdb::ColumnFamilyOptions member_cf_ops(storage_options.options); - meta_cf_ops.compaction_filter_factory = std::make_shared(); - member_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_); - - // use the bloom filter policy to reduce disk reads - rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); - table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); - rocksdb::BlockBasedTableOptions meta_cf_table_ops(table_ops); - rocksdb::BlockBasedTableOptions member_cf_table_ops(table_ops); 
- if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { - meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - member_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - } - meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(meta_cf_table_ops)); - member_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(member_cf_table_ops)); - - std::vector column_families; - // Meta CF - column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, meta_cf_ops); - // Member CF - column_families.emplace_back("member_cf", member_cf_ops); - return rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); -} - -rocksdb::Status RedisSets::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, const ColumnFamilyType& type) { - if (type == kMeta || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[0], begin, end); - } - if (type == kData || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[1], begin, end); - } - return rocksdb::Status::OK(); -} - -rocksdb::Status RedisSets::GetProperty(const std::string& property, uint64_t* out) { - std::string value; - db_->GetProperty(handles_[0], property, &value); - *out = std::strtoull(value.c_str(), nullptr, 10); - db_->GetProperty(handles_[1], property, &value); - *out += std::strtoull(value.c_str(), nullptr, 10); - return rocksdb::Status::OK(); -} - -rocksdb::Status RedisSets::ScanKeyNum(KeyInfo* key_info) { +rocksdb::Status Redis::ScanSetsKeyNum(KeyInfo* key_info) { uint64_t keys = 0; uint64_t expires = 0; uint64_t ttl_sum = 0; @@ -106,16 +37,16 @@ rocksdb::Status RedisSets::ScanKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, 
handles_[kSetsMetaCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedSetsMetaValue parsed_sets_meta_value(iter->value()); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { invaild_keys++; } else { keys++; if (!parsed_sets_meta_value.IsPermanentSurvival()) { expires++; - ttl_sum += parsed_sets_meta_value.timestamp() - curtime; + ttl_sum += parsed_sets_meta_value.Etime() - curtime; } } } @@ -128,29 +59,7 @@ rocksdb::Status RedisSets::ScanKeyNum(KeyInfo* key_info) { return rocksdb::Status::OK(); } -rocksdb::Status RedisSets::ScanKeys(const std::string& pattern, std::vector* keys) { - std::string key; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); - for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - ParsedSetsMetaValue parsed_sets_meta_value(iter->value()); - if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.count() != 0) { - key = iter->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - } - } - delete iter; - return rocksdb::Status::OK(); -} - -rocksdb::Status RedisSets::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { +rocksdb::Status Redis::SetsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -162,16 +71,16 @@ rocksdb::Status RedisSets::PKPatternMatchDel(const std::string& pattern, int32_t int32_t total_delete = 0; rocksdb::Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = 
db_->NewIterator(iterator_options, handles_[kSetsMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { - key = iter->key().ToString(); + ParsedBaseMetaKey parsed_meta_key(iter->key()); meta_value = iter->value().ToString(); ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (!parsed_sets_meta_value.IsStale() && (parsed_sets_meta_value.count() != 0) && - (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0)) { + if (!parsed_sets_meta_value.IsStale() && (parsed_sets_meta_value.Count() != 0) && + (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { parsed_sets_meta_value.InitialMetaValue(); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kSetsMetaCF], iter->key(), meta_value); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -197,7 +106,7 @@ rocksdb::Status RedisSets::PKPatternMatchDel(const std::string& pattern, int32_t return s; } -rocksdb::Status RedisSets::SAdd(const Slice& key, const std::vector& members, int32_t* ret) { +rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& members, int32_t* ret) { std::unordered_set unique; std::vector filtered_members; for (const auto& member : members) { @@ -209,34 +118,38 @@ rocksdb::Status RedisSets::SAdd(const Slice& key, const std::vector rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - int32_t version = 0; + uint64_t version = 0; std::string meta_value; - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { version 
= parsed_sets_meta_value.InitialMetaValue(); if (!parsed_sets_meta_value.check_set_count(static_cast(filtered_members.size()))) { return Status::InvalidArgument("set size overflow"); } - parsed_sets_meta_value.set_count(static_cast(filtered_members.size())); - batch.Put(handles_[0], key, meta_value); + parsed_sets_meta_value.SetCount(static_cast(filtered_members.size())); + batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); for (const auto& member : filtered_members) { SetsMemberKey sets_member_key(key, version, member); - batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); } *ret = static_cast(filtered_members.size()); } else { int32_t cnt = 0; std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); for (const auto& member : filtered_members) { SetsMemberKey sets_member_key(key, version, member); - s = db_->Get(default_read_options_, handles_[1], sets_member_key.Encode(), &member_value); + s = db_->Get(default_read_options_, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); if (s.ok()) { } else if (s.IsNotFound()) { cnt++; - batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); } else { return s; } @@ -249,7 +162,7 @@ rocksdb::Status RedisSets::SAdd(const Slice& key, const std::vector return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); } } } else if (s.IsNotFound()) { @@ -257,10 +170,11 @@ rocksdb::Status RedisSets::SAdd(const Slice& key, const std::vector EncodeFixed32(str, filtered_members.size()); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version 
= sets_meta_value.UpdateVersion(); - batch.Put(handles_[0], key, sets_meta_value.Encode()); + batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), sets_meta_value.Encode()); for (const auto& member : filtered_members) { SetsMemberKey sets_member_key(key, version, member); - batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue i_val(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), i_val.Encode()); } *ret = static_cast(filtered_members.size()); } else { @@ -269,16 +183,18 @@ rocksdb::Status RedisSets::SAdd(const Slice& key, const std::vector return db_->Write(default_write_options_, &batch); } -rocksdb::Status RedisSets::SCard(const Slice& key, int32_t* ret) { +rocksdb::Status Redis::SCard(const Slice& key, int32_t* ret) { *ret = 0; std::string meta_value; - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); } else { - *ret = parsed_sets_meta_value.count(); + *ret = parsed_sets_meta_value.Count(); if (*ret == 0) { return rocksdb::Status::NotFound("Deleted"); } @@ -287,7 +203,7 @@ rocksdb::Status RedisSets::SCard(const Slice& key, int32_t* ret) { return s; } -rocksdb::Status RedisSets::SDiff(const std::vector& keys, std::vector* members) { +rocksdb::Status Redis::SDiff(const std::vector& keys, std::vector* members) { if (keys.empty()) { return rocksdb::Status::Corruption("SDiff invalid parameter, no keys"); } @@ -296,36 +212,38 @@ rocksdb::Status RedisSets::SDiff(const std::vector& keys, std::vect const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; std::vector 
vaild_sets; rocksdb::Status s; for (uint32_t idx = 1; idx < keys.size(); ++idx) { - s = db_->Get(read_options, handles_[0], keys[idx], &meta_value); + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.count() != 0) { - vaild_sets.push_back({keys[idx], parsed_sets_meta_value.version()}); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + vaild_sets.push_back({keys[idx], parsed_sets_meta_value.Version()}); } } else if (!s.IsNotFound()) { return s; } } - s = db_->Get(read_options, handles_[0], keys[0], &meta_value); + BaseMetaKey base_meta_key0(keys[0]); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key0.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.count() != 0) { + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { bool found; Slice prefix; std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(keys[0], version, Slice()); - prefix = sets_member_key.Encode(); - KeyStatisticsDurationGuard guard(this, keys[0]); - auto iter = db_->NewIterator(read_options, handles_[1]); + prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, keys[0]); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(iter->key()); Slice member = parsed_sets_member_key.member(); @@ -333,7 +251,7 @@ rocksdb::Status RedisSets::SDiff(const std::vector& keys, std::vect found = false; for (const auto& key_version : 
vaild_sets) { SetsMemberKey sets_member_key(key_version.key, key_version.version, member); - s = db_->Get(read_options, handles_[1], sets_member_key.Encode(), &member_value); + s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); if (s.ok()) { found = true; break; @@ -354,7 +272,7 @@ rocksdb::Status RedisSets::SDiff(const std::vector& keys, std::vect return rocksdb::Status::OK(); } -rocksdb::Status RedisSets::SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { +rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { if (keys.empty()) { return rocksdb::Status::Corruption("SDiffsotre invalid parameter, no keys"); } @@ -364,7 +282,7 @@ rocksdb::Status RedisSets::SDiffstore(const Slice& destination, const std::vecto const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeRecordLock l(lock_mgr_, destination); ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; @@ -372,11 +290,12 @@ rocksdb::Status RedisSets::SDiffstore(const Slice& destination, const std::vecto rocksdb::Status s; for (uint32_t idx = 1; idx < keys.size(); ++idx) { - s = db_->Get(read_options, handles_[0], keys[idx], &meta_value); + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.count() != 0) { - vaild_sets.push_back({keys[idx], parsed_sets_meta_value.version()}); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + vaild_sets.push_back({keys[idx], parsed_sets_meta_value.Version()}); } } else if (!s.IsNotFound()) { return s; @@ -384,17 +303,18 @@ rocksdb::Status RedisSets::SDiffstore(const Slice& destination, const std::vecto } 
std::vector members; - s = db_->Get(read_options, handles_[0], keys[0], &meta_value); + BaseMetaKey base_meta_key0(keys[0]); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key0.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.count() != 0) { + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { bool found; std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(keys[0], version, Slice()); - Slice prefix = sets_member_key.Encode(); - KeyStatisticsDurationGuard guard(this, keys[0]); - auto iter = db_->NewIterator(read_options, handles_[1]); + Slice prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, keys[0]); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(iter->key()); Slice member = parsed_sets_member_key.member(); @@ -402,7 +322,7 @@ rocksdb::Status RedisSets::SDiffstore(const Slice& destination, const std::vecto found = false; for (const auto& key_version : vaild_sets) { SetsMemberKey sets_member_key(key_version.key, key_version.version, member); - s = db_->Get(read_options, handles_[1], sets_member_key.Encode(), &member_value); + s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); if (s.ok()) { found = true; break; @@ -422,37 +342,39 @@ rocksdb::Status RedisSets::SDiffstore(const Slice& destination, const std::vecto } uint32_t statistic = 0; - s = db_->Get(read_options, handles_[0], destination, &meta_value); + BaseMetaKey base_destination(destination); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue 
parsed_sets_meta_value(&meta_value); - statistic = parsed_sets_meta_value.count(); + statistic = parsed_sets_meta_value.Count(); version = parsed_sets_meta_value.InitialMetaValue(); - if (!parsed_sets_meta_value.check_set_count(static_cast(members.size()))) { - return Status::InvalidArgument("set size overflow"); - } - parsed_sets_meta_value.set_count(static_cast(members.size())); - batch.Put(handles_[0], destination, meta_value); + if (!parsed_sets_meta_value.check_set_count(static_cast(members.size()))) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.SetCount(static_cast(members.size())); + batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), meta_value); } else if (s.IsNotFound()) { char str[4]; EncodeFixed32(str, members.size()); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[0], destination, sets_meta_value.Encode()); + batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), sets_meta_value.Encode()); } else { return s; } for (const auto& member : members) { SetsMemberKey sets_member_key(destination, version, member); - batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); } *ret = static_cast(members.size()); s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(destination.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kSets, destination.ToString(), statistic); value_to_dest = std::move(members); return s; } -rocksdb::Status RedisSets::SInter(const std::vector& keys, std::vector* members) { +rocksdb::Status Redis::SInter(const std::vector& keys, std::vector* members) { if (keys.empty()) { return rocksdb::Status::Corruption("SInter invalid parameter, no keys"); } @@ -461,20 +383,21 @@ rocksdb::Status RedisSets::SInter(const std::vector& keys, std::vec const 
rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; std::vector vaild_sets; rocksdb::Status s; for (uint32_t idx = 1; idx < keys.size(); ++idx) { - s = db_->Get(read_options, handles_[0], keys[idx], &meta_value); + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::OK(); } else { - vaild_sets.push_back({keys[idx], parsed_sets_meta_value.version()}); + vaild_sets.push_back({keys[idx], parsed_sets_meta_value.Version()}); } } else if (s.IsNotFound()) { return rocksdb::Status::OK(); @@ -483,19 +406,20 @@ rocksdb::Status RedisSets::SInter(const std::vector& keys, std::vec } } - s = db_->Get(read_options, handles_[0], keys[0], &meta_value); + BaseMetaKey base_meta_key0(keys[0]); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key0.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::OK(); } else { bool reliable; std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(keys[0], version, Slice()); - Slice prefix = sets_member_key.Encode(); - KeyStatisticsDurationGuard guard(this, keys[0]); - auto iter = db_->NewIterator(read_options, handles_[1]); + KeyStatisticsDurationGuard guard(this, DataType::kSets, keys[0]); + Slice prefix = sets_member_key.EncodeSeekKey(); + auto iter = 
db_->NewIterator(read_options, handles_[kSetsDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(iter->key()); Slice member = parsed_sets_member_key.member(); @@ -503,7 +427,7 @@ rocksdb::Status RedisSets::SInter(const std::vector& keys, std::vec reliable = true; for (const auto& key_version : vaild_sets) { SetsMemberKey sets_member_key(key_version.key, key_version.version, member); - s = db_->Get(read_options, handles_[1], sets_member_key.Encode(), &member_value); + s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); if (s.ok()) { continue; } else if (s.IsNotFound()) { @@ -528,7 +452,7 @@ rocksdb::Status RedisSets::SInter(const std::vector& keys, std::vec return rocksdb::Status::OK(); } -rocksdb::Status RedisSets::SInterstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { +rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { if (keys.empty()) { return rocksdb::Status::Corruption("SInterstore invalid parameter, no keys"); } @@ -538,7 +462,7 @@ rocksdb::Status RedisSets::SInterstore(const Slice& destination, const std::vect const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; bool have_invalid_sets = false; ScopeRecordLock l(lock_mgr_, destination); ScopeSnapshot ss(db_, &snapshot); @@ -547,14 +471,15 @@ rocksdb::Status RedisSets::SInterstore(const Slice& destination, const std::vect rocksdb::Status s; for (uint32_t idx = 1; idx < keys.size(); ++idx) { - s = db_->Get(read_options, handles_[0], keys[idx], &meta_value); + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.IsStale() || 
parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { have_invalid_sets = true; break; } else { - vaild_sets.push_back({keys[idx], parsed_sets_meta_value.version()}); + vaild_sets.push_back({keys[idx], parsed_sets_meta_value.Version()}); } } else if (s.IsNotFound()) { have_invalid_sets = true; @@ -566,19 +491,20 @@ rocksdb::Status RedisSets::SInterstore(const Slice& destination, const std::vect std::vector members; if (!have_invalid_sets) { - s = db_->Get(read_options, handles_[0], keys[0], &meta_value); + BaseMetaKey base_meta_key0(keys[0]); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key0.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { have_invalid_sets = true; } else { bool reliable; std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(keys[0], version, Slice()); - Slice prefix = sets_member_key.Encode(); - KeyStatisticsDurationGuard guard(this, keys[0]); - auto iter = db_->NewIterator(read_options, handles_[1]); + Slice prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, keys[0]); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(iter->key()); Slice member = parsed_sets_member_key.member(); @@ -586,7 +512,7 @@ rocksdb::Status RedisSets::SInterstore(const Slice& destination, const std::vect reliable = true; for (const auto& key_version : vaild_sets) { SetsMemberKey sets_member_key(key_version.key, key_version.version, member); - s = db_->Get(read_options, handles_[1], 
sets_member_key.Encode(), &member_value); + s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); if (s.ok()) { continue; } else if (s.IsNotFound()) { @@ -610,57 +536,61 @@ rocksdb::Status RedisSets::SInterstore(const Slice& destination, const std::vect } uint32_t statistic = 0; - s = db_->Get(read_options, handles_[0], destination, &meta_value); + BaseMetaKey base_destination(destination); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - statistic = parsed_sets_meta_value.count(); + statistic = parsed_sets_meta_value.Count(); version = parsed_sets_meta_value.InitialMetaValue(); if (!parsed_sets_meta_value.check_set_count(static_cast(members.size()))) { return Status::InvalidArgument("set size overflow"); } - parsed_sets_meta_value.set_count(static_cast(members.size())); - batch.Put(handles_[0], destination, meta_value); + parsed_sets_meta_value.SetCount(static_cast(members.size())); + batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), meta_value); } else if (s.IsNotFound()) { char str[4]; EncodeFixed32(str, members.size()); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[0], destination, sets_meta_value.Encode()); + batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), sets_meta_value.Encode()); } else { return s; } for (const auto& member : members) { SetsMemberKey sets_member_key(destination, version, member); - batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); } *ret = static_cast(members.size()); s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(destination.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kSets, destination.ToString(), statistic); 
value_to_dest = std::move(members); return s; } -rocksdb::Status RedisSets::SIsmember(const Slice& key, const Slice& member, int32_t* ret) { +rocksdb::Status Redis::SIsmember(const Slice& key, const Slice& member, int32_t* ret) { *ret = 0; rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - rocksdb::Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } else { std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(key, version, member); - s = db_->Get(read_options, handles_[1], sets_member_key.Encode(), &member_value); + s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); *ret = s.ok() ? 
1 : 0; } } else if (s.IsNotFound()) { @@ -669,27 +599,29 @@ rocksdb::Status RedisSets::SIsmember(const Slice& key, const Slice& member, int3 return s; } -rocksdb::Status RedisSets::SMembers(const Slice& key, std::vector* members) { +rocksdb::Status Redis::SMembers(const Slice& key, std::vector* members) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - rocksdb::Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } else { - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(key, version, Slice()); - Slice prefix = sets_member_key.Encode(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - auto iter = db_->NewIterator(read_options, handles_[1]); + Slice prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(iter->key()); members->push_back(parsed_sets_member_key.member().ToString()); @@ -700,26 +632,27 @@ rocksdb::Status RedisSets::SMembers(const Slice& key, std::vector* return s; } -Status RedisSets::SMembersWithTTL(const Slice& key, - std::vector* members, - int64_t* ttl) { +Status Redis::SMembersWithTTL(const Slice& key, + 
std::vector* members, + int64_t* ttl) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.Count() == 0) { return Status::NotFound(); } else if (parsed_sets_meta_value.IsStale()) { return Status::NotFound("Stale"); } else { // ttl - *ttl = parsed_sets_meta_value.timestamp(); + *ttl = parsed_sets_meta_value.Etime(); if (*ttl == 0) { *ttl = -1; } else { @@ -728,10 +661,11 @@ Status RedisSets::SMembersWithTTL(const Slice& key, *ttl = *ttl - curtime >= 0 ? *ttl - curtime : -2; } - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(key, version, Slice()); - Slice prefix = sets_member_key.Encode(); - auto iter = db_->NewIterator(read_options, handles_[1]); + Slice prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { @@ -744,12 +678,12 @@ Status RedisSets::SMembersWithTTL(const Slice& key, return s; } -rocksdb::Status RedisSets::SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret) { +rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret) { *ret = 0; rocksdb::WriteBatch batch; rocksdb::ReadOptions read_options; - int32_t version = 0; + uint64_t version = 0; uint32_t statistic = 0; std::string 
meta_value; std::vector keys{source.ToString(), destination.ToString()}; @@ -760,26 +694,27 @@ rocksdb::Status RedisSets::SMove(const Slice& source, const Slice& destination, return rocksdb::Status::OK(); } - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], source, &meta_value); + BaseMetaKey base_source(source); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_source.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } else { std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(source, version, member); - s = db_->Get(default_read_options_, handles_[1], sets_member_key.Encode(), &member_value); + s = db_->Get(default_read_options_, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); if (s.ok()) { *ret = 1; if (!parsed_sets_meta_value.CheckModifyCount(-1)){ return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(-1); - batch.Put(handles_[0], source, meta_value); - batch.Delete(handles_[1], sets_member_key.Encode()); + batch.Put(handles_[kSetsMetaCF], base_source.Encode(), meta_value); + batch.Delete(handles_[kSetsDataCF], sets_member_key.Encode()); statistic++; } else if (s.IsNotFound()) { *ret = 0; @@ -795,27 +730,30 @@ rocksdb::Status RedisSets::SMove(const Slice& source, const Slice& destination, return s; } - s = db_->Get(default_read_options_, handles_[0], destination, &meta_value); + BaseMetaKey base_destination(destination); + s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if 
(parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { version = parsed_sets_meta_value.InitialMetaValue(); - parsed_sets_meta_value.set_count(1); - batch.Put(handles_[0], destination, meta_value); + parsed_sets_meta_value.SetCount(1); + batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), meta_value); SetsMemberKey sets_member_key(destination, version, member); - batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue i_val(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), i_val.Encode()); } else { std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(destination, version, member); - s = db_->Get(default_read_options_, handles_[1], sets_member_key.Encode(), &member_value); + s = db_->Get(default_read_options_, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); if (s.IsNotFound()) { if (!parsed_sets_meta_value.CheckModifyCount(1)){ return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(1); - batch.Put(handles_[0], destination, meta_value); - batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), meta_value); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); } else if (!s.ok()) { return s; } @@ -825,18 +763,19 @@ rocksdb::Status RedisSets::SMove(const Slice& source, const Slice& destination, EncodeFixed32(str, 1); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[0], destination, sets_meta_value.Encode()); + batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), sets_meta_value.Encode()); SetsMemberKey sets_member_key(destination, version, member); - 
batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(source.ToString(), 1); + UpdateSpecificKeyStatistics(DataType::kSets, source.ToString(), 1); return s; } -rocksdb::Status RedisSets::SPop(const Slice& key, std::vector* members, int64_t cnt) { +rocksdb::Status Redis::SPop(const Slice& key, std::vector* members, int64_t cnt) { std::default_random_engine engine; std::string meta_value; @@ -844,42 +783,44 @@ rocksdb::Status RedisSets::SPop(const Slice& key, std::vector* memb ScopeRecordLock l(lock_mgr_, key); uint64_t start_us = pstd::NowMicros(); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t length = parsed_sets_meta_value.count(); + int32_t length = parsed_sets_meta_value.Count(); if (length < cnt) { - int32_t size = parsed_sets_meta_value.count(); + int32_t size = parsed_sets_meta_value.Count(); int32_t cur_index = 0; - int32_t version = parsed_sets_meta_value.version(); + uint64_t version = parsed_sets_meta_value.Version(); SetsMemberKey sets_member_key(key, version, Slice()); - auto iter = db_->NewIterator(default_read_options_, handles_[1]); - for (iter->Seek(sets_member_key.Encode()); + auto iter = db_->NewIterator(default_read_options_, handles_[kSetsDataCF]); + for (iter->Seek(sets_member_key.EncodeSeekKey()); iter->Valid() && cur_index < size; iter->Next(), cur_index++) { - 
batch.Delete(handles_[1], iter->key()); + batch.Delete(handles_[kSetsDataCF], iter->key()); ParsedSetsMemberKey parsed_sets_member_key(iter->key()); members->push_back(parsed_sets_member_key.member().ToString()); } //parsed_sets_meta_value.ModifyCount(-cnt); - //batch.Put(handles_[0], key, meta_value); - batch.Delete(handles_[0], key); - delete iter; + //batch.Put(handles_[kSetsMetaCF], key, meta_value); + batch.Delete(handles_[kSetsMetaCF], base_meta_key.Encode()); + delete iter; } else { engine.seed(time(nullptr)); int32_t cur_index = 0; - int32_t size = parsed_sets_meta_value.count(); + int32_t size = parsed_sets_meta_value.Count(); int32_t target_index = -1; - int32_t version = parsed_sets_meta_value.version(); + uint64_t version = parsed_sets_meta_value.Version(); std::unordered_set sets_index; int32_t modnum = size; @@ -894,9 +835,9 @@ rocksdb::Status RedisSets::SPop(const Slice& key, std::vector* memb SetsMemberKey sets_member_key(key, version, Slice()); int64_t del_count = 0; - KeyStatisticsDurationGuard guard(this, key.ToString()); - auto iter = db_->NewIterator(default_read_options_, handles_[1]); - for (iter->Seek(sets_member_key.Encode()); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + auto iter = db_->NewIterator(default_read_options_, handles_[kSetsDataCF]); + for (iter->Seek(sets_member_key.EncodeSeekKey()); iter->Valid() && cur_index < size; iter->Next(), cur_index++) { if (del_count == cnt) { @@ -904,7 +845,7 @@ rocksdb::Status RedisSets::SPop(const Slice& key, std::vector* memb } if (sets_index.find(cur_index) != sets_index.end()) { del_count++; - batch.Delete(handles_[1], iter->key()); + batch.Delete(handles_[kSetsDataCF], iter->key()); ParsedSetsMemberKey parsed_sets_member_key(iter->key()); members->push_back(parsed_sets_member_key.member().ToString()); } @@ -914,7 +855,7 @@ rocksdb::Status RedisSets::SPop(const Slice& key, std::vector* memb return Status::InvalidArgument("set size overflow"); } 
parsed_sets_meta_value.ModifyCount(static_cast(-cnt)); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); delete iter; } } @@ -924,7 +865,17 @@ rocksdb::Status RedisSets::SPop(const Slice& key, std::vector* memb return db_->Write(default_write_options_, &batch); } -rocksdb::Status RedisSets::SRandmember(const Slice& key, int32_t count, std::vector* members) { +rocksdb::Status Redis::ResetSpopCount(const std::string& key) { return spop_counts_store_->Remove(key); } + +rocksdb::Status Redis::AddAndGetSpopCount(const std::string& key, uint64_t* count) { + size_t old_count = 0; + spop_counts_store_->Lookup(key, &old_count); + spop_counts_store_->Insert(key, old_count + 1); + *count = old_count + 1; + return rocksdb::Status::OK(); +} + +rocksdb::Status Redis::SRandmember(const Slice& key, int32_t count, std::vector* members) { if (count == 0) { return rocksdb::Status::OK(); } @@ -939,16 +890,18 @@ rocksdb::Status RedisSets::SRandmember(const Slice& key, int32_t count, std::vec std::vector targets; std::unordered_set unique; - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } else { - int32_t size = parsed_sets_meta_value.count(); - int32_t version = parsed_sets_meta_value.version(); + int32_t size = parsed_sets_meta_value.Count(); + uint64_t version = parsed_sets_meta_value.Version(); if (count > 0) { count = count <= size ? 
count : size; while (targets.size() < static_cast(count)) { @@ -973,9 +926,9 @@ rocksdb::Status RedisSets::SRandmember(const Slice& key, int32_t count, std::vec int32_t cur_index = 0; int32_t idx = 0; SetsMemberKey sets_member_key(key, version, Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - auto iter = db_->NewIterator(default_read_options_, handles_[1]); - for (iter->Seek(sets_member_key.Encode()); iter->Valid() && cur_index < size; iter->Next(), cur_index++) { + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + auto iter = db_->NewIterator(default_read_options_, handles_[kSetsDataCF]); + for (iter->Seek(sets_member_key.EncodeSeekKey()); iter->Valid() && cur_index < size; iter->Next(), cur_index++) { if (static_cast(idx) >= targets.size()) { break; } @@ -993,32 +946,34 @@ rocksdb::Status RedisSets::SRandmember(const Slice& key, int32_t count, std::vec return s; } -rocksdb::Status RedisSets::SRem(const Slice& key, const std::vector& members, int32_t* ret) { +rocksdb::Status Redis::SRem(const Slice& key, const std::vector& members, int32_t* ret) { *ret = 0; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - int32_t version = 0; + uint64_t version = 0; uint32_t statistic = 0; std::string meta_value; - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } else { int32_t cnt = 0; std::string member_value; - version = parsed_sets_meta_value.version(); + version = parsed_sets_meta_value.Version(); for (const auto& member : members) { 
SetsMemberKey sets_member_key(key, version, member); - s = db_->Get(default_read_options_, handles_[1], sets_member_key.Encode(), &member_value); + s = db_->Get(default_read_options_, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); if (s.ok()) { cnt++; statistic++; - batch.Delete(handles_[1], sets_member_key.Encode()); + batch.Delete(handles_[kSetsDataCF], sets_member_key.Encode()); } else if (s.IsNotFound()) { } else { return s; @@ -1029,7 +984,7 @@ rocksdb::Status RedisSets::SRem(const Slice& key, const std::vector return Status::InvalidArgument("set size overflow"); } parsed_sets_meta_value.ModifyCount(-cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); } } else if (s.IsNotFound()) { *ret = 0; @@ -1038,11 +993,11 @@ rocksdb::Status RedisSets::SRem(const Slice& key, const std::vector return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); return s; } -rocksdb::Status RedisSets::SUnion(const std::vector& keys, std::vector* members) { +rocksdb::Status Redis::SUnion(const std::vector& keys, std::vector* members) { if (keys.empty()) { return rocksdb::Status::Corruption("SUnion invalid parameter, no keys"); } @@ -1057,11 +1012,12 @@ rocksdb::Status RedisSets::SUnion(const std::vector& keys, std::vec rocksdb::Status s; for (const auto & key : keys) { - s = db_->Get(read_options, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.count() != 0) { - vaild_sets.push_back({key, parsed_sets_meta_value.version()}); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + 
vaild_sets.push_back({key, parsed_sets_meta_value.Version()}); } } else if (!s.IsNotFound()) { return s; @@ -1072,9 +1028,9 @@ rocksdb::Status RedisSets::SUnion(const std::vector& keys, std::vec std::map result_flag; for (const auto& key_version : vaild_sets) { SetsMemberKey sets_member_key(key_version.key, key_version.version, Slice()); - prefix = sets_member_key.Encode(); - KeyStatisticsDurationGuard guard(this, key_version.key); - auto iter = db_->NewIterator(read_options, handles_[1]); + prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key_version.key); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(iter->key()); std::string member = parsed_sets_member_key.member().ToString(); @@ -1088,7 +1044,7 @@ rocksdb::Status RedisSets::SUnion(const std::vector& keys, std::vec return rocksdb::Status::OK(); } -rocksdb::Status RedisSets::SUnionstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { +rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { if (keys.empty()) { return rocksdb::Status::Corruption("SUnionstore invalid parameter, no keys"); } @@ -1098,7 +1054,7 @@ rocksdb::Status RedisSets::SUnionstore(const Slice& destination, const std::vect const rocksdb::Snapshot* snapshot; std::string meta_value; - int32_t version = 0; + uint64_t version = 0; ScopeRecordLock l(lock_mgr_, destination); ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; @@ -1106,11 +1062,12 @@ rocksdb::Status RedisSets::SUnionstore(const Slice& destination, const std::vect rocksdb::Status s; for (const auto & key : keys) { - s = db_->Get(read_options, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + s = db_->Get(read_options, 
handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.count() != 0) { - vaild_sets.push_back({key, parsed_sets_meta_value.version()}); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + vaild_sets.push_back({key, parsed_sets_meta_value.Version()}); } } else if (!s.IsNotFound()) { return s; @@ -1122,9 +1079,9 @@ rocksdb::Status RedisSets::SUnionstore(const Slice& destination, const std::vect std::map result_flag; for (const auto& key_version : vaild_sets) { SetsMemberKey sets_member_key(key_version.key, key_version.version, Slice()); - prefix = sets_member_key.Encode(); - KeyStatisticsDurationGuard guard(this, key_version.key); - auto iter = db_->NewIterator(read_options, handles_[1]); + prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key_version.key); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(iter->key()); std::string member = parsed_sets_member_key.member().ToString(); @@ -1137,37 +1094,39 @@ rocksdb::Status RedisSets::SUnionstore(const Slice& destination, const std::vect } uint32_t statistic = 0; - s = db_->Get(read_options, handles_[0], destination, &meta_value); + BaseMetaKey base_destination(destination); + s = db_->Get(read_options, handles_[kSetsMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - statistic = parsed_sets_meta_value.count(); + statistic = parsed_sets_meta_value.Count(); version = parsed_sets_meta_value.InitialMetaValue(); if (!parsed_sets_meta_value.check_set_count(static_cast(members.size()))) { return Status::InvalidArgument("set size overflow"); } - 
parsed_sets_meta_value.set_count(static_cast(members.size())); - batch.Put(handles_[0], destination, meta_value); + parsed_sets_meta_value.SetCount(static_cast(members.size())); + batch.Put(handles_[kSetsMetaCF], destination, meta_value); } else if (s.IsNotFound()) { char str[4]; EncodeFixed32(str, members.size()); SetsMetaValue sets_meta_value(Slice(str, sizeof(int32_t))); version = sets_meta_value.UpdateVersion(); - batch.Put(handles_[0], destination, sets_meta_value.Encode()); + batch.Put(handles_[kSetsMetaCF], base_destination.Encode(), sets_meta_value.Encode()); } else { return s; } for (const auto& member : members) { SetsMemberKey sets_member_key(destination, version, member); - batch.Put(handles_[1], sets_member_key.Encode(), Slice()); + BaseDataValue i_val(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), i_val.Encode()); } *ret = static_cast(members.size()); s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(destination.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kSets, destination.ToString(), statistic); value_to_dest = std::move(members); return s; } -rocksdb::Status RedisSets::SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, +rocksdb::Status Redis::SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, std::vector* members, int64_t* next_cursor) { *next_cursor = 0; members->clear(); @@ -1184,17 +1143,19 @@ rocksdb::Status RedisSets::SScan(const Slice& key, int64_t cursor, const std::st std::string meta_value; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - rocksdb::Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(read_options, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.IsStale() || 
parsed_sets_meta_value.count() == 0) { + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { *next_cursor = 0; return rocksdb::Status::NotFound(); } else { std::string sub_member; std::string start_point; - int32_t version = parsed_sets_meta_value.version(); - s = GetScanStartPoint(key, pattern, cursor, &start_point); + uint64_t version = parsed_sets_meta_value.Version(); + s = GetScanStartPoint(DataType::kSets, key, pattern, cursor, &start_point); if (s.IsNotFound()) { cursor = 0; if (isTailWildcard(pattern)) { @@ -1207,10 +1168,10 @@ rocksdb::Status RedisSets::SScan(const Slice& key, int64_t cursor, const std::st SetsMemberKey sets_member_prefix(key, version, sub_member); SetsMemberKey sets_member_key(key, version, start_point); - std::string prefix = sets_member_prefix.Encode().ToString(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); - for (iter->Seek(sets_member_key.Encode()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); + std::string prefix = sets_member_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); + for (iter->Seek(sets_member_key.EncodeSeekKey()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(iter->key()); std::string member = parsed_sets_member_key.member().ToString(); @@ -1224,7 +1185,7 @@ rocksdb::Status RedisSets::SScan(const Slice& key, int64_t cursor, const std::st *next_cursor = cursor + step_length; ParsedSetsMemberKey parsed_sets_member_key(iter->key()); std::string next_member = parsed_sets_member_key.member().ToString(); - StoreScanNextPoint(key, pattern, *next_cursor, next_member); + StoreScanNextPoint(DataType::kSets, key, pattern, *next_cursor, next_member); } else { *next_cursor = 0; } @@ 
-1237,287 +1198,117 @@ rocksdb::Status RedisSets::SScan(const Slice& key, int64_t cursor, const std::st return rocksdb::Status::OK(); } -rocksdb::Status RedisSets::PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) > 0)) { - return rocksdb::Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToFirst(); - } else { - it->Seek(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedSetsMetaValue parsed_meta_value(it->value()); - if (parsed_meta_value.IsStale() || parsed_meta_value.count() == 0) { - it->Next(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Next(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedSetsMetaValue parsed_sets_meta_value(it->value()); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { - it->Next(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return rocksdb::Status::OK(); -} - -rocksdb::Status RedisSets::PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - 
rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) < 0)) { - return rocksdb::Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToLast(); - } else { - it->SeekForPrev(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedSetsMetaValue parsed_sets_meta_value(it->value()); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { - it->Prev(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Prev(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedSetsMetaValue parsed_sets_meta_value(it->value()); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { - it->Prev(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return rocksdb::Status::OK(); -} - -rocksdb::Status RedisSets::Expire(const Slice& key, int32_t ttl) { +rocksdb::Status Redis::SetsExpire(const Slice& key, int64_t ttl) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); - } else if 
(parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } if (ttl > 0) { parsed_sets_meta_value.SetRelativeTimestamp(ttl); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + s = db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); } else { parsed_sets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + s = db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); } } return s; } -rocksdb::Status RedisSets::Del(const Slice& key) { +rocksdb::Status Redis::SetsDel(const Slice& key) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } else { - uint32_t statistic = parsed_sets_meta_value.count(); + uint32_t statistic = parsed_sets_meta_value.Count(); parsed_sets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + s = db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); } } return s; } -bool RedisSets::Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, - int64_t* count, std::string* next_key) { - std::string meta_key; - bool is_finish = true; - rocksdb::ReadOptions 
iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - - it->Seek(start_key); - while (it->Valid() && (*count) > 0) { - ParsedSetsMetaValue parsed_meta_value(it->value()); - if (parsed_meta_value.IsStale() || parsed_meta_value.count() == 0) { - it->Next(); - continue; - } else { - meta_key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), meta_key.data(), meta_key.size(), 0) != 0) { - keys->push_back(meta_key); - } - (*count)--; - it->Next(); - } - } - - std::string prefix = isTailWildcard(pattern) ? pattern.substr(0, pattern.size() - 1) : ""; - if (it->Valid() && (it->key().compare(prefix) <= 0 || it->key().starts_with(prefix))) { - *next_key = it->key().ToString(); - is_finish = false; - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -bool RedisSets::PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) { - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - it->Seek(start_key); - while (it->Valid() && (*leftover_visits) > 0) { - ParsedSetsMetaValue parsed_sets_meta_value(it->value()); - if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.count() == 0) { - it->Next(); - continue; - } else { - if (min_timestamp < parsed_sets_meta_value.timestamp() && parsed_sets_meta_value.timestamp() < max_timestamp) { - keys->push_back(it->key().ToString()); - } - (*leftover_visits)--; - it->Next(); - } - } - - if (it->Valid()) { - is_finish = false; - *next_key = it->key().ToString(); - } 
else { - *next_key = ""; - } - delete it; - return is_finish; -} - -rocksdb::Status RedisSets::Expireat(const Slice& key, int32_t timestamp) { +rocksdb::Status Redis::SetsExpireat(const Slice& key, int64_t timestamp) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } else { if (timestamp > 0) { - parsed_sets_meta_value.set_timestamp(timestamp); + parsed_sets_meta_value.SetEtime(static_cast(timestamp)); } else { parsed_sets_meta_value.InitialMetaValue(); } - return db_->Put(default_write_options_, handles_[0], key, meta_value); + return db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); } } return s; } -rocksdb::Status RedisSets::Persist(const Slice& key) { +rocksdb::Status Redis::SetsPersist(const Slice& key) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); if (parsed_sets_meta_value.IsStale()) { return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.count() == 0) { + } else if (parsed_sets_meta_value.Count() == 0) { return rocksdb::Status::NotFound(); } else { - int32_t timestamp = parsed_sets_meta_value.timestamp(); + uint64_t timestamp = 
parsed_sets_meta_value.Etime(); if (timestamp == 0) { return rocksdb::Status::NotFound("Not have an associated timeout"); } else { - parsed_sets_meta_value.set_timestamp(0); - return db_->Put(default_write_options_, handles_[0], key, meta_value); + parsed_sets_meta_value.SetEtime(0); + return db_->Put(default_write_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), meta_value); } } } return s; } -rocksdb::Status RedisSets::TTL(const Slice& key, int64_t* timestamp) { +rocksdb::Status Redis::SetsTTL(const Slice& key, int64_t* timestamp) { std::string meta_value; - rocksdb::Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kSetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedSetsMetaValue parsed_setes_meta_value(&meta_value); if (parsed_setes_meta_value.IsStale()) { *timestamp = -2; return rocksdb::Status::NotFound("Stale"); - } else if (parsed_setes_meta_value.count() == 0) { + } else if (parsed_setes_meta_value.Count() == 0) { *timestamp = -2; return rocksdb::Status::NotFound(); } else { - *timestamp = parsed_setes_meta_value.timestamp(); + *timestamp = parsed_setes_meta_value.Etime(); if (*timestamp == 0) { *timestamp = -1; } else { @@ -1532,7 +1323,7 @@ rocksdb::Status RedisSets::TTL(const Slice& key, int64_t* timestamp) { return s; } -void RedisSets::ScanDatabase() { +void Redis::ScanSets() { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -1541,29 +1332,30 @@ void RedisSets::ScanDatabase() { auto current_time = static_cast(time(nullptr)); LOG(INFO) << "***************Sets Meta Data***************"; - auto meta_iter = db_->NewIterator(iterator_options, handles_[0]); + auto meta_iter = db_->NewIterator(iterator_options, handles_[kSetsMetaCF]); for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { ParsedSetsMetaValue 
parsed_sets_meta_value(meta_iter->value()); + ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); int32_t survival_time = 0; - if (parsed_sets_meta_value.timestamp() != 0) { - survival_time = parsed_sets_meta_value.timestamp() - current_time > 0 - ? parsed_sets_meta_value.timestamp() - current_time + if (parsed_sets_meta_value.Etime() != 0) { + survival_time = parsed_sets_meta_value.Etime() - current_time > 0 + ? parsed_sets_meta_value.Etime() - current_time : -1; } LOG(INFO) << fmt::format("[key : {:<30}] [count : {:<10}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", - meta_iter->key().ToString(), parsed_sets_meta_value.count(), parsed_sets_meta_value.timestamp(), - parsed_sets_meta_value.version(), survival_time); + parsed_meta_key.Key().ToString(), parsed_sets_meta_value.Count(), parsed_sets_meta_value.Etime(), + parsed_sets_meta_value.Version(), survival_time); } delete meta_iter; LOG(INFO) << "***************Sets Member Data***************"; - auto member_iter = db_->NewIterator(iterator_options, handles_[1]); + auto member_iter = db_->NewIterator(iterator_options, handles_[kSetsDataCF]); for (member_iter->SeekToFirst(); member_iter->Valid(); member_iter->Next()) { ParsedSetsMemberKey parsed_sets_member_key(member_iter->key()); - LOG(INFO) << fmt::format("[key : {:<30}] [member : {:<20}] [version : {}]", parsed_sets_member_key.key().ToString(), - parsed_sets_member_key.member().ToString(), parsed_sets_member_key.version()); + LOG(INFO) << fmt::format("[key : {:<30}] [member : {:<20}] [version : {}]", parsed_sets_member_key.Key().ToString(), + parsed_sets_member_key.member().ToString(), parsed_sets_member_key.Version()); } delete member_iter; } diff --git a/src/storage/src/redis_sets.h b/src/storage/src/redis_sets.h deleted file mode 100644 index 2898d0e9e7..0000000000 --- a/src/storage/src/redis_sets.h +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef SRC_REDIS_SETS_H_ -#define SRC_REDIS_SETS_H_ - -#include -#include -#include - -#include "src/custom_comparator.h" -#include "src/lru_cache.h" -#include "src/redis.h" - -namespace storage { - -class RedisSets : public Redis { - public: - RedisSets(Storage* s, const DataType& type); - ~RedisSets() override; - - // Common Commands - Status Open(const StorageOptions& storage_options, const std::string& db_path) override; - Status CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type = kMetaAndData) override; - Status GetProperty(const std::string& property, uint64_t* out) override; - Status ScanKeyNum(KeyInfo* key_info) override; - Status ScanKeys(const std::string& pattern, std::vector* keys) override; - Status PKPatternMatchDel(const std::string& pattern, int32_t* ret) override; - - // Setes Commands - Status SAdd(const Slice& key, const std::vector& members, int32_t* ret); - Status SCard(const Slice& key, int32_t* ret); - Status SDiff(const std::vector& keys, std::vector* members); - Status SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); - Status SInter(const std::vector& keys, std::vector* members); - Status SInterstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); - Status SIsmember(const Slice& key, const Slice& member, int32_t* ret); - Status SMembers(const Slice& key, std::vector* members); - Status SMembersWithTTL(const Slice& key, std::vector* members, int64_t* ttl); - Status SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret); - Status SPop(const Slice& key, std::vector* members, int64_t cnt); - Status SRandmember(const Slice& key, 
int32_t count, std::vector* members); - Status SRem(const Slice& key, const std::vector& members, int32_t* ret); - Status SUnion(const std::vector& keys, std::vector* members); - Status SUnionstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret); - Status SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, - std::vector* members, int64_t* next_cursor); - Status PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - Status PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - - // Keys Commands - Status Expire(const Slice& key, int32_t ttl) override; - Status Del(const Slice& key) override; - bool Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, int64_t* count, - std::string* next_key) override; - bool PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) override; - Status Expireat(const Slice& key, int32_t timestamp) override; - Status Persist(const Slice& key) override; - Status TTL(const Slice& key, int64_t* timestamp) override; - - // Iterate all data - void ScanDatabase(); -}; - -} // namespace storage -#endif // SRC_REDIS_SETS_H_ diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc index dd4c63dbbb..9b1d2859d5 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -3,8 +3,7 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. 
-#include "src/redis_strings.h" - +#include #include #include #include @@ -12,45 +11,18 @@ #include #include -#include +#include "pstd/include/pika_codis_slot.h" +#include "src/base_key_format.h" +#include "src/base_key_format.h" #include "src/scope_record_lock.h" #include "src/scope_snapshot.h" #include "src/strings_filter.h" +#include "src/redis.h" #include "storage/util.h" namespace storage { - -RedisStrings::RedisStrings(Storage* const s, const DataType& type) : Redis(s, type) {} - -Status RedisStrings::Open(const StorageOptions& storage_options, const std::string& db_path) { - rocksdb::Options ops(storage_options.options); - ops.compaction_filter_factory = std::make_shared(); - - // use the bloom filter policy to reduce disk reads - rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); - if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { - table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - } - table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); - ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_ops)); - - return rocksdb::DB::Open(ops, db_path, &db_); -} - -Status RedisStrings::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type) { - return db_->CompactRange(default_compact_range_options_, begin, end); -} - -Status RedisStrings::GetProperty(const std::string& property, uint64_t* out) { - std::string value; - db_->GetProperty(property, &value); - *out = std::strtoull(value.c_str(), nullptr, 10); - return Status::OK(); -} - -Status RedisStrings::ScanKeyNum(KeyInfo* key_info) { +Status Redis::ScanStringsKeyNum(KeyInfo* key_info) { uint64_t keys = 0; uint64_t expires = 0; uint64_t ttl_sum = 0; @@ -76,7 +48,7 @@ Status RedisStrings::ScanKeyNum(KeyInfo* key_info) { keys++; if (!parsed_strings_value.IsPermanentSurvival()) { expires++; - ttl_sum += parsed_strings_value.timestamp() - curtime; + ttl_sum += 
parsed_strings_value.Etime() - curtime; } } } @@ -89,31 +61,7 @@ Status RedisStrings::ScanKeyNum(KeyInfo* key_info) { return Status::OK(); } -Status RedisStrings::ScanKeys(const std::string& pattern, std::vector* keys) { - std::string key; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - // Note: This is a string type and does not need to pass the column family as - // a parameter, use the default column family - rocksdb::Iterator* iter = db_->NewIterator(iterator_options); - for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - ParsedStringsValue parsed_strings_value(iter->value()); - if (!parsed_strings_value.IsStale()) { - key = iter->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - } - } - delete iter; - return Status::OK(); -} - -Status RedisStrings::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { +Status Redis::StringsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -159,30 +107,32 @@ Status RedisStrings::PKPatternMatchDel(const std::string& pattern, int32_t* ret) return s; } -Status RedisStrings::Append(const Slice& key, const Slice& value, int32_t* ret) { +Status Redis::Append(const Slice& key, const Slice& value, int32_t* ret) { std::string old_value; *ret = 0; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&old_value); if (parsed_strings_value.IsStale()) { *ret = static_cast(value.size()); StringsValue strings_value(value); - return db_->Put(default_write_options_, key, 
strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else { - int32_t timestamp = parsed_strings_value.timestamp(); - std::string old_user_value = parsed_strings_value.value().ToString(); + uint64_t timestamp = parsed_strings_value.Etime(); + std::string old_user_value = parsed_strings_value.UserValue().ToString(); std::string new_value = old_user_value + value.ToString(); StringsValue strings_value(new_value); - strings_value.set_timestamp(timestamp); + strings_value.SetEtime(timestamp); *ret = static_cast(new_value.size()); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } } else if (s.IsNotFound()) { *ret = static_cast(value.size()); StringsValue strings_value(value); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } return s; } @@ -203,11 +153,13 @@ int GetBitCount(const unsigned char* value, int64_t bytes) { return bit_num; } -Status RedisStrings::BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, - bool have_range) { +Status Redis::BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, + bool have_range) { *ret = 0; std::string value; - Status s = db_->Get(default_read_options_, key, &value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { @@ -289,8 +241,7 @@ std::string BitOpOperate(BitOpType op, const std::vector& src_value return dest_str; } -Status RedisStrings::BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, - std::string &value_to_dest, int64_t* ret) { +Status Redis::BitOp(BitOpType op, const std::string& dest_key, const std::vector& 
src_keys, std::string& value_to_dest, int64_t* ret) { Status s; if (op == kBitOpNot && src_keys.size() != 1) { return Status::InvalidArgument("the number of source keys is not right"); @@ -303,7 +254,8 @@ Status RedisStrings::BitOp(BitOpType op, const std::string& dest_key, const std: std::vector src_values; for (const auto & src_key : src_keys) { std::string value; - s = db_->Get(default_read_options_, src_key, &value); + BaseKey base_key(src_key); + s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { @@ -329,24 +281,27 @@ Status RedisStrings::BitOp(BitOpType op, const std::string& dest_key, const std: StringsValue strings_value(Slice(dest_value.c_str(), max_len)); ScopeRecordLock l(lock_mgr_, dest_key); - return db_->Put(default_write_options_, dest_key, strings_value.Encode()); + BaseKey base_dest_key(dest_key); + return db_->Put(default_write_options_, base_dest_key.Encode(), strings_value.Encode()); } -Status RedisStrings::Decrby(const Slice& key, int64_t value, int64_t* ret) { +Status Redis::Decrby(const Slice& key, int64_t value, int64_t* ret) { std::string old_value; std::string new_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&old_value); if (parsed_strings_value.IsStale()) { *ret = -value; new_value = std::to_string(*ret); StringsValue strings_value(new_value); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else { - int32_t timestamp = parsed_strings_value.timestamp(); - std::string old_user_value = parsed_strings_value.value().ToString(); + uint64_t timestamp = parsed_strings_value.Etime(); + std::string old_user_value 
= parsed_strings_value.UserValue().ToString(); char* end = nullptr; errno = 0; int64_t ival = strtoll(old_user_value.c_str(), &end, 10); @@ -359,22 +314,24 @@ Status RedisStrings::Decrby(const Slice& key, int64_t value, int64_t* ret) { *ret = ival - value; new_value = std::to_string(*ret); StringsValue strings_value(new_value); - strings_value.set_timestamp(timestamp); - return db_->Put(default_write_options_, key, strings_value.Encode()); + strings_value.SetEtime(timestamp); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } } else if (s.IsNotFound()) { *ret = -value; new_value = std::to_string(*ret); StringsValue strings_value(new_value); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else { return s; } } -Status RedisStrings::Get(const Slice& key, std::string* value) { +Status Redis::Get(const Slice& key, std::string* value) { value->clear(); - Status s = db_->Get(default_read_options_, key, value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); if (s.ok()) { ParsedStringsValue parsed_strings_value(value); if (parsed_strings_value.IsStale()) { @@ -387,9 +344,10 @@ Status RedisStrings::Get(const Slice& key, std::string* value) { return s; } -Status RedisStrings::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { +Status Redis::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { value->clear(); - Status s = db_->Get(default_read_options_, key, value); + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); if (s.ok()) { ParsedStringsValue parsed_strings_value(value); if (parsed_strings_value.IsStale()) { @@ -398,7 +356,7 @@ Status RedisStrings::GetWithTTL(const Slice& key, std::string* value, int64_t* t return Status::NotFound("Stale"); } else { parsed_strings_value.StripSuffix(); - *ttl = 
parsed_strings_value.timestamp(); + *ttl = parsed_strings_value.Etime(); if (*ttl == 0) { *ttl = -1; } else { @@ -415,9 +373,11 @@ Status RedisStrings::GetWithTTL(const Slice& key, std::string* value, int64_t* t return s; } -Status RedisStrings::GetBit(const Slice& key, int64_t offset, int32_t* ret) { +Status Redis::GetBit(const Slice& key, int64_t offset, int32_t* ret) { std::string meta_value; - Status s = db_->Get(default_read_options_, key, &meta_value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &meta_value); if (s.ok() || s.IsNotFound()) { std::string data_value; if (s.ok()) { @@ -426,7 +386,7 @@ Status RedisStrings::GetBit(const Slice& key, int64_t offset, int32_t* ret) { *ret = 0; return Status::OK(); } else { - data_value = parsed_strings_value.value().ToString(); + data_value = parsed_strings_value.UserValue().ToString(); } } size_t byte = offset >> 3; @@ -442,10 +402,12 @@ Status RedisStrings::GetBit(const Slice& key, int64_t offset, int32_t* ret) { return Status::OK(); } -Status RedisStrings::Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret) { +Status Redis::Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret) { *ret = ""; std::string value; - Status s = db_->Get(default_read_options_, key, &value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { @@ -475,8 +437,8 @@ Status RedisStrings::Getrange(const Slice& key, int64_t start_offset, int64_t en } } -Status RedisStrings::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, - std::string* ret, std::string* value, int64_t* ttl) { +Status Redis::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, + std::string* ret, std::string* value, int64_t* ttl) { *ret = ""; Status s = 
db_->Get(default_read_options_, key, value); if (s.ok()) { @@ -488,7 +450,7 @@ Status RedisStrings::GetrangeWithValue(const Slice& key, int64_t start_offset, i } else { parsed_strings_value.StripSuffix(); // get ttl - *ttl = parsed_strings_value.timestamp(); + *ttl = parsed_strings_value.Etime(); if (*ttl == 0) { *ttl = -1; } else { @@ -525,9 +487,11 @@ Status RedisStrings::GetrangeWithValue(const Slice& key, int64_t start_offset, i return s; } -Status RedisStrings::GetSet(const Slice& key, const Slice& value, std::string* old_value) { +Status Redis::GetSet(const Slice& key, const Slice& value, std::string* old_value) { ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, old_value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), old_value); if (s.ok()) { ParsedStringsValue parsed_strings_value(old_value); if (parsed_strings_value.IsStale()) { @@ -539,14 +503,16 @@ Status RedisStrings::GetSet(const Slice& key, const Slice& value, std::string* o return s; } StringsValue strings_value(value); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } -Status RedisStrings::Incrby(const Slice& key, int64_t value, int64_t* ret) { +Status Redis::Incrby(const Slice& key, int64_t value, int64_t* ret) { std::string old_value; std::string new_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); char buf[32] = {0}; if (s.ok()) { ParsedStringsValue parsed_strings_value(&old_value); @@ -554,10 +520,10 @@ Status RedisStrings::Incrby(const Slice& key, int64_t value, int64_t* ret) { *ret = value; Int64ToStr(buf, 32, value); StringsValue strings_value(buf); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return 
db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else { - int32_t timestamp = parsed_strings_value.timestamp(); - std::string old_user_value = parsed_strings_value.value().ToString(); + uint64_t timestamp = parsed_strings_value.Etime(); + std::string old_user_value = parsed_strings_value.UserValue().ToString(); char* end = nullptr; int64_t ival = strtoll(old_user_value.c_str(), &end, 10); if (*end != 0) { @@ -569,38 +535,40 @@ Status RedisStrings::Incrby(const Slice& key, int64_t value, int64_t* ret) { *ret = ival + value; new_value = std::to_string(*ret); StringsValue strings_value(new_value); - strings_value.set_timestamp(timestamp); - return db_->Put(default_write_options_, key, strings_value.Encode()); + strings_value.SetEtime(timestamp); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } } else if (s.IsNotFound()) { *ret = value; Int64ToStr(buf, 32, value); StringsValue strings_value(buf); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else { return s; } } -Status RedisStrings::Incrbyfloat(const Slice& key, const Slice& value, std::string* ret) { +Status Redis::Incrbyfloat(const Slice& key, const Slice& value, std::string* ret) { std::string old_value; std::string new_value; long double long_double_by; if (StrToLongDouble(value.data(), value.size(), &long_double_by) == -1) { return Status::Corruption("Value is not a vaild float"); } + + BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&old_value); if (parsed_strings_value.IsStale()) { LongDoubleToStr(long_double_by, &new_value); *ret = new_value; StringsValue strings_value(new_value); - return db_->Put(default_write_options_, key, 
strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else { - int32_t timestamp = parsed_strings_value.timestamp(); - std::string old_user_value = parsed_strings_value.value().ToString(); + uint64_t timestamp = parsed_strings_value.Etime(); + std::string old_user_value = parsed_strings_value.UserValue().ToString(); long double total; long double old_number; if (StrToLongDouble(old_user_value.data(), old_user_value.size(), &old_number) == -1) { @@ -612,84 +580,20 @@ Status RedisStrings::Incrbyfloat(const Slice& key, const Slice& value, std::stri } *ret = new_value; StringsValue strings_value(new_value); - strings_value.set_timestamp(timestamp); - return db_->Put(default_write_options_, key, strings_value.Encode()); + strings_value.SetEtime(timestamp); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } } else if (s.IsNotFound()) { LongDoubleToStr(long_double_by, &new_value); *ret = new_value; StringsValue strings_value(new_value); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else { return s; } } -Status RedisStrings::MGet(const std::vector& keys, std::vector* vss) { - vss->clear(); - - Status s; - std::string value; - rocksdb::ReadOptions read_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - read_options.snapshot = snapshot; - for (const auto& key : keys) { - s = db_->Get(read_options, key, &value); - if (s.ok()) { - ParsedStringsValue parsed_strings_value(&value); - if (parsed_strings_value.IsStale()) { - vss->push_back({std::string(), Status::NotFound("Stale")}); - } else { - vss->push_back({parsed_strings_value.user_value().ToString(), Status::OK()}); - } - } else if (s.IsNotFound()) { - vss->push_back({std::string(), Status::NotFound()}); - } else { - vss->clear(); - return s; - } - } - return Status::OK(); -} - -Status 
RedisStrings::MGetWithTTL(const std::vector& keys, std::vector* vss) { - vss->clear(); - - Status s; - std::string value; - rocksdb::ReadOptions read_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - read_options.snapshot = snapshot; - for (const auto& key : keys) { - s = db_->Get(read_options, key, &value); - if (s.ok()) { - ParsedStringsValue parsed_strings_value(&value); - if (parsed_strings_value.IsStale()) { - vss->push_back({std::string(), Status::NotFound("Stale"), -2}); - } else { - if (parsed_strings_value.timestamp() == 0) { - vss->push_back({parsed_strings_value.user_value().ToString(), Status::OK(), -1}); - } else { - int64_t curtime; - rocksdb::Env::Default()->GetCurrentTime(&curtime); - vss->push_back( - {parsed_strings_value.user_value().ToString(), Status::OK(), - parsed_strings_value.timestamp() - curtime >= 0 ? parsed_strings_value.timestamp() - curtime : -2}); - } - } - } else if (s.IsNotFound()) { - vss->push_back({std::string(), Status::NotFound(), -2}); - } else { - vss->clear(); - return s; - } - } - return Status::OK(); -} - -Status RedisStrings::MSet(const std::vector& kvs) { +Status Redis::MSet(const std::vector& kvs) { std::vector keys; keys.reserve(kvs.size()); for (const auto& kv : kvs) { @@ -699,19 +603,21 @@ Status RedisStrings::MSet(const std::vector& kvs) { MultiScopeRecordLock ml(lock_mgr_, keys); rocksdb::WriteBatch batch; for (const auto& kv : kvs) { + BaseKey base_key(kv.key); StringsValue strings_value(kv.value); - batch.Put(kv.key, strings_value.Encode()); + batch.Put(base_key.Encode(), strings_value.Encode()); } return db_->Write(default_write_options_, &batch); } -Status RedisStrings::MSetnx(const std::vector& kvs, int32_t* ret) { +Status Redis::MSetnx(const std::vector& kvs, int32_t* ret) { Status s; bool exists = false; *ret = 0; std::string value; for (const auto & kv : kvs) { - s = db_->Get(default_read_options_, kv.key, &value); + BaseKey base_key(kv.key); + s = 
db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (!parsed_strings_value.IsStale()) { @@ -729,18 +635,22 @@ Status RedisStrings::MSetnx(const std::vector& kvs, int32_t* ret) { return s; } -Status RedisStrings::Set(const Slice& key, const Slice& value) { +Status Redis::Set(const Slice& key, const Slice& value) { StringsValue strings_value(value); ScopeRecordLock l(lock_mgr_, key); - return db_->Put(default_write_options_, key, strings_value.Encode()); + + BaseKey base_key(key); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } -Status RedisStrings::Setxx(const Slice& key, const Slice& value, int32_t* ret, const int32_t ttl) { +Status Redis::Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl) { bool not_found = true; std::string old_value; StringsValue strings_value(value); + + BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); if (s.ok()) { ParsedStringsValue parsed_strings_value(old_value); if (!parsed_strings_value.IsStale()) { @@ -758,26 +668,27 @@ Status RedisStrings::Setxx(const Slice& key, const Slice& value, int32_t* ret, c if (ttl > 0) { strings_value.SetRelativeTimestamp(ttl); } - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } } -Status RedisStrings::SetBit(const Slice& key, int64_t offset, int32_t on, int32_t* ret) { +Status Redis::SetBit(const Slice& key, int64_t offset, int32_t on, int32_t* ret) { std::string meta_value; if (offset < 0) { return Status::InvalidArgument("offset < 0"); } + BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &meta_value); + Status s = db_->Get(default_read_options_, base_key.Encode(), 
&meta_value); if (s.ok() || s.IsNotFound()) { std::string data_value; - int32_t timestamp = 0; + uint64_t timestamp = 0; if (s.ok()) { ParsedStringsValue parsed_strings_value(&meta_value); if (!parsed_strings_value.IsStale()) { - data_value = parsed_strings_value.value().ToString(); - timestamp = parsed_strings_value.timestamp(); + data_value = parsed_strings_value.UserValue().ToString(); + timestamp = parsed_strings_value.Etime(); } } size_t byte = offset >> 3; @@ -803,14 +714,14 @@ Status RedisStrings::SetBit(const Slice& key, int64_t offset, int32_t on, int32_ data_value.append(1, byte_val); } StringsValue strings_value(data_value); - strings_value.set_timestamp(timestamp); - return db_->Put(rocksdb::WriteOptions(), key, strings_value.Encode()); + strings_value.SetEtime(timestamp); + return db_->Put(rocksdb::WriteOptions(), base_key.Encode(), strings_value.Encode()); } else { return s; } } -Status RedisStrings::Setex(const Slice& key, const Slice& value, int32_t ttl) { +Status Redis::Setex(const Slice& key, const Slice& value, int64_t ttl) { if (ttl <= 0) { return Status::InvalidArgument("invalid expire time"); } @@ -819,15 +730,19 @@ Status RedisStrings::Setex(const Slice& key, const Slice& value, int32_t ttl) { if (s != Status::OK()) { return s; } + + BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } -Status RedisStrings::Setnx(const Slice& key, const Slice& value, int32_t* ret, const int32_t ttl) { +Status Redis::Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl) { *ret = 0; std::string old_value; + + BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&old_value); if 
(parsed_strings_value.IsStale()) { @@ -835,7 +750,7 @@ Status RedisStrings::Setnx(const Slice& key, const Slice& value, int32_t* ret, c if (ttl > 0) { strings_value.SetRelativeTimestamp(ttl); } - s = db_->Put(default_write_options_, key, strings_value.Encode()); + s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); if (s.ok()) { *ret = 1; } @@ -845,7 +760,7 @@ Status RedisStrings::Setnx(const Slice& key, const Slice& value, int32_t* ret, c if (ttl > 0) { strings_value.SetRelativeTimestamp(ttl); } - s = db_->Put(default_write_options_, key, strings_value.Encode()); + s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); if (s.ok()) { *ret = 1; } @@ -853,23 +768,25 @@ Status RedisStrings::Setnx(const Slice& key, const Slice& value, int32_t* ret, c return s; } -Status RedisStrings::Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, - const int32_t ttl) { +Status Redis::Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, + int64_t ttl) { *ret = 0; std::string old_value; + + BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&old_value); if (parsed_strings_value.IsStale()) { *ret = 0; } else { - if (value.compare(parsed_strings_value.value()) == 0) { + if (value.compare(parsed_strings_value.UserValue()) == 0) { StringsValue strings_value(new_value); if (ttl > 0) { strings_value.SetRelativeTimestamp(ttl); } - s = db_->Put(default_write_options_, key, strings_value.Encode()); + s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); if (!s.ok()) { return s; } @@ -886,20 +803,22 @@ Status RedisStrings::Setvx(const Slice& key, const Slice& value, const Slice& ne return Status::OK(); } -Status RedisStrings::Delvx(const Slice& key, const 
Slice& value, int32_t* ret) { +Status Redis::Delvx(const Slice& key, const Slice& value, int32_t* ret) { *ret = 0; std::string old_value; + + BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&old_value); if (parsed_strings_value.IsStale()) { *ret = 0; return Status::NotFound("Stale"); } else { - if (value.compare(parsed_strings_value.value()) == 0) { + if (value.compare(parsed_strings_value.UserValue()) == 0) { *ret = 1; - return db_->Delete(default_write_options_, key); + return db_->Delete(default_write_options_, base_key.Encode()); } else { *ret = -1; } @@ -910,7 +829,7 @@ Status RedisStrings::Delvx(const Slice& key, const Slice& value, int32_t* ret) { return s; } -Status RedisStrings::Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret) { +Status Redis::Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret) { std::string old_value; std::string new_value; if (start_offset < 0) { @@ -918,9 +837,11 @@ Status RedisStrings::Setrange(const Slice& key, int64_t start_offset, const Slic } ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &old_value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); if (s.ok()) { - int32_t timestamp = 0; + uint64_t timestamp = 0; ParsedStringsValue parsed_strings_value(&old_value); parsed_strings_value.StripSuffix(); if (parsed_strings_value.IsStale()) { @@ -928,7 +849,7 @@ Status RedisStrings::Setrange(const Slice& key, int64_t start_offset, const Slic new_value = tmp.append(value.data()); *ret = static_cast(new_value.length()); } else { - timestamp = parsed_strings_value.timestamp(); + timestamp = parsed_strings_value.Etime(); if (static_cast(start_offset) > old_value.length()) { 
old_value.resize(start_offset); new_value = old_value.append(value.data()); @@ -943,19 +864,19 @@ Status RedisStrings::Setrange(const Slice& key, int64_t start_offset, const Slic } *ret = static_cast(new_value.length()); StringsValue strings_value(new_value); - strings_value.set_timestamp(timestamp); - return db_->Put(default_write_options_, key, strings_value.Encode()); + strings_value.SetEtime(timestamp); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else if (s.IsNotFound()) { std::string tmp(start_offset, '\0'); new_value = tmp.append(value.data()); *ret = static_cast(new_value.length()); StringsValue strings_value(new_value); - return db_->Put(default_write_options_, key, strings_value.Encode()); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } return s; } -Status RedisStrings::Strlen(const Slice& key, int32_t* len) { +Status Redis::Strlen(const Slice& key, int32_t* len) { std::string value; Status s = Get(key, &value); if (s.ok()) { @@ -1012,10 +933,12 @@ int32_t GetBitPos(const unsigned char* s, unsigned int bytes, int bit) { return pos; } -Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t* ret) { +Status Redis::BitPos(const Slice& key, int32_t bit, int64_t* ret) { Status s; std::string value; - s = db_->Get(default_read_options_, key, &value); + + BaseKey base_key(key); + s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { @@ -1047,10 +970,12 @@ Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t* ret) { return Status::OK(); } -Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret) { +Status Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret) { Status s; std::string value; - s = db_->Get(default_read_options_, key, &value); + + BaseKey base_key(key); + s = 
db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { @@ -1095,10 +1020,12 @@ Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t start_offset, return Status::OK(); } -Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret) { +Status Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret) { Status s; std::string value; - s = db_->Get(default_read_options_, key, &value); + + BaseKey base_key(key); + s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { @@ -1152,129 +1079,22 @@ Status RedisStrings::BitPos(const Slice& key, int32_t bit, int64_t start_offset, return Status::OK(); } -Status RedisStrings::PKSetexAt(const Slice& key, const Slice& value, int32_t timestamp) { +//TODO(wangshaoyi): timestamp uint64_t +Status Redis::PKSetexAt(const Slice& key, const Slice& value, int64_t timestamp) { StringsValue strings_value(value); - ScopeRecordLock l(lock_mgr_, key); - strings_value.set_timestamp(timestamp); - return db_->Put(default_write_options_, key, strings_value.Encode()); -} - -Status RedisStrings::PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* kvs, std::string* next_key) { - next_key->clear(); - - std::string key; - std::string value; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) > 0)) { - return Status::InvalidArgument("error in 
given range"); - } - - // Note: This is a string type and does not need to pass the column family as - // a parameter, use the default column family - rocksdb::Iterator* it = db_->NewIterator(iterator_options); - if (start_no_limit) { - it->SeekToFirst(); - } else { - it->Seek(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedStringsValue parsed_strings_value(it->value()); - if (parsed_strings_value.IsStale()) { - it->Next(); - } else { - key = it->key().ToString(); - value = parsed_strings_value.value().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - kvs->push_back({key, value}); - } - remain--; - it->Next(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedStringsValue parsed_strings_value(it->value()); - if (parsed_strings_value.IsStale()) { - it->Next(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); + BaseKey base_key(key); + ScopeRecordLock l(lock_mgr_, key); + strings_value.SetEtime(uint64_t(timestamp)); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } -Status RedisStrings::PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* kvs, std::string* next_key) { - std::string key; +Status Redis::StringsExpire(const Slice& key, int64_t ttl) { std::string value; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) < 0)) { - return Status::InvalidArgument("error in given range"); - } - - // Note: This is a string type and does 
not need to pass the column family as - // a parameter, use the default column family - rocksdb::Iterator* it = db_->NewIterator(iterator_options); - if (start_no_limit) { - it->SeekToLast(); - } else { - it->SeekForPrev(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedStringsValue parsed_strings_value(it->value()); - if (parsed_strings_value.IsStale()) { - it->Prev(); - } else { - key = it->key().ToString(); - value = parsed_strings_value.value().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - kvs->push_back({key, value}); - } - remain--; - it->Prev(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedStringsValue parsed_strings_value(it->value()); - if (parsed_strings_value.IsStale()) { - it->Prev(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); -} -Status RedisStrings::Expire(const Slice& key, int32_t ttl) { - std::string value; + BaseKey base_key(key); ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &value); + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { @@ -1282,157 +1102,88 @@ Status RedisStrings::Expire(const Slice& key, int32_t ttl) { } if (ttl > 0) { parsed_strings_value.SetRelativeTimestamp(ttl); - return db_->Put(default_write_options_, key, value); + return db_->Put(default_write_options_, base_key.Encode(), value); } else { - return db_->Delete(default_write_options_, key); + return db_->Delete(default_write_options_, base_key.Encode()); } } return s; } -Status RedisStrings::Del(const Slice& key) { +Status Redis::StringsDel(const Slice& key) { std::string value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &value); + + BaseKey 
base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { return Status::NotFound("Stale"); } - return db_->Delete(default_write_options_, key); + return db_->Delete(default_write_options_, base_key.Encode()); } return s; } -bool RedisStrings::Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, - int64_t* count, std::string* next_key) { - std::string key; - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - // Note: This is a string type and does not need to pass the column family as - // a parameter, use the default column family - rocksdb::Iterator* it = db_->NewIterator(iterator_options); - - it->Seek(start_key); - while (it->Valid() && (*count) > 0) { - ParsedStringsValue parsed_strings_value(it->value()); - if (parsed_strings_value.IsStale()) { - it->Next(); - continue; - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - (*count)--; - it->Next(); - } - } - - std::string prefix = isTailWildcard(pattern) ? 
pattern.substr(0, pattern.size() - 1) : ""; - if (it->Valid() && (it->key().compare(prefix) <= 0 || it->key().starts_with(prefix))) { - is_finish = false; - *next_key = it->key().ToString(); - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -bool RedisStrings::PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) { - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options); - - it->Seek(start_key); - while (it->Valid() && (*leftover_visits) > 0) { - ParsedStringsValue parsed_strings_value(it->value()); - if (parsed_strings_value.IsStale()) { - it->Next(); - continue; - } else { - if (min_timestamp < parsed_strings_value.timestamp() && parsed_strings_value.timestamp() < max_timestamp) { - keys->push_back(it->key().ToString()); - } - (*leftover_visits)--; - it->Next(); - } - } - - if (it->Valid()) { - is_finish = false; - *next_key = it->key().ToString(); - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -Status RedisStrings::Expireat(const Slice& key, int32_t timestamp) { +Status Redis::StringsExpireat(const Slice& key, int64_t timestamp) { std::string value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { return Status::NotFound("Stale"); } else { if (timestamp > 0) { - parsed_strings_value.set_timestamp(timestamp); - return db_->Put(default_write_options_, key, value); + parsed_strings_value.SetEtime(static_cast(timestamp)); + return 
db_->Put(default_write_options_, base_key.Encode(), value); } else { - return db_->Delete(default_write_options_, key); + return db_->Delete(default_write_options_, base_key.Encode()); } } } return s; } -Status RedisStrings::Persist(const Slice& key) { +Status Redis::StringsPersist(const Slice& key) { std::string value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { return Status::NotFound("Stale"); } else { - int32_t timestamp = parsed_strings_value.timestamp(); + uint64_t timestamp = parsed_strings_value.Etime(); if (timestamp == 0) { return Status::NotFound("Not have an associated timeout"); } else { - parsed_strings_value.set_timestamp(0); - return db_->Put(default_write_options_, key, value); + parsed_strings_value.SetEtime(0); + return db_->Put(default_write_options_, base_key.Encode(), value); } } } return s; } -Status RedisStrings::TTL(const Slice& key, int64_t* timestamp) { +Status Redis::StringsTTL(const Slice& key, int64_t* timestamp) { std::string value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &value); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); if (s.ok()) { ParsedStringsValue parsed_strings_value(&value); if (parsed_strings_value.IsStale()) { *timestamp = -2; return Status::NotFound("Stale"); } else { - *timestamp = parsed_strings_value.timestamp(); + *timestamp = parsed_strings_value.Etime(); if (*timestamp == 0) { *timestamp = -1; } else { @@ -1447,7 +1198,7 @@ Status RedisStrings::TTL(const Slice& key, int64_t* timestamp) { return s; } -void RedisStrings::ScanDatabase() { +void Redis::ScanStrings() { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); 
@@ -1455,17 +1206,18 @@ void RedisStrings::ScanDatabase() { iterator_options.fill_cache = false; auto current_time = static_cast(time(nullptr)); - LOG(INFO) << "***************String Data***************"; + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " " << "String Data***************"; auto iter = db_->NewIterator(iterator_options); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + ParsedBaseKey parsed_strings_key(iter->key()); ParsedStringsValue parsed_strings_value(iter->value()); int32_t survival_time = 0; - if (parsed_strings_value.timestamp() != 0) { + if (parsed_strings_value.Etime() != 0) { survival_time = - parsed_strings_value.timestamp() - current_time > 0 ? parsed_strings_value.timestamp() - current_time : -1; + parsed_strings_value.Etime() - current_time > 0 ? parsed_strings_value.Etime() - current_time : -1; } - LOG(INFO) << fmt::format("[key : {:<30}] [value : {:<30}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", iter->key().ToString(), - parsed_strings_value.value().ToString(), parsed_strings_value.timestamp(), parsed_strings_value.version(), + LOG(INFO) << fmt::format("[key : {:<30}] [value : {:<30}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", parsed_strings_key.Key().ToString(), + parsed_strings_value.UserValue().ToString(), parsed_strings_value.Etime(), parsed_strings_value.Version(), survival_time); } diff --git a/src/storage/src/redis_strings.h b/src/storage/src/redis_strings.h deleted file mode 100644 index 2cb0bdb13f..0000000000 --- a/src/storage/src/redis_strings.h +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
- -#ifndef SRC_REDIS_STRINGS_H_ -#define SRC_REDIS_STRINGS_H_ - -#include -#include -#include - -#include "src/redis.h" - -namespace storage { - -class RedisStrings : public Redis { - public: - RedisStrings(Storage* s, const DataType& type); - ~RedisStrings() override = default; - - // Common Commands - Status Open(const StorageOptions& storage_options, const std::string& db_path) override; - Status CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type = kMetaAndData) override; - Status GetProperty(const std::string& property, uint64_t* out) override; - Status ScanKeyNum(KeyInfo* key_info) override; - Status ScanKeys(const std::string& pattern, std::vector* keys) override; - Status PKPatternMatchDel(const std::string& pattern, int32_t* ret) override; - - // Strings Commands - Status Append(const Slice& key, const Slice& value, int32_t* ret); - Status BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, bool have_range); - Status BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, std::string &value_to_dest, int64_t* ret); - Status Decrby(const Slice& key, int64_t value, int64_t* ret); - Status Get(const Slice& key, std::string* value); - Status GetWithTTL(const Slice& key, std::string* value, int64_t* ttl); - Status GetBit(const Slice& key, int64_t offset, int32_t* ret); - Status Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret); - Status GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret, std::string* value, int64_t* ttl); - Status GetSet(const Slice& key, const Slice& value, std::string* old_value); - Status Incrby(const Slice& key, int64_t value, int64_t* ret); - Status Incrbyfloat(const Slice& key, const Slice& value, std::string* ret); - Status MGet(const std::vector& keys, std::vector* vss); - Status MGetWithTTL(const std::vector& keys, std::vector* vss); - Status MSet(const 
std::vector& kvs); - Status MSetnx(const std::vector& kvs, int32_t* ret); - Status Set(const Slice& key, const Slice& value); - Status Setxx(const Slice& key, const Slice& value, int32_t* ret, int32_t ttl = 0); - Status SetBit(const Slice& key, int64_t offset, int32_t value, int32_t* ret); - Status Setex(const Slice& key, const Slice& value, int32_t ttl); - Status Setnx(const Slice& key, const Slice& value, int32_t* ret, int32_t ttl = 0); - Status Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, int32_t ttl = 0); - Status Delvx(const Slice& key, const Slice& value, int32_t* ret); - Status Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret); - Status Strlen(const Slice& key, int32_t* len); - - Status BitPos(const Slice& key, int32_t bit, int64_t* ret); - Status BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret); - Status BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret); - Status PKSetexAt(const Slice& key, const Slice& value, int32_t timestamp); - Status PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* kvs, std::string* next_key); - Status PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* kvs, std::string* next_key); - - // Keys Commands - Status Expire(const Slice& key, int32_t ttl) override; - Status Del(const Slice& key) override; - bool Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, int64_t* count, - std::string* next_key) override; - bool PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) override; - Status Expireat(const Slice& key, int32_t timestamp) override; - Status Persist(const Slice& key) override; - Status TTL(const Slice& key, int64_t* timestamp) override; - - 
// Iterate all data - void ScanDatabase(); -}; - -} // namespace storage -#endif // SRC_REDIS_STRINGS_H_ diff --git a/src/storage/src/redis_zsets.cc b/src/storage/src/redis_zsets.cc index 4da415901f..9cde3cfaf6 100644 --- a/src/storage/src/redis_zsets.cc +++ b/src/storage/src/redis_zsets.cc @@ -3,110 +3,28 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "src/redis_zsets.h" +#include #include #include #include #include +#include #include #include -#include "iostream" +#include "src/base_key_format.h" +#include "src/base_data_value_format.h" +#include "pstd/include/pika_codis_slot.h" #include "src/scope_record_lock.h" #include "src/scope_snapshot.h" #include "src/zsets_filter.h" +#include "src/redis.h" #include "storage/util.h" namespace storage { - -rocksdb::Comparator* ZSetsScoreKeyComparator() { - static ZSetsScoreKeyComparatorImpl zsets_score_key_compare; - return &zsets_score_key_compare; -} - -RedisZSets::RedisZSets(Storage* const s, const DataType& type) : Redis(s, type) {} - -Status RedisZSets::Open(const StorageOptions& storage_options, const std::string& db_path) { - statistics_store_->SetCapacity(storage_options.statistics_max_size); - small_compaction_threshold_ = storage_options.small_compaction_threshold; - small_compaction_duration_threshold_ = storage_options.small_compaction_duration_threshold; - - rocksdb::Options ops(storage_options.options); - Status s = rocksdb::DB::Open(ops, db_path, &db_); - if (s.ok()) { - rocksdb::ColumnFamilyHandle *dcf = nullptr; - rocksdb::ColumnFamilyHandle *scf = nullptr; - s = db_->CreateColumnFamily(rocksdb::ColumnFamilyOptions(), "data_cf", &dcf); - if (!s.ok()) { - return s; - } - rocksdb::ColumnFamilyOptions score_cf_ops; - score_cf_ops.comparator = ZSetsScoreKeyComparator(); - s = db_->CreateColumnFamily(score_cf_ops, "score_cf", &scf); - if (!s.ok()) { - return s; - } - delete scf; - delete dcf; 
- delete db_; - } - - rocksdb::DBOptions db_ops(storage_options.options); - rocksdb::ColumnFamilyOptions meta_cf_ops(storage_options.options); - rocksdb::ColumnFamilyOptions data_cf_ops(storage_options.options); - rocksdb::ColumnFamilyOptions score_cf_ops(storage_options.options); - meta_cf_ops.compaction_filter_factory = std::make_shared(); - data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_); - score_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_); - score_cf_ops.comparator = ZSetsScoreKeyComparator(); - - // use the bloom filter policy to reduce disk reads - rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); - table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); - rocksdb::BlockBasedTableOptions meta_cf_table_ops(table_ops); - rocksdb::BlockBasedTableOptions data_cf_table_ops(table_ops); - rocksdb::BlockBasedTableOptions score_cf_table_ops(table_ops); - if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { - meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - score_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - } - meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(meta_cf_table_ops)); - data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(data_cf_table_ops)); - score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(score_cf_table_ops)); - - std::vector column_families; - column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, meta_cf_ops); - column_families.emplace_back("data_cf", data_cf_ops); - column_families.emplace_back("score_cf", score_cf_ops); - return rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); -} - -Status RedisZSets::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, const 
ColumnFamilyType& type) { - if (type == kMeta || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[0], begin, end); - } - if (type == kData || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[1], begin, end); - db_->CompactRange(default_compact_range_options_, handles_[2], begin, end); - } - return Status::OK(); -} - -Status RedisZSets::GetProperty(const std::string& property, uint64_t* out) { - std::string value; - db_->GetProperty(handles_[0], property, &value); - *out = std::strtoull(value.c_str(), nullptr, 10); - db_->GetProperty(handles_[1], property, &value); - *out += std::strtoull(value.c_str(), nullptr, 10); - db_->GetProperty(handles_[2], property, &value); - *out += std::strtoull(value.c_str(), nullptr, 10); - return Status::OK(); -} - -Status RedisZSets::ScanKeyNum(KeyInfo* key_info) { +Status Redis::ScanZsetsKeyNum(KeyInfo* key_info) { uint64_t keys = 0; uint64_t expires = 0; uint64_t ttl_sum = 0; @@ -121,16 +39,16 @@ Status RedisZSets::ScanKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kZsetsMetaCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedZSetsMetaValue parsed_zsets_meta_value(iter->value()); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { + if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { invaild_keys++; } else { keys++; if (!parsed_zsets_meta_value.IsPermanentSurvival()) { expires++; - ttl_sum += parsed_zsets_meta_value.timestamp() - curtime; + ttl_sum += parsed_zsets_meta_value.Etime() - curtime; } } } @@ -143,29 +61,7 @@ Status RedisZSets::ScanKeyNum(KeyInfo* key_info) { return Status::OK(); } -Status RedisZSets::ScanKeys(const std::string& pattern, std::vector* keys) { - std::string key; - 
rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); - for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - ParsedZSetsMetaValue parsed_zsets_meta_value(iter->value()); - if (!parsed_zsets_meta_value.IsStale() && parsed_zsets_meta_value.count() != 0) { - key = iter->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - } - } - delete iter; - return Status::OK(); -} - -Status RedisZSets::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { +Status Redis::ZsetsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -177,16 +73,16 @@ Status RedisZSets::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { int32_t total_delete = 0; Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kZsetsMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { - key = iter->key().ToString(); + ParsedBaseMetaKey meta_key(iter->key().ToString()); meta_value = iter->value().ToString(); ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (!parsed_zsets_meta_value.IsStale() && (parsed_zsets_meta_value.count() != 0) && - (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0)) { + if (!parsed_zsets_meta_value.IsStale() && (parsed_zsets_meta_value.Count() != 0) && + (StringMatch(pattern.data(), pattern.size(), meta_key.Key().data(), meta_key.Key().size(), 0) != 0)) { parsed_zsets_meta_value.InitialMetaValue(); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], key, 
meta_value); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -212,26 +108,28 @@ Status RedisZSets::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { return s; } -Status RedisZSets::ZPopMax(const Slice& key, const int64_t count, std::vector* score_members) { +Status Redis::ZPopMax(const Slice& key, const int64_t count, std::vector* score_members) { uint32_t statistic = 0; score_members->clear(); rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int64_t num = parsed_zsets_meta_value.count(); + int64_t num = parsed_zsets_meta_value.Count(); num = num <= count ? 
num : count; - int32_t version = parsed_zsets_meta_value.version(); + uint64_t version = parsed_zsets_meta_value.Version(); ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::max(), Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kZsetsScoreCF]); int32_t del_cnt = 0; for (iter->SeekForPrev(zsets_score_key.Encode()); iter->Valid() && del_cnt < num; iter->Prev()) { ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); @@ -240,17 +138,17 @@ Status RedisZSets::ZPopMax(const Slice& key, const int64_t count, std::vectorkey()); + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], iter->key()); } delete iter; if (!parsed_zsets_meta_value.CheckModifyCount(-del_cnt)){ return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } } else { @@ -258,26 +156,28 @@ Status RedisZSets::ZPopMax(const Slice& key, const int64_t count, std::vector* score_members) { +Status Redis::ZPopMin(const Slice& key, const int64_t count, std::vector* score_members) { uint32_t statistic = 0; score_members->clear(); rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { 
ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int64_t num = parsed_zsets_meta_value.count(); + int64_t num = parsed_zsets_meta_value.Count(); num = num <= count ? num : count; - int32_t version = parsed_zsets_meta_value.version(); + uint64_t version = parsed_zsets_meta_value.Version(); ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::lowest(), Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kZsetsScoreCF]); int32_t del_cnt = 0; for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && del_cnt < num; iter->Next()) { ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); @@ -286,17 +186,17 @@ Status RedisZSets::ZPopMin(const Slice& key, const int64_t count, std::vectorkey()); + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], iter->key()); } delete iter; if (!parsed_zsets_meta_value.CheckModifyCount(-del_cnt)){ return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } } else { @@ -304,7 +204,7 @@ Status RedisZSets::ZPopMin(const Slice& key, const int64_t count, std::vector& score_members, int32_t* ret) { +Status Redis::ZAdd(const Slice& key, const std::vector& 
score_members, int32_t* ret) { *ret = 0; uint32_t statistic = 0; std::unordered_set unique; @@ -317,20 +217,22 @@ Status RedisZSets::ZAdd(const Slice& key, const std::vector& score_ } char score_buf[8]; - int32_t version = 0; + uint64_t version = 0; std::string meta_value; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { bool vaild = true; ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { + if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { vaild = false; version = parsed_zsets_meta_value.InitialMetaValue(); } else { vaild = true; - version = parsed_zsets_meta_value.version(); + version = parsed_zsets_meta_value.Version(); } int32_t cnt = 0; @@ -339,8 +241,10 @@ Status RedisZSets::ZAdd(const Slice& key, const std::vector& score_ bool not_found = true; ZSetsMemberKey zsets_member_key(key, version, sm.member); if (vaild) { - s = db_->Get(default_read_options_, handles_[1], zsets_member_key.Encode(), &data_value); + s = db_->Get(default_read_options_, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value); if (s.ok()) { + ParsedBaseDataValue parsed_value(&data_value); + parsed_value.StripSuffix(); not_found = false; uint64_t tmp = DecodeFixed64(data_value.data()); const void* ptr_tmp = reinterpret_cast(&tmp); @@ -349,7 +253,7 @@ Status RedisZSets::ZAdd(const Slice& key, const std::vector& score_ continue; } else { ZSetsScoreKey zsets_score_key(key, version, old_score, sm.member); - batch.Delete(handles_[2], zsets_score_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode()); // delete old zsets_score_key and overwirte zsets_member_key // but in different column_families so 
we accumulative 1 statistic++; @@ -361,10 +265,12 @@ Status RedisZSets::ZAdd(const Slice& key, const std::vector& score_ const void* ptr_score = reinterpret_cast(&sm.score); EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); - batch.Put(handles_[1], zsets_member_key.Encode(), Slice(score_buf, sizeof(uint64_t))); + BaseDataValue zsets_member_i_val(Slice(score_buf, sizeof(uint64_t))); + batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), zsets_member_i_val.Encode()); ZSetsScoreKey zsets_score_key(key, version, sm.score, sm.member); - batch.Put(handles_[2], zsets_score_key.Encode(), Slice()); + BaseDataValue zsets_score_i_val(Slice{}); + batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); if (not_found) { cnt++; } @@ -373,53 +279,57 @@ Status RedisZSets::ZAdd(const Slice& key, const std::vector& score_ return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); *ret = cnt; } else if (s.IsNotFound()) { char buf[4]; EncodeFixed32(buf, filtered_score_members.size()); ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[0], key, zsets_meta_value.Encode()); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); for (const auto& sm : filtered_score_members) { ZSetsMemberKey zsets_member_key(key, version, sm.member); const void* ptr_score = reinterpret_cast(&sm.score); EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); - batch.Put(handles_[1], zsets_member_key.Encode(), Slice(score_buf, sizeof(uint64_t))); + BaseDataValue zsets_member_i_val(Slice(score_buf, sizeof(uint64_t))); + batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), zsets_member_i_val.Encode()); ZSetsScoreKey zsets_score_key(key, version, sm.score, sm.member); - batch.Put(handles_[2], 
zsets_score_key.Encode(), Slice()); + BaseDataValue zsets_score_i_val(Slice{}); + batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); } *ret = static_cast(filtered_score_members.size()); } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } -Status RedisZSets::ZCard(const Slice& key, int32_t* card) { +Status Redis::ZCard(const Slice& key, int32_t* card) { *card = 0; std::string meta_value; - Status s = db_->Get(default_read_options_, key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { *card = 0; return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { *card = 0; return Status::NotFound(); } else { - *card = parsed_zsets_meta_value.count(); + *card = parsed_zsets_meta_value.Count(); } } return s; } -Status RedisZSets::ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret) { +Status Redis::ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret) { *ret = 0; rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot = nullptr; @@ -428,22 +338,24 @@ Status RedisZSets::ZCount(const Slice& key, double min, double max, bool left_cl ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if 
(parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_zsets_meta_value.version(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t cnt = 0; int32_t cur_index = 0; - int32_t stop_index = parsed_zsets_meta_value.count() - 1; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; ScoreMember score_member; ZSetsScoreKey zsets_score_key(key, version, min, Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { bool left_pass = false; bool right_pass = false; @@ -451,7 +363,7 @@ Status RedisZSets::ZCount(const Slice& key, double min, double max, bool left_cl if (parsed_zsets_score_key.key() != key) { break; } - if (parsed_zsets_score_key.version() != version) { + if (parsed_zsets_score_key.Version() != version) { break; } if ((left_close && min <= parsed_zsets_score_key.score()) || @@ -475,33 +387,37 @@ Status RedisZSets::ZCount(const Slice& key, double min, double max, bool left_cl return s; } -Status RedisZSets::ZIncrby(const Slice& key, const Slice& member, double increment, double* ret) { +Status Redis::ZIncrby(const Slice& key, const Slice& member, double increment, double* ret) { *ret = 0; uint32_t statistic = 0; double score = 0; char score_buf[8]; - int32_t version = 0; + uint64_t version = 0; std::string meta_value; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = 
db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { + if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { version = parsed_zsets_meta_value.InitialMetaValue(); } else { - version = parsed_zsets_meta_value.version(); + version = parsed_zsets_meta_value.Version(); } std::string data_value; ZSetsMemberKey zsets_member_key(key, version, member); - s = db_->Get(default_read_options_, handles_[1], zsets_member_key.Encode(), &data_value); + s = db_->Get(default_read_options_, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value); if (s.ok()) { + ParsedBaseDataValue parsed_value(&data_value); + parsed_value.StripSuffix(); uint64_t tmp = DecodeFixed64(data_value.data()); const void* ptr_tmp = reinterpret_cast(&tmp); double old_score = *reinterpret_cast(ptr_tmp); score = old_score + increment; ZSetsScoreKey zsets_score_key(key, version, old_score, member); - batch.Delete(handles_[2], zsets_score_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode()); // delete old zsets_score_key and overwirte zsets_member_key // but in different column_families so we accumulative 1 statistic++; @@ -511,7 +427,7 @@ Status RedisZSets::ZIncrby(const Slice& key, const Slice& member, double increme return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(1); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); } else { return s; } @@ -520,7 +436,7 @@ Status RedisZSets::ZIncrby(const Slice& key, const Slice& member, double increme EncodeFixed32(buf, 1); ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[0], key, zsets_meta_value.Encode()); + 
batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); score = increment; } else { return s; @@ -528,17 +444,19 @@ Status RedisZSets::ZIncrby(const Slice& key, const Slice& member, double increme ZSetsMemberKey zsets_member_key(key, version, member); const void* ptr_score = reinterpret_cast(&score); EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); - batch.Put(handles_[1], zsets_member_key.Encode(), Slice(score_buf, sizeof(uint64_t))); + BaseDataValue zsets_member_i_val(Slice(score_buf, sizeof(uint64_t))); + batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), zsets_member_i_val.Encode()); ZSetsScoreKey zsets_score_key(key, version, score, member); - batch.Put(handles_[2], zsets_score_key.Encode(), Slice()); + BaseDataValue zsets_score_i_val(Slice{}); + batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); *ret = score; s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } -Status RedisZSets::ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { +Status Redis::ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { score_members->clear(); rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot = nullptr; @@ -547,16 +465,18 @@ Status RedisZSets::ZRange(const Slice& key, int32_t start, int32_t stop, std::ve ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if 
(parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t count = parsed_zsets_meta_value.count(); - int32_t version = parsed_zsets_meta_value.version(); + int32_t count = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t start_index = start >= 0 ? start : count + start; int32_t stop_index = stop >= 0 ? stop : count + stop; start_index = start_index <= 0 ? 0 : start_index; @@ -566,9 +486,10 @@ Status RedisZSets::ZRange(const Slice& key, int32_t start, int32_t stop, std::ve } int32_t cur_index = 0; ScoreMember score_member; + ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::lowest(), Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { if (cur_index >= start_index) { ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); @@ -583,7 +504,7 @@ Status RedisZSets::ZRange(const Slice& key, int32_t start, int32_t stop, std::ve return s; } -Status RedisZSets::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector* score_members, +Status Redis::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector* score_members, int64_t* ttl) { score_members->clear(); rocksdb::ReadOptions read_options; @@ -593,16 +514,17 @@ Status RedisZSets::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, key, &meta_value); + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue 
parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.count() == 0) { + if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); } else { // ttl - *ttl = parsed_zsets_meta_value.timestamp(); + *ttl = parsed_zsets_meta_value.Etime(); if (*ttl == 0) { *ttl = -1; } else { @@ -611,10 +533,10 @@ Status RedisZSets::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, *ttl = *ttl - curtime >= 0 ? *ttl - curtime : -2; } - int32_t count = parsed_zsets_meta_value.count(); - int32_t version = parsed_zsets_meta_value.version(); + int32_t count = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t start_index = start >= 0 ? start : count + start; - int32_t stop_index = stop >= 0 ? stop : count + stop; + int32_t stop_index = stop >= 0 ? stop : count + stop; start_index = start_index <= 0 ? 0 : start_index; stop_index = stop_index >= count ? 
count - 1 : stop_index; if (start_index > stop_index @@ -626,7 +548,8 @@ Status RedisZSets::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, ScoreMember score_member; ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::lowest(), Slice()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { @@ -643,7 +566,7 @@ Status RedisZSets::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, return s; } -Status RedisZSets::ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, +Status Redis::ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count, int64_t offset, std::vector* score_members) { score_members->clear(); rocksdb::ReadOptions read_options; @@ -652,22 +575,24 @@ Status RedisZSets::ZRangebyscore(const Slice& key, double min, double max, bool std::string meta_value; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else if (offset >= 0 && count != 0) { - int32_t version = parsed_zsets_meta_value.version(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t index = 0; - int32_t stop_index = parsed_zsets_meta_value.count() - 1; + int32_t stop_index = 
parsed_zsets_meta_value.Count() - 1; int64_t skipped = 0; ScoreMember score_member; ZSetsScoreKey zsets_score_key(key, version, min, Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && index <= stop_index; iter->Next(), ++index) { bool left_pass = false; bool right_pass = false; @@ -675,7 +600,7 @@ Status RedisZSets::ZRangebyscore(const Slice& key, double min, double max, bool if (parsed_zsets_score_key.key() != key) { break; } - if (parsed_zsets_score_key.version() != version) { + if (parsed_zsets_score_key.Version() != version) { break; } if ((left_close && min <= parsed_zsets_score_key.score()) || @@ -709,7 +634,7 @@ Status RedisZSets::ZRangebyscore(const Slice& key, double min, double max, bool return s; } -Status RedisZSets::ZRank(const Slice& key, const Slice& member, int32_t* rank) { +Status Redis::ZRank(const Slice& key, const Slice& member, int32_t* rank) { *rank = -1; rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot = nullptr; @@ -717,22 +642,24 @@ Status RedisZSets::ZRank(const Slice& key, const Slice& member, int32_t* rank) { std::string meta_value; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { bool found = false; - int32_t 
version = parsed_zsets_meta_value.version(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t index = 0; - int32_t stop_index = parsed_zsets_meta_value.count() - 1; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; ScoreMember score_member; ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::lowest(), Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && index <= stop_index; iter->Next(), ++index) { ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); if (parsed_zsets_score_key.member().compare(member) == 0) { @@ -752,7 +679,7 @@ Status RedisZSets::ZRank(const Slice& key, const Slice& member, int32_t* rank) { return s; } -Status RedisZSets::ZRem(const Slice& key, const std::vector& members, int32_t* ret) { +Status Redis::ZRem(const Slice& key, const std::vector& members, int32_t* ret) { *ret = 0; uint32_t statistic = 0; std::unordered_set unique; @@ -767,30 +694,34 @@ Status RedisZSets::ZRem(const Slice& key, const std::vector& member std::string meta_value; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { int32_t del_cnt = 0; std::string data_value; - int32_t version = parsed_zsets_meta_value.version(); + 
uint64_t version = parsed_zsets_meta_value.Version(); for (const auto& member : filtered_members) { ZSetsMemberKey zsets_member_key(key, version, member); - s = db_->Get(default_read_options_, handles_[1], zsets_member_key.Encode(), &data_value); + s = db_->Get(default_read_options_, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value); if (s.ok()) { del_cnt++; statistic++; + ParsedBaseDataValue parsed_value(&data_value); + parsed_value.StripSuffix(); uint64_t tmp = DecodeFixed64(data_value.data()); const void* ptr_tmp = reinterpret_cast(&tmp); double score = *reinterpret_cast(ptr_tmp); - batch.Delete(handles_[1], zsets_member_key.Encode()); + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); ZSetsScoreKey zsets_score_key(key, version, score, member); - batch.Delete(handles_[2], zsets_score_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode()); } else if (!s.IsNotFound()) { return s; } @@ -800,35 +731,37 @@ Status RedisZSets::ZRem(const Slice& key, const std::vector& member return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); } } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } -Status RedisZSets::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret) { +Status Redis::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret) { *ret = 0; uint32_t statistic = 0; std::string meta_value; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), 
&meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { std::string member; int32_t del_cnt = 0; int32_t cur_index = 0; - int32_t count = parsed_zsets_meta_value.count(); - int32_t version = parsed_zsets_meta_value.version(); + int32_t count = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t start_index = start >= 0 ? start : count + start; int32_t stop_index = stop >= 0 ? stop : count + stop; start_index = start_index <= 0 ? 0 : start_index; @@ -837,14 +770,14 @@ Status RedisZSets::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop return s; } ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::lowest(), Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { if (cur_index >= start_index) { ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); ZSetsMemberKey zsets_member_key(key, version, parsed_zsets_score_key.member()); - batch.Delete(handles_[1], zsets_member_key.Encode()); - batch.Delete(handles_[2], iter->key()); + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], iter->key()); del_cnt++; statistic++; } @@ -855,39 +788,41 @@ Status RedisZSets::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - 
batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); } } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } -Status RedisZSets::ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, +Status Redis::ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret) { *ret = 0; uint32_t statistic = 0; std::string meta_value; rocksdb::WriteBatch batch; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { std::string member; int32_t del_cnt = 0; int32_t cur_index = 0; - int32_t stop_index = parsed_zsets_meta_value.count() - 1; - int32_t version = parsed_zsets_meta_value.version(); + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; + uint64_t version = parsed_zsets_meta_value.Version(); ZSetsScoreKey zsets_score_key(key, version, min, Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { bool left_pass = 
false; bool right_pass = false; @@ -895,7 +830,7 @@ Status RedisZSets::ZRemrangebyscore(const Slice& key, double min, double max, bo if (parsed_zsets_score_key.key() != key) { break; } - if (parsed_zsets_score_key.version() != version) { + if (parsed_zsets_score_key.Version() != version) { break; } if ((left_close && min <= parsed_zsets_score_key.score()) || @@ -908,8 +843,8 @@ Status RedisZSets::ZRemrangebyscore(const Slice& key, double min, double max, bo } if (left_pass && right_pass) { ZSetsMemberKey zsets_member_key(key, version, parsed_zsets_score_key.member()); - batch.Delete(handles_[1], zsets_member_key.Encode()); - batch.Delete(handles_[2], iter->key()); + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], iter->key()); del_cnt++; statistic++; } @@ -923,17 +858,17 @@ Status RedisZSets::ZRemrangebyscore(const Slice& key, double min, double max, bo return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); } } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } -Status RedisZSets::ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { +Status Redis::ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { score_members->clear(); rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot = nullptr; @@ -942,16 +877,18 @@ Status RedisZSets::ZRevrange(const Slice& key, int32_t start, int32_t stop, std: ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], 
base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t count = parsed_zsets_meta_value.count(); - int32_t version = parsed_zsets_meta_value.version(); + int32_t count = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t start_index = stop >= 0 ? count - stop - 1 : -stop - 1; int32_t stop_index = start >= 0 ? count - start - 1 : -start - 1; start_index = start_index <= 0 ? 0 : start_index; @@ -962,8 +899,8 @@ Status RedisZSets::ZRevrange(const Slice& key, int32_t start, int32_t stop, std: int32_t cur_index = count - 1; ScoreMember score_member; ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::max(), Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->SeekForPrev(zsets_score_key.Encode()); iter->Valid() && cur_index >= start_index; iter->Prev(), --cur_index) { if (cur_index <= stop_index) { @@ -979,7 +916,7 @@ Status RedisZSets::ZRevrange(const Slice& key, int32_t start, int32_t stop, std: return s; } -Status RedisZSets::ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, +Status Redis::ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count, int64_t offset, std::vector* score_members) { score_members->clear(); rocksdb::ReadOptions read_options; @@ -988,21 +925,23 @@ Status RedisZSets::ZRevrangebyscore(const Slice& key, double min, double max, bo std::string meta_value; 
ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else if (offset >= 0 && count != 0) { - int32_t version = parsed_zsets_meta_value.version(); - int32_t left = parsed_zsets_meta_value.count(); + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t left = parsed_zsets_meta_value.Count(); int64_t skipped = 0; ScoreMember score_member; ZSetsScoreKey zsets_score_key(key, version, std::nextafter(max, std::numeric_limits::max()), Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->SeekForPrev(zsets_score_key.Encode()); iter->Valid() && left > 0; iter->Prev(), --left) { bool left_pass = false; bool right_pass = false; @@ -1010,7 +949,7 @@ Status RedisZSets::ZRevrangebyscore(const Slice& key, double min, double max, bo if (parsed_zsets_score_key.key() != key) { break; } - if (parsed_zsets_score_key.version() != version) { + if (parsed_zsets_score_key.Version() != version) { break; } if ((left_close && min <= parsed_zsets_score_key.score()) || @@ -1044,7 +983,7 @@ Status RedisZSets::ZRevrangebyscore(const Slice& key, double min, double max, bo return s; } -Status RedisZSets::ZRevrank(const Slice& key, const Slice& member, int32_t* rank) { +Status Redis::ZRevrank(const Slice& key, const Slice& member, int32_t* 
rank) { *rank = -1; rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot = nullptr; @@ -1053,21 +992,23 @@ Status RedisZSets::ZRevrank(const Slice& key, const Slice& member, int32_t* rank ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { bool found = false; int32_t rev_index = 0; - int32_t left = parsed_zsets_meta_value.count(); - int32_t version = parsed_zsets_meta_value.version(); + int32_t left = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::max(), Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->SeekForPrev(zsets_score_key.Encode()); iter->Valid() && left >= 0; iter->Prev(), --left, ++rev_index) { ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); if (parsed_zsets_score_key.member().compare(member) == 0) { @@ -1086,7 +1027,7 @@ Status RedisZSets::ZRevrank(const Slice& key, const Slice& member, int32_t* rank return s; } -Status RedisZSets::ZScore(const Slice& key, const Slice& member, double* score) { +Status Redis::ZScore(const Slice& key, const Slice& member, double* score) { *score = 0; rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot = nullptr; @@ -1095,19 +1036,23 
@@ Status RedisZSets::ZScore(const Slice& key, const Slice& member, double* score) ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - int32_t version = parsed_zsets_meta_value.version(); + uint64_t version = parsed_zsets_meta_value.Version(); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { std::string data_value; ZSetsMemberKey zsets_member_key(key, version, member); - s = db_->Get(read_options, handles_[1], zsets_member_key.Encode(), &data_value); + s = db_->Get(read_options, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value); if (s.ok()) { + ParsedBaseDataValue parsed_value(&data_value); + parsed_value.StripSuffix(); uint64_t tmp = DecodeFixed64(data_value.data()); const void* ptr_tmp = reinterpret_cast(&tmp); *score = *reinterpret_cast(ptr_tmp); @@ -1121,7 +1066,39 @@ Status RedisZSets::ZScore(const Slice& key, const Slice& member, double* score) return s; } -Status RedisZSets::ZUnionstore(const Slice& destination, const std::vector& keys, +Status Redis::ZGetAll(const Slice& key, double weight, std::map* value_to_dest) { + Status s; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (!parsed_zsets_meta_value.IsStale() && parsed_zsets_meta_value.Count() != 0) { + int32_t 
cur_index = 0; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; + double score = 0.0; + uint64_t version = parsed_zsets_meta_value.Version(); + ZSetsScoreKey zsets_score_key(key.ToString(), version, std::numeric_limits::lowest(), Slice()); + Slice seek_key = zsets_score_key.Encode(); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->Seek(seek_key); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + double score = parsed_zsets_score_key.score() * weight; + score = (score == -0.0) ? 0 : score; + value_to_dest->insert(std::make_pair(parsed_zsets_score_key.member().ToString(), score)); + } + delete iter; + } + } + return s; +} + +Status Redis::ZUnionstore(const Slice& destination, const std::vector& keys, const std::vector& weights, const AGGREGATE agg, std::map& value_to_dest, int32_t* ret) { *ret = 0; uint32_t statistic = 0; @@ -1129,7 +1106,7 @@ Status RedisZSets::ZUnionstore(const Slice& destination, const std::vectorGet(read_options, handles_[0], keys[idx], &meta_value); + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (!parsed_zsets_meta_value.IsStale() && parsed_zsets_meta_value.count() != 0) { + if (!parsed_zsets_meta_value.IsStale() && parsed_zsets_meta_value.Count() != 0) { int32_t cur_index = 0; - int32_t stop_index = parsed_zsets_meta_value.count() - 1; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; double score = 0; double weight = idx < weights.size() ? 
weights[idx] : 1; - version = parsed_zsets_meta_value.version(); + version = parsed_zsets_meta_value.Version(); ZSetsScoreKey zsets_score_key(keys[idx], version, std::numeric_limits::lowest(), Slice()); - KeyStatisticsDurationGuard guard(this, keys[idx]); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, keys[idx]); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); @@ -1182,22 +1160,23 @@ Status RedisZSets::ZUnionstore(const Slice& destination, const std::vectorGet(read_options, handles_[0], destination, &meta_value); + BaseMetaKey base_destination(destination); + s = db_->Get(read_options, handles_[kZsetsMetaCF], base_destination.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - statistic = parsed_zsets_meta_value.count(); + statistic = parsed_zsets_meta_value.Count(); version = parsed_zsets_meta_value.InitialMetaValue(); if (!parsed_zsets_meta_value.check_set_count(static_cast(member_score_map.size()))) { return Status::InvalidArgument("zset size overflow"); } - parsed_zsets_meta_value.set_count(static_cast(member_score_map.size())); - batch.Put(handles_[0], destination, meta_value); + parsed_zsets_meta_value.SetCount(static_cast(member_score_map.size())); + batch.Put(handles_[kZsetsMetaCF], base_destination.Encode(), meta_value); } else { char buf[4]; EncodeFixed32(buf, member_score_map.size()); ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[0], destination, zsets_meta_value.Encode()); + batch.Put(handles_[kZsetsMetaCF], base_destination.Encode(), zsets_meta_value.Encode()); } char score_buf[8]; @@ -1206,19 +1185,21 @@ Status 
RedisZSets::ZUnionstore(const Slice& destination, const std::vector(&sm.second); EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); - batch.Put(handles_[1], zsets_member_key.Encode(), Slice(score_buf, sizeof(uint64_t))); + BaseDataValue member_i_val(Slice(score_buf, sizeof(uint64_t))); + batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), member_i_val.Encode()); ZSetsScoreKey zsets_score_key(destination, version, sm.second, sm.first); - batch.Put(handles_[2], zsets_score_key.Encode(), Slice()); + BaseDataValue score_i_val(Slice{}); + batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), score_i_val.Encode()); } *ret = static_cast(member_score_map.size()); s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(destination.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, destination.ToString(), statistic); value_to_dest = std::move(member_score_map); return s; } -Status RedisZSets::ZInterstore(const Slice& destination, const std::vector& keys, +Status Redis::ZInterstore(const Slice& destination, const std::vector& keys, const std::vector& weights, const AGGREGATE agg, std::vector& value_to_dest, int32_t* ret) { if (keys.empty()) { return Status::Corruption("ZInterstore invalid parameter, no keys"); @@ -1234,10 +1215,10 @@ Status RedisZSets::ZInterstore(const Slice& destination, const std::vector vaild_zsets; + std::vector valid_zsets; std::vector score_members; std::vector final_score_members; Status s; @@ -1245,15 +1226,16 @@ Status RedisZSets::ZInterstore(const Slice& destination, const std::vectorGet(read_options, handles_[0], keys[idx], &meta_value); + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { + if (parsed_zsets_meta_value.IsStale() || 
parsed_zsets_meta_value.Count() == 0) { have_invalid_zsets = true; } else { - vaild_zsets.push_back({keys[idx], parsed_zsets_meta_value.version()}); + valid_zsets.push_back({keys[idx], parsed_zsets_meta_value.Version()}); if (idx == 0) { - stop_index = parsed_zsets_meta_value.count() - 1; + stop_index = parsed_zsets_meta_value.Count() - 1; } } } else if (s.IsNotFound()) { @@ -1264,10 +1246,9 @@ Status RedisZSets::ZInterstore(const Slice& destination, const std::vector::lowest(), - Slice()); - KeyStatisticsDurationGuard guard(this, vaild_zsets[0].key); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[2]); + ZSetsScoreKey zsets_score_key(valid_zsets[0].key, valid_zsets[0].version, std::numeric_limits::lowest(), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, valid_zsets[0].key); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); double score = parsed_zsets_score_key.score(); @@ -1281,11 +1262,13 @@ Status RedisZSets::ZInterstore(const Slice& destination, const std::vectorGet(read_options, handles_[1], zsets_member_key.Encode(), &data_value); + ZSetsMemberKey zsets_member_key(valid_zsets[idx].key, valid_zsets[idx].version, item.member); + s = db_->Get(read_options, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value); if (s.ok()) { + ParsedBaseDataValue parsed_value(&data_value); + parsed_value.StripSuffix(); uint64_t tmp = DecodeFixed64(data_value.data()); const void* ptr_tmp = reinterpret_cast(&tmp); double score = *reinterpret_cast(ptr_tmp); @@ -1313,22 +1296,23 @@ Status RedisZSets::ZInterstore(const Slice& destination, const std::vectorGet(read_options, handles_[0], destination, &meta_value); + BaseMetaKey base_destination(destination); + s = db_->Get(read_options, handles_[kZsetsMetaCF], 
base_destination.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - statistic = parsed_zsets_meta_value.count(); + statistic = parsed_zsets_meta_value.Count(); version = parsed_zsets_meta_value.InitialMetaValue(); if (!parsed_zsets_meta_value.check_set_count(static_cast(final_score_members.size()))) { return Status::InvalidArgument("zset size overflow"); } - parsed_zsets_meta_value.set_count(static_cast(final_score_members.size())); - batch.Put(handles_[0], destination, meta_value); + parsed_zsets_meta_value.SetCount(static_cast(final_score_members.size())); + batch.Put(handles_[kZsetsMetaCF], base_destination.Encode(), meta_value); } else { char buf[4]; EncodeFixed32(buf, final_score_members.size()); ZSetsMetaValue zsets_meta_value(Slice(buf, sizeof(int32_t))); version = zsets_meta_value.UpdateVersion(); - batch.Put(handles_[0], destination, zsets_meta_value.Encode()); + batch.Put(handles_[kZsetsMetaCF], base_destination.Encode(), zsets_meta_value.Encode()); } char score_buf[8]; for (const auto& sm : final_score_members) { @@ -1336,19 +1320,21 @@ Status RedisZSets::ZInterstore(const Slice& destination, const std::vector(&sm.score); EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); - batch.Put(handles_[1], zsets_member_key.Encode(), Slice(score_buf, sizeof(uint64_t))); + BaseDataValue member_i_val(Slice(score_buf, sizeof(uint64_t))); + batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), member_i_val.Encode()); ZSetsScoreKey zsets_score_key(destination, version, sm.score, sm.member); - batch.Put(handles_[2], zsets_score_key.Encode(), Slice()); + BaseDataValue zsets_score_i_val(Slice{}); + batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); } *ret = static_cast(final_score_members.size()); s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(destination.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, 
destination.ToString(), statistic); value_to_dest = std::move(final_score_members); return s; } -Status RedisZSets::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, +Status Redis::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, std::vector* members) { members->clear(); rocksdb::ReadOptions read_options; @@ -1361,18 +1347,20 @@ Status RedisZSets::ZRangebylex(const Slice& key, const Slice& min, const Slice& bool left_no_limit = min.compare("-") == 0; bool right_not_limit = max.compare("+") == 0; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { + if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_zsets_meta_value.version(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t cur_index = 0; - int32_t stop_index = parsed_zsets_meta_value.count() - 1; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; ZSetsMemberKey zsets_member_key(key, version, Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsDataCF]); for (iter->Seek(zsets_member_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { bool left_pass = false; bool right_pass = false; @@ -1397,7 +1385,7 @@ Status RedisZSets::ZRangebylex(const Slice& key, const Slice& min, const Slice& return s; } -Status RedisZSets::ZLexcount(const 
Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, +Status Redis::ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, int32_t* ret) { std::vector members; Status s = ZRangebylex(key, min, max, left_close, right_close, &members); @@ -1405,7 +1393,7 @@ Status RedisZSets::ZLexcount(const Slice& key, const Slice& min, const Slice& ma return s; } -Status RedisZSets::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, +Status Redis::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, int32_t* ret) { *ret = 0; uint32_t statistic = 0; @@ -1422,18 +1410,20 @@ Status RedisZSets::ZRemrangebylex(const Slice& key, const Slice& min, const Slic int32_t del_cnt = 0; std::string meta_value; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { + if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t version = parsed_zsets_meta_value.version(); + uint64_t version = parsed_zsets_meta_value.Version(); int32_t cur_index = 0; - int32_t stop_index = parsed_zsets_meta_value.count() - 1; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; ZSetsMemberKey zsets_member_key(key, version, Slice()); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsDataCF]); for (iter->Seek(zsets_member_key.Encode()); iter->Valid() && cur_index <= 
stop_index; iter->Next(), ++cur_index) { bool left_pass = false; bool right_pass = false; @@ -1446,13 +1436,14 @@ Status RedisZSets::ZRemrangebylex(const Slice& key, const Slice& min, const Slic right_pass = true; } if (left_pass && right_pass) { - batch.Delete(handles_[1], iter->key()); + batch.Delete(handles_[kZsetsDataCF], iter->key()); - uint64_t tmp = DecodeFixed64(iter->value().data()); + ParsedBaseDataValue parsed_value(iter->value()); + uint64_t tmp = DecodeFixed64(parsed_value.UserValue().data()); const void* ptr_tmp = reinterpret_cast(&tmp); double score = *reinterpret_cast(ptr_tmp); ZSetsScoreKey zsets_score_key(key, version, score, member); - batch.Delete(handles_[2], zsets_score_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode()); del_cnt++; statistic++; } @@ -1467,26 +1458,28 @@ Status RedisZSets::ZRemrangebylex(const Slice& key, const Slice& min, const Slic return Status::InvalidArgument("zset size overflow"); } parsed_zsets_meta_value.ModifyCount(-del_cnt); - batch.Put(handles_[0], key, meta_value); + batch.Put(handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); *ret = del_cnt; } } else { return s; } s = db_->Write(default_write_options_, &batch); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); return s; } -Status RedisZSets::Expire(const Slice& key, int32_t ttl) { +Status Redis::ZsetsExpire(const Slice& key, int64_t ttl) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() 
== 0) { return Status::NotFound(); } @@ -1495,129 +1488,59 @@ Status RedisZSets::Expire(const Slice& key, int32_t ttl) { } else { parsed_zsets_meta_value.InitialMetaValue(); } - s = db_->Put(default_write_options_, handles_[0], key, meta_value); + s = db_->Put(default_write_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); } return s; } -Status RedisZSets::Del(const Slice& key) { +Status Redis::ZsetsDel(const Slice& key) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - uint32_t statistic = parsed_zsets_meta_value.count(); + uint32_t statistic = parsed_zsets_meta_value.Count(); parsed_zsets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[0], key, meta_value); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + s = db_->Put(default_write_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); } } return s; } -bool RedisZSets::Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, - int64_t* count, std::string* next_key) { - std::string meta_key; - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - - it->Seek(start_key); - while (it->Valid() && (*count) 
> 0) { - ParsedZSetsMetaValue parsed_zsets_meta_value(it->value()); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { - it->Next(); - continue; - } else { - meta_key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), meta_key.data(), meta_key.size(), 0) != 0) { - keys->push_back(meta_key); - } - (*count)--; - it->Next(); - } - } - - std::string prefix = isTailWildcard(pattern) ? pattern.substr(0, pattern.size() - 1) : ""; - if (it->Valid() && (it->key().compare(prefix) <= 0 || it->key().starts_with(prefix))) { - *next_key = it->key().ToString(); - is_finish = false; - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -bool RedisZSets::PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) { - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - it->Seek(start_key); - while (it->Valid() && (*leftover_visits) > 0) { - ParsedZSetsMetaValue parsed_zsets_meta_value(it->value()); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { - it->Next(); - continue; - } else { - if (min_timestamp < parsed_zsets_meta_value.timestamp() && parsed_zsets_meta_value.timestamp() < max_timestamp) { - keys->push_back(it->key().ToString()); - } - (*leftover_visits)--; - it->Next(); - } - } - - if (it->Valid()) { - is_finish = false; - *next_key = it->key().ToString(); - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -Status RedisZSets::Expireat(const Slice& key, int32_t timestamp) { +Status Redis::ZsetsExpireat(const Slice& key, int64_t timestamp) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = 
db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { if (timestamp > 0) { - parsed_zsets_meta_value.set_timestamp(timestamp); + parsed_zsets_meta_value.SetEtime(uint64_t(timestamp)); } else { parsed_zsets_meta_value.InitialMetaValue(); } - return db_->Put(default_write_options_, handles_[0], key, meta_value); + return db_->Put(default_write_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); } } return s; } -Status RedisZSets::ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, - std::vector* score_members, int64_t* next_cursor) { +Status Redis::ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* score_members, int64_t* next_cursor) { *next_cursor = 0; score_members->clear(); if (cursor < 0) { @@ -1633,17 +1556,19 @@ Status RedisZSets::ZScan(const Slice& key, int64_t cursor, const std::string& pa std::string meta_value; ScopeSnapshot ss(db_, &snapshot); read_options.snapshot = snapshot; - Status s = db_->Get(read_options, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { + if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { *next_cursor = 0; return Status::NotFound(); } else { std::string sub_member; std::string start_point; - int32_t 
version = parsed_zsets_meta_value.version(); - s = GetScanStartPoint(key, pattern, cursor, &start_point); + uint64_t version = parsed_zsets_meta_value.Version(); + s = GetScanStartPoint(DataType::kZSets, key, pattern, cursor, &start_point); if (s.IsNotFound()) { cursor = 0; if (isTailWildcard(pattern)) { @@ -1656,15 +1581,16 @@ Status RedisZSets::ZScan(const Slice& key, int64_t cursor, const std::string& pa ZSetsMemberKey zsets_member_prefix(key, version, sub_member); ZSetsMemberKey zsets_member_key(key, version, start_point); - std::string prefix = zsets_member_prefix.Encode().ToString(); - KeyStatisticsDurationGuard guard(this, key.ToString()); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + std::string prefix = zsets_member_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsDataCF]); for (iter->Seek(zsets_member_key.Encode()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); iter->Next()) { ParsedZSetsMemberKey parsed_zsets_member_key(iter->key()); std::string member = parsed_zsets_member_key.member().ToString(); if (StringMatch(pattern.data(), pattern.size(), member.data(), member.size(), 0) != 0) { - uint64_t tmp = DecodeFixed64(iter->value().data()); + ParsedBaseDataValue parsed_value(iter->value()); + uint64_t tmp = DecodeFixed64(parsed_value.UserValue().data()); const void* ptr_tmp = reinterpret_cast(&tmp); double score = *reinterpret_cast(ptr_tmp); score_members->push_back({score, member}); @@ -1676,7 +1602,7 @@ Status RedisZSets::ZScan(const Slice& key, int64_t cursor, const std::string& pa *next_cursor = cursor + step_length; ParsedZSetsMemberKey parsed_zsets_member_key(iter->key()); std::string next_member = parsed_zsets_member_key.member().ToString(); - StoreScanNextPoint(key, pattern, *next_cursor, next_member); + StoreScanNextPoint(DataType::kZSets, key, pattern, *next_cursor, 
next_member); } else { *next_cursor = 0; } @@ -1689,148 +1615,46 @@ Status RedisZSets::ZScan(const Slice& key, int64_t cursor, const std::string& pa return Status::OK(); } -Status RedisZSets::PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) > 0)) { - return Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToFirst(); - } else { - it->Seek(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedZSetsMetaValue parsed_zsets_meta_value(it->value()); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { - it->Next(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Next(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedZSetsMetaValue parsed_zsets_meta_value(it->value()); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { - it->Next(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); -} - -Status RedisZSets::PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain 
= limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) < 0)) { - return Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToLast(); - } else { - it->SeekForPrev(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedZSetsMetaValue parsed_zsets_meta_value(it->value()); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { - it->Prev(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Prev(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedZSetsMetaValue parsed_zsets_meta_value(it->value()); - if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.count() == 0) { - it->Prev(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); -} - -Status RedisZSets::Persist(const Slice& key) { +Status Redis::ZsetsPersist(const Slice& key) { std::string meta_value; ScopeRecordLock l(lock_mgr_, key); - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if 
(parsed_zsets_meta_value.Count() == 0) { return Status::NotFound(); } else { - int32_t timestamp = parsed_zsets_meta_value.timestamp(); + uint64_t timestamp = parsed_zsets_meta_value.Etime(); if (timestamp == 0) { return Status::NotFound("Not have an associated timeout"); } else { - parsed_zsets_meta_value.set_timestamp(0); - return db_->Put(default_write_options_, handles_[0], key, meta_value); + parsed_zsets_meta_value.SetEtime(0); + return db_->Put(default_write_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), meta_value); } } } return s; } -Status RedisZSets::TTL(const Slice& key, int64_t* timestamp) { +Status Redis::ZsetsTTL(const Slice& key, int64_t* timestamp) { std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kZsetsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); if (parsed_zsets_meta_value.IsStale()) { *timestamp = -2; return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.count() == 0) { + } else if (parsed_zsets_meta_value.Count() == 0) { *timestamp = -2; return Status::NotFound(); } else { - *timestamp = parsed_zsets_meta_value.timestamp(); + *timestamp = parsed_zsets_meta_value.Etime(); if (*timestamp == 0) { *timestamp = -1; } else { @@ -1845,7 +1669,7 @@ Status RedisZSets::TTL(const Slice& key, int64_t* timestamp) { return s; } -void RedisZSets::ScanDatabase() { +void Redis::ScanZsets() { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -1853,46 +1677,48 @@ void RedisZSets::ScanDatabase() { iterator_options.fill_cache = false; auto current_time = static_cast(time(nullptr)); - LOG(INFO) << "***************ZSets Meta Data***************"; - auto meta_iter = db_->NewIterator(iterator_options, handles_[0]); + LOG(INFO) << "***************" << "rocksdb 
instance: " << index_ << " ZSets Meta Data***************"; + auto meta_iter = db_->NewIterator(iterator_options, handles_[kZsetsMetaCF]); for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { + ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); ParsedZSetsMetaValue parsed_zsets_meta_value(meta_iter->value()); int32_t survival_time = 0; - if (parsed_zsets_meta_value.timestamp() != 0) { - survival_time = parsed_zsets_meta_value.timestamp() - current_time > 0 - ? parsed_zsets_meta_value.timestamp() - current_time + if (parsed_zsets_meta_value.Etime() != 0) { + survival_time = parsed_zsets_meta_value.Etime() - current_time > 0 + ? parsed_zsets_meta_value.Etime() - current_time : -1; } LOG(INFO) << fmt::format("[key : {:<30}] [count : {:<10}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", - meta_iter->key().ToString(), parsed_zsets_meta_value.count(), parsed_zsets_meta_value.timestamp(), - parsed_zsets_meta_value.version(), survival_time); + parsed_meta_key.Key().ToString(), parsed_zsets_meta_value.Count(), parsed_zsets_meta_value.Etime(), + parsed_zsets_meta_value.Version(), survival_time); } delete meta_iter; - LOG(INFO) << "***************ZSets Member To Score Data***************"; - auto member_iter = db_->NewIterator(iterator_options, handles_[1]); + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " ZSets Member To Score Data***************"; + auto member_iter = db_->NewIterator(iterator_options, handles_[kZsetsDataCF]); for (member_iter->SeekToFirst(); member_iter->Valid(); member_iter->Next()) { ParsedZSetsMemberKey parsed_zsets_member_key(member_iter->key()); + ParsedBaseDataValue parsed_value(member_iter->value()); - uint64_t tmp = DecodeFixed64(member_iter->value().data()); + uint64_t tmp = DecodeFixed64(parsed_value.UserValue().data()); const void* ptr_tmp = reinterpret_cast(&tmp); double score = *reinterpret_cast(ptr_tmp); LOG(INFO) << fmt::format("[key : {:<30}] [member : {:<20}] [score : {:<20}] 
[version : {}]", - parsed_zsets_member_key.key().ToString(), parsed_zsets_member_key.member().ToString(), - score, parsed_zsets_member_key.version()); + parsed_zsets_member_key.Key().ToString(), parsed_zsets_member_key.member().ToString(), + score, parsed_zsets_member_key.Version()); } delete member_iter; - LOG(INFO) << "***************ZSets Score To Member Data***************"; - auto score_iter = db_->NewIterator(iterator_options, handles_[2]); + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " ZSets Score To Member Data***************"; + auto score_iter = db_->NewIterator(iterator_options, handles_[kZsetsScoreCF]); for (score_iter->SeekToFirst(); score_iter->Valid(); score_iter->Next()) { ParsedZSetsScoreKey parsed_zsets_score_key(score_iter->key()); - + LOG(INFO) << fmt::format("[key : {:<30}] [score : {:<20}] [member : {:<20}] [version : {}]", parsed_zsets_score_key.key().ToString(), parsed_zsets_score_key.score(), - parsed_zsets_score_key.member().ToString(), parsed_zsets_score_key.version()); + parsed_zsets_score_key.member().ToString(), parsed_zsets_score_key.Version()); } delete score_iter; } diff --git a/src/storage/src/redis_zsets.h b/src/storage/src/redis_zsets.h deleted file mode 100644 index 76b2ec19b9..0000000000 --- a/src/storage/src/redis_zsets.h +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
- -#ifndef SRC_REDIS_ZSETS_h -#define SRC_REDIS_ZSETS_h - -#include -#include -#include - -#include "src/custom_comparator.h" -#include "src/redis.h" - -namespace storage { - -class RedisZSets : public Redis { - public: - RedisZSets(Storage* s, const DataType& type); - ~RedisZSets() override = default; - - // Common Commands - Status Open(const StorageOptions& storage_options, const std::string& db_path) override; - Status CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type = kMetaAndData) override; - Status GetProperty(const std::string& property, uint64_t* out) override; - Status ScanKeyNum(KeyInfo* key_info) override; - Status ScanKeys(const std::string& pattern, std::vector* keys) override; - Status PKPatternMatchDel(const std::string& pattern, int32_t* ret) override; - - // ZSets Commands - Status ZAdd(const Slice& key, const std::vector& score_members, int32_t* ret); - Status ZCard(const Slice& key, int32_t* card); - Status ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret); - Status ZIncrby(const Slice& key, const Slice& member, double increment, double* ret); - Status ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members); - Status ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector* score_members, - int64_t* ttl); - Status ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count, - int64_t offset, std::vector* score_members); - Status ZRank(const Slice& key, const Slice& member, int32_t* rank); - Status ZRem(const Slice& key, const std::vector& members, int32_t* ret); - Status ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret); - Status ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret); - Status ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members); - 
Status ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count, - int64_t offset, std::vector* score_members); - Status ZRevrank(const Slice& key, const Slice& member, int32_t* rank); - Status ZScore(const Slice& key, const Slice& member, double* score); - Status ZUnionstore(const Slice& destination, const std::vector& keys, const std::vector& weights, - AGGREGATE agg, std::map& value_to_dest, int32_t* ret); - Status ZInterstore(const Slice& destination, const std::vector& keys, const std::vector& weights, - AGGREGATE agg, std::vector& value_to_dest, int32_t* ret); - Status ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, - std::vector* members); - Status ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, - int32_t* ret); - Status ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, - int32_t* ret); - Status ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, - std::vector* score_members, int64_t* next_cursor); - Status PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - Status PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - Status ZPopMax(const Slice& key, int64_t count, std::vector* score_members); - Status ZPopMin(const Slice& key, int64_t count, std::vector* score_members); - - // Keys Commands - Status Expire(const Slice& key, int32_t ttl) override; - Status Del(const Slice& key) override; - bool Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, int64_t* count, - std::string* next_key) override; - bool PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, 
int64_t* leftover_visits, std::string* next_key) override; - Status Expireat(const Slice& key, int32_t timestamp) override; - Status Persist(const Slice& key) override; - Status TTL(const Slice& key, int64_t* timestamp) override; - - // Iterate all data - void ScanDatabase(); -}; - -} // namespace storage -#endif // SRC_REDIS_ZSETS_h diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index fa4629a158..20d729a2cb 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -3,27 +3,26 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "storage/storage.h" -#include "storage/util.h" +#include +#include #include -#include - +#include "storage/util.h" +#include "storage/storage.h" #include "scope_snapshot.h" #include "src/lru_cache.h" #include "src/mutex_impl.h" #include "src/options_helper.h" -#include "src/redis_hashes.h" #include "src/redis_hyperloglog.h" -#include "src/redis_lists.h" -#include "src/redis_sets.h" -#include "src/redis_streams.h" -#include "src/redis_strings.h" -#include "src/redis_zsets.h" +#include "src/type_iterator.h" +#include "src/redis.h" +#include "include/pika_conf.h" +#include "pstd/include/pika_codis_slot.h" namespace storage { - +extern std::string BitOpOperate(BitOpType op, const std::vector& src_values, int64_t max_len); +class Redis; Status StorageOptions::ResetOptions(const OptionType& option_type, const std::unordered_map& options_map) { std::unordered_map& options_member_type_info = mutable_cf_options_member_type_info; @@ -50,9 +49,16 @@ Status StorageOptions::ResetOptions(const OptionType& option_type, return Status::OK(); } -Storage::Storage() { +// for unit test only +Storage::Storage() : Storage(3, 1024, true) {} + +Storage::Storage(int db_instance_num, int slot_num, bool is_classic_mode) { cursors_store_ = std::make_unique>(); cursors_store_->SetCapacity(5000); + slot_indexer_ = 
std::make_unique(db_instance_num); + is_classic_mode_ = is_classic_mode; + db_instance_num_ = db_instance_num; + slot_num_ = slot_num; Status s = StartBGThread(); if (!s.ok()) { @@ -65,520 +71,1087 @@ Storage::~Storage() { bg_tasks_cond_var_.notify_one(); if (is_opened_) { - rocksdb::CancelAllBackgroundWork(strings_db_->GetDB(), true); - rocksdb::CancelAllBackgroundWork(hashes_db_->GetDB(), true); - rocksdb::CancelAllBackgroundWork(sets_db_->GetDB(), true); - rocksdb::CancelAllBackgroundWork(lists_db_->GetDB(), true); - rocksdb::CancelAllBackgroundWork(zsets_db_->GetDB(), true); - rocksdb::CancelAllBackgroundWork(streams_db_->GetDB(), true); - } - - int ret = 0; - if ((ret = pthread_join(bg_tasks_thread_id_, nullptr)) != 0) { - LOG(ERROR) << "pthread_join failed with bgtask thread error " << ret; + int ret = 0; + if ((ret = pthread_join(bg_tasks_thread_id_, nullptr)) != 0) { + LOG(ERROR) << "pthread_join failed with bgtask thread error " << ret; + } + for (auto& inst : insts_) { + inst.reset(); + } } } -static std::string AppendSubDirectory(const std::string& db_path, const std::string& sub_db) { +static std::string AppendSubDirectory(const std::string& db_path, int index) { if (db_path.back() == '/') { - return db_path + sub_db; + return db_path + std::to_string(index); } else { - return db_path + "/" + sub_db; + return db_path + "/" + std::to_string(index); } } Status Storage::Open(const StorageOptions& storage_options, const std::string& db_path) { mkpath(db_path.c_str(), 0755); - strings_db_ = std::make_unique(this, kStrings); - Status s = strings_db_->Open(storage_options, AppendSubDirectory(db_path, "strings")); - if (!s.ok()) { - LOG(FATAL) << "open kv db failed, " << s.ToString(); - } - - hashes_db_ = std::make_unique(this, kHashes); - s = hashes_db_->Open(storage_options, AppendSubDirectory(db_path, "hashes")); - if (!s.ok()) { - LOG(FATAL) << "open hashes db failed, " << s.ToString(); - } - - sets_db_ = std::make_unique(this, kSets); - s = 
sets_db_->Open(storage_options, AppendSubDirectory(db_path, "sets")); - if (!s.ok()) { - LOG(FATAL) << "open set db failed, " << s.ToString(); - } - - lists_db_ = std::make_unique(this, kLists); - s = lists_db_->Open(storage_options, AppendSubDirectory(db_path, "lists")); - if (!s.ok()) { - LOG(FATAL) << "open list db failed, " << s.ToString(); - } - - zsets_db_ = std::make_unique(this, kZSets); - s = zsets_db_->Open(storage_options, AppendSubDirectory(db_path, "zsets")); - if (!s.ok()) { - LOG(FATAL) << "open zset db failed, " << s.ToString(); - } - - streams_db_ = std::make_unique(this, kStreams); - s = streams_db_->Open(storage_options, AppendSubDirectory(db_path, "streams")); - if (!s.ok()) { - LOG(FATAL) << "open stream db failed, " << s.ToString(); + int inst_count = db_instance_num_; + for (int index = 0; index < inst_count; index++) { + insts_.emplace_back(std::make_unique(this, index)); + Status s = insts_.back()->Open(storage_options, AppendSubDirectory(db_path, index)); + if (!s.ok()) { + LOG(FATAL) << "open db failed" << s.ToString(); + } } is_opened_.store(true); return Status::OK(); } -Status Storage::GetStartKey(const DataType& dtype, int64_t cursor, std::string* start_key) { +Status Storage::LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key) { std::string index_key = DataTypeTag[dtype] + std::to_string(cursor); - return cursors_store_->Lookup(index_key, start_key); + std::string index_value; + Status s = cursors_store_->Lookup(index_key, &index_value); + if (!s.ok() || index_value.size() < 3) { + return s; + } + *type = index_value[0]; + *start_key = index_value.substr(1); + return s; } -Status Storage::StoreCursorStartKey(const DataType& dtype, int64_t cursor, const std::string& next_key) { +Status Storage::StoreCursorStartKey(const DataType& dtype, int64_t cursor, char type, const std::string& next_key) { std::string index_key = DataTypeTag[dtype] + std::to_string(cursor); - return 
cursors_store_->Insert(index_key, next_key); + // format: data_type tag(1B) | start_key + std::string index_value(1, type); + index_value.append(next_key); + return cursors_store_->Insert(index_key, index_value); +} + +std::unique_ptr& Storage::GetDBInstance(const Slice& key) { + return GetDBInstance(key.ToString()); +} + +std::unique_ptr& Storage::GetDBInstance(const std::string& key) { + auto inst_index = slot_indexer_->GetInstanceID(GetSlotID(slot_num_, key)); + return insts_[inst_index]; } // Strings Commands -Status Storage::Set(const Slice& key, const Slice& value) { return strings_db_->Set(key, value); } +Status Storage::Set(const Slice& key, const Slice& value) { + auto& inst = GetDBInstance(key); + return inst->Set(key, value); +} -Status Storage::Setxx(const Slice& key, const Slice& value, int32_t* ret, const int32_t ttl) { - return strings_db_->Setxx(key, value, ret, ttl); +Status Storage::Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl) { + auto& inst = GetDBInstance(key); + return inst->Setxx(key, value, ret, ttl); } -Status Storage::Get(const Slice& key, std::string* value) { return strings_db_->Get(key, value); } +Status Storage::Get(const Slice& key, std::string* value) { + auto& inst = GetDBInstance(key); + return inst->Get(key, value); +} Status Storage::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl) { - return strings_db_->GetWithTTL(key, value, ttl); + auto& inst = GetDBInstance(key); + return inst->GetWithTTL(key, value, ttl); } Status Storage::GetSet(const Slice& key, const Slice& value, std::string* old_value) { - return strings_db_->GetSet(key, value, old_value); + auto& inst = GetDBInstance(key); + return inst->GetSet(key, value, old_value); } Status Storage::SetBit(const Slice& key, int64_t offset, int32_t value, int32_t* ret) { - return strings_db_->SetBit(key, offset, value, ret); + auto& inst = GetDBInstance(key); + return inst->SetBit(key, offset, value, ret); } -Status Storage::GetBit(const Slice& 
key, int64_t offset, int32_t* ret) { return strings_db_->GetBit(key, offset, ret); } +Status Storage::GetBit(const Slice& key, int64_t offset, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->GetBit(key, offset, ret); +} -Status Storage::MSet(const std::vector& kvs) { return strings_db_->MSet(kvs); } +Status Storage::MSet(const std::vector& kvs) { + Status s; + for (const auto& kv : kvs) { + auto& inst = GetDBInstance(kv.key); + s = inst->Set(Slice(kv.key), Slice(kv.value)); + if (!s.ok()) { + return s; + } + } + return s; +} Status Storage::MGet(const std::vector& keys, std::vector* vss) { - return strings_db_->MGet(keys, vss); + vss->clear(); + Status s; + for(const auto& key : keys) { + auto& inst = GetDBInstance(key); + std::string value; + s = inst->Get(key, &value); + if (s.ok()) { + vss->push_back({value, Status::OK()}); + } else if(s.IsNotFound()) { + vss->push_back({std::string(), Status::NotFound()}); + } else { + vss->clear(); + return s; + } + } + return Status::OK(); } Status Storage::MGetWithTTL(const std::vector& keys, std::vector* vss) { - return strings_db_->MGetWithTTL(keys, vss); + vss->clear(); + Status s; + for(const auto& key : keys) { + auto& inst = GetDBInstance(key); + std::string value; + int64_t ttl; + s = inst->GetWithTTL(key, &value, &ttl); + if (s.ok()) { + vss->push_back({value, Status::OK(), ttl}); + } else if(s.IsNotFound()) { + vss->push_back({std::string(), Status::NotFound(), ttl}); + } else { + vss->clear(); + return s; + } + } + return Status::OK(); } -Status Storage::Setnx(const Slice& key, const Slice& value, int32_t* ret, const int32_t ttl) { - return strings_db_->Setnx(key, value, ret, ttl); +Status Storage::Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl) { + auto& inst = GetDBInstance(key); + return inst->Setnx(key, value, ret, ttl); } -Status Storage::MSetnx(const std::vector& kvs, int32_t* ret) { return strings_db_->MSetnx(kvs, ret); } +// disallowed in codis, only runs in pika 
classic mode +// TODO: Not concurrent safe now, merge wuxianrong's bugfix after floyd's PR review finishes. +Status Storage::MSetnx(const std::vector& kvs, int32_t* ret) { + assert(is_classic_mode_); + Status s; + for (const auto& kv : kvs) { + auto& inst = GetDBInstance(kv.key); + std::string value; + s = inst->Get(Slice(kv.key), &value); + if (s.ok() || !s.IsNotFound()) { + return s; + } + } + + for (const auto& kv : kvs) { + auto& inst = GetDBInstance(kv.key); + s = inst->Set(Slice(kv.key), Slice(kv.value)); + if (!s.ok()) { + return s; + } + } + if (s.ok()) { + *ret = 1; + } + return s; +} -Status Storage::Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, const int32_t ttl) { - return strings_db_->Setvx(key, value, new_value, ret, ttl); +Status Storage::Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, int64_t ttl) { + auto& inst = GetDBInstance(key); + return inst->Setvx(key, value, new_value, ret, ttl); } Status Storage::Delvx(const Slice& key, const Slice& value, int32_t* ret) { - return strings_db_->Delvx(key, value, ret); + auto& inst = GetDBInstance(key); + return inst->Delvx(key, value, ret); } Status Storage::Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret) { - return strings_db_->Setrange(key, start_offset, value, ret); + auto& inst = GetDBInstance(key); + return inst->Setrange(key, start_offset, value, ret); } Status Storage::Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret) { - return strings_db_->Getrange(key, start_offset, end_offset, ret); + auto& inst = GetDBInstance(key); + return inst->Getrange(key, start_offset, end_offset, ret); } Status Storage::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret, std::string* value, int64_t* ttl) { - return strings_db_->GetrangeWithValue(key, start_offset, end_offset, ret, value, ttl); + auto& inst = GetDBInstance(key); + return 
inst->GetrangeWithValue(key, start_offset, end_offset, ret, value, ttl); } Status Storage::Append(const Slice& key, const Slice& value, int32_t* ret) { - return strings_db_->Append(key, value, ret); + auto& inst = GetDBInstance(key); + return inst->Append(key, value, ret); } Status Storage::BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, bool have_range) { - return strings_db_->BitCount(key, start_offset, end_offset, ret, have_range); + auto& inst = GetDBInstance(key); + return inst->BitCount(key, start_offset, end_offset, ret, have_range); } +// disallowed in codis proxy, only runs in classic mode Status Storage::BitOp(BitOpType op, const std::string& dest_key, const std::vector& src_keys, std::string &value_to_dest, int64_t* ret) { - return strings_db_->BitOp(op, dest_key, src_keys, value_to_dest, ret); + assert(is_classic_mode_); + Status s; + int64_t max_len = 0; + int64_t value_len = 0; + std::vector src_vlaues; + for (const auto& src_key : src_keys) { + auto& inst = GetDBInstance(src_key); + std::string value; + s = inst->Get(Slice(src_key), &value); + if (s.ok()) { + src_vlaues.push_back(value); + value_len = value.size(); + } else { + if (!s.IsNotFound()) { + return s; + } + src_vlaues.push_back(""); + value_len = 0; + } + max_len = std::max(max_len, value_len); + } + + std::string dest_value = BitOpOperate(op, src_vlaues, max_len); + value_to_dest = dest_value; + *ret = dest_value.size(); + + auto& dest_inst = GetDBInstance(dest_key); + return dest_inst->Set(Slice(dest_key), Slice(dest_value)); } -Status Storage::BitPos(const Slice& key, int32_t bit, int64_t* ret) { return strings_db_->BitPos(key, bit, ret); } +Status Storage::BitPos(const Slice& key, int32_t bit, int64_t* ret) { + auto& inst = GetDBInstance(key); + return inst->BitPos(key, bit, ret); +} Status Storage::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret) { - return strings_db_->BitPos(key, bit, start_offset, ret); + auto& inst = 
GetDBInstance(key); + return inst->BitPos(key, bit, start_offset, ret); } Status Storage::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret) { - return strings_db_->BitPos(key, bit, start_offset, end_offset, ret); + auto& inst = GetDBInstance(key); + return inst->BitPos(key, bit, start_offset, end_offset, ret); } -Status Storage::Decrby(const Slice& key, int64_t value, int64_t* ret) { return strings_db_->Decrby(key, value, ret); } +Status Storage::Decrby(const Slice& key, int64_t value, int64_t* ret) { + auto& inst = GetDBInstance(key); + return inst->Decrby(key, value, ret); +} -Status Storage::Incrby(const Slice& key, int64_t value, int64_t* ret) { return strings_db_->Incrby(key, value, ret); } +Status Storage::Incrby(const Slice& key, int64_t value, int64_t* ret) { + auto& inst = GetDBInstance(key); + return inst->Incrby(key, value, ret); +} Status Storage::Incrbyfloat(const Slice& key, const Slice& value, std::string* ret) { - return strings_db_->Incrbyfloat(key, value, ret); + auto& inst = GetDBInstance(key); + return inst->Incrbyfloat(key, value, ret); } -Status Storage::Setex(const Slice& key, const Slice& value, int32_t ttl) { return strings_db_->Setex(key, value, ttl); } +Status Storage::Setex(const Slice& key, const Slice& value, int64_t ttl) { + auto& inst = GetDBInstance(key); + return inst->Setex(key, value, ttl); +} -Status Storage::Strlen(const Slice& key, int32_t* len) { return strings_db_->Strlen(key, len); } +Status Storage::Strlen(const Slice& key, int32_t* len) { + auto& inst = GetDBInstance(key); + return inst->Strlen(key, len); +} -Status Storage::PKSetexAt(const Slice& key, const Slice& value, int32_t timestamp) { - return strings_db_->PKSetexAt(key, value, timestamp); +Status Storage::PKSetexAt(const Slice& key, const Slice& value, int64_t timestamp) { + auto& inst = GetDBInstance(key); + return inst->PKSetexAt(key, value, timestamp); } // Hashes Commands Status Storage::HSet(const Slice& key, const 
Slice& field, const Slice& value, int32_t* res) { - return hashes_db_->HSet(key, field, value, res); + auto& inst = GetDBInstance(key); + return inst->HSet(key, field, value, res); } Status Storage::HGet(const Slice& key, const Slice& field, std::string* value) { - return hashes_db_->HGet(key, field, value); + auto& inst = GetDBInstance(key); + return inst->HGet(key, field, value); } -Status Storage::HMSet(const Slice& key, const std::vector& fvs) { return hashes_db_->HMSet(key, fvs); } +Status Storage::HMSet(const Slice& key, const std::vector& fvs) { + auto& inst = GetDBInstance(key); + return inst->HMSet(key, fvs); +} Status Storage::HMGet(const Slice& key, const std::vector& fields, std::vector* vss) { - return hashes_db_->HMGet(key, fields, vss); + auto& inst = GetDBInstance(key); + return inst->HMGet(key, fields, vss); } -Status Storage::HGetall(const Slice& key, std::vector* fvs) { return hashes_db_->HGetall(key, fvs); } +Status Storage::HGetall(const Slice& key, std::vector* fvs) { + auto& inst = GetDBInstance(key); + return inst->HGetall(key, fvs); +} Status Storage::HGetallWithTTL(const Slice& key, std::vector* fvs, int64_t* ttl) { - return hashes_db_->HGetallWithTTL(key, fvs, ttl); + auto& inst = GetDBInstance(key); + return inst->HGetallWithTTL(key, fvs, ttl); } -Status Storage::HKeys(const Slice& key, std::vector* fields) { return hashes_db_->HKeys(key, fields); } +Status Storage::HKeys(const Slice& key, std::vector* fields) { + auto& inst = GetDBInstance(key); + return inst->HKeys(key, fields); +} -Status Storage::HVals(const Slice& key, std::vector* values) { return hashes_db_->HVals(key, values); } +Status Storage::HVals(const Slice& key, std::vector* values) { + auto& inst = GetDBInstance(key); + return inst->HVals(key, values); +} Status Storage::HSetnx(const Slice& key, const Slice& field, const Slice& value, int32_t* ret) { - return hashes_db_->HSetnx(key, field, value, ret); + auto& inst = GetDBInstance(key); + return inst->HSetnx(key, field, 
value, ret); } -Status Storage::HLen(const Slice& key, int32_t* ret) { return hashes_db_->HLen(key, ret); } +Status Storage::HLen(const Slice& key, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->HLen(key, ret); +} Status Storage::HStrlen(const Slice& key, const Slice& field, int32_t* len) { - return hashes_db_->HStrlen(key, field, len); + auto& inst = GetDBInstance(key); + return inst->HStrlen(key, field, len); } -Status Storage::HExists(const Slice& key, const Slice& field) { return hashes_db_->HExists(key, field); } +Status Storage::HExists(const Slice& key, const Slice& field) { + auto& inst = GetDBInstance(key); + return inst->HExists(key, field); +} Status Storage::HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret) { - return hashes_db_->HIncrby(key, field, value, ret); + auto& inst = GetDBInstance(key); + return inst->HIncrby(key, field, value, ret); } Status Storage::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value) { - return hashes_db_->HIncrbyfloat(key, field, by, new_value); + auto& inst = GetDBInstance(key); + return inst->HIncrbyfloat(key, field, by, new_value); } Status Storage::HDel(const Slice& key, const std::vector& fields, int32_t* ret) { - return hashes_db_->HDel(key, fields, ret); + auto& inst = GetDBInstance(key); + return inst->HDel(key, fields, ret); } Status Storage::HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, std::vector* field_values, int64_t* next_cursor) { - return hashes_db_->HScan(key, cursor, pattern, count, field_values, next_cursor); + auto& inst = GetDBInstance(key); + return inst->HScan(key, cursor, pattern, count, field_values, next_cursor); } Status Storage::HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count, std::vector* field_values, std::string* next_field) { - return hashes_db_->HScanx(key, start_field, pattern, count, field_values, next_field); + auto& 
inst = GetDBInstance(key); + return inst->HScanx(key, start_field, pattern, count, field_values, next_field); } Status Storage::PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, int32_t limit, std::vector* field_values, std::string* next_field) { - return hashes_db_->PKHScanRange(key, field_start, field_end, pattern, limit, field_values, next_field); + auto& inst = GetDBInstance(key); + return inst->PKHScanRange(key, field_start, field_end, pattern, limit, field_values, next_field); } Status Storage::PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern, int32_t limit, std::vector* field_values, std::string* next_field) { - return hashes_db_->PKHRScanRange(key, field_start, field_end, pattern, limit, field_values, next_field); + auto& inst = GetDBInstance(key); + return inst->PKHRScanRange(key, field_start, field_end, pattern, limit, field_values, next_field); } // Sets Commands Status Storage::SAdd(const Slice& key, const std::vector& members, int32_t* ret) { - return sets_db_->SAdd(key, members, ret); + auto& inst = GetDBInstance(key); + return inst->SAdd(key, members, ret); } -Status Storage::SCard(const Slice& key, int32_t* ret) { return sets_db_->SCard(key, ret); } +Status Storage::SCard(const Slice& key, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->SCard(key, ret); +} Status Storage::SDiff(const std::vector& keys, std::vector* members) { - return sets_db_->SDiff(keys, members); + if (keys.empty()) { + return rocksdb::Status::Corruption("SDiff invalid parameter, no keys"); + } + members->clear(); + + Status s; + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(keys[0]); + s = inst->SDiff(keys, members); + return s; + } + + auto& inst = GetDBInstance(keys[0]); + std::vector keys0_members; + s = inst->SMembers(Slice(keys[0]), &keys0_members); + if 
(!s.ok() && !s.IsNotFound()) { + return s; + } + + for (const auto& member : keys0_members) { + int32_t exist = 0; + for (int idx = 1; idx < keys.size(); idx++) { + Slice pkey = Slice(keys[idx]); + auto& inst = GetDBInstance(pkey); + s = inst->SIsmember(pkey, Slice(member), &exist); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + if (exist) break; + } + if (!exist) { + members->push_back(member); + } + } + return Status::OK(); } Status Storage::SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { - return sets_db_->SDiffstore(destination, keys, value_to_dest, ret); + Status s; + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(keys[0]); + s = inst->SDiffstore(destination, keys, value_to_dest, ret); + return s; + } + + s = SDiff(keys, &value_to_dest); + if (!s.ok()) { + return s; + } + + auto& inst = GetDBInstance(destination); + s = inst->SetsDel(destination); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + + s = inst->SAdd(destination, value_to_dest, ret); + return s; } Status Storage::SInter(const std::vector& keys, std::vector* members) { - return sets_db_->SInter(keys, members); + Status s; + members->clear(); + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(keys[0]); + s = inst->SInter(keys, members); + return s; + } + + std::vector key0_members; + auto& inst = GetDBInstance(keys[0]); + s = inst->SMembers(keys[0], &key0_members); + if (s.IsNotFound()) { + return Status::OK(); + } + if (!s.ok()) { + return s; + } + + for (const auto member : key0_members) { + int32_t exist = 1; + for (int idx = 1; idx < keys.size(); idx++) { + Slice pkey(keys[idx]); + auto& inst = GetDBInstance(keys[idx]); + s = inst->SIsmember(keys[idx], member, &exist); + if (s.ok() && exist > 0) { + continue; + } else if (!s.IsNotFound()) { + return s; + } else { + 
break; + } + } + if (exist > 0) { + members->push_back(member); + } + } + return Status::OK(); } Status Storage::SInterstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { - return sets_db_->SInterstore(destination, keys, value_to_dest, ret); + Status s; + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(keys[0]); + s = inst->SInterstore(destination, keys, value_to_dest, ret); + return s; + } + + s = SInter(keys, &value_to_dest); + if (!s.ok()) { + return s; + } + + auto& dest_inst = GetDBInstance(destination); + s = dest_inst->SetsDel(destination); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + + s = dest_inst->SAdd(destination, value_to_dest, ret); + return s; } Status Storage::SIsmember(const Slice& key, const Slice& member, int32_t* ret) { - return sets_db_->SIsmember(key, member, ret); + auto& inst = GetDBInstance(key); + return inst->SIsmember(key, member, ret); } Status Storage::SMembers(const Slice& key, std::vector* members) { - return sets_db_->SMembers(key, members); + auto& inst = GetDBInstance(key); + return inst->SMembers(key, members); } Status Storage::SMembersWithTTL(const Slice& key, std::vector* members, int64_t *ttl) { - return sets_db_->SMembersWithTTL(key, members, ttl); + auto& inst = GetDBInstance(key); + return inst->SMembersWithTTL(key, members, ttl); } Status Storage::SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret) { - return sets_db_->SMove(source, destination, member, ret); + Status s; + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(source); + s = inst->SMove(source, destination, member, ret); + } + + auto& src_inst = GetDBInstance(source); + s = src_inst->SIsmember(source, member, ret); + if (s.IsNotFound()) { + *ret = 0; + return s; + } + if (!s.ok()) { + return s; + } + + s = 
src_inst->SRem(source, std::vector{member.ToString()}, ret); + if (!s.ok()) { + return s; + } + auto& dest_inst = GetDBInstance(destination); + int unused_ret; + return dest_inst->SAdd(destination, std::vector{member.ToString()}, &unused_ret); } Status Storage::SPop(const Slice& key, std::vector* members, int64_t count) { - Status status = sets_db_->SPop(key, members, count); + auto& inst = GetDBInstance(key); + Status status = inst->SPop(key, members, count); return status; } Status Storage::SRandmember(const Slice& key, int32_t count, std::vector* members) { - return sets_db_->SRandmember(key, count, members); + auto& inst = GetDBInstance(key); + return inst->SRandmember(key, count, members); } Status Storage::SRem(const Slice& key, const std::vector& members, int32_t* ret) { - return sets_db_->SRem(key, members, ret); + auto& inst = GetDBInstance(key); + return inst->SRem(key, members, ret); } Status Storage::SUnion(const std::vector& keys, std::vector* members) { - return sets_db_->SUnion(keys, members); + Status s; + members->clear(); + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(keys[0]); + return inst->SUnion(keys, members); + } + + using Iter = std::vector::iterator; + using Uset = std::unordered_set; + Uset member_set; + for (const auto& key : keys) { + std::vector vec; + auto& inst = GetDBInstance(key); + s = inst->SMembers(key, &vec); + if (s.IsNotFound()) { + continue; + } + if (!s.ok()) { + return s; + } + std::copy(std::move_iterator(vec.begin()), + std::move_iterator(vec.end()), + std::insert_iterator(member_set, member_set.begin())); + } + + std::copy(member_set.begin(), member_set.end(), std::back_inserter(*members)); + return Status::OK(); } Status Storage::SUnionstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { - return sets_db_->SUnionstore(destination, keys, value_to_dest, ret); + Status s; + 
value_to_dest.clear(); + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(destination); + s = inst->SUnionstore(destination, keys, value_to_dest, ret); + return s; + } + + s = SUnion(keys, &value_to_dest); + if (!s.ok()) { + return s; + } + *ret = value_to_dest.size(); + auto& dest_inst = GetDBInstance(destination); + s = dest_inst->SetsDel(destination); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + int unused_ret; + return dest_inst->SAdd(destination, value_to_dest, &unused_ret); } Status Storage::SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, std::vector* members, int64_t* next_cursor) { - return sets_db_->SScan(key, cursor, pattern, count, members, next_cursor); + auto& inst = GetDBInstance(key); + return inst->SScan(key, cursor, pattern, count, members, next_cursor); } Status Storage::LPush(const Slice& key, const std::vector& values, uint64_t* ret) { - return lists_db_->LPush(key, values, ret); + auto& inst = GetDBInstance(key); + return inst->LPush(key, values, ret); } Status Storage::RPush(const Slice& key, const std::vector& values, uint64_t* ret) { - return lists_db_->RPush(key, values, ret); + auto& inst = GetDBInstance(key); + return inst->RPush(key, values, ret); } Status Storage::LRange(const Slice& key, int64_t start, int64_t stop, std::vector* ret) { - return lists_db_->LRange(key, start, stop, ret); + ret->clear(); + auto& inst = GetDBInstance(key); + return inst->LRange(key, start, stop, ret); } Status Storage::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector* ret, int64_t *ttl) { - return lists_db_->LRangeWithTTL(key, start, stop, ret, ttl); + auto& inst = GetDBInstance(key); + return inst->LRangeWithTTL(key, start, stop, ret, ttl); } -Status Storage::LTrim(const Slice& key, int64_t start, int64_t stop) { return lists_db_->LTrim(key, start, stop); } +Status Storage::LTrim(const Slice& key, int64_t 
start, int64_t stop) { + auto& inst = GetDBInstance(key); + return inst->LTrim(key, start, stop); +} -Status Storage::LLen(const Slice& key, uint64_t* len) { return lists_db_->LLen(key, len); } +Status Storage::LLen(const Slice& key, uint64_t* len) { + auto& inst = GetDBInstance(key); + return inst->LLen(key, len); +} -Status Storage::LPop(const Slice& key, int64_t count, std::vector* elements) { return lists_db_->LPop(key, count, elements); } +Status Storage::LPop(const Slice& key, int64_t count, std::vector* elements) { + elements->clear(); + auto& inst = GetDBInstance(key); + return inst->LPop(key, count, elements); +} -Status Storage::RPop(const Slice& key, int64_t count, std::vector* elements) { return lists_db_->RPop(key, count, elements); } +Status Storage::RPop(const Slice& key, int64_t count, std::vector* elements) { + elements->clear(); + auto& inst = GetDBInstance(key); + return inst->RPop(key, count, elements); +} Status Storage::LIndex(const Slice& key, int64_t index, std::string* element) { - return lists_db_->LIndex(key, index, element); + element->clear(); + auto& inst = GetDBInstance(key); + return inst->LIndex(key, index, element); } Status Storage::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot, const std::string& value, int64_t* ret) { - return lists_db_->LInsert(key, before_or_after, pivot, value, ret); + auto& inst = GetDBInstance(key); + return inst->LInsert(key, before_or_after, pivot, value, ret); } Status Storage::LPushx(const Slice& key, const std::vector& values, uint64_t* len) { - return lists_db_->LPushx(key, values, len); + auto& inst = GetDBInstance(key); + return inst->LPushx(key, values, len); } Status Storage::RPushx(const Slice& key, const std::vector& values, uint64_t* len) { - return lists_db_->RPushx(key, values, len); + auto& inst = GetDBInstance(key); + return inst->RPushx(key, values, len); } Status Storage::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t* ret) { 
- return lists_db_->LRem(key, count, value, ret); + auto& inst = GetDBInstance(key); + return inst->LRem(key, count, value, ret); } -Status Storage::LSet(const Slice& key, int64_t index, const Slice& value) { return lists_db_->LSet(key, index, value); } +Status Storage::LSet(const Slice& key, int64_t index, const Slice& value) { + auto& inst = GetDBInstance(key); + return inst->LSet(key, index, value); +} Status Storage::RPoplpush(const Slice& source, const Slice& destination, std::string* element) { - return lists_db_->RPoplpush(source, destination, element); + Status s; + element->clear(); + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(source); + s = inst->RPoplpush(source, destination, element); + return s; + } + + auto& source_inst = GetDBInstance(source); + if (source.compare(destination) == 0) { + s = source_inst->RPoplpush(source, destination, element); + return s; + } + + std::vector elements; + s = source_inst->RPop(source, 1, &elements); + if (!s.ok()) { + return s; + } + *element = elements.front(); + auto& dest_inst = GetDBInstance(destination); + uint64_t ret; + s = dest_inst->LPush(destination, elements, &ret); + return s; } Status Storage::ZPopMax(const Slice& key, const int64_t count, std::vector* score_members) { - return zsets_db_->ZPopMax(key, count, score_members); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZPopMax(key, count, score_members); } Status Storage::ZPopMin(const Slice& key, const int64_t count, std::vector* score_members) { - return zsets_db_->ZPopMin(key, count, score_members); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZPopMin(key, count, score_members); } Status Storage::ZAdd(const Slice& key, const std::vector& score_members, int32_t* ret) { - return zsets_db_->ZAdd(key, score_members, ret); + auto& inst = GetDBInstance(key); + return inst->ZAdd(key, score_members, ret); } -Status 
Storage::ZCard(const Slice& key, int32_t* ret) { return zsets_db_->ZCard(key, ret); } +Status Storage::ZCard(const Slice& key, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->ZCard(key, ret); +} Status Storage::ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret) { - return zsets_db_->ZCount(key, min, max, left_close, right_close, ret); + auto& inst = GetDBInstance(key); + return inst->ZCount(key, min, max, left_close, right_close, ret); } Status Storage::ZIncrby(const Slice& key, const Slice& member, double increment, double* ret) { - return zsets_db_->ZIncrby(key, member, increment, ret); + auto& inst = GetDBInstance(key); + return inst->ZIncrby(key, member, increment, ret); } Status Storage::ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { - return zsets_db_->ZRange(key, start, stop, score_members); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRange(key, start, stop, score_members); } Status Storage::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector* score_members, int64_t *ttl) { - return zsets_db_->ZRangeWithTTL(key, start, stop, score_members, ttl); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRangeWithTTL(key, start, stop, score_members, ttl); } Status Storage::ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, std::vector* score_members) { // maximum number of zset is std::numeric_limits::max() - return zsets_db_->ZRangebyscore(key, min, max, left_close, right_close, std::numeric_limits::max(), 0, + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRangebyscore(key, min, max, left_close, right_close, std::numeric_limits::max(), 0, score_members); } Status Storage::ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count, int64_t offset, std::vector* score_members) 
{ - return zsets_db_->ZRangebyscore(key, min, max, left_close, right_close, count, offset, score_members); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRangebyscore(key, min, max, left_close, right_close, count, offset, score_members); } Status Storage::ZRank(const Slice& key, const Slice& member, int32_t* rank) { - return zsets_db_->ZRank(key, member, rank); + auto& inst = GetDBInstance(key); + return inst->ZRank(key, member, rank); } Status Storage::ZRem(const Slice& key, const std::vector& members, int32_t* ret) { - return zsets_db_->ZRem(key, members, ret); + auto& inst = GetDBInstance(key); + return inst->ZRem(key, members, ret); } Status Storage::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret) { - return zsets_db_->ZRemrangebyrank(key, start, stop, ret); + auto& inst = GetDBInstance(key); + return inst->ZRemrangebyrank(key, start, stop, ret); } Status Storage::ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret) { - return zsets_db_->ZRemrangebyscore(key, min, max, left_close, right_close, ret); + auto& inst = GetDBInstance(key); + return inst->ZRemrangebyscore(key, min, max, left_close, right_close, ret); } Status Storage::ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count, int64_t offset, std::vector* score_members) { - return zsets_db_->ZRevrangebyscore(key, min, max, left_close, right_close, count, offset, score_members); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRevrangebyscore(key, min, max, left_close, right_close, count, offset, score_members); } Status Storage::ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { - return zsets_db_->ZRevrange(key, start, stop, score_members); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRevrange(key, start, stop, score_members); } Status 
Storage::ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, std::vector* score_members) { // maximum number of zset is std::numeric_limits::max() - return zsets_db_->ZRevrangebyscore(key, min, max, left_close, right_close, std::numeric_limits::max(), 0, - score_members); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRevrangebyscore(key, min, max, left_close, right_close, std::numeric_limits::max(), + 0, score_members); } Status Storage::ZRevrank(const Slice& key, const Slice& member, int32_t* rank) { - return zsets_db_->ZRevrank(key, member, rank); + auto& inst = GetDBInstance(key); + return inst->ZRevrank(key, member, rank); } Status Storage::ZScore(const Slice& key, const Slice& member, double* ret) { - return zsets_db_->ZScore(key, member, ret); + auto& inst = GetDBInstance(key); + return inst->ZScore(key, member, ret); } Status Storage::ZUnionstore(const Slice& destination, const std::vector& keys, - const std::vector& weights, const AGGREGATE agg, std::map& value_to_dest, int32_t* ret) { - return zsets_db_->ZUnionstore(destination, keys, weights, agg, value_to_dest, ret); + const std::vector& weights, const AGGREGATE agg, + std::map& value_to_dest, int32_t* ret) { + value_to_dest.clear(); + Status s; + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(keys[0]); + s = inst->ZUnionstore(destination, keys, weights, agg, value_to_dest, ret); + return s; + } + + for (int idx = 0; idx < keys.size(); idx++) { + Slice key = Slice(keys[idx]); + auto& inst = GetDBInstance(key); + std::map member_to_score; + double weight = idx >= weights.size() ? 
1 : weights[idx]; + s = inst->ZGetAll(key, weight, &member_to_score); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + for (const auto& key_score : member_to_score) { + const std::string& member = key_score.first; + double score = key_score.second; + if (value_to_dest.find(member) == value_to_dest.end()) { + value_to_dest[member] = score; + continue; + } + switch (agg) { + case SUM: + score += value_to_dest[member]; + break; + case MIN: + score = std::min(value_to_dest[member], score); + break; + case MAX: + score = std::max(value_to_dest[member], score); + break; + } + value_to_dest[member] = (score == -0.0) ? 0 : score; + } + } + + BaseMetaKey base_destination(destination); + auto& inst = GetDBInstance(destination); + s = inst->ZsetsDel(destination); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + std::vector score_members; + std::for_each(value_to_dest.begin(), value_to_dest.end(), [&score_members](auto kv) { + score_members.emplace_back(kv.second, kv.first); + }); + *ret = score_members.size(); + int unused_ret; + return inst->ZAdd(destination, score_members, &unused_ret); } Status Storage::ZInterstore(const Slice& destination, const std::vector& keys, - const std::vector& weights, const AGGREGATE agg, std::vector& value_to_dest, int32_t* ret) { - return zsets_db_->ZInterstore(destination, keys, weights, agg, value_to_dest, ret); + const std::vector& weights, const AGGREGATE agg, + std::vector& value_to_dest, int32_t* ret) { + Status s; + value_to_dest.clear(); + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(keys[0]); + s = inst->ZInterstore(destination, keys, weights, agg, value_to_dest, ret); + return s; + } + + Slice key = Slice(keys[0]); + auto& inst = GetDBInstance(key); + std::map member_to_score; + double weight = weights.empty() ? 
1 : weights[0]; + s = inst->ZGetAll(key, weight, &member_to_score); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + + for (const auto member_score : member_to_score) { + std::string member = member_score.first; + double score = member_score.second; + bool reliable = true; + + for (int idx = 1; idx < keys.size(); idx++) { + double weight = idx >= weights.size() ? 1 : weights[idx]; + auto& inst = GetDBInstance(keys[idx]); + double ret_score; + s = inst->ZScore(keys[idx], member, &ret_score); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + if (s.IsNotFound()) { + reliable = false; + break; + } + switch (agg) { + case SUM: + score += ret_score * weight; + break; + case MIN: + score = std::min(score, ret_score * weight); + break; + case MAX: + score = std::max(score, ret_score * weight); + break; + } + } + if (reliable) { + value_to_dest.emplace_back(score, member); + } + } + + BaseMetaKey base_destination(destination); + auto& ninst = GetDBInstance(destination); + + s = ninst->ZsetsDel(destination); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + *ret = value_to_dest.size(); + int unused_ret; + return ninst->ZAdd(destination, value_to_dest, &unused_ret); } -Status Storage::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, - std::vector* members) { - return zsets_db_->ZRangebylex(key, min, max, left_close, right_close, members); +Status Storage::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, + bool right_close, std::vector* members) { + members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRangebylex(key, min, max, left_close, right_close, members); } -Status Storage::ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, - int32_t* ret) { - return zsets_db_->ZLexcount(key, min, max, left_close, right_close, ret); +Status Storage::ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool 
left_close, + bool right_close, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->ZLexcount(key, min, max, left_close, right_close, ret); } -Status Storage::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close, - int32_t* ret) { - return zsets_db_->ZRemrangebylex(key, min, max, left_close, right_close, ret); +Status Storage::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, + bool left_close, bool right_close, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->ZRemrangebylex(key, min, max, left_close, right_close, ret); } Status Storage::ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, std::vector* score_members, int64_t* next_cursor) { - return zsets_db_->ZScan(key, cursor, pattern, count, score_members, next_cursor); + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZScan(key, cursor, pattern, count, score_members, next_cursor); } Status Storage::XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args) { - return streams_db_->XAdd(key, serialized_message, args); + auto& inst = GetDBInstance(key); + return inst->XAdd(key, serialized_message, args); } Status Storage::XDel(const Slice& key, const std::vector& ids, int32_t& ret) { - return streams_db_->XDel(key, ids, ret); + auto& inst = GetDBInstance(key); + return inst->XDel(key, ids, ret); } Status Storage::XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count) { - return streams_db_->XTrim(key, args, count); + auto& inst = GetDBInstance(key); + return inst->XTrim(key, args, count); } Status Storage::XRange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages) { - return streams_db_->XRange(key, args, id_messages); + auto& inst = GetDBInstance(key); + return inst->XRange(key, args, id_messages); } Status Storage::XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages) { - 
return streams_db_->XRevrange(key, args, id_messages); + auto& inst = GetDBInstance(key); + return inst->XRevrange(key, args, id_messages); } Status Storage::XLen(const Slice& key, int32_t& len) { - return streams_db_->XLen(key, len); + auto& inst = GetDBInstance(key); + return inst->XLen(key, len); } Status Storage::XRead(const StreamReadGroupReadArgs& args, std::vector>& results, std::vector& reserved_keys) { - return streams_db_->XRead(args, results, reserved_keys); + auto& inst = GetDBInstance(key); + return inst->XRead(args, results, reserved_keys); } Status Storage::XInfo(const Slice& key, StreamInfoResult &result) { - return streams_db_->XInfo(key, result); + auto& inst = GetDBInstance(key); + return inst->XInfo(key, result); } // Keys Commands -int32_t Storage::Expire(const Slice& key, int32_t ttl, std::map* type_status) { +int32_t Storage::Expire(const Slice& key, int64_t ttl, std::map* type_status) { + type_status->clear(); int32_t ret = 0; bool is_corruption = false; + auto& inst = GetDBInstance(key); // Strings - Status s = strings_db_->Expire(key, ttl); + Status s = inst->StringsExpire(key, ttl); if (s.ok()) { ret++; } else if (!s.IsNotFound()) { @@ -587,7 +1160,7 @@ int32_t Storage::Expire(const Slice& key, int32_t ttl, std::mapExpire(key, ttl); + s = inst->HashesExpire(key, ttl); if (s.ok()) { ret++; } else if (!s.IsNotFound()) { @@ -596,7 +1169,7 @@ int32_t Storage::Expire(const Slice& key, int32_t ttl, std::mapExpire(key, ttl); + s = inst->SetsExpire(key, ttl); if (s.ok()) { ret++; } else if (!s.IsNotFound()) { @@ -605,7 +1178,7 @@ int32_t Storage::Expire(const Slice& key, int32_t ttl, std::mapExpire(key, ttl); + s = inst->ListsExpire(key, ttl); if (s.ok()) { ret++; } else if (!s.IsNotFound()) { @@ -614,7 +1187,7 @@ int32_t Storage::Expire(const Slice& key, int32_t ttl, std::mapExpire(key, ttl); + s = inst->ZsetsExpire(key, ttl); if (s.ok()) { ret++; } else if (!s.IsNotFound()) { @@ -635,8 +1208,9 @@ int64_t Storage::Del(const std::vector& keys, 
std::mapDel(key); + Status s = inst->StringsDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -645,7 +1219,7 @@ int64_t Storage::Del(const std::vector& keys, std::mapDel(key); + s = inst->HashesDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -654,7 +1228,7 @@ int64_t Storage::Del(const std::vector& keys, std::mapDel(key); + s = inst->SetsDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -663,7 +1237,7 @@ int64_t Storage::Del(const std::vector& keys, std::mapDel(key); + s = inst->ListsDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -672,7 +1246,7 @@ int64_t Storage::Del(const std::vector& keys, std::mapDel(key); + s = inst->ZsetsDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -703,10 +1277,11 @@ int64_t Storage::DelByType(const std::vector& keys, const DataType& bool is_corruption = false; for (const auto& key : keys) { + auto& inst = GetDBInstance(key); switch (type) { // Strings case DataType::kStrings: { - s = strings_db_->Del(key); + s = inst->StringsDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -716,7 +1291,7 @@ int64_t Storage::DelByType(const std::vector& keys, const DataType& } // Hashes case DataType::kHashes: { - s = hashes_db_->Del(key); + s = inst->HashesDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -726,7 +1301,7 @@ int64_t Storage::DelByType(const std::vector& keys, const DataType& } // Sets case DataType::kSets: { - s = sets_db_->Del(key); + s = inst->SetsDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -736,7 +1311,7 @@ int64_t Storage::DelByType(const std::vector& keys, const DataType& } // Lists case DataType::kLists: { - s = lists_db_->Del(key); + s = inst->ListsDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -746,7 +1321,7 @@ int64_t Storage::DelByType(const std::vector& keys, const DataType& } // ZSets case DataType::kZSets: { - s = zsets_db_->Del(key); + s = inst->ZsetsDel(key); if 
(s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -786,7 +1361,8 @@ int64_t Storage::Exists(const std::vector& keys, std::mapGet(key, &value); + auto& inst = GetDBInstance(key); + s = inst->Get(key, &value); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -794,7 +1370,7 @@ int64_t Storage::Exists(const std::vector& keys, std::mapHLen(key, &ret); + s = inst->HLen(key, &ret); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -802,7 +1378,7 @@ int64_t Storage::Exists(const std::vector& keys, std::mapSCard(key, &ret); + s = inst->SCard(key, &ret); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -810,7 +1386,7 @@ int64_t Storage::Exists(const std::vector& keys, std::mapLLen(key, &llen); + s = inst->LLen(key, &llen); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -818,7 +1394,7 @@ int64_t Storage::Exists(const std::vector& keys, std::mapZCard(key, &ret); + s = inst->ZCard(key, &ret); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -836,6 +1412,7 @@ int64_t Storage::Exists(const std::vector& keys, std::map* keys) { + assert(is_classic_mode_); keys->clear(); bool is_finish; int64_t leftover_visits = count; @@ -844,231 +1421,78 @@ int64_t Storage::Scan(const DataType& dtype, int64_t cursor, const std::string& std::string start_key; std::string next_key; std::string prefix; + char key_type; - prefix = isTailWildcard(pattern) ? pattern.substr(0, pattern.size() - 1) : ""; - + // invalid cursor if (cursor < 0) { return cursor_ret; - } else { - Status s = GetStartKey(dtype, cursor, &start_key); - if (s.IsNotFound()) { - // If want to scan all the databases, we start with the strings database - start_key = (dtype == DataType::kAll ? 
DataTypeTag[kStrings] : DataTypeTag[dtype]) + prefix; - cursor = 0; - } } - char key_type = start_key.at(0); - start_key.erase(start_key.begin()); - switch (key_type) { - case 'k': - is_finish = strings_db_->Scan(start_key, pattern, keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("k") + next_key); - break; - } else if (is_finish) { - if (DataType::kStrings == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("h") + prefix); - break; - } - } - start_key = prefix; - case 'h': - is_finish = hashes_db_->Scan(start_key, pattern, keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("h") + next_key); - break; - } else if (is_finish) { - if (DataType::kHashes == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("s") + prefix); - break; - } - } - start_key = prefix; - case 's': - is_finish = sets_db_->Scan(start_key, pattern, keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("s") + next_key); - break; - } else if (is_finish) { - if (DataType::kSets == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("l") + prefix); - break; - } - } - start_key = prefix; - case 'l': - is_finish = lists_db_->Scan(start_key, pattern, keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, 
std::string("l") + next_key); - break; - } else if (is_finish) { - if (DataType::kLists == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("z") + prefix); - break; - } - } - start_key = prefix; - case 'x': - is_finish = streams_db_->Scan(start_key, pattern, keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(DataType::kStreams, cursor_ret, std::string("x") + next_key); - break; - } else if (is_finish) { - if (DataType::kStreams == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(DataType::kStreams, cursor_ret, std::string("k") + prefix); - break; - } - } - case 'z': - is_finish = zsets_db_->Scan(start_key, pattern, keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("z") + next_key); - break; - } else if (is_finish) { - cursor_ret = 0; - break; - } + // get seek by corsor + prefix = isTailWildcard(pattern) ? pattern.substr(0, pattern.size() - 1) : ""; + Status s = LoadCursorStartKey(dtype, cursor, &key_type, &start_key); + if (!s.ok()) { + // If want to scan all the databases, we start with the strings database + key_type = dtype == DataType::kAll ? 
DataTypeTag[DataType::kStrings] : DataTypeTag[dtype]; + start_key = prefix; + cursor = 0; + } + + // collect types to scan + std::vector types; + if (DataType::kAll == dtype) { + auto iter_end = std::end(DataTypeTag); + auto pos = std::find(std::begin(DataTypeTag), iter_end, key_type); + if (pos == iter_end) { + LOG(WARNING) << "Invalid key_type: " << key_type; + return 0; + } + std::copy(pos, iter_end, std::back_inserter(types)); + } else { + types.push_back(DataTypeTag[dtype]); } - return cursor_ret; -} -int64_t Storage::PKExpireScan(const DataType& dtype, int64_t cursor, int32_t min_ttl, int32_t max_ttl, int64_t count, - std::vector* keys) { - keys->clear(); - bool is_finish; - int64_t leftover_visits = count; - int64_t step_length = count; - int64_t cursor_ret = 0; - std::string start_key; - std::string next_key; + for (const auto& type : types) { + std::vector inst_iters; + for (const auto& inst : insts_) { + IterSptr iter_sptr; + iter_sptr.reset(inst->CreateIterator(type, pattern, + nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + inst_iters.push_back(iter_sptr); + } - int64_t curtime; - rocksdb::Env::Default()->GetCurrentTime(&curtime); + BaseMetaKey base_start_key(start_key); + MergingIterator miter(inst_iters); + miter.Seek(base_start_key.Encode().ToString()); + while (miter.Valid() && count > 0) { + keys->push_back(miter.Key()); + miter.Next(); + count--; + } - if (cursor < 0) { - return cursor_ret; - } else { - Status s = GetStartKey(dtype, cursor, &start_key); - if (s.IsNotFound()) { - // If want to scan all the databases, we start with the strings database - start_key = std::string(1, dtype == DataType::kAll ? 
DataTypeTag[kStrings] : DataTypeTag[dtype]); - cursor = 0; + bool is_finish = !miter.Valid(); + if (miter.Valid() && + (miter.Key().compare(prefix) <= 0 || + miter.Key().substr(0, prefix.size()) == prefix)) { + is_finish = false; } - } - char key_type = start_key.at(0); - start_key.erase(start_key.begin()); - switch (key_type) { - case 'k': - is_finish = strings_db_->PKExpireScan(start_key, static_cast(curtime + min_ttl), - static_cast(curtime + max_ttl), keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("k") + next_key); - break; - } else if (is_finish) { - if (DataType::kStrings == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("h")); - break; - } - } - start_key = ""; - case 'h': - is_finish = hashes_db_->PKExpireScan(start_key, static_cast(curtime + min_ttl), - static_cast(curtime + max_ttl), keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("h") + next_key); - break; - } else if (is_finish) { - if (DataType::kHashes == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("s")); - break; - } - } - start_key = ""; - case 's': - is_finish = sets_db_->PKExpireScan(start_key, static_cast(curtime + min_ttl), - static_cast(curtime + max_ttl), keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("s") + next_key); - break; - } else if (is_finish) { - if (DataType::kSets == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - 
StoreCursorStartKey(dtype, cursor_ret, std::string("l")); - break; - } - } - start_key = ""; - case 'l': - is_finish = lists_db_->PKExpireScan(start_key, static_cast(curtime + min_ttl), - static_cast(curtime + max_ttl), keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("l") + next_key); - break; - } else if (is_finish) { - if (DataType::kLists == dtype) { - cursor_ret = 0; - break; - } else if (leftover_visits == 0) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("z")); - break; - } - } - start_key = ""; - case 'z': - is_finish = zsets_db_->PKExpireScan(start_key, static_cast(curtime + min_ttl), - static_cast(curtime + max_ttl), keys, &leftover_visits, &next_key); - if ((leftover_visits == 0) && !is_finish) { - cursor_ret = cursor + step_length; - StoreCursorStartKey(dtype, cursor_ret, std::string("z") + next_key); - break; - } else if (is_finish) { - cursor_ret = 0; - break; - } + // for specific type scan, reach the end + if (is_finish && dtype != DataType::kAll) { + return cursor_ret; + } + + // already get count's element, while iterator is still valid, + // store cursor + if (!is_finish) { + next_key = miter.Key(); + cursor_ret = cursor + step_length; + StoreCursorStartKey(dtype, cursor_ret, type, next_key); + return cursor_ret; + } + + // for all type scan, move to next type, reset start_key + start_key = prefix; } return cursor_ret; } @@ -1076,88 +1500,138 @@ int64_t Storage::PKExpireScan(const DataType& dtype, int64_t cursor, int32_t min Status Storage::PKScanRange(const DataType& data_type, const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, std::vector* keys, std::vector* kvs, std::string* next_key) { - Status s; - keys->clear(); next_key->clear(); - switch (data_type) { - case DataType::kStrings: - s = strings_db_->PKScanRange(key_start, key_end, pattern, 
limit, kvs, next_key); - break; - case DataType::kHashes: - s = hashes_db_->PKScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - case DataType::kLists: - s = lists_db_->PKScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - case DataType::kZSets: - s = zsets_db_->PKScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - case DataType::kSets: - s = sets_db_->PKScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - case DataType::kStreams: - s = streams_db_->PKScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - default: - s = Status::Corruption("Unsupported data types"); - break; + std::string key; + std::string value; + + BaseMetaKey base_key_start(key_start); + BaseMetaKey base_key_end(key_end); + Slice base_key_end_slice(base_key_end.Encode()); + + bool start_no_limit = key_start.empty(); + bool end_no_limit = key_end.empty(); + if (!start_no_limit && !end_no_limit && key_start.compare(key_end) > 0) { + return Status::InvalidArgument("error in given range"); } - return s; + + std::vector inst_iters; + for (const auto& inst : insts_) { + IterSptr iter_sptr; + iter_sptr.reset(inst->CreateIterator(data_type, pattern.ToString(), + nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + inst_iters.push_back(iter_sptr); + } + + MergingIterator miter(inst_iters); + if (start_no_limit) { + miter.SeekToFirst(); + } else { + std::string temp = base_key_start.Encode().ToString(); + miter.Seek(temp); + } + + while (miter.Valid() && limit > 0 && + (end_no_limit || miter.Key().compare(key_end.ToString()) <= 0)) { + if (data_type == DataType::kStrings) { + kvs->push_back({miter.Key(), miter.Value()}); + } else { + keys->push_back(miter.Key()); + } + limit--; + miter.Next(); + } + + if (miter.Valid() && (end_no_limit || miter.Key().compare(key_end.ToString()) <= 0)) { + *next_key = miter.Key(); + } + return Status::OK(); } Status Storage::PKRScanRange(const DataType& data_type, 
const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, std::vector* keys, std::vector* kvs, std::string* next_key) { - Status s; - keys->clear(); next_key->clear(); - switch (data_type) { - case DataType::kStrings: - s = strings_db_->PKRScanRange(key_start, key_end, pattern, limit, kvs, next_key); - break; - case DataType::kHashes: - s = hashes_db_->PKRScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - case DataType::kLists: - s = lists_db_->PKRScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - case DataType::kZSets: - s = zsets_db_->PKRScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - case DataType::kSets: - s = sets_db_->PKRScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - case DataType::kStreams: - s = streams_db_->PKRScanRange(key_start, key_end, pattern, limit, keys, next_key); - break; - default: - s = Status::Corruption("Unsupported data types"); - break; + std::string key, value; + BaseMetaKey base_key_start(key_start); + BaseMetaKey base_key_end(key_end); + Slice base_key_start_slice = Slice(base_key_start.Encode()); + + bool start_no_limit = key_start.empty(); + bool end_no_limit = key_end.empty(); + + if (!start_no_limit && !end_no_limit && key_start.compare(key_end) < 0) { + return Status::InvalidArgument("error in given range"); } - return s; + + std::vector inst_iters; + for (const auto& inst : insts_) { + IterSptr iter_sptr; + iter_sptr.reset(inst->CreateIterator(data_type, pattern.ToString(), + nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + inst_iters.push_back(iter_sptr); + } + MergingIterator miter(inst_iters); + if (start_no_limit) { + miter.SeekToLast(); + } else { + miter.SeekForPrev(base_key_start.Encode().ToString()); + } + + while (miter.Valid() && limit > 0 && + (end_no_limit || miter.Key().compare(key_end.ToString()) >= 0)) { + if (data_type == DataType::kStrings) { + kvs->push_back({miter.Key(), miter.Value()}); + 
} else { + keys->push_back(miter.Key()); + } + limit--; + miter.Prev(); + } + + if (miter.Valid() && (end_no_limit || miter.Key().compare(key_end.ToString()) >= 0)) { + *next_key = miter.Key(); + } + return Status::OK(); } Status Storage::PKPatternMatchDel(const DataType& data_type, const std::string& pattern, int32_t* ret) { Status s; - switch (data_type) { - case DataType::kStrings: - s = strings_db_->PKPatternMatchDel(pattern, ret); - break; - case DataType::kHashes: - s = hashes_db_->PKPatternMatchDel(pattern, ret); - break; - case DataType::kLists: - s = lists_db_->PKPatternMatchDel(pattern, ret); - break; - case DataType::kZSets: - s = zsets_db_->PKPatternMatchDel(pattern, ret); - break; - case DataType::kSets: - s = sets_db_->PKPatternMatchDel(pattern, ret); - break; - default: - s = Status::Corruption("Unsupported data type"); - break; + for (const auto& inst : insts_) { + switch (data_type) { + case DataType::kStrings: { + s = inst->StringsPKPatternMatchDel(pattern, ret); + if (!s.ok()) { + return s; + } + } + case DataType::kHashes: { + s = inst->HashesPKPatternMatchDel(pattern, ret); + if (!s.ok()) { + return s; + } + } + case DataType::kLists: { + s = inst->ListsPKPatternMatchDel(pattern, ret); + if (!s.ok()) { + return s; + } + } + case DataType::kZSets: { + s = inst->ZsetsPKPatternMatchDel(pattern, ret); + if (!s.ok()) { + return s; + } + } + case DataType::kSets: { + s = inst->SetsPKPatternMatchDel(pattern, ret); + if (!s.ok()) { + return s; + } + } + default: + s = Status::Corruption("Unsupported data types"); + break; + } } return s; } @@ -1167,38 +1641,40 @@ Status Storage::Scanx(const DataType& data_type, const std::string& start_key, c Status s; keys->clear(); next_key->clear(); - switch (data_type) { - case DataType::kStrings: - strings_db_->Scan(start_key, pattern, keys, &count, next_key); - break; - case DataType::kHashes: - hashes_db_->Scan(start_key, pattern, keys, &count, next_key); - break; - case DataType::kLists: - 
lists_db_->Scan(start_key, pattern, keys, &count, next_key); - break; - case DataType::kZSets: - zsets_db_->Scan(start_key, pattern, keys, &count, next_key); - break; - case DataType::kSets: - sets_db_->Scan(start_key, pattern, keys, &count, next_key); - break; - case DataType::kStreams: - streams_db_->Scan(start_key, pattern, keys, &count, next_key); - break; - default: - Status::Corruption("Unsupported data types"); - break; + + std::vector inst_iters; + for (const auto& inst : insts_) { + IterSptr iter_sptr; + iter_sptr.reset(inst->CreateIterator(data_type, pattern, + nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + inst_iters.push_back(iter_sptr); } - return s; + + BaseMetaKey base_start_key(start_key); + MergingIterator miter(inst_iters); + miter.Seek(base_start_key.Encode().ToString()); + while (miter.Valid() && count > 0) { + keys->push_back(miter.Key()); + miter.Next(); + count--; + } + + std::string prefix = isTailWildcard(pattern) ? pattern.substr(0, pattern.size() - 1) : ""; + if (miter.Valid() && (miter.Key().compare(prefix) <= 0 || miter.Key().substr(0, prefix.size()) == prefix)) { + *next_key = miter.Key(); + } else { + *next_key = ""; + } + return Status::OK(); } -int32_t Storage::Expireat(const Slice& key, int32_t timestamp, std::map* type_status) { +int32_t Storage::Expireat(const Slice& key, int64_t timestamp, std::map* type_status) { Status s; int32_t count = 0; bool is_corruption = false; - s = strings_db_->Expireat(key, timestamp); + auto& inst = GetDBInstance(key); + s = inst->StringsExpireat(key, timestamp); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1206,7 +1682,7 @@ int32_t Storage::Expireat(const Slice& key, int32_t timestamp, std::mapExpireat(key, timestamp); + s = inst->HashesExpireat(key, timestamp); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1214,7 +1690,7 @@ int32_t Storage::Expireat(const Slice& key, int32_t timestamp, std::mapExpireat(key, timestamp); + s = inst->SetsExpireat(key, timestamp); if 
(s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1222,7 +1698,7 @@ int32_t Storage::Expireat(const Slice& key, int32_t timestamp, std::mapExpireat(key, timestamp); + s = inst->ListsExpireat(key, timestamp); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1230,19 +1706,18 @@ int32_t Storage::Expireat(const Slice& key, int32_t timestamp, std::mapExpireat(key, timestamp); + s = inst->ZsetsExpireat(key, timestamp); if (s.ok()) { count++; } else if (!s.IsNotFound()) { is_corruption = true; - (*type_status)[DataType::kLists] = s; + (*type_status)[DataType::kZSets] = s; } if (is_corruption) { return -1; - } else { - return count; } + return count; } int32_t Storage::Persist(const Slice& key, std::map* type_status) { @@ -1250,7 +1725,8 @@ int32_t Storage::Persist(const Slice& key, std::map* type_stat int32_t count = 0; bool is_corruption = false; - s = strings_db_->Persist(key); + auto& inst = GetDBInstance(key); + s = inst->StringsPersist(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1258,7 +1734,7 @@ int32_t Storage::Persist(const Slice& key, std::map* type_stat (*type_status)[DataType::kStrings] = s; } - s = hashes_db_->Persist(key); + s = inst->HashesPersist(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1266,7 +1742,7 @@ int32_t Storage::Persist(const Slice& key, std::map* type_stat (*type_status)[DataType::kHashes] = s; } - s = sets_db_->Persist(key); + s = inst->SetsPersist(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1274,7 +1750,7 @@ int32_t Storage::Persist(const Slice& key, std::map* type_stat (*type_status)[DataType::kSets] = s; } - s = lists_db_->Persist(key); + s = inst->ListsPersist(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1282,12 +1758,12 @@ int32_t Storage::Persist(const Slice& key, std::map* type_stat (*type_status)[DataType::kLists] = s; } - s = zsets_db_->Persist(key); + s = inst->ZsetsPersist(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { is_corruption = 
true; - (*type_status)[DataType::kLists] = s; + (*type_status)[DataType::kZSets] = s; } if (is_corruption) { @@ -1302,7 +1778,8 @@ std::map Storage::TTL(const Slice& key, std::map ret; int64_t timestamp = 0; - s = strings_db_->TTL(key, ×tamp); + auto& inst = GetDBInstance(key); + s = inst->StringsTTL(key, ×tamp); if (s.ok() || s.IsNotFound()) { ret[DataType::kStrings] = timestamp; } else if (!s.IsNotFound()) { @@ -1310,7 +1787,7 @@ std::map Storage::TTL(const Slice& key, std::mapTTL(key, ×tamp); + s = inst->HashesTTL(key, ×tamp); if (s.ok() || s.IsNotFound()) { ret[DataType::kHashes] = timestamp; } else if (!s.IsNotFound()) { @@ -1318,7 +1795,7 @@ std::map Storage::TTL(const Slice& key, std::mapTTL(key, ×tamp); + s = inst->ListsTTL(key, ×tamp); if (s.ok() || s.IsNotFound()) { ret[DataType::kLists] = timestamp; } else if (!s.IsNotFound()) { @@ -1326,7 +1803,7 @@ std::map Storage::TTL(const Slice& key, std::mapTTL(key, ×tamp); + s = inst->SetsTTL(key, ×tamp); if (s.ok() || s.IsNotFound()) { ret[DataType::kSets] = timestamp; } else if (!s.IsNotFound()) { @@ -1334,7 +1811,7 @@ std::map Storage::TTL(const Slice& key, std::mapTTL(key, ×tamp); + s = inst->ZsetsTTL(key, ×tamp); if (s.ok() || s.IsNotFound()) { ret[DataType::kZSets] = timestamp; } else if (!s.IsNotFound()) { @@ -1349,7 +1826,8 @@ Status Storage::GetType(const std::string& key, bool single, std::vectorGet(key, &value); + auto& inst = GetDBInstance(key); + s = inst->Get(key, &value); if (s.ok()) { types.emplace_back("string"); } else if (!s.IsNotFound()) { @@ -1360,7 +1838,7 @@ Status Storage::GetType(const std::string& key, bool single, std::vectorHLen(key, &hashes_len); + s = inst->HLen(key, &hashes_len); if (s.ok() && hashes_len != 0) { types.emplace_back("hash"); } else if (!s.IsNotFound()) { @@ -1371,7 +1849,7 @@ Status Storage::GetType(const std::string& key, bool single, std::vectorLLen(key, &lists_len); + s = inst->LLen(key, &lists_len); if (s.ok() && lists_len != 0) { types.emplace_back("list"); } 
else if (!s.IsNotFound()) { @@ -1382,7 +1860,7 @@ Status Storage::GetType(const std::string& key, bool single, std::vectorZCard(key, &zsets_size); + s = inst->ZCard(key, &zsets_size); if (s.ok() && zsets_size != 0) { types.emplace_back("zset"); } else if (!s.IsNotFound()) { @@ -1393,7 +1871,7 @@ Status Storage::GetType(const std::string& key, bool single, std::vectorSCard(key, &sets_size); + s = inst->SCard(key, &sets_size); if (s.ok() && sets_size != 0) { types.emplace_back("set"); } else if (!s.IsNotFound()) { @@ -1406,90 +1884,68 @@ Status Storage::GetType(const std::string& key, bool single, std::vector* keys) { - Status s; - if (data_type == DataType::kStrings) { - s = strings_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } - } else if (data_type == DataType::kHashes) { - s = hashes_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } - } else if (data_type == DataType::kZSets) { - s = zsets_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } - } else if (data_type == DataType::kSets) { - s = sets_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } - } else if (data_type == DataType::kLists) { - s = lists_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } - } else if (data_type == DataType::kStreams) { - s = streams_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } + keys->clear(); + std::vector types; + if (data_type == DataType::kAll) { + types.push_back(DataType::kStrings); + types.push_back(DataType::kHashes); + types.push_back(DataType::kLists); + types.push_back(DataType::kZSets); + types.push_back(DataType::kSets); } else { - s = strings_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } - s = hashes_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } - s = zsets_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } - s = sets_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; + types.push_back(data_type); + } + + for (const auto& type : 
types) { + std::vector inst_iters; + for (const auto& inst : insts_) { + IterSptr inst_iter; + inst_iter.reset(inst->CreateIterator(type, pattern, + nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + inst_iters.push_back(inst_iter); } - s = lists_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; + + MergingIterator miter(inst_iters); + miter.SeekToFirst(); + while (miter.Valid()) { + keys->push_back(miter.Key()); + miter.Next(); } s = streams_db_->ScanKeys(pattern, keys); if (!s.ok()) { return s; } } - return s; + + return Status::OK(); } void Storage::ScanDatabase(const DataType& type) { - switch (type) { - case kStrings: - strings_db_->ScanDatabase(); - break; - case kHashes: - hashes_db_->ScanDatabase(); - break; - case kSets: - sets_db_->ScanDatabase(); - break; - case kZSets: - zsets_db_->ScanDatabase(); - break; - case kLists: - lists_db_->ScanDatabase(); - break; - case kAll: - strings_db_->ScanDatabase(); - hashes_db_->ScanDatabase(); - sets_db_->ScanDatabase(); - zsets_db_->ScanDatabase(); - lists_db_->ScanDatabase(); - break; + for (const auto& inst : insts_) { + switch (type) { + case kStrings: + inst->ScanStrings(); + break; + case kHashes: + inst->ScanHashes(); + break; + case kSets: + inst->ScanSets(); + break; + case kZSets: + inst->ScanZsets(); + break; + case kLists: + inst->ScanLists(); + break; + case kAll: + inst->ScanStrings(); + inst->ScanHashes(); + inst->ScanSets(); + inst->ScanZsets(); + inst->ScanLists(); + break; + } } } @@ -1503,7 +1959,8 @@ Status Storage::PfAdd(const Slice& key, const std::vector& values, std::string value; std::string registers; std::string result; - Status s = strings_db_->Get(key, &value); + auto& inst = GetDBInstance(key); + Status s = inst->Get(key, &value); if (s.ok()) { registers = value; } else if (s.IsNotFound()) { @@ -1521,7 +1978,7 @@ Status Storage::PfAdd(const Slice& key, const std::vector& values, if (previous != now || (s.IsNotFound() && values.empty())) { *update = true; } - s = 
strings_db_->Set(key, result); + s = inst->Set(key, result); return s; } @@ -1532,7 +1989,8 @@ Status Storage::PfCount(const std::vector& keys, int64_t* result) { std::string value; std::string first_registers; - Status s = strings_db_->Get(keys[0], &value); + auto& inst = GetDBInstance(keys[0]); + Status s = inst->Get(keys[0], &value); if (s.ok()) { first_registers = std::string(value.data(), value.size()); } else if (s.IsNotFound()) { @@ -1543,7 +2001,8 @@ Status Storage::PfCount(const std::vector& keys, int64_t* result) { for (size_t i = 1; i < keys.size(); ++i) { std::string value; std::string registers; - s = strings_db_->Get(keys[i], &value); + auto& inst = GetDBInstance(keys[i]); + s = inst->Get(keys[i], &value); if (s.ok()) { registers = value; } else if (s.IsNotFound()) { @@ -1567,7 +2026,8 @@ Status Storage::PfMerge(const std::vector& keys, std::string& value std::string value; std::string first_registers; std::string result; - s = strings_db_->Get(keys[0], &value); + auto& inst = GetDBInstance(keys[0]); + s = inst->Get(keys[0], &value); if (s.ok()) { first_registers = std::string(value.data(), value.size()); } else if (s.IsNotFound()) { @@ -1579,7 +2039,8 @@ Status Storage::PfMerge(const std::vector& keys, std::string& value for (size_t i = 1; i < keys.size(); ++i) { std::string value; std::string registers; - s = strings_db_->Get(keys[i], &value); + auto& tmp_inst = GetDBInstance(keys[i]); + s = tmp_inst->Get(keys[i], &value); if (s.ok()) { registers = std::string(value.data(), value.size()); } else if (s.IsNotFound()) { @@ -1590,7 +2051,8 @@ Status Storage::PfMerge(const std::vector& keys, std::string& value HyperLogLog log(kPrecision, registers); result = first_log.Merge(log); } - s = strings_db_->Set(keys[0], result); + auto& ninst = GetDBInstance(keys[0]); + s = ninst->Set(keys[0], result); value_to_dest = std::move(result); return s; } @@ -1628,7 +2090,7 @@ Status Storage::AddBGTask(const BGTask& bg_task) { Status Storage::RunBGTask() { BGTask 
task; while (!bg_tasks_should_exit_) { - std::unique_lock lock(bg_tasks_mutex_); + std::unique_lock lock(bg_tasks_mutex_); bg_tasks_cond_var_.wait(lock, [this]() { return !bg_tasks_queue_.empty() || bg_tasks_should_exit_; }); if (!bg_tasks_queue_.empty()) { @@ -1642,8 +2104,11 @@ Status Storage::RunBGTask() { } if (task.operation == kCleanAll) { - DoCompact(task.type); + DoCompactRange(task.type, "", ""); } else if (task.operation == kCompactRange) { + if (task.argv.size() == 1) { + DoCompactSpecificKey(task.type, task.argv[0]); + } if (task.argv.size() == 2) { DoCompactRange(task.type, task.argv.front(), task.argv.back()); } @@ -1654,45 +2119,58 @@ Status Storage::RunBGTask() { Status Storage::Compact(const DataType& type, bool sync) { if (sync) { - return DoCompact(type); + return DoCompactRange(type, "", ""); } else { AddBGTask({type, kCleanAll}); } return Status::OK(); } -Status Storage::DoCompact(const DataType& type) { +// run compactrange for all rocksdb instance +Status Storage::DoCompactRange(const DataType& type, const std::string& start, const std::string& end) { if (type != kAll && type != kStrings && type != kHashes && type != kSets && type != kZSets && type != kLists) { return Status::InvalidArgument(""); } + std::string start_key, end_key; + CalculateStartAndEndKey(start, &start_key, nullptr); + CalculateStartAndEndKey(end, nullptr, &end_key); + Slice slice_start_key(start_key); + Slice slice_end_key(end_key); + Slice* start_ptr = slice_start_key.empty() ? nullptr : &slice_start_key; + Slice* end_ptr = slice_end_key.empty() ? 
nullptr : &slice_end_key; + Status s; - if (type == kStrings) { - current_task_type_ = Operation::kCleanStrings; - s = strings_db_->CompactRange(nullptr, nullptr); - } else if (type == kHashes) { - current_task_type_ = Operation::kCleanHashes; - s = hashes_db_->CompactRange(nullptr, nullptr); - } else if (type == kSets) { - current_task_type_ = Operation::kCleanSets; - s = sets_db_->CompactRange(nullptr, nullptr); - } else if (type == kZSets) { - current_task_type_ = Operation::kCleanZSets; - s = zsets_db_->CompactRange(nullptr, nullptr); - } else if (type == kLists) { - current_task_type_ = Operation::kCleanLists; - s = lists_db_->CompactRange(nullptr, nullptr); - } else if (type == kStreams) { - current_task_type_ = Operation::kCleanStreams; - s = streams_db_->CompactRange(nullptr, nullptr); - } else { - current_task_type_ = Operation::kCleanAll; - s = strings_db_->CompactRange(nullptr, nullptr); - s = hashes_db_->CompactRange(nullptr, nullptr); - s = sets_db_->CompactRange(nullptr, nullptr); - s = zsets_db_->CompactRange(nullptr, nullptr); - s = lists_db_->CompactRange(nullptr, nullptr); - s = streams_db_->CompactRange(nullptr, nullptr); + for (const auto& inst : insts_) { + switch (type) { + case DataType::kStrings: + current_task_type_ = Operation::kCleanStrings; + s = inst->CompactRange(type, start_ptr, end_ptr); + break; + case DataType::kHashes: + current_task_type_ = Operation::kCleanHashes; + s = inst->CompactRange(type, start_ptr, end_ptr); + break; + case DataType::kLists: + current_task_type_ = Operation::kCleanLists; + s = inst->CompactRange(type, start_ptr, end_ptr); + break; + case DataType::kSets: + current_task_type_ = Operation::kCleanSets; + s = inst->CompactRange(type, start_ptr, end_ptr); + break; + case DataType::kZSets: + current_task_type_ = Operation::kCleanZSets; + s = inst->CompactRange(type, start_ptr, end_ptr); + break; + default: + current_task_type_ = Operation::kCleanAll; + s = inst->CompactRange(DataType::kStrings, start_ptr, 
end_ptr); + s = inst->CompactRange(DataType::kHashes, start_ptr, end_ptr); + s = inst->CompactRange(DataType::kLists, start_ptr, end_ptr); + s = inst->CompactRange(DataType::kSets, start_ptr, end_ptr); + s = inst->CompactRange(DataType::kZSets, start_ptr, end_ptr); + } } current_task_type_ = Operation::kNone; return s; @@ -1707,66 +2185,36 @@ Status Storage::CompactRange(const DataType& type, const std::string& start, con return Status::OK(); } -Status Storage::DoCompactRange(const DataType& type, const std::string& start, const std::string& end) { +Status Storage::DoCompactSpecificKey(const DataType& type, const std::string& key) { Status s; - if (type == kStrings) { - Slice slice_begin(start); - Slice slice_end(end); - s = strings_db_->CompactRange(&slice_begin, &slice_end); - return s; - } + auto& inst = GetDBInstance(key); - std::string meta_start_key; - std::string meta_end_key; - std::string data_start_key; - std::string data_end_key; - CalculateMetaStartAndEndKey(start, &meta_start_key, nullptr); - CalculateMetaStartAndEndKey(end, nullptr, &meta_end_key); - CalculateDataStartAndEndKey(start, &data_start_key, nullptr); - CalculateDataStartAndEndKey(end, nullptr, &data_end_key); - Slice slice_meta_begin(meta_start_key); - Slice slice_meta_end(meta_end_key); - Slice slice_data_begin(data_start_key); - Slice slice_data_end(data_end_key); - if (type == kSets) { - s = sets_db_->CompactRange(&slice_meta_begin, &slice_meta_end, kMeta); - s = sets_db_->CompactRange(&slice_data_begin, &slice_data_end, kData); - } else if (type == kZSets) { - s = zsets_db_->CompactRange(&slice_meta_begin, &slice_meta_end, kMeta); - s = zsets_db_->CompactRange(&slice_data_begin, &slice_data_end, kData); - } else if (type == kHashes) { - s = hashes_db_->CompactRange(&slice_meta_begin, &slice_meta_end, kMeta); - s = hashes_db_->CompactRange(&slice_data_begin, &slice_data_end, kData); - } else if (type == kLists) { - s = lists_db_->CompactRange(&slice_meta_begin, &slice_meta_end, kMeta); - 
s = lists_db_->CompactRange(&slice_data_begin, &slice_data_end, kData); - } else if (type == kStreams) { - s = streams_db_->CompactRange(&slice_meta_begin, &slice_meta_end, kMeta); - s = streams_db_->CompactRange(&slice_data_begin, &slice_data_end, kData); - } + std::string start_key; + std::string end_key; + CalculateStartAndEndKey(key, &start_key, &end_key); + Slice slice_begin(start_key); + Slice slice_end(end_key); + s = inst->CompactRange(type, &slice_begin, &slice_end, kMeta); return s; } Status Storage::SetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) { - std::vector dbs = {sets_db_.get(), zsets_db_.get(), hashes_db_.get(), lists_db_.get()}; - for (const auto& db : dbs) { - db->SetMaxCacheStatisticKeys(max_cache_statistic_keys); + for (const auto& inst : insts_) { + inst->SetMaxCacheStatisticKeys(max_cache_statistic_keys); } return Status::OK(); } Status Storage::SetSmallCompactionThreshold(uint32_t small_compaction_threshold) { - std::vector dbs = {sets_db_.get(), zsets_db_.get(), hashes_db_.get(), lists_db_.get()}; - for (const auto& db : dbs) { - db->SetSmallCompactionThreshold(small_compaction_threshold); + for (const auto& inst: insts_) { + inst->SetSmallCompactionThreshold(small_compaction_threshold); } return Status::OK(); } Status Storage::SetSmallCompactionDurationThreshold(uint32_t small_compaction_duration_threshold) { - std::vector dbs = {sets_db_.get(), zsets_db_.get(), hashes_db_.get(), lists_db_.get()}; - for (const auto& db : dbs) { - db->SetSmallCompactionDurationThreshold(small_compaction_duration_threshold); + for (const auto& inst : insts_) { + inst->SetSmallCompactionDurationThreshold(small_compaction_duration_threshold); } return Status::OK(); } @@ -1795,42 +2243,30 @@ std::string Storage::GetCurrentTaskType() { } Status Storage::GetUsage(const std::string& property, uint64_t* const result) { - *result = GetProperty(ALL_DB, property); + std::map inst_result; + GetUsage(property, &inst_result); + for (const auto& it : 
inst_result) { + *result += it.second; + } return Status::OK(); } -Status Storage::GetUsage(const std::string& property, std::map* const type_result) { - type_result->clear(); - (*type_result)[STRINGS_DB] = GetProperty(STRINGS_DB, property); - (*type_result)[HASHES_DB] = GetProperty(HASHES_DB, property); - (*type_result)[LISTS_DB] = GetProperty(LISTS_DB, property); - (*type_result)[ZSETS_DB] = GetProperty(ZSETS_DB, property); - (*type_result)[SETS_DB] = GetProperty(SETS_DB, property); - (*type_result)[STREAMS_DB] = GetProperty(STREAMS_DB, property); +Status Storage::GetUsage(const std::string& property, std::map* const inst_result) { + inst_result->clear(); + for (const auto& inst : insts_) { + uint64_t value; + inst->GetProperty(property, &value); + (*inst_result)[inst->GetIndex()] = value; + } return Status::OK(); } -uint64_t Storage::GetProperty(const std::string& db_type, const std::string& property) { +uint64_t Storage::GetProperty(const std::string& property) { uint64_t out = 0; uint64_t result = 0; - if (db_type == ALL_DB || db_type == STRINGS_DB) { - strings_db_->GetProperty(property, &out); - result += out; - } - if (db_type == ALL_DB || db_type == HASHES_DB) { - hashes_db_->GetProperty(property, &out); - result += out; - } - if (db_type == ALL_DB || db_type == LISTS_DB) { - lists_db_->GetProperty(property, &out); - result += out; - } - if (db_type == ALL_DB || db_type == ZSETS_DB) { - zsets_db_->GetProperty(property, &out); - result += out; - } - if (db_type == ALL_DB || db_type == SETS_DB) { - sets_db_->GetProperty(property, &out); + Status s; + for (const auto& inst : insts_) { + s = inst->GetProperty(property, &out); result += out; } if (db_type == ALL_DB || db_type == STREAMS_DB) { @@ -1842,15 +2278,19 @@ uint64_t Storage::GetProperty(const std::string& db_type, const std::string& pro Status Storage::GetKeyNum(std::vector* key_infos) { KeyInfo key_info; - // NOTE: keep the db order with string, hash, list, zset, set - std::vector dbs = 
{strings_db_.get(), hashes_db_.get(), lists_db_.get(), zsets_db_.get(), sets_db_.get()}; - for (const auto& db : dbs) { + key_infos->resize(5); + for (const auto& db : insts_) { + std::vector db_key_infos; // check the scanner was stopped or not, before scanning the next db if (scan_keynum_exit_) { break; } - db->ScanKeyNum(&key_info); - key_infos->push_back(key_info); + auto s = db->ScanKeyNum(&db_key_infos); + if (!s.ok()) { + return s; + } + std::transform(db_key_infos.begin(), db_key_infos.end(), + key_infos->begin(), key_infos->begin(), std::plus<>{}); } if (scan_keynum_exit_) { scan_keynum_exit_ = false; @@ -1864,180 +2304,133 @@ Status Storage::StopScanKeyNum() { return Status::OK(); } -rocksdb::DB* Storage::GetDBByType(const std::string& type) { - if (type == STRINGS_DB) { - return strings_db_->GetDB(); - } else if (type == HASHES_DB) { - return hashes_db_->GetDB(); - } else if (type == LISTS_DB) { - return lists_db_->GetDB(); - } else if (type == SETS_DB) { - return sets_db_->GetDB(); - } else if (type == ZSETS_DB) { - return zsets_db_->GetDB(); - } else if (type == STREAMS_DB) { - return streams_db_->GetDB(); - } else { +rocksdb::DB* Storage::GetDBByIndex(int index) { + if (index < 0 || index >= db_instance_num_) { + LOG(WARNING) << "Invalid DB Index: " << index << "total: " + << db_instance_num_; return nullptr; } + return insts_[index]->GetDB(); } Status Storage::SetOptions(const OptionType& option_type, const std::string& db_type, - const std::unordered_map& options) { + const std::unordered_map& options) { Status s; - if (db_type == ALL_DB || db_type == STRINGS_DB) { - s = strings_db_->SetOptions(option_type, options); - if (!s.ok()) { - return s; - } - } - if (db_type == ALL_DB || db_type == HASHES_DB) { - s = hashes_db_->SetOptions(option_type, options); - if (!s.ok()) { - return s; - } - } - if (db_type == ALL_DB || db_type == LISTS_DB) { - s = lists_db_->SetOptions(option_type, options); - if (!s.ok()) { - return s; - } - } - if (db_type == ALL_DB 
|| db_type == ZSETS_DB) { - s = zsets_db_->SetOptions(option_type, options); + for (const auto& inst : insts_) { + s = inst->SetOptions(option_type, options); if (!s.ok()) { return s; } } - if (db_type == ALL_DB || db_type == SETS_DB) { - s = sets_db_->SetOptions(option_type, options); - if (!s.ok()) { - return s; - } - } - if (db_type == ALL_DB || db_type == STREAMS_DB) { - s = streams_db_->SetOptions(option_type, options); - if (!s.ok()) { - return s; - } - } - s = EnableDymayticOptions(option_type,db_type,options); + s = EnableDymayticOptions(option_type, db_type, options); return s; } void Storage::SetCompactRangeOptions(const bool is_canceled) { - strings_db_->SetCompactRangeOptions(is_canceled); - hashes_db_->SetCompactRangeOptions(is_canceled); - lists_db_->SetCompactRangeOptions(is_canceled); - sets_db_->SetCompactRangeOptions(is_canceled); - zsets_db_->SetCompactRangeOptions(is_canceled); + for (const auto& inst : insts_) { + inst->SetCompactRangeOptions(is_canceled); + } } -Status Storage::EnableDymayticOptions(const OptionType& option_type, - const std::string& db_type, const std::unordered_map& options) { +Status Storage::EnableDymayticOptions(const OptionType& option_type, + const std::string& db_type, const std::unordered_map& options) { Status s; auto it = options.find("disable_auto_compactions"); if (it != options.end() && it->second == "false") { - s = EnableAutoCompaction(option_type,db_type,options); - LOG(WARNING) << "EnableAutoCompaction " << (s.ok() ? "success" : "failed") - << " when Options get disable_auto_compactions: " << it->second << ",db_type:" << db_type; + s = EnableAutoCompaction(option_type, db_type, options); + LOG(WARNING) << "EnableAutoCompaction " << (s.ok() ? 
"success" : "failed") + << " when Options get disable_auto_compactions: " << it->second << " ,db_type: " << db_type; } return s; } -Status Storage::EnableAutoCompaction(const OptionType& option_type, - const std::string& db_type, const std::unordered_map& options){ +Status Storage::EnableAutoCompaction(const OptionType& option_type, + const std::string& db_type, const std::unordered_map& options) { Status s; - std::vector cfs; - std::vector cfhds; - if (db_type == ALL_DB || db_type == STRINGS_DB) { - cfhds = strings_db_->GetHandles(); - s = strings_db_.get()->GetDB()->EnableAutoCompaction(cfhds); - if (!s.ok()) { - return s; + for (const auto& inst : insts_) { + std::vector cfhds; + if (db_type == ALL_DB || db_type == STRINGS_DB) { + auto string_cfhds = inst->GetStringCFHandles(); + cfhds.insert(cfhds.end(), string_cfhds.begin(), string_cfhds.end()); } - } - if (db_type == ALL_DB || db_type == HASHES_DB) { - cfhds = hashes_db_->GetHandles(); - s = hashes_db_.get()->GetDB()->EnableAutoCompaction(cfhds); - if (!s.ok()) { - return s; + + if (db_type == ALL_DB || db_type == HASHES_DB) { + auto hash_cfhds = inst->GetHashCFHandles(); + cfhds.insert(cfhds.end(), hash_cfhds.begin(), hash_cfhds.end()); } - } - if (db_type == ALL_DB || db_type == LISTS_DB) { - cfhds = lists_db_->GetHandles(); - s = lists_db_.get()->GetDB()->EnableAutoCompaction(cfhds); - if (!s.ok()) { - return s; + + if (db_type == ALL_DB || db_type == LISTS_DB) { + auto list_cfhds = inst->GetListCFHandles(); + cfhds.insert(cfhds.end(), list_cfhds.begin(), list_cfhds.end()); } - } - if (db_type == ALL_DB || db_type == ZSETS_DB) { - cfhds = zsets_db_->GetHandles(); - s = zsets_db_.get()->GetDB()->EnableAutoCompaction(cfhds); - if (!s.ok()) { - return s; + + if (db_type == ALL_DB || db_type == SETS_DB) { + auto set_cfhds = inst->GetSetCFHandles(); + cfhds.insert(cfhds.end(), set_cfhds.begin(), set_cfhds.end()); } - } - if (db_type == ALL_DB || db_type == SETS_DB) { - cfhds = sets_db_->GetHandles(); - s = 
sets_db_.get()->GetDB()->EnableAutoCompaction(cfhds); + + if (db_type == ALL_DB || db_type == ZSETS_DB) { + auto zset_cfhds = inst->GetZsetCFHandles(); + cfhds.insert(cfhds.end(), zset_cfhds.begin(), zset_cfhds.end()); + } + s = inst->GetDB()->EnableAutoCompaction(cfhds); if (!s.ok()) { return s; } } + return s; } void Storage::GetRocksDBInfo(std::string& info) { - strings_db_->GetRocksDBInfo(info, "strings_"); - hashes_db_->GetRocksDBInfo(info, "hashes_"); - lists_db_->GetRocksDBInfo(info, "lists_"); - sets_db_->GetRocksDBInfo(info, "sets_"); - zsets_db_->GetRocksDBInfo(info, "zsets_"); + char temp[12] = {0}; + for (const auto& inst : insts_) { + snprintf(temp, sizeof(temp), "instance:%2d", inst->GetIndex()); + inst->GetRocksDBInfo(info, temp); + } } int64_t Storage::IsExist(const Slice& key, std::map* type_status) { std::string value; int32_t ret = 0; int64_t type_count = 0; - Status s = strings_db_->Get(key, &value); + auto& inst = GetDBInstance(key); + Status s = inst->Get(key, &value); (*type_status)[DataType::kStrings] = s; if (s.ok()) { type_count++; } - s = hashes_db_->HLen(key, &ret); + s = inst->HLen(key, &ret); (*type_status)[DataType::kHashes] = s; if (s.ok()) { type_count++; } - s = sets_db_->SCard(key, &ret); + s = inst->SCard(key, &ret); (*type_status)[DataType::kSets] = s; if (s.ok()) { type_count++; } uint64_t llen = 0; - s = lists_db_->LLen(key, &llen); + s = inst->LLen(key, &llen); (*type_status)[DataType::kLists] = s; if (s.ok()) { type_count++; } - s = zsets_db_->ZCard(key, &ret); + s = inst->ZCard(key, &ret); (*type_status)[DataType::kZSets] = s; if (s.ok()) { type_count++; } return type_count; } - - + + void Storage::DisableWal(const bool is_wal_disable) { - strings_db_->SetWriteWalOptions(is_wal_disable); - hashes_db_->SetWriteWalOptions(is_wal_disable); - lists_db_->SetWriteWalOptions(is_wal_disable); - sets_db_->SetWriteWalOptions(is_wal_disable); - zsets_db_->SetWriteWalOptions(is_wal_disable); + for (const auto& inst : insts_) { + 
inst->SetWriteWalOptions(is_wal_disable); + } } } // namespace storage diff --git a/src/storage/src/strings_filter.h b/src/storage/src/strings_filter.h index 28873456d2..fc03595d82 100644 --- a/src/storage/src/strings_filter.h +++ b/src/storage/src/strings_filter.h @@ -22,13 +22,13 @@ class StringsFilter : public rocksdb::CompactionFilter { bool* value_changed) const override { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - auto cur_time = static_cast(unix_time); + auto cur_time = static_cast(unix_time); ParsedStringsValue parsed_strings_value(value); TRACE("==========================START=========================="); - TRACE("[StringsFilter], key: %s, value = %s, timestamp: %d, cur_time: %d", key.ToString().c_str(), - parsed_strings_value.value().ToString().c_str(), parsed_strings_value.timestamp(), cur_time); + TRACE("[StringsFilter], key: %s, value = %s, timestamp: %llu, cur_time: %llu", key.ToString().c_str(), + parsed_strings_value.UserValue().ToString().c_str(), parsed_strings_value.Etime(), cur_time); - if (parsed_strings_value.timestamp() != 0 && parsed_strings_value.timestamp() < cur_time) { + if (parsed_strings_value.Etime() != 0 && parsed_strings_value.Etime() < cur_time) { TRACE("Drop[Stale]"); return true; } else { @@ -37,6 +37,19 @@ class StringsFilter : public rocksdb::CompactionFilter { } } + /* + virtual rocksdb::CompactionFilter::Decision FilterBlobByKey(int level, const Slice& key, + uint64_t expire_time, std::string* new_value, std::string* skip_until) const override { + int64_t unix_time; + rocksdb::Env::Default()->GetCurrentTime(&unix_time); + auto cur_time = static_cast(unix_time); + if (expire_time !=0 && expire_time < cur_time) { + return CompactionFilter::Decision::kRemove; + } + return CompactionFilter::Decision::kKeep; + } + */ + const char* Name() const override { return "StringsFilter"; } }; diff --git a/src/storage/src/strings_value_format.h b/src/storage/src/strings_value_format.h index 
8717fe5c5f..b7c67fc702 100644 --- a/src/storage/src/strings_value_format.h +++ b/src/storage/src/strings_value_format.h @@ -9,19 +9,30 @@ #include #include "src/base_value_format.h" +#include "storage/storage_define.h" namespace storage { - +/* +* | value | reserve | cdate | timestamp | +* | | 16B | 8B | 8B | +*/ class StringsValue : public InternalValue { public: explicit StringsValue(const rocksdb::Slice& user_value) : InternalValue(user_value) {} - size_t AppendTimestampAndVersion() override { + virtual rocksdb::Slice Encode() override { size_t usize = user_value_.size(); - char* dst = start_; + size_t needed = usize + kSuffixReserveLength + 2 * kTimestampLength; + char* dst = ReAllocIfNeeded(needed); + char* start_pos = dst; + memcpy(dst, user_value_.data(), usize); dst += usize; - EncodeFixed32(dst, timestamp_); - return usize + sizeof(int32_t); + memcpy(dst, reserve_, kSuffixReserveLength); + dst += kSuffixReserveLength; + EncodeFixed64(dst, ctime_); + dst += kTimestampLength; + EncodeFixed64(dst, etime_); + return rocksdb::Slice(start_pos, needed); } }; @@ -31,7 +42,9 @@ class ParsedStringsValue : public ParsedInternalValue { explicit ParsedStringsValue(std::string* internal_value_str) : ParsedInternalValue(internal_value_str) { if (internal_value_str->size() >= kStringsValueSuffixLength) { user_value_ = rocksdb::Slice(internal_value_str->data(), internal_value_str->size() - kStringsValueSuffixLength); - timestamp_ = DecodeFixed32(internal_value_str->data() + internal_value_str->size() - kStringsValueSuffixLength); + memcpy(reserve_, internal_value_str->data() + user_value_.size(), kSuffixReserveLength); + ctime_ = DecodeFixed64(internal_value_str->data() + user_value_.size() + kSuffixReserveLength); + etime_ = DecodeFixed64(internal_value_str->data() + user_value_.size() + kSuffixReserveLength + kTimestampLength); } } @@ -39,7 +52,9 @@ class ParsedStringsValue : public ParsedInternalValue { explicit ParsedStringsValue(const rocksdb::Slice& 
internal_value_slice) : ParsedInternalValue(internal_value_slice) { if (internal_value_slice.size() >= kStringsValueSuffixLength) { user_value_ = rocksdb::Slice(internal_value_slice.data(), internal_value_slice.size() - kStringsValueSuffixLength); - timestamp_ = DecodeFixed32(internal_value_slice.data() + internal_value_slice.size() - kStringsValueSuffixLength); + memcpy(reserve_, internal_value_slice.data() + user_value_.size(), kSuffixReserveLength); + ctime_ = DecodeFixed64(internal_value_slice.data() + user_value_.size() + kSuffixReserveLength); + etime_ = DecodeFixed64(internal_value_slice.data() + user_value_.size() + kSuffixReserveLength + kTimestampLength); } } @@ -52,16 +67,24 @@ class ParsedStringsValue : public ParsedInternalValue { // Strings type do not have version field; void SetVersionToValue() override {} - void SetTimestampToValue() override { + void SetCtimeToValue() override { if (value_) { - char* dst = const_cast(value_->data()) + value_->size() - kStringsValueSuffixLength; - EncodeFixed32(dst, timestamp_); + char* dst = const_cast(value_->data()) + value_->size() - + kStringsValueSuffixLength + kSuffixReserveLength; + EncodeFixed64(dst, ctime_); } } - rocksdb::Slice value() { return user_value_; } + void SetEtimeToValue() override { + if (value_) { + char* dst = const_cast(value_->data()) + value_->size() - + kStringsValueSuffixLength + kSuffixReserveLength + kTimestampLength; + EncodeFixed64(dst, etime_); + } + } - static const size_t kStringsValueSuffixLength = sizeof(int32_t); +private: + const size_t kStringsValueSuffixLength = 2 * kTimestampLength + kSuffixReserveLength; }; } // namespace storage diff --git a/src/storage/src/type_iterator.h b/src/storage/src/type_iterator.h new file mode 100644 index 0000000000..254dcfb632 --- /dev/null +++ b/src/storage/src/type_iterator.h @@ -0,0 +1,404 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef TYPE_ITERATOR_H_ +#define TYPE_ITERATOR_H_ + +#include +#include + +#include "rocksdb/db.h" +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "rocksdb/status.h" +#include "rocksdb/table.h" +#include "glog/logging.h" + +#include "util/heap.h" +#include "storage/util.h" +#include "src/mutex.h" +#include "src/debug.h" +#include "src/base_data_key_format.h" +#include "src/base_key_format.h" +#include "src/base_meta_value_format.h" +#include "src/strings_value_format.h" +#include "src/lists_meta_value_format.h" +#include "storage/storage_define.h" + +namespace storage { +using ColumnFamilyHandle = rocksdb::ColumnFamilyHandle; +using Comparator = rocksdb::Comparator; + +enum Direction { kForward, kReverse }; + +class TypeIterator { +public: + TypeIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle) { + raw_iter_.reset(db->NewIterator(options, handle)); + } + + virtual ~TypeIterator() {} + + virtual void Seek(const std::string& start_key) { + raw_iter_->Seek(Slice(start_key)); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Next(); + } + } + + void SeekToFirst() { + raw_iter_->SeekToFirst(); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Next(); + } + } + + void SeekToLast() { + raw_iter_->SeekToLast(); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Prev(); + } + } + + virtual void SeekForPrev(const std::string& start_key) { + raw_iter_->SeekForPrev(Slice(start_key)); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Prev(); + } + } + + void Next() { + raw_iter_->Next(); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Next(); + } + } + + void Prev() { + raw_iter_->Prev(); + while (raw_iter_->Valid() && ShouldSkip()) { 
+ raw_iter_->Prev(); + } + } + + virtual bool ShouldSkip() { return false; } + + virtual std::string Key() const { return user_key_; } + + virtual std::string Value() const {return user_value_; } + + virtual bool Valid() { return raw_iter_->Valid(); } + + virtual Status status() { return raw_iter_->status(); } + +protected: + std::unique_ptr raw_iter_; + std::string user_key_; + std::string user_value_; + Direction direction_ = kForward; +}; + +class StringsIterator : public TypeIterator { +public: + StringsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~StringsIterator() {} + + bool ShouldSkip() override { + ParsedStringsValue parsed_value(raw_iter_->value()); + if (parsed_value.IsStale()) { + return true; + } + + ParsedBaseKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + +class HashesIterator : public TypeIterator { +public: + HashesIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~HashesIterator() {} + + bool ShouldSkip() override { + ParsedHashesMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_meta_value.UserValue().ToString(); + return false; + } +private: + 
std::string pattern_; +}; + +class ListsIterator : public TypeIterator { +public: + ListsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~ListsIterator() {} + + bool ShouldSkip() override { + ParsedListsMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_meta_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + +class SetsIterator : public TypeIterator { +public: + SetsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~SetsIterator() {} + + bool ShouldSkip() override { + ParsedSetsMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_meta_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + +class ZsetsIterator : public TypeIterator { +public: + ZsetsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~ZsetsIterator() {} + + bool ShouldSkip() override { + ParsedZSetsMetaValue 
parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_meta_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + + +using IterSptr = std::shared_ptr; + +class MinMergeComparator { +public: + MinMergeComparator() = default; + bool operator() (IterSptr a, IterSptr b) { + + int a_len = a->Key().size(); + int b_len = b->Key().size(); + return a->Key().compare(b->Key()) > 0; + } +}; + +class MaxMergeComparator { +public: + MaxMergeComparator() = default; + bool operator() (IterSptr a, IterSptr b) { + int a_len = a->Key().size(); + int b_len = b->Key().size(); + return a->Key().compare(b->Key()) < 0; + } +}; + +using MergerMinIterHeap = rocksdb::BinaryHeap; +using MergerMaxIterHeap = rocksdb::BinaryHeap; + +class MergingIterator { +public: + MergingIterator(const std::vector& children) + : current_(nullptr), direction_(kForward) { + std::copy(children.begin(), children.end(), std::back_inserter(children_)); + for (const auto& child : children_) { + if (child->Valid()) { + min_heap_.push(child); + } + } + current_ = min_heap_.empty() ? 
nullptr : min_heap_.top(); + } + + ~MergingIterator() {} + + bool Valid() const { return current_ != nullptr; } + + Status status() const { + Status status; + for (const auto& child : children_) { + status = child->status(); + if (!status.ok()) { + break; + } + } + return status; + } + + bool IsFinished(const std::string& prefix) { + if (Valid() && (Key().compare(prefix) <= 0 || Key().substr(0, prefix.size()) == prefix)) { + return false; + } + return true; + } + + void SeekToFirst() { + min_heap_.clear(); + max_heap_.clear(); + for (auto& child : children_) { + child->SeekToFirst(); + if (child->Valid()) { + min_heap_.push(child); + } + } + direction_ = kForward; + current_ = min_heap_.empty() ? nullptr : min_heap_.top(); + } + + void SeekToLast() { + min_heap_.clear(); + max_heap_.clear(); + for (auto& child : children_) { + child->SeekToLast(); + if (child->Valid()) { + max_heap_.push(child); + } + } + direction_ = kReverse; + current_ = max_heap_.empty() ? nullptr : max_heap_.top(); + } + + void Seek(const std::string& target) { + min_heap_.clear(); + max_heap_.clear(); + for (auto& child : children_) { + child->Seek(target); + if (child->Valid()) { + min_heap_.push(child); + } + } + direction_ = kForward; + current_ = min_heap_.empty() ? nullptr : min_heap_.top(); + } + + void SeekForPrev(const std::string& start_key) { + min_heap_.clear(); + max_heap_.clear(); + for (auto& child : children_) { + child->SeekForPrev(start_key); + if (child->Valid()) { + max_heap_.push(child); + } + } + direction_ = kReverse; + current_ = max_heap_.empty() ? nullptr : max_heap_.top(); + } + + void Next() { + assert(direction_ == kForward); + current_->Next(); + if (current_->Valid()) { + min_heap_.replace_top(current_); + } else { + min_heap_.pop(); + } + current_ = min_heap_.empty() ? 
nullptr : min_heap_.top(); + } + + void Prev() { + assert(direction_ == kReverse); + current_->Prev(); + if (current_->Valid()) { + max_heap_.replace_top(current_); + } else { + max_heap_.pop(); + } + current_ = max_heap_.empty() ? nullptr : max_heap_.top(); + } + + std::string Key() { return current_->Key(); } + + std::string Value() { return current_->Value(); } + + Status status() { + Status s; + for (const auto& child : children_) { + s = child->status(); + if (!s.ok()) { + break; + } + } + return s; + } + + bool Valid() { return current_ != nullptr; } + +private: + + MergerMinIterHeap min_heap_; + MergerMaxIterHeap max_heap_; + std::vector children_; + IterSptr current_; + Direction direction_; +}; + +} // end namespace storage + +# endif diff --git a/src/storage/src/util.cc b/src/storage/src/util.cc index cfc94ca76c..82a4bf82b4 100644 --- a/src/storage/src/util.cc +++ b/src/storage/src/util.cc @@ -11,8 +11,11 @@ #include #include "pstd/include/pstd_string.h" - +#include "pstd/include/pika_codis_slot.h" +#include "src/base_key_format.h" +#include "src/base_data_key_format.h" #include "src/coding.h" +#include "storage/storage_define.h" #include "storage/util.h" namespace storage { @@ -205,43 +208,34 @@ int is_dir(const char* filename) { return -1; } -int CalculateMetaStartAndEndKey(const std::string& key, std::string* meta_start_key, std::string* meta_end_key) { - size_t needed = key.size() + 1; - auto dst = std::make_unique(needed); - const char* start = dst.get(); - std::strncpy(dst.get(), key.data(), key.size()); - char* dst_ptr = dst.get() + key.size(); - if (meta_start_key) { - meta_start_key->assign(start, key.size()); - } - *dst_ptr = static_cast(0xff); - if (meta_end_key) { - meta_end_key->assign(start, key.size() + 1); +int CalculateStartAndEndKey(const std::string& key, std::string* start_key, std::string* end_key) { + if (key.empty()) { + return 0; } - return 0; -} - -int CalculateDataStartAndEndKey(const std::string& key, std::string* 
data_start_key, std::string* data_end_key) { - size_t needed = sizeof(int32_t) + key.size() + 1; - auto dst = std::make_unique(needed); - const char* start = dst.get(); - char* dst_ptr = dst.get(); - - EncodeFixed32(dst_ptr, key.size()); - dst_ptr += sizeof(int32_t); - std::strncpy(dst_ptr, key.data(), key.size()); - dst_ptr += key.size(); - *dst_ptr = static_cast(0xff); - - if (data_start_key) { - data_start_key->assign(start, sizeof(int32_t) + key.size()); + size_t usize = kPrefixReserveLength + key.size() + kEncodedKeyDelimSize; + size_t nzero = std::count(key.begin(), key.end(), kNeedTransformCharacter); + usize += nzero; + auto dst = std::make_unique(usize); + char* ptr = dst.get(); + memset(ptr, kNeedTransformCharacter, kPrefixReserveLength); + ptr += kPrefixReserveLength; + ptr = storage::EncodeUserKey(Slice(key), ptr, nzero); + if (start_key) { + *start_key = std::string(dst.get(), ptr); } - if (data_end_key) { - data_end_key->assign(start, sizeof(int32_t) + key.size() + 1); + if (end_key) { + *end_key = std::string(dst.get(), ptr); + // Encoded key's last two character is "\u0000\u0000", + // so directly upgrade end_key's back character to '\u0001'. + end_key->back() = '\u0001'; } return 0; } +// requires: +// 1. pattern's length >= 2 +// 2. tail character is '*' +// 3. 
other position's charactor cannot be *, ?, [,] bool isTailWildcard(const std::string& pattern) { if (pattern.size() < 2) { return false; diff --git a/src/storage/src/zsets_data_key_format.h b/src/storage/src/zsets_data_key_format.h index 45352781f6..3b721a7107 100644 --- a/src/storage/src/zsets_data_key_format.h +++ b/src/storage/src/zsets_data_key_format.h @@ -6,16 +6,21 @@ #ifndef SRC_ZSETS_DATA_KEY_FORMAT_H_ #define SRC_ZSETS_DATA_KEY_FORMAT_H_ +#include "src/coding.h" +#include "storage/storage_define.h" + namespace storage { -/* - * | | | | | | - * 4 Bytes key size Bytes 4 Bytes 8 Bytes member size Bytes +/* zset score to member data key format: +* | reserve1 | key | version | score | member | reserve2 | +* | 8B | | 8B | 8B | | 16B | */ class ZSetsScoreKey { public: - ZSetsScoreKey(const Slice& key, int32_t version, double score, const Slice& member) - : key_(key), version_(version), score_(score), member_(member) {} + ZSetsScoreKey(const Slice& key, uint64_t version, + double score, const Slice& member) + : key_(key), version_(version), + score_(score), member_(member) {} ~ZSetsScoreKey() { if (start_ != space_) { @@ -24,7 +29,11 @@ class ZSetsScoreKey { } Slice Encode() { - size_t needed = key_.size() + member_.size() + sizeof(int32_t) * 2 + sizeof(uint64_t); + size_t meta_size = sizeof(reserve1_) + sizeof(version_) + sizeof(score_) + sizeof(reserve2_); + size_t usize = key_.size() + member_.size() + kEncodedKeyDelimSize; + size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter); + usize += nzero; + size_t needed = meta_size + usize; char* dst = nullptr; if (needed <= sizeof(space_)) { dst = space_; @@ -36,71 +45,80 @@ class ZSetsScoreKey { delete[] start_; } } + start_ = dst; - EncodeFixed32(dst, key_.size()); - dst += sizeof(int32_t); - memcpy(dst, key_.data(), key_.size()); - dst += key_.size(); - EncodeFixed32(dst, version_); - dst += sizeof(int32_t); + // reserve1: 8 byte + memcpy(dst, reserve1_, sizeof(reserve1_)); + 
dst += sizeof(reserve1_); + // key + dst = EncodeUserKey(key_, dst, nzero); + // version 8 byte + EncodeFixed64(dst, version_); + dst += sizeof(version_); + // score const void* addr_score = reinterpret_cast(&score_); EncodeFixed64(dst, *reinterpret_cast(addr_score)); - dst += sizeof(uint64_t); + dst += sizeof(score_); + // member memcpy(dst, member_.data(), member_.size()); + dst += member_.size(); + // reserve2 16 byte + memcpy(dst, reserve2_, sizeof(reserve2_)); return Slice(start_, needed); } private: - char space_[200]; char* start_ = nullptr; + char space_[200]; + char reserve1_[8] = {0}; Slice key_; - int32_t version_ = 0; + uint64_t version_ = uint64_t(-1); double score_ = 0.0; Slice member_; + char reserve2_[16] = {0}; }; class ParsedZSetsScoreKey { public: explicit ParsedZSetsScoreKey(const std::string* key) { const char* ptr = key->data(); - int32_t key_len = DecodeFixed32(ptr); - ptr += sizeof(int32_t); - key_ = Slice(ptr, key_len); - ptr += key_len; - version_ = DecodeFixed32(ptr); - ptr += sizeof(int32_t); - - uint64_t tmp = DecodeFixed64(ptr); - const void* ptr_tmp = reinterpret_cast(&tmp); - score_ = *reinterpret_cast(ptr_tmp); - ptr += sizeof(uint64_t); - member_ = Slice(ptr, key->size() - key_len - 2 * sizeof(int32_t) - sizeof(uint64_t)); + const char* end_ptr = key->data() + key->size(); + decode(ptr, end_ptr); } explicit ParsedZSetsScoreKey(const Slice& key) { const char* ptr = key.data(); - int32_t key_len = DecodeFixed32(ptr); - ptr += sizeof(int32_t); - key_ = Slice(ptr, key_len); - ptr += key_len; - version_ = DecodeFixed32(ptr); - ptr += sizeof(int32_t); + const char* end_ptr = key.data() + key.size(); + decode(ptr, end_ptr); + } + void decode(const char* ptr, const char* end_ptr) { + const char* start = ptr; + // skip head reserve1_ + ptr += sizeof(reserve1_); + // skip tail reserve2_ + end_ptr -= sizeof(reserve2_); + // user key + ptr = DecodeUserKey(ptr, std::distance(ptr, end_ptr), &key_str_); + version_ = DecodeFixed64(ptr); + ptr += 
sizeof(version_); uint64_t tmp = DecodeFixed64(ptr); const void* ptr_tmp = reinterpret_cast(&tmp); score_ = *reinterpret_cast(ptr_tmp); ptr += sizeof(uint64_t); - member_ = Slice(ptr, key.size() - key_len - 2 * sizeof(int32_t) - sizeof(uint64_t)); + member_ = Slice(ptr, std::distance(ptr, end_ptr)); } - Slice key() { return key_; } - int32_t version() const { return version_; } + Slice key() { return Slice(key_str_); } + uint64_t Version() const { return version_; } double score() const { return score_; } Slice member() { return member_; } private: - Slice key_; - int32_t version_ = 0; + std::string key_str_; + char reserve1_[8] = {0}; + uint64_t version_ = uint64_t(-1); + char reserve2_[16] = {0}; double score_ = 0.0; Slice member_; }; diff --git a/src/storage/src/zsets_filter.h b/src/storage/src/zsets_filter.h index 51d58d94a9..8de0e6612b 100644 --- a/src/storage/src/zsets_filter.h +++ b/src/storage/src/zsets_filter.h @@ -20,30 +20,40 @@ namespace storage { class ZSetsScoreFilter : public rocksdb::CompactionFilter { public: - ZSetsScoreFilter(rocksdb::DB* db, std::vector* handles_ptr) - : db_(db), cf_handles_ptr_(handles_ptr) {} + ZSetsScoreFilter(rocksdb::DB* db, std::vector* handles_ptr, int meta_cf_index) + : db_(db), cf_handles_ptr_(handles_ptr), meta_cf_index_(meta_cf_index) {} bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value, std::string* new_value, bool* value_changed) const override { + UNUSED(level); + UNUSED(value); + UNUSED(new_value); + UNUSED(value_changed); ParsedZSetsScoreKey parsed_zsets_score_key(key); TRACE("==========================START=========================="); - TRACE("[ScoreFilter], key: %s, score = %lf, member = %s, version = %d", + TRACE("[ScoreFilter], key: %s, score = %lf, member = %s, version = %llu", parsed_zsets_score_key.key().ToString().c_str(), parsed_zsets_score_key.score(), - parsed_zsets_score_key.member().ToString().c_str(), parsed_zsets_score_key.version()); + 
parsed_zsets_score_key.member().ToString().c_str(), parsed_zsets_score_key.Version()); - if (parsed_zsets_score_key.key().ToString() != cur_key_) { - cur_key_ = parsed_zsets_score_key.key().ToString(); + const char* ptr = key.data(); + int key_size = key.size(); + ptr = SeekUserkeyDelim(ptr + kPrefixReserveLength, key_size - kPrefixReserveLength); + std::string meta_key_enc(key.data(), std::distance(key.data(), ptr)); + meta_key_enc.append(kSuffixReserveLength, kNeedTransformCharacter); + + if (meta_key_enc != cur_key_) { + cur_key_ = meta_key_enc; std::string meta_value; // destroyed when close the database, Reserve Current key value if (cf_handles_ptr_->empty()) { return false; } - Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[0], cur_key_, &meta_value); + Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[meta_cf_index_], cur_key_, &meta_value); if (s.ok()) { meta_not_found_ = false; ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - cur_meta_version_ = parsed_zsets_meta_value.version(); - cur_meta_timestamp_ = parsed_zsets_meta_value.timestamp(); + cur_meta_version_ = parsed_zsets_meta_value.Version(); + cur_meta_etime_ = parsed_zsets_meta_value.Etime(); } else if (s.IsNotFound()) { meta_not_found_ = true; } else { @@ -60,11 +70,11 @@ class ZSetsScoreFilter : public rocksdb::CompactionFilter { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - if (cur_meta_timestamp_ != 0 && cur_meta_timestamp_ < static_cast(unix_time)) { + if (cur_meta_etime_ != 0 && cur_meta_etime_ < static_cast(unix_time)) { TRACE("Drop[Timeout]"); return true; } - if (cur_meta_version_ > parsed_zsets_score_key.version()) { + if (cur_meta_version_ > parsed_zsets_score_key.Version()) { TRACE("Drop[score_key_version < cur_meta_version]"); return true; } else { @@ -73,6 +83,23 @@ class ZSetsScoreFilter : public rocksdb::CompactionFilter { } } + /* + // Only judge by meta value ttl + virtual rocksdb::CompactionFilter::Decision 
FilterBlobByKey(int level, const Slice& key, + std::string* new_value, std::string* skip_until) const { + UNUSED(level); + UNUSED(new_value); + UNUSED(skip_until); + bool unused_value_changed; + bool should_remove = Filter(level, key, Slice{}, new_value, &unused_value_changed); + if (should_remove) { + return CompactionFilter::Decision::kRemove; + } + return CompactionFilter::Decision::kKeep; + } + */ + + const char* Name() const override { return "ZSetsScoreFilter"; } private: @@ -81,18 +108,19 @@ class ZSetsScoreFilter : public rocksdb::CompactionFilter { rocksdb::ReadOptions default_read_options_; mutable std::string cur_key_; mutable bool meta_not_found_ = false; - mutable int32_t cur_meta_version_ = 0; - mutable int32_t cur_meta_timestamp_ = 0; + mutable uint64_t cur_meta_version_ = 0; + mutable uint64_t cur_meta_etime_ = 0; + int meta_cf_index_ = 0; }; class ZSetsScoreFilterFactory : public rocksdb::CompactionFilterFactory { public: - ZSetsScoreFilterFactory(rocksdb::DB** db_ptr, std::vector* handles_ptr) - : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr) {} + ZSetsScoreFilterFactory(rocksdb::DB** db_ptr, std::vector* handles_ptr, int meta_cf_index) + : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr), meta_cf_index_(meta_cf_index) {} std::unique_ptr CreateCompactionFilter( const rocksdb::CompactionFilter::Context& context) override { - return std::make_unique(*db_ptr_, cf_handles_ptr_); + return std::make_unique(*db_ptr_, cf_handles_ptr_, meta_cf_index_); } const char* Name() const override { return "ZSetsScoreFilterFactory"; } @@ -100,6 +128,7 @@ class ZSetsScoreFilterFactory : public rocksdb::CompactionFilterFactory { private: rocksdb::DB** db_ptr_ = nullptr; std::vector* cf_handles_ptr_ = nullptr; + int meta_cf_index_ = 0; }; } // namespace storage diff --git a/src/storage/tests/CMakeLists.txt b/src/storage/tests/CMakeLists.txt index ec5d10bb29..09dc7f32cc 100644 --- a/src/storage/tests/CMakeLists.txt +++ b/src/storage/tests/CMakeLists.txt @@ -6,6 +6,8 @@ 
file(GLOB_RECURSE BLACKWINDOW_TEST_SOURCE "${PROJECT_SOURCE_DIR}/tests/*.cc") aux_source_directory(../src TEST_SRCS) +add_compile_definitions(PIKA_ROOT_DIR="${CMAKE_SOURCE_DIR}") + # set(EXECUTABLE_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/build) foreach(blackwindow_test_source ${BLACKWINDOW_TEST_SOURCE}) get_filename_component(storage_test_filename ${blackwindow_test_source} NAME) @@ -14,6 +16,7 @@ foreach(blackwindow_test_source ${BLACKWINDOW_TEST_SOURCE}) # Add the test target add_executable(${blackwindow_test_name} ${blackwindow_test_source}) target_include_directories(${blackwindow_test_name} + PUBLIC ${CMAKE_SOURCE_DIR}/include PUBLIC ${PROJECT_SOURCE_DIR}/include PUBLIC ${PROJECT_SOURCE_DIR}/.. ${ROCKSDB_INCLUDE_DIR} @@ -22,8 +25,9 @@ foreach(blackwindow_test_source ${BLACKWINDOW_TEST_SOURCE}) add_dependencies(${blackwindow_test_name} gtest glog gflags ${LIBUNWIND_NAME}) target_link_libraries(${blackwindow_test_name} PUBLIC ${GTEST_LIBRARY} - PUBLIC pstd PUBLIC ${ROCKSDB_LIBRARY} + PUBLIC pstd + PUBLIC net PUBLIC storage PUBLIC ${GLOG_LIBRARY} PUBLIC ${GFLAGS_LIBRARY} diff --git a/src/storage/tests/hashes_test.cc b/src/storage/tests/hashes_test.cc index ff3f1b4ac4..50d2207256 100644 --- a/src/storage/tests/hashes_test.cc +++ b/src/storage/tests/hashes_test.cc @@ -10,6 +10,10 @@ #include #include +#include "glog/logging.h" + +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/env.h" #include "storage/storage.h" #include "storage/util.h" @@ -22,9 +26,8 @@ class HashesTest : public ::testing::Test { void SetUp() override { std::string path = "./db/hashes"; - if (access(path.c_str(), F_OK) != 0) { - mkdir(path.c_str(), 0755); - } + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); storage_options.options.create_if_missing = true; s = db.Open(storage_options, path); } @@ -2420,6 +2423,14 @@ TEST_F(HashesTest, PKHRScanRangeTest) { } int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir 
= "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("hashes_test"); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/src/storage/tests/keys_test.cc b/src/storage/tests/keys_test.cc index e828969bbe..8acff5c12f 100644 --- a/src/storage/tests/keys_test.cc +++ b/src/storage/tests/keys_test.cc @@ -7,6 +7,10 @@ #include #include +#include "glog/logging.h" + +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/env.h" #include "storage/storage.h" #include "storage/util.h" @@ -22,9 +26,8 @@ class KeysTest : public ::testing::Test { void SetUp() override { std::string path = "./db/keys"; - if (access(path.c_str(), F_OK) != 0) { - mkdir(path.c_str(), 0755); - } + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); storage_options.options.create_if_missing = true; s = db.Open(storage_options, path); } @@ -60,9 +63,12 @@ static bool set_timeout(storage::Storage* const db, const Slice& key, int32_t tt static bool key_value_match(const std::vector& key_value_out, const std::vector& expect_key_value) { if (key_value_out.size() != expect_key_value.size()) { + LOG(WARNING) << "key_value_out.size: " << key_value_out.size() << " expect_key_value.size: " << expect_key_value.size(); return false; } for (int32_t idx = 0; idx < key_value_out.size(); ++idx) { + LOG(WARNING) << "key_value_out[idx]: "<< key_value_out[idx].key << " expect_key_value[idx]: " << expect_key_value[idx].key; + LOG(WARNING) << "key_value_out[idx]: "<< key_value_out[idx].value << " expect_key_value[idx]: " << expect_key_value[idx].value; if (key_value_out[idx].key != expect_key_value[idx].key || key_value_out[idx].value != expect_key_value[idx].value) { return false; @@ -239,6 +245,7 @@ for (const auto& kv : kvs) { expect_kvs.push_back(kvs[idx]); } ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); ASSERT_EQ(next_key, "PKSCANRANGE_M"); // 
************************** Group 10 Test ************************** @@ -4870,2933 +4877,219 @@ TEST_F(KeysTest, ScanCaseSingleTest) { // NOLINT db.Compact(DataType::kAll, true); } -TEST_F(KeysTest, PKExpireScanCaseAllTest) { // NOLINT - int64_t cursor; - int64_t next_cursor; - int64_t del_num; - int32_t int32_ret; - uint64_t uint64_ret; - std::vector keys; - std::vector total_keys; - std::vector delete_keys; +// Expire +TEST_F(KeysTest, ExpireTest) { + std::string value; std::map type_status; + int32_t ret; // ***************** Group 1 Test ***************** - // String - s = db.Set("GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP1_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP1_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP1_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - - // Hash - s = db.HSet("GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP1_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP1_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP1_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP1_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP1_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP1_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); + // Strings + s = db.Set("GP1_EXPIRE_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); - // Set - s = db.SAdd("GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP1_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP1_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP1_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + // Hashes + s = db.HSet("GP1_EXPIRE_KEY", "FIELD", "VALUE", &ret); + ASSERT_TRUE(s.ok()); - // List - s = db.LPush("GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP1_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = 
db.LPush("GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP1_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP1_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + // Sets + s = db.SAdd("GP1_EXPIRE_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); - // ZSet - s = db.ZAdd("GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP1_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP1_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP1_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + // Lists + uint64_t llen; + s = db.RPush("GP1_EXPIRE_KEY", {"NODE"}, &llen); + ASSERT_TRUE(s.ok()); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 2)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 4)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 6)); + // Zsets + s = db.ZAdd("GP1_EXPIRE_KEY", {{1, "MEMBER"}}, &ret); + ASSERT_TRUE(s.ok()); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 8)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 10)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 12)); + ret = db.Expire("GP1_EXPIRE_KEY", 1, &type_status); + ASSERT_EQ(ret, 5); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 14)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 16)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 18)); + // Strings + s = db.Get("GP1_EXPIRE_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 20)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 22)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 24)); + // Hashes + s = 
db.HGet("GP1_EXPIRE_KEY", "EXPIRE_FIELD", &value); + ASSERT_TRUE(s.IsNotFound()); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 26)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 28)); - ASSERT_TRUE(set_timeout(&db, "GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 30)); + // Sets + s = db.SCard("GP1_EXPIRE_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); - // PKExpireScan - delete_keys.clear(); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 0, 0, 100, 3, &keys); - ASSERT_EQ(cursor, 3); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(keys[0], "GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - ASSERT_EQ(keys[1], "GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - ASSERT_EQ(keys[2], "GP1_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + // Lists + s = db.LLen("GP1_EXPIRE_KEY", &llen); + ASSERT_TRUE(s.IsNotFound()); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 3, 0, 100, 3, &keys); - ASSERT_EQ(cursor, 6); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(keys[0], "GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - ASSERT_EQ(keys[1], "GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - ASSERT_EQ(keys[2], "GP1_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + // ZSets + s = db.ZCard("GP1_EXPIRE_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 6, 0, 100, 3, &keys); - ASSERT_EQ(cursor, 9); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(keys[0], "GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - ASSERT_EQ(keys[1], "GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - ASSERT_EQ(keys[2], "GP1_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + // ***************** Group 2 Test ***************** + // Strings + s = db.Set("GP2_EXPIRE_STRING_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_STRING_KEY")); - keys.clear(); - cursor = 
db.PKExpireScan(DataType::kAll, 9, 0, 100, 3, &keys); - ASSERT_EQ(cursor, 12); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(keys[0], "GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - ASSERT_EQ(keys[1], "GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - ASSERT_EQ(keys[2], "GP1_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_STRING_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 12, 0, 100, 3, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 3); - ASSERT_EQ(keys[0], "GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - ASSERT_EQ(keys[1], "GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - ASSERT_EQ(keys[2], "GP1_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + // Hashes + s = db.HSet("GP2_EXPIRE_HASHES_KEY", "FIELD", "VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_HASHES_KEY")); - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_HASHES_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); - // ***************** Group 2 Test ***************** - // String - s = db.Set("GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP2_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP2_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP2_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); + // Sets + s = db.SAdd("GP2_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_SETS_KEY")); - // Hash - s = db.HSet("GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP2_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP2_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP2_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - 
"GP2_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP2_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP2_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_SETS_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); - // Set - s = db.SAdd("GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP2_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP2_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP2_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + // Lists + s = db.RPush("GP2_EXPIRE_LISTS_KEY", {"NODE"}, &llen); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_LISTS_KEY")); - // List - s = db.LPush("GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP2_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP2_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP2_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_LISTS_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); - // ZSet - s = db.ZAdd("GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP2_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP2_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP2_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + // Zsets + s = db.ZAdd("GP2_EXPIRE_ZSETS_KEY", {{1, "MEMBER"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_ZSETS_KEY")); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 2)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 4)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 6)); + type_status.clear(); + 
ret = db.Expire("GP2_EXPIRE_ZSETS_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 8)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 10)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 12)); + // ***************** Group 3 Test ***************** + // Strings + s = db.Set("GP3_EXPIRE_STRING_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + ret = db.Del({"GP3_EXPIRE_STRING_KEY"}, &type_status); + ASSERT_EQ(ret, 1); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 14)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 16)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 18)); + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_STRING_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 20)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 22)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 24)); + // Hashes + s = db.HSet("GP3_EXPIRE_HASHES_KEY", "FIELD", "VALUE", &ret); + ASSERT_TRUE(s.ok()); + s = db.HDel("GP3_EXPIRE_HASHES_KEY", {"FIELD"}, &ret); + ASSERT_TRUE(s.ok()); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 26)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 28)); - ASSERT_TRUE(set_timeout(&db, "GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 30)); + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_HASHES_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); - // PKExpireScan - delete_keys.clear(); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 0, 0, 100, 2, &keys); - ASSERT_EQ(cursor, 2); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - ASSERT_EQ(keys[1], "GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + // Sets + s = 
db.SAdd("GP3_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + s = db.SRem("GP3_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 2, 0, 100, 2, &keys); - ASSERT_EQ(cursor, 4); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP2_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - ASSERT_EQ(keys[1], "GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_SETS_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 4, 0, 100, 2, &keys); - ASSERT_EQ(cursor, 6); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - ASSERT_EQ(keys[1], "GP2_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + // Lists + s = db.RPush("GP3_EXPIRE_LISTS_KEY", {"NODE"}, &llen); + ASSERT_TRUE(s.ok()); + std::vector elements; + s = db.LPop("GP3_EXPIRE_LISTS_KEY", 1,&elements); + ASSERT_TRUE(s.ok()); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 6, 0, 100, 2, &keys); - ASSERT_EQ(cursor, 8); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - ASSERT_EQ(keys[1], "GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_LISTS_KEY", 1, &type_status); + LOG(WARNING) << "ret: " << ret; + for (const auto& ts : type_status) { + LOG(WARNING) << "type: " << ts.first << " status: " << ts.second.ToString(); + } + ASSERT_EQ(ret, 0); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 8, 0, 100, 2, &keys); - ASSERT_EQ(cursor, 10); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP2_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - ASSERT_EQ(keys[1], "GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + 
// Zsets + s = db.ZAdd("GP3_EXPIRE_ZSETS_KEY", {{1, "MEMBER"}}, &ret); + ASSERT_TRUE(s.ok()); + s = db.ZRem("GP3_EXPIRE_ZSETS_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 10, 0, 100, 2, &keys); - ASSERT_EQ(cursor, 12); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - ASSERT_EQ(keys[1], "GP2_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_ZSETS_KEY", 1, &type_status); + ASSERT_EQ(ret, 0); +} - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 12, 0, 100, 2, &keys); - ASSERT_EQ(cursor, 14); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - ASSERT_EQ(keys[1], "GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); +// Del +TEST_F(KeysTest, DelTest) { + int32_t ret; + std::string value; + std::map type_status; + std::vector keys{"DEL_KEY"}; - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 14, 0, 100, 2, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 1); - ASSERT_EQ(keys[0], "GP2_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + // Strings + s = db.Set("DEL_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); + // Hashes + s = db.HSet("DEL_KEY", "FIELD", "VALUE", &ret); + ASSERT_TRUE(s.ok()); - // ***************** Group 3 Test ***************** - // String - s = db.Set("GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP3_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP3_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP3_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); + // Sets + s = db.SAdd("DEL_KEY", 
{"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); - // Hash - s = db.HSet("GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP3_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP3_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP3_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP3_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP3_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP3_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); + // Lists + uint64_t llen; + s = db.RPush("DEL_KEY", {"NODE"}, &llen); + ASSERT_TRUE(s.ok()); - // Set - s = db.SAdd("GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP3_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP3_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP3_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + // ZSets + s = db.ZAdd("DEL_KEY", {{1, "MEMBER"}}, &ret); + ASSERT_TRUE(s.ok()); - // List - s = db.LPush("GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP3_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP3_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP3_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + ret = db.Del(keys, &type_status); + ASSERT_EQ(ret, 5); - // ZSet - s = db.ZAdd("GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP3_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP3_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP3_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + // Strings + s = db.Get("DEL_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 2)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 4)); - 
ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 6)); + // Hashes + s = db.HGet("DEL_KEY", "DEL_FIELD", &value); + ASSERT_TRUE(s.IsNotFound()); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 8)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 10)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 12)); + // Sets + s = db.SCard("DEL_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 14)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 16)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 18)); + // Lists + s = db.LLen("DEL_KEY", &llen); + ASSERT_TRUE(s.IsNotFound()); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 20)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 22)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 24)); + // ZSets + s = db.ZCard("DEL_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); +} - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 26)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 28)); - ASSERT_TRUE(set_timeout(&db, "GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 30)); - - // PKExpireScan - delete_keys.clear(); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 0, 0, 100, 5, &keys); - ASSERT_EQ(cursor, 5); - ASSERT_EQ(keys.size(), 5); - ASSERT_EQ(keys[0], "GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - ASSERT_EQ(keys[1], "GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - ASSERT_EQ(keys[2], "GP3_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - ASSERT_EQ(keys[3], "GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - ASSERT_EQ(keys[4], "GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 5, 0, 100, 5, &keys); - ASSERT_EQ(cursor, 10); - ASSERT_EQ(keys.size(), 5); - 
ASSERT_EQ(keys[0], "GP3_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - ASSERT_EQ(keys[1], "GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - ASSERT_EQ(keys[2], "GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - ASSERT_EQ(keys[3], "GP3_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - ASSERT_EQ(keys[4], "GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 10, 0, 100, 5, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 5); - ASSERT_EQ(keys[0], "GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - ASSERT_EQ(keys[1], "GP3_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - ASSERT_EQ(keys[2], "GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - ASSERT_EQ(keys[3], "GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - ASSERT_EQ(keys[4], "GP3_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 4 Test ***************** - // String - s = db.Set("GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP4_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP4_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP4_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - - // Hash - s = db.HSet("GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP4_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP4_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP4_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP4_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP4_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP4_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - - // Set - s = db.SAdd("GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP4_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 
{"GP4_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP4_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - - // List - s = db.LPush("GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP4_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP4_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP4_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - - // ZSet - s = db.ZAdd("GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP4_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP4_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP4_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 2)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 4)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 6)); - - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 8)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 10)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 12)); - - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 14)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 16)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 18)); - - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 20)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 22)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 24)); - - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 26)); - ASSERT_TRUE(set_timeout(&db, "GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 28)); - ASSERT_TRUE(set_timeout(&db, 
"GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 30)); - - delete_keys.clear(); - keys.clear(); - cursor = db.PKExpireScan(DataType::kAll, 0, 0, 100, 15, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 15); - ASSERT_EQ(keys[0], "GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - ASSERT_EQ(keys[1], "GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - ASSERT_EQ(keys[2], "GP4_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - ASSERT_EQ(keys[3], "GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - ASSERT_EQ(keys[4], "GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - ASSERT_EQ(keys[5], "GP4_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - ASSERT_EQ(keys[6], "GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - ASSERT_EQ(keys[7], "GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - ASSERT_EQ(keys[8], "GP4_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - ASSERT_EQ(keys[9], "GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - ASSERT_EQ(keys[10], "GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - ASSERT_EQ(keys[11], "GP4_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - ASSERT_EQ(keys[12], "GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - ASSERT_EQ(keys[13], "GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - ASSERT_EQ(keys[14], "GP4_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 5 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP5_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP5_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP5_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - // Hash - s = db.HSet("GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 
"GP5_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP5_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP5_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP5_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP5_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP5_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - - // Set - s = db.SAdd("GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP5_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP5_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP5_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - // List - s = db.LPush("GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP5_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP5_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP5_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - - // ZSet - s = db.ZAdd("GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP5_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP5_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, 
"GP5_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.emplace_back("GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 5)); - - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 5)); - - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 25)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 25)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP5_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 25)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 3); - ASSERT_EQ(total_keys[0], "GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - ASSERT_EQ(total_keys[1], "GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - ASSERT_EQ(total_keys[2], "GP5_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - 
db.Compact(DataType::kAll, true); - - // ***************** Group 6 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP6_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP6_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP6_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - // Hash - s = db.HSet("GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP6_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP6_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP6_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP6_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP6_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP6_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - - // Set - s = db.SAdd("GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP6_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP6_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP6_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - // List - s = db.LPush("GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP6_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 
{"GP6_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP6_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - - // ZSet - s = db.ZAdd("GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP6_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP6_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP6_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.emplace_back("GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 5)); - 
ASSERT_TRUE(set_timeout(&db, "GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 25)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 5); - ASSERT_EQ(total_keys[0], "GP6_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - ASSERT_EQ(total_keys[1], "GP6_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - ASSERT_EQ(total_keys[2], "GP6_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - ASSERT_EQ(total_keys[3], "GP6_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - ASSERT_EQ(total_keys[4], "GP6_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 7 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP7_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP7_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP7_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - // Hash - s = db.HSet("GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP7_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP7_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP7_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP7_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP7_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP7_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - 
delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - - // Set - s = db.SAdd("GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP7_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP7_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP7_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - // List - s = db.LPush("GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP7_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP7_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP7_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - - // ZSet - s = db.ZAdd("GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP7_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP7_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP7_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.emplace_back("GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 
25)); - - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 25)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 5); - ASSERT_EQ(total_keys[0], "GP7_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - ASSERT_EQ(total_keys[1], "GP7_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - ASSERT_EQ(total_keys[2], "GP7_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - ASSERT_EQ(total_keys[3], "GP7_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - ASSERT_EQ(total_keys[4], "GP7_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 8 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP8_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP8_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = 
db.Set("GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP8_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - // Hash - s = db.HSet("GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP8_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP8_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP8_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP8_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP8_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP8_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - - // Set - s = db.SAdd("GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP8_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP8_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP8_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - // List - s = db.LPush("GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP8_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP8_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP8_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - 
delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - - // ZSet - s = db.ZAdd("GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP8_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP8_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP8_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.emplace_back("GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 15)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } 
while (cursor != 0); - ASSERT_EQ(total_keys.size(), 5); - ASSERT_EQ(total_keys[0], "GP8_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - ASSERT_EQ(total_keys[1], "GP8_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - ASSERT_EQ(total_keys[2], "GP8_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - ASSERT_EQ(total_keys[3], "GP8_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - ASSERT_EQ(total_keys[4], "GP8_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 9 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP9_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP9_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP9_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - // Hash - s = db.HSet("GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP9_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP9_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP9_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP9_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP9_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP9_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - - // Set - s = db.SAdd("GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP9_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP9_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - 
s = db.SAdd("GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP9_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - // List - s = db.LPush("GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP9_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP9_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP9_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - - // ZSet - s = db.ZAdd("GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP9_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP9_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP9_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.emplace_back("GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 6)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 16)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 26)); - - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 7)); - ASSERT_TRUE(set_timeout(&db, 
"GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 17)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 27)); - - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 8)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 18)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 28)); - - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 9)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 19)); - ASSERT_TRUE(set_timeout(&db, "GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 29)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 0, 30, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 15); - ASSERT_EQ(total_keys[0], "GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - ASSERT_EQ(total_keys[1], "GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - ASSERT_EQ(total_keys[2], "GP9_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - ASSERT_EQ(total_keys[3], "GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - ASSERT_EQ(total_keys[4], "GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - ASSERT_EQ(total_keys[5], "GP9_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - ASSERT_EQ(total_keys[6], "GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - ASSERT_EQ(total_keys[7], "GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - ASSERT_EQ(total_keys[8], "GP9_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - ASSERT_EQ(total_keys[9], "GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - ASSERT_EQ(total_keys[10], "GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - ASSERT_EQ(total_keys[11], "GP9_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - ASSERT_EQ(total_keys[12], "GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - ASSERT_EQ(total_keys[13], "GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - ASSERT_EQ(total_keys[14], "GP9_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - 
- // ***************** Group 10 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP10_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP10_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP10_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - // Hash - s = db.HSet("GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP10_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP10_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP10_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP10_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP10_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP10_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - - // Set - s = db.SAdd("GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP10_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP10_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP10_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - // List - s = db.LPush("GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP10_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 
{"GP10_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP10_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - - // ZSet - s = db.ZAdd("GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP10_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP10_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP10_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.emplace_back("GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 5)); - - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 5)); - - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 25)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 25)); - ASSERT_TRUE(set_timeout(&db, 
"GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP10_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 25)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 3); - ASSERT_EQ(total_keys[0], "GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - ASSERT_EQ(total_keys[1], "GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - ASSERT_EQ(total_keys[2], "GP10_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 11 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP11_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP11_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP11_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - // Hash - s = db.HSet("GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP11_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP11_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP11_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP11_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP11_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP11_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - 
delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - - // Set - s = db.SAdd("GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP11_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP11_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP11_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - // List - s = db.LPush("GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP11_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP11_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP11_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - - // ZSet - s = db.ZAdd("GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, "GP11_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP11_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP11_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.emplace_back("GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 5)); - - ASSERT_TRUE(set_timeout(&db, 
"GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 5)); - - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 25)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 25)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP11_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 25)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 3); - ASSERT_EQ(total_keys[0], "GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - ASSERT_EQ(total_keys[1], "GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - ASSERT_EQ(total_keys[2], "GP11_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 12 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", "GP12_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", "GP12_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", "GP12_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY1"); - 
delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY2"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY3"); - - // Hash - s = db.HSet("GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", "GP12_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP12_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", "GP12_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP12_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", "GP12_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP12_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY1"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY2"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY3"); - - // Set - s = db.SAdd("GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY1", {"GP12_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY2", {"GP12_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY3", {"GP12_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY1"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY2"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY3"); - - // List - s = db.LPush("GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", {"GP12_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", {"GP12_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", {"GP12_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY1"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY2"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY3"); - - // ZSet - s = db.ZAdd("GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", {{1, 
"GP12_PKEXPIRESCAN_CASE_ALL_ZSET_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", {{1, "GP12_PKEXPIRESCAN_CASE_ALL_ZSET_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", {{1, "GP12_PKEXPIRESCAN_CASE_ALL_ZSET_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - delete_keys.emplace_back("GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_STRING_KEY3", 5)); - - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY1", 5)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY2", 5)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_HASH_KEY3", 5)); - - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY1", 25)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_SET_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY1", 25)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY2", 25)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_LIST_KEY3", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1", 15)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2", 15)); - ASSERT_TRUE(set_timeout(&db, "GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3", 15)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 3); - ASSERT_EQ(total_keys[0], 
"GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY1"); - ASSERT_EQ(total_keys[1], "GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY2"); - ASSERT_EQ(total_keys[2], "GP12_PKEXPIRESCAN_CASE_ALL_ZSET_KEY3"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 13 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_STRING", "GP13_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_STRING", "GP13_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_STRING", "GP13_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_STRING"); - delete_keys.emplace_back("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_STRING"); - delete_keys.emplace_back("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_STRING"); - - // Hash - s = db.HSet("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_HASH", "GP13_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP13_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_HASH", "GP13_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP13_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_HASH", "GP13_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP13_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_HASH"); - delete_keys.emplace_back("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_HASH"); - delete_keys.emplace_back("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_HASH"); - - // Set - s = db.SAdd("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_SET", {"GP13_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_SET", {"GP13_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_SET", {"GP13_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_SET"); - 
delete_keys.emplace_back("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_SET"); - delete_keys.emplace_back("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_SET"); - - // List - s = db.LPush("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_LIST", {"GP13_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_LIST", {"GP13_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_LIST", {"GP13_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_LIST"); - delete_keys.emplace_back("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_LIST"); - delete_keys.emplace_back("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_LIST"); - - // ZSet - s = db.ZAdd("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP13_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP13_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP13_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP13_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET"); - delete_keys.emplace_back("GP13_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET"); - delete_keys.emplace_back("GP13_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY2_PKEXPIRESCAN_CASE_ALL_STRING", 5)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY3_PKEXPIRESCAN_CASE_ALL_STRING", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_HASH", 15)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY2_PKEXPIRESCAN_CASE_ALL_HASH", 5)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY3_PKEXPIRESCAN_CASE_ALL_HASH", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY2_PKEXPIRESCAN_CASE_ALL_SET", 5)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY3_PKEXPIRESCAN_CASE_ALL_SET", 25)); - - ASSERT_TRUE(set_timeout(&db, 
"GP13_KEY1_PKEXPIRESCAN_CASE_ALL_LIST", 15)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY2_PKEXPIRESCAN_CASE_ALL_LIST", 5)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY3_PKEXPIRESCAN_CASE_ALL_LIST", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET", 5)); - ASSERT_TRUE(set_timeout(&db, "GP13_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET", 25)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 1, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 5); - ASSERT_EQ(total_keys[0], "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_STRING"); - ASSERT_EQ(total_keys[1], "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_HASH"); - ASSERT_EQ(total_keys[2], "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_SET"); - ASSERT_EQ(total_keys[3], "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_LIST"); - ASSERT_EQ(total_keys[4], "GP13_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 14 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_STRING", "GP14_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_STRING", "GP14_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_STRING", "GP14_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_STRING"); - delete_keys.emplace_back("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_STRING"); - delete_keys.emplace_back("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_STRING"); - - // Hash - s = db.HSet("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_HASH", "GP14_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP14_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = 
db.HSet("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_HASH", "GP14_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP14_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_HASH", "GP14_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP14_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_HASH"); - delete_keys.emplace_back("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_HASH"); - delete_keys.emplace_back("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_HASH"); - - // Set - s = db.SAdd("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_SET", {"GP14_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_SET", {"GP14_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_SET", {"GP14_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_SET"); - delete_keys.emplace_back("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_SET"); - delete_keys.emplace_back("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_SET"); - - // List - s = db.LPush("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_LIST", {"GP14_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_LIST", {"GP14_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_LIST", {"GP14_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_LIST"); - delete_keys.emplace_back("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_LIST"); - delete_keys.emplace_back("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_LIST"); - - // ZSet - s = db.ZAdd("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP14_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP14_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP14_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - 
delete_keys.emplace_back("GP14_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET"); - delete_keys.emplace_back("GP14_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET"); - delete_keys.emplace_back("GP14_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY2_PKEXPIRESCAN_CASE_ALL_STRING", 5)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY3_PKEXPIRESCAN_CASE_ALL_STRING", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_HASH", 15)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY2_PKEXPIRESCAN_CASE_ALL_HASH", 5)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY3_PKEXPIRESCAN_CASE_ALL_HASH", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY2_PKEXPIRESCAN_CASE_ALL_SET", 5)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY3_PKEXPIRESCAN_CASE_ALL_SET", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_LIST", 15)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY2_PKEXPIRESCAN_CASE_ALL_LIST", 5)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY3_PKEXPIRESCAN_CASE_ALL_LIST", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET", 5)); - ASSERT_TRUE(set_timeout(&db, "GP14_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET", 25)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 5); - ASSERT_EQ(total_keys[0], "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_STRING"); - ASSERT_EQ(total_keys[1], "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_HASH"); - ASSERT_EQ(total_keys[2], "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_SET"); - ASSERT_EQ(total_keys[3], "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_LIST"); - ASSERT_EQ(total_keys[4], "GP14_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET"); - - 
del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 15 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_STRING", "GP15_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_STRING", "GP15_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_STRING", "GP15_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_STRING"); - delete_keys.emplace_back("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_STRING"); - delete_keys.emplace_back("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_STRING"); - - // Hash - s = db.HSet("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_HASH", "GP15_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP15_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_HASH", "GP15_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP15_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = db.HSet("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_HASH", "GP15_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP15_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_HASH"); - delete_keys.emplace_back("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_HASH"); - delete_keys.emplace_back("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_HASH"); - - // Set - s = db.SAdd("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_SET", {"GP15_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_SET", {"GP15_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_SET", {"GP15_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_SET"); - delete_keys.emplace_back("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_SET"); - delete_keys.emplace_back("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_SET"); - - // List - s = 
db.LPush("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_LIST", {"GP15_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_LIST", {"GP15_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_LIST", {"GP15_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_LIST"); - delete_keys.emplace_back("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_LIST"); - delete_keys.emplace_back("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_LIST"); - - // ZSet - s = db.ZAdd("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP15_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP15_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP15_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP15_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET"); - delete_keys.emplace_back("GP15_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET"); - delete_keys.emplace_back("GP15_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP15_KEY1_PKEXPIRESCAN_CASE_ALL_STRING", 5)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY3_PKEXPIRESCAN_CASE_ALL_STRING", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP15_KEY1_PKEXPIRESCAN_CASE_ALL_HASH", 5)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_HASH", 15)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY3_PKEXPIRESCAN_CASE_ALL_HASH", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP15_KEY1_PKEXPIRESCAN_CASE_ALL_SET", 5)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY3_PKEXPIRESCAN_CASE_ALL_SET", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP15_KEY1_PKEXPIRESCAN_CASE_ALL_LIST", 5)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_LIST", 15)); - ASSERT_TRUE(set_timeout(&db, 
"GP15_KEY3_PKEXPIRESCAN_CASE_ALL_LIST", 25)); - - ASSERT_TRUE(set_timeout(&db, "GP15_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET", 5)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP15_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET", 25)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 5); - ASSERT_EQ(total_keys[0], "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_STRING"); - ASSERT_EQ(total_keys[1], "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_HASH"); - ASSERT_EQ(total_keys[2], "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_SET"); - ASSERT_EQ(total_keys[3], "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_LIST"); - ASSERT_EQ(total_keys[4], "GP15_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 16 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_STRING", "GP16_PKEXPIRESCAN_CASE_ALL_STRING_VALUE1"); - s = db.Set("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_STRING", "GP16_PKEXPIRESCAN_CASE_ALL_STRING_VALUE2"); - s = db.Set("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_STRING", "GP16_PKEXPIRESCAN_CASE_ALL_STRING_VALUE3"); - delete_keys.emplace_back("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_STRING"); - delete_keys.emplace_back("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_STRING"); - delete_keys.emplace_back("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_STRING"); - - // Hash - s = db.HSet("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_HASH", "GP16_PKEXPIRESCAN_CASE_ALL_HASH_FIELD1", - "GP16_PKEXPIRESCAN_CASE_ALL_HASH_VALUE1", &int32_ret); - s = db.HSet("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_HASH", "GP16_PKEXPIRESCAN_CASE_ALL_HASH_FIELD2", - "GP16_PKEXPIRESCAN_CASE_ALL_HASH_VALUE2", &int32_ret); - s = 
db.HSet("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_HASH", "GP16_PKEXPIRESCAN_CASE_ALL_HASH_FIELD3", - "GP16_PKEXPIRESCAN_CASE_ALL_HASH_VALUE3", &int32_ret); - delete_keys.emplace_back("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_HASH"); - delete_keys.emplace_back("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_HASH"); - delete_keys.emplace_back("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_HASH"); - - // Set - s = db.SAdd("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_SET", {"GP16_PKEXPIRESCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_SET", {"GP16_PKEXPIRESCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_SET", {"GP16_PKEXPIRESCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); - delete_keys.emplace_back("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_SET"); - delete_keys.emplace_back("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_SET"); - delete_keys.emplace_back("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_SET"); - - // List - s = db.LPush("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_LIST", {"GP16_PKEXPIRESCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_LIST", {"GP16_PKEXPIRESCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_LIST", {"GP16_PKEXPIRESCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); - delete_keys.emplace_back("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_LIST"); - delete_keys.emplace_back("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_LIST"); - delete_keys.emplace_back("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_LIST"); - - // ZSet - s = db.ZAdd("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP16_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP16_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET", {{1, "GP16_PKEXPIRESCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); - delete_keys.emplace_back("GP16_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET"); - delete_keys.emplace_back("GP16_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET"); - 
delete_keys.emplace_back("GP16_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP16_KEY1_PKEXPIRESCAN_CASE_ALL_STRING", 5)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY2_PKEXPIRESCAN_CASE_ALL_STRING", 25)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_STRING", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP16_KEY1_PKEXPIRESCAN_CASE_ALL_HASH", 5)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY2_PKEXPIRESCAN_CASE_ALL_HASH", 25)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_HASH", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP16_KEY1_PKEXPIRESCAN_CASE_ALL_SET", 5)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY2_PKEXPIRESCAN_CASE_ALL_SET", 25)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_SET", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP16_KEY1_PKEXPIRESCAN_CASE_ALL_LIST", 5)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY2_PKEXPIRESCAN_CASE_ALL_LIST", 25)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_LIST", 15)); - - ASSERT_TRUE(set_timeout(&db, "GP16_KEY1_PKEXPIRESCAN_CASE_ALL_ZSET", 5)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY2_PKEXPIRESCAN_CASE_ALL_ZSET", 25)); - ASSERT_TRUE(set_timeout(&db, "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET", 15)); - - cursor = 0; - keys.clear(); - total_keys.clear(); - do { - next_cursor = db.PKExpireScan(DataType::kAll, cursor, 10, 20, 5, &keys); - total_keys.insert(total_keys.end(), keys.begin(), keys.end()); - cursor = next_cursor; - } while (cursor != 0); - ASSERT_EQ(total_keys.size(), 5); - ASSERT_EQ(total_keys[0], "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_STRING"); - ASSERT_EQ(total_keys[1], "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_HASH"); - ASSERT_EQ(total_keys[2], "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_SET"); - ASSERT_EQ(total_keys[3], "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_LIST"); - ASSERT_EQ(total_keys[4], "GP16_KEY3_PKEXPIRESCAN_CASE_ALL_ZSET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 15); - sleep(2); - db.Compact(DataType::kAll, true); -} - 
-TEST_F(KeysTest, PKExpireScanCaseSingleTest) { // NOLINT - int64_t cursor; - int64_t next_cursor; - int64_t del_num; - int32_t int32_ret; - uint64_t uint64_ret; - std::vector keys; - std::vector total_keys; - std::vector delete_keys; - std::map type_status; - - // ***************** Group 1 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP1_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP1_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP1_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP1_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP1_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP1_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - 
"GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP1_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP1_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP1_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP1_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP1_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP1_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP1_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", 
{"GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = db.ZAdd("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP1_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - 
delete_keys.emplace_back("GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kStrings, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 2); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP1_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[1], "GP1_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kStrings, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 4); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP1_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[1], "GP1_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kStrings, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP1_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[1], "GP1_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 2 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP2_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", 
"GP2_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP2_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP2_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP2_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP2_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP2_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - 
delete_keys.emplace_back("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP2_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP2_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP2_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP2_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP2_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP2_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", 
{"GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = db.ZAdd("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP2_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - 
ASSERT_TRUE(set_timeout(&db, "GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kStrings, cursor, 10, 20, 4, &keys); - ASSERT_EQ(cursor, 4); - ASSERT_EQ(keys.size(), 4); - ASSERT_EQ(keys[0], "GP2_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[1], "GP2_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[2], "GP2_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[3], "GP2_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kStrings, cursor, 10, 20, 4, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP2_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[1], "GP2_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 3 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP3_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP3_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP3_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP3_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP3_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP3_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - 
delete_keys.emplace_back("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP3_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP3_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP3_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP3_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = 
db.SAdd("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP3_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP3_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP3_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, 
&int32_ret); - s = db.ZAdd("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = db.ZAdd("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP3_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kStrings, cursor, 10, 20, 6, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 6); - ASSERT_EQ(keys[0], "GP3_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[1], "GP3_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[2], "GP3_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[3], "GP3_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[4], 
"GP3_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[5], "GP3_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 4 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP4_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP4_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP4_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP4_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP4_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP4_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - 
"GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP4_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP4_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP4_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP4_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP4_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP4_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP4_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", 
{"GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = db.ZAdd("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP4_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - 
delete_keys.emplace_back("GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - ASSERT_TRUE(set_timeout(&db, "GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kStrings, cursor, 10, 20, 10, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 6); - ASSERT_EQ(keys[0], "GP4_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[1], "GP4_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[2], "GP4_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[3], "GP4_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[4], "GP4_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - ASSERT_EQ(keys[5], "GP4_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 5 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP5_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP5_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP5_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP5_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = 
db.Set("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP5_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP5_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP5_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - 
delete_keys.emplace_back("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP5_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP5_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP5_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP5_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP5_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP5_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - 
delete_keys.emplace_back("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = db.ZAdd("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP5_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - - keys.clear(); - 
cursor = 0; - cursor = db.PKExpireScan(DataType::kSets, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 2); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP5_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[1], "GP5_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kSets, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 4); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP5_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[1], "GP5_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kSets, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP5_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[1], "GP5_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 6 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP6_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP6_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP6_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP6_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP6_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP6_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - 
delete_keys.emplace_back("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP6_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP6_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP6_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP6_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP6_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", 
{"GP6_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP6_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = 
db.ZAdd("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = db.ZAdd("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP6_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kSets, cursor, 10, 20, 4, &keys); - ASSERT_EQ(cursor, 4); - ASSERT_EQ(keys.size(), 4); - ASSERT_EQ(keys[0], "GP6_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[1], "GP6_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[2], "GP6_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[3], "GP6_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kSets, cursor, 10, 20, 4, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP6_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - 
ASSERT_EQ(keys[1], "GP6_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 7 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP7_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP7_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP7_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP7_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP7_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP7_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = 
db.HSet("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP7_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP7_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP7_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP7_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP7_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP7_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP7_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = 
db.LPush("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = db.ZAdd("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP7_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - 
delete_keys.emplace_back("GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kSets, cursor, 10, 20, 6, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 6); - ASSERT_EQ(keys[0], "GP7_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[1], "GP7_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[2], "GP7_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[3], "GP7_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[4], "GP7_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[5], "GP7_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 8 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP8_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP8_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP8_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP8_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP8_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = 
db.Set("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP8_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP8_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", 
{"GP8_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP8_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP8_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP8_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP8_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP8_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - 
delete_keys.emplace_back("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = db.ZAdd("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP8_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kSets, cursor, 10, 20, 10, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 6); - 
ASSERT_EQ(keys[0], "GP8_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[1], "GP8_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[2], "GP8_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[3], "GP8_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[4], "GP8_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - ASSERT_EQ(keys[5], "GP8_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 9 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP9_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP9_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP9_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP9_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP9_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP9_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = 
db.HSet("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP9_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP9_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP9_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP9_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP9_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP9_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP9_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - 
delete_keys.emplace_back("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); - s = db.ZAdd("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); - s = db.ZAdd("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); - s = db.ZAdd("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); - s = db.ZAdd("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); - s = 
db.ZAdd("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP9_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); - delete_keys.emplace_back("GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kZSets, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 2); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP9_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[1], "GP9_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kZSets, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 4); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP9_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[1], "GP9_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kZSets, cursor, 10, 20, 2, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP9_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[1], "GP9_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 10 Test 
***************** - delete_keys.clear(); - // String - s = db.Set("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP10_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP10_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP10_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP10_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP10_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP10_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = 
db.HSet("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP10_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP10_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP10_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP10_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP10_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP10_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP10_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = 
db.LPush("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, - &int32_ret); - s = db.ZAdd("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, - &int32_ret); - s = db.ZAdd("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, - &int32_ret); - s = db.ZAdd("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, - &int32_ret); - s = db.ZAdd("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, - &int32_ret); - s = db.ZAdd("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP10_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, - &int32_ret); - delete_keys.emplace_back("GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - 
delete_keys.emplace_back("GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kZSets, cursor, 10, 20, 4, &keys); - ASSERT_EQ(cursor, 4); - ASSERT_EQ(keys.size(), 4); - ASSERT_EQ(keys[0], "GP10_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[1], "GP10_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[2], "GP10_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[3], "GP10_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - keys.clear(); - cursor = db.PKExpireScan(DataType::kZSets, cursor, 10, 20, 4, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 2); - ASSERT_EQ(keys[0], "GP10_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[1], "GP10_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 11 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP11_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP11_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP11_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP11_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = 
db.Set("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP11_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP11_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", - "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP11_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - 
delete_keys.emplace_back("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP11_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP11_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP11_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP11_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP11_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP11_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); - delete_keys.emplace_back("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - 
delete_keys.emplace_back("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, - &int32_ret); - s = db.ZAdd("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, - &int32_ret); - s = db.ZAdd("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, - &int32_ret); - s = db.ZAdd("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, - &int32_ret); - s = db.ZAdd("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, - &int32_ret); - s = db.ZAdd("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP11_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, - &int32_ret); - delete_keys.emplace_back("GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, 
"GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kZSets, cursor, 10, 20, 6, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 6); - ASSERT_EQ(keys[0], "GP11_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[1], "GP11_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[2], "GP11_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[3], "GP11_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[4], "GP11_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[5], "GP11_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); - - // ***************** Group 12 Test ***************** - delete_keys.clear(); - // String - s = db.Set("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP12_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE1"); - s = db.Set("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP12_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE2"); - s = db.Set("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP12_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE3"); - s = db.Set("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP12_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE4"); - s = db.Set("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP12_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE5"); - s = db.Set("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING", "GP12_PKEXPIRESCAN_CASE_SINGLE_STRING_VALUE6"); - delete_keys.emplace_back("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_STRING"); - delete_keys.emplace_back("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_STRING"); - - // Hash - s = db.HSet("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD1", 
- "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); - s = db.HSet("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD2", - "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); - s = db.HSet("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD3", - "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); - s = db.HSet("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD4", - "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); - s = db.HSet("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD5", - "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); - s = db.HSet("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH", "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_FIELD6", - "GP12_PKEXPIRESCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); - delete_keys.emplace_back("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_HASH"); - delete_keys.emplace_back("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_HASH"); - - // Set - s = db.SAdd("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP12_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); - s = db.SAdd("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP12_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); - s = db.SAdd("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP12_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); - s = db.SAdd("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP12_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); - s = db.SAdd("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP12_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); - s = db.SAdd("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET", {"GP12_PKEXPIRESCAN_CASE_SINGLE_SET_MEMBER6"}, 
&int32_ret); - delete_keys.emplace_back("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_SET"); - delete_keys.emplace_back("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_SET"); - - // List - s = db.LPush("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); - s = db.LPush("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); - s = db.LPush("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); - s = db.LPush("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); - s = db.LPush("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); - s = db.LPush("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST", {"GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); - delete_keys.emplace_back("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_LIST"); - delete_keys.emplace_back("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_LIST"); - - // ZSet - s = db.ZAdd("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER1"}}, - &int32_ret); - s = db.ZAdd("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER2"}}, - &int32_ret); - s = db.ZAdd("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER3"}}, - &int32_ret); - s = 
db.ZAdd("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER4"}}, - &int32_ret); - s = db.ZAdd("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER5"}}, - &int32_ret); - s = db.ZAdd("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", {{1, "GP12_PKEXPIRESCAN_CASE_SINGLE_LIST_MEMBER6"}}, - &int32_ret); - delete_keys.emplace_back("GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - delete_keys.emplace_back("GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - ASSERT_TRUE(set_timeout(&db, "GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - ASSERT_TRUE(set_timeout(&db, "GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET", 15)); - - keys.clear(); - cursor = 0; - cursor = db.PKExpireScan(DataType::kZSets, cursor, 10, 20, 10, &keys); - ASSERT_EQ(cursor, 0); - ASSERT_EQ(keys.size(), 6); - ASSERT_EQ(keys[0], "GP12_KEY1_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[1], "GP12_KEY2_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[2], "GP12_KEY3_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[3], "GP12_KEY4_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[4], "GP12_KEY5_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - ASSERT_EQ(keys[5], "GP12_KEY6_PKEXPIRESCAN_CASE_SINGLE_ZSET"); - - del_num = db.Del(delete_keys, &type_status); - ASSERT_EQ(del_num, 30); - sleep(2); - db.Compact(DataType::kAll, true); -} - -// Expire -TEST_F(KeysTest, ExpireTest) { - 
std::string value; - std::map type_status; - int32_t ret; - - // ***************** Group 1 Test ***************** - // Strings - s = db.Set("GP1_EXPIRE_KEY", "VALUE"); - ASSERT_TRUE(s.ok()); - - // Hashes - s = db.HSet("GP1_EXPIRE_KEY", "FIELD", "VALUE", &ret); - ASSERT_TRUE(s.ok()); - - // Sets - s = db.SAdd("GP1_EXPIRE_KEY", {"MEMBER"}, &ret); - ASSERT_TRUE(s.ok()); - - // Lists - uint64_t llen; - s = db.RPush("GP1_EXPIRE_KEY", {"NODE"}, &llen); - ASSERT_TRUE(s.ok()); - - // Zsets - s = db.ZAdd("GP1_EXPIRE_KEY", {{1, "MEMBER"}}, &ret); - ASSERT_TRUE(s.ok()); - - ret = db.Expire("GP1_EXPIRE_KEY", 1, &type_status); - ASSERT_EQ(ret, 5); - std::this_thread::sleep_for(std::chrono::milliseconds(2000)); - - // Strings - s = db.Get("GP1_EXPIRE_KEY", &value); - ASSERT_TRUE(s.IsNotFound()); - - // Hashes - s = db.HGet("GP1_EXPIRE_KEY", "EXPIRE_FIELD", &value); - ASSERT_TRUE(s.IsNotFound()); - - // Sets - s = db.SCard("GP1_EXPIRE_KEY", &ret); - ASSERT_TRUE(s.IsNotFound()); - - // Lists - s = db.LLen("GP1_EXPIRE_KEY", &llen); - ASSERT_TRUE(s.IsNotFound()); - - // ZSets - s = db.ZCard("GP1_EXPIRE_KEY", &ret); - ASSERT_TRUE(s.IsNotFound()); - - // ***************** Group 2 Test ***************** - // Strings - s = db.Set("GP2_EXPIRE_STRING_KEY", "VALUE"); - ASSERT_TRUE(s.ok()); - ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_STRING_KEY")); - - type_status.clear(); - ret = db.Expire("GP2_EXPIRE_STRING_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // Hashes - s = db.HSet("GP2_EXPIRE_HASHES_KEY", "FIELD", "VALUE", &ret); - ASSERT_TRUE(s.ok()); - ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_HASHES_KEY")); - - type_status.clear(); - ret = db.Expire("GP2_EXPIRE_HASHES_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // Sets - s = db.SAdd("GP2_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); - ASSERT_TRUE(s.ok()); - ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_SETS_KEY")); - - type_status.clear(); - ret = db.Expire("GP2_EXPIRE_SETS_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // Lists - s = 
db.RPush("GP2_EXPIRE_LISTS_KEY", {"NODE"}, &llen); - ASSERT_TRUE(s.ok()); - ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_LISTS_KEY")); - - type_status.clear(); - ret = db.Expire("GP2_EXPIRE_LISTS_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // Zsets - s = db.ZAdd("GP2_EXPIRE_ZSETS_KEY", {{1, "MEMBER"}}, &ret); - ASSERT_TRUE(s.ok()); - ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_ZSETS_KEY")); - - type_status.clear(); - ret = db.Expire("GP2_EXPIRE_ZSETS_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // ***************** Group 3 Test ***************** - // Strings - s = db.Set("GP3_EXPIRE_STRING_KEY", "VALUE"); - ASSERT_TRUE(s.ok()); - ret = db.Del({"GP3_EXPIRE_STRING_KEY"}, &type_status); - ASSERT_EQ(ret, 1); - - type_status.clear(); - ret = db.Expire("GP3_EXPIRE_STRING_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // Hashes - s = db.HSet("GP3_EXPIRE_HASHES_KEY", "FIELD", "VALUE", &ret); - ASSERT_TRUE(s.ok()); - s = db.HDel("GP3_EXPIRE_HASHES_KEY", {"FIELD"}, &ret); - ASSERT_TRUE(s.ok()); - - type_status.clear(); - ret = db.Expire("GP3_EXPIRE_HASHES_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // Sets - s = db.SAdd("GP3_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); - ASSERT_TRUE(s.ok()); - s = db.SRem("GP3_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); - ASSERT_TRUE(s.ok()); - - type_status.clear(); - ret = db.Expire("GP3_EXPIRE_SETS_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // Lists - s = db.RPush("GP3_EXPIRE_LISTS_KEY", {"NODE"}, &llen); - ASSERT_TRUE(s.ok()); - std::vector elements; - s = db.LPop("GP3_EXPIRE_LISTS_KEY", 1,&elements); - ASSERT_TRUE(s.ok()); - - type_status.clear(); - ret = db.Expire("GP3_EXPIRE_LISTS_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); - - // Zsets - s = db.ZAdd("GP3_EXPIRE_ZSETS_KEY", {{1, "MEMBER"}}, &ret); - ASSERT_TRUE(s.ok()); - s = db.ZRem("GP3_EXPIRE_ZSETS_KEY", {"MEMBER"}, &ret); - ASSERT_TRUE(s.ok()); - - type_status.clear(); - ret = db.Expire("GP3_EXPIRE_ZSETS_KEY", 1, &type_status); - ASSERT_EQ(ret, 0); -} - -// Del 
-TEST_F(KeysTest, DelTest) { - int32_t ret; - std::string value; - std::map type_status; - std::vector keys{"DEL_KEY"}; - - // Strings - s = db.Set("DEL_KEY", "VALUE"); - ASSERT_TRUE(s.ok()); - - // Hashes - s = db.HSet("DEL_KEY", "FIELD", "VALUE", &ret); - ASSERT_TRUE(s.ok()); - - // Sets - s = db.SAdd("DEL_KEY", {"MEMBER"}, &ret); - ASSERT_TRUE(s.ok()); - - // Lists - uint64_t llen; - s = db.RPush("DEL_KEY", {"NODE"}, &llen); - ASSERT_TRUE(s.ok()); - - // ZSets - s = db.ZAdd("DEL_KEY", {{1, "MEMBER"}}, &ret); - ASSERT_TRUE(s.ok()); - - ret = db.Del(keys, &type_status); - ASSERT_EQ(ret, 5); - - // Strings - s = db.Get("DEL_KEY", &value); - ASSERT_TRUE(s.IsNotFound()); - - // Hashes - s = db.HGet("DEL_KEY", "DEL_FIELD", &value); - ASSERT_TRUE(s.IsNotFound()); - - // Sets - s = db.SCard("DEL_KEY", &ret); - ASSERT_TRUE(s.IsNotFound()); - - // Lists - s = db.LLen("DEL_KEY", &llen); - ASSERT_TRUE(s.IsNotFound()); - - // ZSets - s = db.ZCard("DEL_KEY", &ret); - ASSERT_TRUE(s.IsNotFound()); -} - -// Exists -TEST_F(KeysTest, ExistsTest) { - int32_t ret; - uint64_t llen; - std::map type_status; - std::vector keys{"EXISTS_KEY"}; +// Exists +TEST_F(KeysTest, ExistsTest) { + int32_t ret; + uint64_t llen; + std::map type_status; + std::vector keys{"EXISTS_KEY"}; // Strings s = db.Set("EXISTS_KEY", "VALUE"); @@ -7853,7 +5146,7 @@ TEST_F(KeysTest, ExpireatTest) { int64_t unix_time; rocksdb::Env::Default()->GetCurrentTime(&unix_time); - int32_t timestamp = static_cast(unix_time) + 1; + int32_t timestamp = unix_time + 1; ret = db.Expireat("EXPIREAT_KEY", timestamp, &type_status); ASSERT_EQ(ret, 5); @@ -8017,7 +5310,16 @@ TEST_F(KeysTest, TTLTest) { } } + int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("keys_test"); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git 
a/src/storage/tests/kv_format_test.cc b/src/storage/tests/kv_format_test.cc new file mode 100644 index 0000000000..0bf8b92af7 --- /dev/null +++ b/src/storage/tests/kv_format_test.cc @@ -0,0 +1,120 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include + +#include +#include "glog/logging.h" + +#include "src/debug.h" +#include "src/coding.h" +#include "src/base_key_format.h" +#include "src/base_data_key_format.h" +#include "src/zsets_data_key_format.h" +#include "src/lists_data_key_format.h" +#include "storage/storage_define.h" + +using namespace storage; + +TEST(KVFormatTest, BaseKeyFormat) { + rocksdb::Slice slice_key("\u0000\u0001abc\u0000", 6); + BaseKey bk(slice_key); + + rocksdb::Slice slice_enc = bk.Encode(); + std::string expect_enc(8, '\0'); + expect_enc.append("\u0000\u0001\u0001abc\u0000\u0001\u0000\u0000", 10); + expect_enc.append(16, '\0'); + ASSERT_EQ(slice_enc, Slice(expect_enc)); + + ParsedBaseKey pbk(slice_enc); + ASSERT_EQ(pbk.Key(), slice_key); +} + +TEST(KVFormatTest, BaseDataKeyFormat) { + rocksdb::Slice slice_key("\u0000\u0001base_data_key\u0000", 16); + rocksdb::Slice slice_data("\u0000\u0001data\u0000", 7); + uint64_t version = 1701848429; + + BaseDataKey bdk(slice_key, version, slice_data); + rocksdb::Slice seek_key_enc = bdk.EncodeSeekKey(); + std::string expect_enc(8, '\0'); + expect_enc.append("\u0000\u0001\u0001base_data_key\u0000\u0001\u0000\u0000", 20); + char dst[9]; + EncodeFixed64(dst, version); + expect_enc.append(dst, 8); + expect_enc.append("\u0000\u0001data\u0000", 7); + ASSERT_EQ(seek_key_enc, Slice(expect_enc)); + + rocksdb::Slice key_enc = bdk.Encode(); + expect_enc.append(16, '\0'); + ASSERT_EQ(key_enc, Slice(expect_enc)); + + ParsedBaseDataKey pbmk(key_enc); + 
ASSERT_EQ(pbmk.Key(), slice_key); + ASSERT_EQ(pbmk.Data(), slice_data); + ASSERT_EQ(pbmk.Version(), version); +} + +TEST(KVFormatTest, ZsetsScoreKeyFormat) { + rocksdb::Slice slice_key("\u0000\u0001base_data_key\u0000", 16); + rocksdb::Slice slice_data("\u0000\u0001data\u0000", 7); + uint64_t version = 1701848429; + double score = -3.5; + + ZSetsScoreKey zsk(slice_key, version, score, slice_data); + // reserve + std::string expect_enc(8, '\0'); + // user_key + expect_enc.append("\u0000\u0001\u0001base_data_key\u0000\u0001\u0000\u0000", 20); + // version + char dst[9]; + EncodeFixed64(dst, version); + expect_enc.append(dst, 8); + // score + const void* addr_score = reinterpret_cast(&score); + EncodeFixed64(dst, *reinterpret_cast(addr_score)); + expect_enc.append(dst, 8); + // data + expect_enc.append("\u0000\u0001data\u0000", 7); + // reserve + expect_enc.append(16, '\0'); + rocksdb::Slice key_enc = zsk.Encode(); + ASSERT_EQ(key_enc, Slice(expect_enc)); + + ParsedZSetsScoreKey pzsk(key_enc); + ASSERT_EQ(pzsk.key(), slice_key); + ASSERT_EQ(pzsk.member(), slice_data); + ASSERT_EQ(pzsk.Version(), version); + ASSERT_EQ(pzsk.score(), score); +} + +TEST(KVFormatTest, ListDataKeyFormat) { + rocksdb::Slice slice_key("\u0000\u0001list_data_key\u0000", 16); + uint64_t version = 1701848429; + uint64_t index = 10; + + ListsDataKey ldk(slice_key, version, index); + rocksdb::Slice key_enc = ldk.Encode(); + std::string expect_enc(8, '\0'); + expect_enc.append("\u0000\u0001\u0001list_data_key\u0000\u0001\u0000\u0000", 20); + char dst[9]; + EncodeFixed64(dst, version); + expect_enc.append(dst, 8); + EncodeFixed64(dst, index); + expect_enc.append(dst, 8); + expect_enc.append(16, '\0'); + ASSERT_EQ(key_enc, Slice(expect_enc)); + + ParsedListsDataKey pldk(key_enc); + ASSERT_EQ(pldk.key(), slice_key); + ASSERT_EQ(pldk.index(), index); + ASSERT_EQ(pldk.Version(), version); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff 
--git a/src/storage/tests/lists_filter_test.cc b/src/storage/tests/lists_filter_test.cc index 44c872204b..694fe66bb6 100644 --- a/src/storage/tests/lists_filter_test.cc +++ b/src/storage/tests/lists_filter_test.cc @@ -8,9 +8,11 @@ #include #include "src/lists_filter.h" +#include "src/base_key_format.h" #include "src/redis.h" #include "storage/storage.h" +using namespace storage; using storage::EncodeFixed64; using storage::ListsDataFilter; using storage::ListsDataKey; @@ -68,7 +70,7 @@ TEST_F(ListsFilterTest, MetaFilterTest) { char str[8]; bool filter_result; bool value_changed; - int32_t version = 0; + uint64_t version = 0; std::string new_value; // Test Meta Filter @@ -120,20 +122,23 @@ TEST_F(ListsFilterTest, DataFilterTest) { char str[8]; bool filter_result; bool value_changed; - int32_t version = 0; + uint64_t version = 0; std::string new_value; // Timeout timestamp is not set, the version is valid. - auto lists_data_filter1 = std::make_unique(meta_db, &handles); + auto lists_data_filter1 = std::make_unique(meta_db, &handles, 0); ASSERT_TRUE(lists_data_filter1 != nullptr); EncodeFixed64(str, 1); ListsMetaValue lists_meta_value1(Slice(str, sizeof(uint64_t))); version = lists_meta_value1.UpdateVersion(); - s = meta_db->Put(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY", lists_meta_value1.Encode()); + + std::string user_key = "FILTER_TEST_KEY"; + BaseMetaKey bmk(user_key); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value1.Encode()); ASSERT_TRUE(s.ok()); - ListsDataKey lists_data_key1("FILTER_TEST_KEY", version, 1); + ListsDataKey lists_data_key1(user_key, version, 1); filter_result = lists_data_filter1->Filter(0, lists_data_key1.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); ASSERT_EQ(filter_result, false); @@ -141,70 +146,70 @@ TEST_F(ListsFilterTest, DataFilterTest) { ASSERT_TRUE(s.ok()); // Timeout timestamp is set, but not expired. 
- auto lists_data_filter2 = std::make_unique(meta_db, &handles); + auto lists_data_filter2 = std::make_unique(meta_db, &handles, 0); ASSERT_TRUE(lists_data_filter2 != nullptr); EncodeFixed64(str, 1); ListsMetaValue lists_meta_value2(Slice(str, sizeof(uint64_t))); version = lists_meta_value2.UpdateVersion(); lists_meta_value2.SetRelativeTimestamp(1); - s = meta_db->Put(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY", lists_meta_value2.Encode()); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value2.Encode()); ASSERT_TRUE(s.ok()); ListsDataKey lists_data_key2("FILTER_TEST_KEY", version, 1); filter_result = lists_data_filter2->Filter(0, lists_data_key2.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); ASSERT_EQ(filter_result, false); - s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], bmk.Encode()); ASSERT_TRUE(s.ok()); // Timeout timestamp is set, already expired. 
- auto lists_data_filter3 = std::make_unique(meta_db, &handles); + auto lists_data_filter3 = std::make_unique(meta_db, &handles, 0); ASSERT_TRUE(lists_data_filter3 != nullptr); EncodeFixed64(str, 1); ListsMetaValue lists_meta_value3(Slice(str, sizeof(uint64_t))); version = lists_meta_value3.UpdateVersion(); lists_meta_value3.SetRelativeTimestamp(1); - s = meta_db->Put(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY", lists_meta_value3.Encode()); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value3.Encode()); ASSERT_TRUE(s.ok()); std::this_thread::sleep_for(std::chrono::milliseconds(2000)); ListsDataKey lists_data_key3("FILTER_TEST_KEY", version, 1); filter_result = lists_data_filter3->Filter(0, lists_data_key3.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); ASSERT_EQ(filter_result, true); - s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], bmk.Encode()); ASSERT_TRUE(s.ok()); // Timeout timestamp is not set, the version is invalid - auto lists_data_filter4 = std::make_unique(meta_db, &handles); + auto lists_data_filter4 = std::make_unique(meta_db, &handles, 0); ASSERT_TRUE(lists_data_filter4 != nullptr); EncodeFixed64(str, 1); ListsMetaValue lists_meta_value4(Slice(str, sizeof(uint64_t))); version = lists_meta_value4.UpdateVersion(); - s = meta_db->Put(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY", lists_meta_value4.Encode()); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value4.Encode()); ASSERT_TRUE(s.ok()); ListsDataKey lists_data_key4("FILTER_TEST_KEY", version, 1); version = lists_meta_value4.UpdateVersion(); - s = meta_db->Put(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY", lists_meta_value4.Encode()); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value4.Encode()); ASSERT_TRUE(s.ok()); filter_result = lists_data_filter4->Filter(0, 
lists_data_key4.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); ASSERT_EQ(filter_result, true); - s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], bmk.Encode()); ASSERT_TRUE(s.ok()); // Meta data has been clear - auto lists_data_filter5 = std::make_unique(meta_db, &handles); + auto lists_data_filter5 = std::make_unique(meta_db, &handles, 0); ASSERT_TRUE(lists_data_filter5 != nullptr); EncodeFixed64(str, 1); ListsMetaValue lists_meta_value5(Slice(str, sizeof(uint64_t))); version = lists_meta_value5.UpdateVersion(); - s = meta_db->Put(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY", lists_meta_value5.Encode()); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value5.Encode()); ASSERT_TRUE(s.ok()); ListsDataKey lists_data_value5("FILTER_TEST_KEY", version, 1); - s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], bmk.Encode()); ASSERT_TRUE(s.ok()); filter_result = lists_data_filter5->Filter(0, lists_data_value5.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); diff --git a/src/storage/tests/lists_test.cc b/src/storage/tests/lists_test.cc index 049cc130e6..ed3325a316 100644 --- a/src/storage/tests/lists_test.cc +++ b/src/storage/tests/lists_test.cc @@ -7,6 +7,10 @@ #include #include +#include "glog/logging.h" + +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/env.h" #include "storage/storage.h" #include "storage/util.h" @@ -16,6 +20,7 @@ static bool elements_match(storage::Storage* const db, const Slice& key, const std::vector& expect_elements) { std::vector elements_out; Status s = db->LRange(key, 0, -1, &elements_out); + LOG(WARNING) << "status: " << s.ToString() << " elements_out size: " << elements_out.size(); if (!s.ok() && !s.IsNotFound()) { return false; } @@ -26,6 +31,7 @@ static bool 
elements_match(storage::Storage* const db, const Slice& key, return true; } for (uint64_t idx = 0; idx < elements_out.size(); ++idx) { + LOG(WARNING) << "element: " << elements_out[idx]; if (strcmp(elements_out[idx].c_str(), expect_elements[idx].c_str()) != 0) { return false; } @@ -75,9 +81,8 @@ class ListsTest : public ::testing::Test { void SetUp() override { std::string path = "./db/lists"; - if (access(path.c_str(), F_OK) != 0) { - mkdir(path.c_str(), 0755); - } + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); storage_options.options.create_if_missing = true; s = db.Open(storage_options, path); if (!s.ok()) { @@ -2582,6 +2587,7 @@ TEST_F(ListsTest, RPushTest) { // NOLINT ASSERT_TRUE(s.ok()); ASSERT_EQ(3, num); ASSERT_TRUE(len_match(&db, "GP6_RPUSH_KEY", 3)); + LOG(WARNING) << "-------------"; ASSERT_TRUE(elements_match(&db, "GP6_RPUSH_KEY", {"t", "h", "e"})); type_status.clear(); @@ -2700,6 +2706,14 @@ TEST_F(ListsTest, RPushxTest) { // NOLINT } int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("lists_test"); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/src/storage/tests/sets_test.cc b/src/storage/tests/sets_test.cc index a84304597a..c6c4dd220e 100644 --- a/src/storage/tests/sets_test.cc +++ b/src/storage/tests/sets_test.cc @@ -7,6 +7,10 @@ #include #include +#include "glog/logging.h" + +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/env.h" #include "storage/storage.h" #include "storage/util.h" @@ -19,9 +23,8 @@ class SetsTest : public ::testing::Test { void SetUp() override { std::string path = "./db/sets"; - if (access(path.c_str(), F_OK) != 0) { - mkdir(path.c_str(), 0755); - } + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); storage_options.options.create_if_missing = true; s = 
db.Open(storage_options, path); } @@ -1304,7 +1307,7 @@ TEST_F(SetsTest, SPopTest) { // NOLINT s = db.SPop("GP1_SPOP_KEY", &members, 1); ASSERT_TRUE(s.ok()); ASSERT_TRUE(size_match(&db, "GP1_SPOP_KEY", 1)); - + s = db.SPop("GP1_SPOP_KEY", &members, 1); ASSERT_TRUE(s.ok()); @@ -1331,7 +1334,7 @@ TEST_F(SetsTest, SPopTest) { // NOLINT s = db.SPop("GP2_SPOP_KEY", &members, 1); ASSERT_TRUE(s.ok()); ASSERT_TRUE(size_match(&db, "GP2_SPOP_KEY", 1 - idx)); - + } gp2_out_all.swap(members); @@ -1355,7 +1358,7 @@ TEST_F(SetsTest, SPopTest) { // NOLINT s = db.SPop("GP3_SPOP_KEY", &members, 1); ASSERT_TRUE(s.ok()); ASSERT_TRUE(size_match(&db, "GP3_SPOP_KEY", 100 - idx)); - + } gp3_out_all.swap(members); @@ -1379,7 +1382,7 @@ TEST_F(SetsTest, SPopTest) { // NOLINT s = db.SPop("GP4_SPOP_KEY", &members, 1); ASSERT_TRUE(s.ok()); ASSERT_TRUE(size_match(&db, "GP4_SPOP_KEY", 10000 - idx)); - + } gp4_out_all.swap(members); @@ -2238,6 +2241,14 @@ TEST_F(SetsTest, SScanTest) { // NOLINT } int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("strings_test"); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/src/storage/tests/strings_filter_test.cc b/src/storage/tests/strings_filter_test.cc index 5bfa713b64..26af189877 100644 --- a/src/storage/tests/strings_filter_test.cc +++ b/src/storage/tests/strings_filter_test.cc @@ -19,7 +19,7 @@ TEST(StringsFilterTest, FilterTest) { bool value_changed; auto filter = std::make_unique(); - int32_t ttl = 1; + int64_t ttl = 1; StringsValue strings_value("FILTER_VALUE"); strings_value.SetRelativeTimestamp(ttl); is_stale = filter->Filter(0, "FILTER_KEY", strings_value.Encode(), &new_value, &value_changed); diff --git a/src/storage/tests/strings_test.cc b/src/storage/tests/strings_test.cc index 27759269f3..33e15c67ef 100644 --- 
a/src/storage/tests/strings_test.cc +++ b/src/storage/tests/strings_test.cc @@ -7,6 +7,10 @@ #include #include +#include "glog/logging.h" + +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/env.h" #include "storage/storage.h" #include "storage/util.h" @@ -19,9 +23,8 @@ class StringsTest : public ::testing::Test { void SetUp() override { std::string path = "./db/strings"; - if (access(path.c_str(), F_OK) != 0) { - mkdir(path.c_str(), 0755); - } + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); storage_options.options.create_if_missing = true; s = db.Open(storage_options, path); } @@ -1006,6 +1009,14 @@ TEST_F(StringsTest, PKSetexAtTest) { } int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("strings_test"); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/src/storage/tests/zsets_test.cc b/src/storage/tests/zsets_test.cc index f22da43ebd..465c48f00e 100644 --- a/src/storage/tests/zsets_test.cc +++ b/src/storage/tests/zsets_test.cc @@ -7,6 +7,10 @@ #include #include +#include "glog/logging.h" + +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/env.h" #include "storage/storage.h" #include "storage/util.h" @@ -24,9 +28,8 @@ class ZSetsTest : public ::testing::Test { void SetUp() override { std::string path = "./db/zsets"; - if (access(path.c_str(), F_OK) != 0) { - mkdir(path.c_str(), 0755); - } + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); storage_options.options.create_if_missing = true; s = db.Open(storage_options, path); if (!s.ok()) { @@ -103,6 +106,7 @@ static bool size_match(storage::Storage* const db, const Slice& key, int32_t exp if (s.IsNotFound() && (expect_size == 0)) { return true; } + LOG(WARNING) << "size_match ? 
size: " << size << " expect_size: " << expect_size; return size == expect_size; } @@ -5233,6 +5237,14 @@ TEST_F(ZSetsTest, ZScanTest) { // NOLINT } int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("zsets_test"); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/tests/assets/default.conf b/tests/assets/default.conf index 7c0c0f791d..a4d89653b1 100644 --- a/tests/assets/default.conf +++ b/tests/assets/default.conf @@ -7,6 +7,10 @@ # Port 10221 is used for Rsync, and port 11221 is used for Replication, while the listening port is 9221. port : 9221 +db-instance-num : 3 +rocksdb-ttl-second : 86400 * 7; +rocksdb-periodic-second : 86400 * 3; + # Random value identifying the Pika server, its string length must be 40. # If not set, Pika will generate a random string with a length of 40 random characters. # run-id : diff --git a/tests/conf/pika.conf b/tests/conf/pika.conf index 7c0c0f791d..5f9167d96a 100644 --- a/tests/conf/pika.conf +++ b/tests/conf/pika.conf @@ -7,6 +7,8 @@ # Port 10221 is used for Rsync, and port 11221 is used for Replication, while the listening port is 9221. port : 9221 +db-instance-num : 3 + # Random value identifying the Pika server, its string length must be 40. # If not set, Pika will generate a random string with a length of 40 random characters. # run-id : diff --git a/tests/integration/start_master_and_slave.sh b/tests/integration/start_master_and_slave.sh index c2b6a01c38..6c6e79107e 100755 --- a/tests/integration/start_master_and_slave.sh +++ b/tests/integration/start_master_and_slave.sh @@ -1,12 +1,13 @@ #!/bin/bash # This script is used by .github/workflows/pika.yml, Do not modify this file unless you know what you are doing. 
# it's used to start pika master and slave, running path: build -cp ../tests/conf/pika.conf ./pika_master.conf -cp ../tests/conf/pika.conf ./pika_slave.conf +cp ../../output/pika ./pika +cp ../conf/pika.conf ./pika_master.conf +cp ../conf/pika.conf ./pika_slave.conf mkdir slave_data sed -i '' -e 's|databases : 1|databases : 2|' -e 's|#daemonize : yes|daemonize : yes|' ./pika_master.conf sed -i '' -e 's|databases : 1|databases : 2|' -e 's|port : 9221|port : 9231|' -e 's|log-path : ./log/|log-path : ./slave_data/log/|' -e 's|db-path : ./db/|db-path : ./slave_data/db/|' -e 's|dump-path : ./dump/|dump-path : ./slave_data/dump/|' -e 's|pidfile : ./pika.pid|pidfile : ./slave_data/pika.pid|' -e 's|db-sync-path : ./dbsync/|db-sync-path : ./slave_data/dbsync/|' -e 's|#daemonize : yes|daemonize : yes|' ./pika_slave.conf ./pika -c ./pika_master.conf ./pika -c ./pika_slave.conf #ensure both master and slave are ready -sleep 10 \ No newline at end of file +sleep 10 diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 4c90e9745c..c1f39f6fd8 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -3,6 +3,6 @@ add_subdirectory(./benchmark_client) add_subdirectory(./binlog_sender) add_subdirectory(./manifest_generator) add_subdirectory(./rdb_to_pika) -add_subdirectory(./pika_to_txt) -add_subdirectory(./txt_to_pika) -add_subdirectory(./pika-port/pika_port_3) \ No newline at end of file +#add_subdirectory(./pika_to_txt) +#add_subdirectory(./txt_to_pika) +#add_subdirectory(./pika-port/pika_port_3) From 6601034795d736bea92656789c45cc67aaea6a2f Mon Sep 17 00:00:00 2001 From: Mixficsol <838844609@qq.com> Date: Wed, 31 Jan 2024 21:29:16 +0800 Subject: [PATCH 2/4] fix floyd keys (#2364) Co-authored-by: wuxianrong --- src/pika_command.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pika_command.cc b/src/pika_command.cc index f2a1391174..b76baca28b 100644 --- a/src/pika_command.cc +++ b/src/pika_command.cc @@ -640,7 +640,7 @@ void 
InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameSRem, std::move(sremptr))); ////SUnionCmd std::unique_ptr sunionptr = std::make_unique( - kCmdNameSUnion, -2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsSlow); + kCmdNameSUnion, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSUnion, std::move(sunionptr))); ////SUnionstoreCmd std::unique_ptr sunionstoreptr = @@ -648,7 +648,7 @@ void InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameSUnionstore, std::move(sunionstoreptr))); ////SInterCmd std::unique_ptr sinterptr = std::make_unique( - kCmdNameSInter, -2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsSlow); + kCmdNameSInter, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSInter, std::move(sinterptr))); ////SInterstoreCmd std::unique_ptr sinterstoreptr = @@ -660,7 +660,7 @@ void InitCmdTable(CmdTable* cmd_table) { cmd_table->insert(std::pair>(kCmdNameSIsmember, std::move(sismemberptr))); ////SDiffCmd std::unique_ptr sdiffptr = - std::make_unique(kCmdNameSDiff, -2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsSlow); + std::make_unique(kCmdNameSDiff, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow); cmd_table->insert(std::pair>(kCmdNameSDiff, std::move(sdiffptr))); ////SDiffstoreCmd std::unique_ptr sdiffstoreptr = From eb14a0fd3fb1622969e6c5844f44e5e607a61ce4 Mon Sep 17 00:00:00 2001 From: wangshaoyi Date: Fri, 2 Feb 2024 17:31:00 +0800 Subject: [PATCH 3/4] floyd rebase unstable branch --- src/pika_admin.cc | 20 +- src/pika_kv.cc | 1 - src/pika_repl_client_conn.cc | 1 + src/storage/include/storage/storage.h | 4 +- src/storage/include/storage/storage_define.h | 3 +- src/storage/src/pika_stream_meta_value.h | 20 +- src/storage/src/redis.cc | 36 +- src/storage/src/redis.h | 77 +++- src/storage/src/redis_hashes.cc | 2 +- src/storage/src/redis_streams.cc | 414 ++++--------------- src/storage/src/redis_streams.h | 146 +------ src/storage/src/storage.cc | 35 +- 
src/storage/src/type_iterator.h | 30 ++ 13 files changed, 285 insertions(+), 504 deletions(-) diff --git a/src/pika_admin.cc b/src/pika_admin.cc index f0f5692b37..d25b9459e4 100644 --- a/src/pika_admin.cc +++ b/src/pika_admin.cc @@ -630,20 +630,8 @@ void FlushdbCmd::DoInitial() { if (argv_.size() == 1) { db_name_ = "all"; } else { - std::string struct_type = argv_[1]; - if (strcasecmp(struct_type.data(), "string") == 0) { - db_name_ = "strings"; - } else if (strcasecmp(struct_type.data(), "hash") == 0) { - db_name_ = "hashes"; - } else if (strcasecmp(struct_type.data(), "set") == 0) { - db_name_ = "sets"; - } else if (strcasecmp(struct_type.data(), "zset") == 0) { - db_name_ = "zsets"; - } else if (strcasecmp(struct_type.data(), "list") == 0) { - db_name_ = "lists"; - } else { - res_.SetRes(CmdRes::kInvalidDbType); - } + LOG(WARNING) << "not supported to flushdb with specific type in Floyd"; + res_.SetRes(CmdRes::kInvalidParameter, "not supported to flushdb with specific type in Floyd"); } } @@ -656,7 +644,6 @@ void FlushdbCmd::Do() { } else { //Floyd does not support flushdb by type LOG(ERROR) << "cannot flushdb by type in floyd"; - // db_->FlushSubDB(db_name_); } } } @@ -690,7 +677,8 @@ void FlushdbCmd::DoWithoutLock() { if (db_name_ == "all") { db_->FlushDBWithoutLock(); } else { - db_->FlushSubDBWithoutLock(db_name_); + //Floyd does not support flushdb by type + LOG(ERROR) << "cannot flushdb by type in floyd"; } DoUpdateCache(); } diff --git a/src/pika_kv.cc b/src/pika_kv.cc index e1e9315306..bf121e2dd3 100644 --- a/src/pika_kv.cc +++ b/src/pika_kv.cc @@ -9,7 +9,6 @@ #include "include/pika_command.h" #include "include/pika_slot_command.h" #include "include/pika_cache.h" -#include "include/pika_stream_base.h" #include "include/pika_conf.h" #include "pstd/include/pstd_string.h" diff --git a/src/pika_repl_client_conn.cc b/src/pika_repl_client_conn.cc index b911134e9a..672648d64d 100644 --- a/src/pika_repl_client_conn.cc +++ b/src/pika_repl_client_conn.cc @@ -30,6 
+30,7 @@ bool PikaReplClientConn::IsDBStructConsistent(const std::vector& curre } for (const auto& db_struct : current_dbs) { if (find(expect_dbs.begin(), expect_dbs.end(), db_struct) == expect_dbs.end()) { + LOG(WARNING) << "DB struct mismatch"; return false; } } diff --git a/src/storage/include/storage/storage.h b/src/storage/include/storage/storage.h index 8855c06476..296882f510 100644 --- a/src/storage/include/storage/storage.h +++ b/src/storage/include/storage/storage.h @@ -138,9 +138,9 @@ struct ScoreMember { enum BeforeOrAfter { Before, After }; -enum DataType { kAll, kStrings, kHashes, kLists, kZSets, kSets, kStreams }; +enum DataType { kAll, kStrings, kHashes, kSets, kLists, kZSets, kStreams }; -const char DataTypeTag[] = {'a', 'k', 'h', 'l', 'z', 's', 'x'}; +const char DataTypeTag[] = {'a', 'k', 'h', 's', 'l', 'z', 'x'}; enum class OptionType { kDB, diff --git a/src/storage/include/storage/storage_define.h b/src/storage/include/storage/storage_define.h index 367687d92b..7dbd614169 100644 --- a/src/storage/include/storage/storage_define.h +++ b/src/storage/include/storage/storage_define.h @@ -40,7 +40,8 @@ enum ColumnFamilyIndex { kZsetsMetaCF = 7, kZsetsDataCF = 8, kZsetsScoreCF = 9, - kStreamsCF = 10, + kStreamsMetaCF = 10, + kStreamsDataCF = 11, }; const static char kNeedTransformCharacter = '\u0000'; diff --git a/src/storage/src/pika_stream_meta_value.h b/src/storage/src/pika_stream_meta_value.h index 1a43f9dcbe..acbd3a11ed 100644 --- a/src/storage/src/pika_stream_meta_value.h +++ b/src/storage/src/pika_stream_meta_value.h @@ -14,7 +14,7 @@ namespace storage { static const uint64_t kDefaultStreamValueLength = - sizeof(tree_id_t) + sizeof(uint64_t) + 3 * sizeof(streamID) + sizeof(int32_t) + sizeof(int32_t); + sizeof(tree_id_t) + sizeof(uint64_t) + 3 * sizeof(streamID) + sizeof(int32_t) + sizeof(uint64_t); class StreamMetaValue { public: explicit StreamMetaValue() = default; @@ -62,7 +62,7 @@ class StreamMetaValue { EncodeFixed32(dst, length_); dst += 
sizeof(length_); - EncodeFixed32(dst, version_); + EncodeFixed64(dst, version_); } // used only when parse a existed stream meta @@ -99,10 +99,10 @@ class StreamMetaValue { length_ = static_cast(DecodeFixed32(pos)); pos += sizeof(length_); - version_ = static_cast(DecodeFixed32(pos)); + version_ = static_cast(DecodeFixed64(pos)); } - int32_t version() const { return version_; } + uint64_t version() const { return version_; } tree_id_t groups_id() const { return groups_id_; } @@ -176,12 +176,12 @@ class StreamMetaValue { EncodeFixed32(dst, length_); } - void set_version(int32_t version) { + void set_version(uint64_t version) { assert(value_.size() == kDefaultStreamValueLength); version_ = version; char* dst = const_cast(value_.data()) + sizeof(tree_id_t) + sizeof(uint64_t) + 3 * sizeof(streamID) + sizeof(length_); - EncodeFixed32(dst, version_); + EncodeFixed64(dst, version_); } private: @@ -191,7 +191,7 @@ class StreamMetaValue { streamID last_id_; streamID max_deleted_entry_id_; int32_t length_{0}; // number of the messages in the stream - int32_t version_{0}; + uint64_t version_{0}; std::string value_{}; }; @@ -230,10 +230,10 @@ class ParsedStreamMetaValue { length_ = static_cast(DecodeFixed32(pos)); pos += sizeof(length_); - version_ = static_cast(DecodeFixed32(pos)); + version_ = static_cast(DecodeFixed64(pos)); } - int32_t version() const { return version_; } + uint64_t version() const { return version_; } tree_id_t groups_id() const { return groups_id_; } @@ -262,7 +262,7 @@ class ParsedStreamMetaValue { streamID last_id_; streamID max_deleted_entry_id_; int32_t length_{0}; // number of the messages in the stream - int32_t version_{0}; + uint64_t version_{0}; }; static const uint64_t kDefaultStreamCGroupValueLength = sizeof(streamID) + sizeof(uint64_t) + 2 * sizeof(tree_id_t); diff --git a/src/storage/src/redis.cc b/src/storage/src/redis.cc index 9ac40cdcea..94c85ecbca 100644 --- a/src/storage/src/redis.cc +++ b/src/storage/src/redis.cc @@ -144,6 +144,19 @@ 
Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops)); zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops)); + // stream column-family options + rocksdb::ColumnFamilyOptions stream_meta_cf_ops(storage_options.options); + rocksdb::ColumnFamilyOptions stream_data_cf_ops(storage_options.options); + + rocksdb::BlockBasedTableOptions stream_meta_cf_table_ops(table_ops); + rocksdb::BlockBasedTableOptions stream_data_cf_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + stream_meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + stream_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + stream_meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(stream_meta_cf_table_ops)); + stream_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(stream_data_cf_table_ops)); + std::vector column_families; column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, string_cf_ops); // hash CF @@ -159,6 +172,9 @@ Status Redis::Open(const StorageOptions& storage_options, const std::string& db_ column_families.emplace_back("zset_meta_cf", zset_meta_cf_ops); column_families.emplace_back("zset_data_cf", zset_data_cf_ops); column_families.emplace_back("zset_score_cf", zset_score_cf_ops); + // stream CF + column_families.emplace_back("stream_meta_cf", stream_meta_cf_ops); + column_families.emplace_back("stream_data_cf", stream_data_cf_ops); return rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); } @@ -231,6 +247,14 @@ Status Redis::CompactRange(const DataType& dtype, const rocksdb::Slice* begin, c db_->CompactRange(default_compact_range_options_, handles_[kZsetsScoreCF], begin, end); } break; + case DataType::kStreams: + if (type == kMeta || 
type == kMetaAndData) { + s = db_->CompactRange(default_compact_range_options_, handles_[kStreamsMetaCF], begin, end); + } + if (s.ok() && (type == kData || type == kMetaAndData)) { + s = db_->CompactRange(default_compact_range_options_, handles_[kStreamsDataCF], begin, end); + } + break; default: return Status::Corruption("Invalid data type"); } @@ -391,10 +415,14 @@ void Redis::SetCompactRangeOptions(const bool is_canceled) { } else { default_compact_range_options_.canceled->store(is_canceled); } +} + Status Redis::GetProperty(const std::string& property, uint64_t* out) { std::string value; - db_->GetProperty(property, &value); - *out = std::strtoull(value.c_str(), nullptr, 10); + for (const auto& handle : handles_) { + db_->GetProperty(handle, property, &value); + *out += std::strtoull(value.c_str(), nullptr, 10); + } return Status::OK(); } @@ -421,6 +449,10 @@ Status Redis::ScanKeyNum(std::vector* key_infos) { if (!s.ok()) { return s; } + s = ScanSetsKeyNum(&((*key_infos)[5])); + if (!s.ok()) { + return s; + } return Status::OK(); } diff --git a/src/storage/src/redis.h b/src/storage/src/redis.h index 180cc7fa9c..2e28743aae 100644 --- a/src/storage/src/redis.h +++ b/src/storage/src/redis.h @@ -23,6 +23,7 @@ #include "storage/storage.h" #include "storage/storage_define.h" #include "pstd/include/env.h" +#include "src/redis_streams.h" #include "pstd/include/pika_codis_slot.h" #define SPOP_COMPACT_THRESHOLD_COUNT 500 @@ -114,12 +115,14 @@ class Redis { Status ScanListsKeyNum(KeyInfo* key_info); Status ScanZsetsKeyNum(KeyInfo* key_info); Status ScanSetsKeyNum(KeyInfo* key_info); + Status ScanStreamsKeyNum(KeyInfo* key_info); virtual Status StringsPKPatternMatchDel(const std::string& pattern, int32_t* ret); virtual Status ListsPKPatternMatchDel(const std::string& pattern, int32_t* ret); virtual Status HashesPKPatternMatchDel(const std::string& pattern, int32_t* ret); virtual Status ZsetsPKPatternMatchDel(const std::string& pattern, int32_t* ret); virtual Status 
SetsPKPatternMatchDel(const std::string& pattern, int32_t* ret); + virtual Status StreamsPKPatternMatchDel(const std::string& pattern, int32_t* ret); // Keys Commands virtual Status StringsExpire(const Slice& key, int64_t ttl); @@ -133,6 +136,7 @@ class Redis { virtual Status ListsDel(const Slice& key); virtual Status ZsetsDel(const Slice& key); virtual Status SetsDel(const Slice& key); + virtual Status StreamsDel(const Slice& key); virtual Status StringsExpireat(const Slice& key, int64_t timestamp); virtual Status HashesExpireat(const Slice& key, int64_t timestamp); @@ -212,7 +216,7 @@ class Redis { Status SetSmallCompactionThreshold(uint64_t small_compaction_threshold); Status SetSmallCompactionDurationThreshold(uint64_t small_compaction_duration_threshold); - std::vector GetStringCFHandles() { return {handles_[0]}; } + std::vector GetStringCFHandles() { return {handles_[kStringsCF]}; } std::vector GetHashCFHandles() { return {handles_.begin() + kHashesMetaCF, handles_.begin() + kHashesDataCF + 1}; @@ -227,7 +231,11 @@ class Redis { } std::vector GetZsetCFHandles() { - return {handles_.begin() + kZsetsMetaCF, handles_.end()}; + return {handles_.begin() + kZsetsMetaCF, handles_.begin() + kZsetsScoreCF + 1}; + } + + std::vector GetStreamCFHandles() { + return {handles_.begin() + kStreamsMetaCF, handles_.end()}; } void GetRocksDBInfo(std::string &info, const char *prefix); @@ -304,6 +312,35 @@ class Redis { Status ZPopMax(const Slice& key, int64_t count, std::vector* score_members); Status ZPopMin(const Slice& key, int64_t count, std::vector* score_members); + //===--------------------------------------------------------------------===// + // Commands + //===--------------------------------------------------------------------===// + Status XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args); + Status XDel(const Slice& key, const std::vector& ids, int32_t& count); + Status XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& 
count); + Status XRange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); + Status XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); + Status XLen(const Slice& key, int32_t& len); + Status XRead(const StreamReadGroupReadArgs& args, std::vector>& results, + std::vector& reserved_keys); + Status XInfo(const Slice& key, StreamInfoResult& result); + Status ScanStream(const ScanStreamOptions& option, std::vector& id_messages, std::string& next_field, + rocksdb::ReadOptions& read_options); + // get and parse the stream meta if found + // @return ok only when the stream meta exists + Status GetStreamMeta(StreamMetaValue& tream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options); + + // Before calling this function, the caller should ensure that the ids are valid + Status DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, + const std::vector& ids, rocksdb::ReadOptions& read_options); + + // Before calling this function, the caller should ensure that the ids are valid + Status DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, + const std::vector& serialized_ids, rocksdb::ReadOptions& read_options); + + Status TrimStream(int32_t& count, StreamMetaValue& stream_meta, const rocksdb::Slice& key, StreamAddTrimArgs& args, + rocksdb::ReadOptions& read_options); + void ScanDatabase(); void ScanStrings(); void ScanHashes(); @@ -336,6 +373,9 @@ class Redis { case 'z': return new ZsetsIterator(options, db_, handles_[kZsetsMetaCF], pattern); break; + case 'x': + return new StreamsIterator(options, db_, handles_[kStreamsMetaCF], pattern); + break; default: LOG(WARNING) << "Invalid datatype to create iterator"; return nullptr; @@ -343,6 +383,39 @@ class Redis { return nullptr; } +private: + Status GenerateStreamID(const StreamMetaValue& stream_meta, StreamAddTrimArgs& args); + + Status StreamScanRange(const Slice& key, const uint64_t version, const 
Slice& id_start, const std::string& id_end, + const Slice& pattern, int32_t limit, std::vector& id_messages, std::string& next_id, + rocksdb::ReadOptions& read_options); + Status StreamReScanRange(const Slice& key, const uint64_t version, const Slice& id_start, const std::string& id_end, + const Slice& pattern, int32_t limit, std::vector& id_values, std::string& next_id, + rocksdb::ReadOptions& read_options); + + struct TrimRet { + // the count of deleted messages + int32_t count{0}; + // the next field after trim + std::string next_field; + // the max deleted field, will be empty if no message is deleted + std::string max_deleted_field; + }; + + Status TrimByMaxlen(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key, + const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options); + + Status TrimByMinid(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key, + const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options); + + inline Status SetFirstID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, rocksdb::ReadOptions& read_options); + + inline Status SetLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, rocksdb::ReadOptions& read_options); + + inline Status SetFirstOrLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, bool is_set_first, + rocksdb::ReadOptions& read_options); + + private: int32_t index_ = 0; Storage* const storage_; diff --git a/src/storage/src/redis_hashes.cc b/src/storage/src/redis_hashes.cc index 27eeff7e69..bf91e568ab 100644 --- a/src/storage/src/redis_hashes.cc +++ b/src/storage/src/redis_hashes.cc @@ -34,7 +34,7 @@ Status Redis::ScanHashesKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kHashesDataCF]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kHashesMetaCF]); for (iter->SeekToFirst(); iter->Valid(); 
iter->Next()) { ParsedHashesMetaValue parsed_hashes_meta_value(iter->value()); if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { diff --git a/src/storage/src/redis_streams.cc b/src/storage/src/redis_streams.cc index 84e11832be..b7ccce66ba 100644 --- a/src/storage/src/redis_streams.cc +++ b/src/storage/src/redis_streams.cc @@ -3,15 +3,17 @@ // LICENSE file in the root directory of this source tree. An additional grant // of patent rights can be found in the PATENTS file in the same directory. -#include "src/redis_streams.h" #include #include #include #include #include #include + #include "rocksdb/slice.h" #include "rocksdb/status.h" + +#include "src/redis.h" #include "src/base_data_key_format.h" #include "src/base_filter.h" #include "src/debug.h" @@ -23,7 +25,7 @@ namespace storage { -Status RedisStreams::XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args) { +Status Redis::XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args) { // With the lock, we do not need snapshot for read. // And it's bugy to use snapshot for read when we try to add message with trim. 
// such as: XADD key 1-0 field value MINID 1-0 @@ -65,7 +67,7 @@ Status RedisStreams::XAdd(const Slice& key, const std::string& serialized_messag #endif StreamDataKey stream_data_key(key, stream_meta.version(), args.id.Serialize()); - s = db_->Put(default_write_options_, handles_[1], stream_data_key.Encode(), serialized_message); + s = db_->Put(default_write_options_, handles_[kStreamsDataCF], stream_data_key.Encode(), serialized_message); if (!s.ok()) { return Status::Corruption("error from XADD, insert stream message failed 1: " + s.ToString()); } @@ -89,7 +91,8 @@ Status RedisStreams::XAdd(const Slice& key, const std::string& serialized_messag } // 5 update stream meta - s = db_->Put(default_write_options_, handles_[0], key, stream_meta.value()); + BaseMetaKey base_meta_key(key); + s = db_->Put(default_write_options_, handles_[kStreamsMetaCF], base_meta_key.Encode(), stream_meta.value()); if (!s.ok()) { return s; } @@ -97,7 +100,7 @@ Status RedisStreams::XAdd(const Slice& key, const std::string& serialized_messag return Status::OK(); } -Status RedisStreams::XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count) { +Status Redis::XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count) { // 1 get stream meta rocksdb::Status s; @@ -115,7 +118,8 @@ Status RedisStreams::XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& c } // 3 update stream meta - s = db_->Put(default_write_options_, handles_[0], key, stream_meta.value()); + BaseMetaKey base_meta_key(key); + s = db_->Put(default_write_options_, handles_[kStreamsMetaCF], base_meta_key.Encode(), stream_meta.value()); if (!s.ok()) { return s; } @@ -123,7 +127,7 @@ Status RedisStreams::XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& c return Status::OK(); } -Status RedisStreams::XDel(const Slice& key, const std::vector& ids, int32_t& count) { +Status Redis::XDel(const Slice& key, const std::vector& ids, int32_t& count) { // 1 try to get stream meta StreamMetaValue stream_meta; @@ 
-140,7 +144,7 @@ Status RedisStreams::XDel(const Slice& key, const std::vector& ids, in std::string unused; for (auto id : ids) { StreamDataKey stream_data_key(key, stream_meta.version(), id.Serialize()); - s = db_->Get(default_read_options_, handles_[1], stream_data_key.Encode(), &unused); + s = db_->Get(default_read_options_, handles_[kStreamsDataCF], stream_data_key.Encode(), &unused); if (s.IsNotFound()) { --count; continue; @@ -168,11 +172,11 @@ Status RedisStreams::XDel(const Slice& key, const std::vector& ids, in return s; } } - - return db_->Put(default_write_options_, handles_[0], key, stream_meta.value()); + + return db_->Put(default_write_options_, handles_[kStreamsMetaCF], BaseMetaKey(key).Encode(), stream_meta.value()); } -Status RedisStreams::XRange(const Slice& key, const StreamScanArgs& args, std::vector& field_values) { +Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector& field_values) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -196,7 +200,7 @@ Status RedisStreams::XRange(const Slice& key, const StreamScanArgs& args, std::v return s; } -Status RedisStreams::XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& field_values) { +Status Redis::XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& field_values) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -220,7 +224,7 @@ Status RedisStreams::XRevrange(const Slice& key, const StreamScanArgs& args, std return s; } -Status RedisStreams::XLen(const Slice& key, int32_t& len) { +Status Redis::XLen(const Slice& key, int32_t& len) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -238,8 +242,8 @@ Status RedisStreams::XLen(const Slice& key, int32_t& len) { return Status::OK(); } -Status RedisStreams::XRead(const StreamReadGroupReadArgs& args, std::vector>& results, - 
std::vector& reserved_keys) { +Status Redis::XRead(const StreamReadGroupReadArgs& args, std::vector>& results, + std::vector& reserved_keys) { rocksdb::ReadOptions read_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); @@ -304,7 +308,7 @@ Status RedisStreams::XRead(const StreamReadGroupReadArgs& args, std::vectorSetCapacity(storage_options.statistics_max_size); - small_compaction_threshold_ = storage_options.small_compaction_threshold; - - rocksdb::Options ops(storage_options.options); - Status s = rocksdb::DB::Open(ops, db_path, &db_); - if (s.ok()) { - // create column family - rocksdb::ColumnFamilyHandle* cf; - s = db_->CreateColumnFamily(rocksdb::ColumnFamilyOptions(), "data_cf", &cf); - if (!s.ok()) { - return s; - } - // close DB - delete cf; - delete db_; - } - - // Open - rocksdb::DBOptions db_ops(storage_options.options); - rocksdb::ColumnFamilyOptions meta_cf_ops(storage_options.options); - rocksdb::ColumnFamilyOptions data_cf_ops(storage_options.options); - // Notice: Stream's Meta dose not have timestamp and version, so it does not need to be filtered. 
- - // use the bloom filter policy to reduce disk reads - rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); - table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); - rocksdb::BlockBasedTableOptions meta_cf_table_ops(table_ops); - rocksdb::BlockBasedTableOptions data_cf_table_ops(table_ops); - if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { - meta_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); - } - meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(meta_cf_table_ops)); - data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(data_cf_table_ops)); - - std::vector column_families; - // Meta CF - column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, meta_cf_ops); - // Data CF - column_families.emplace_back("data_cf", data_cf_ops); - return rocksdb::DB::Open(db_ops, db_path, column_families, &handles_, &db_); -} - -Status RedisStreams::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type) { - if (type == kMeta || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[0], begin, end); - } - if (type == kData || type == kMetaAndData) { - db_->CompactRange(default_compact_range_options_, handles_[1], begin, end); - } - return Status::OK(); -} - -Status RedisStreams::GetProperty(const std::string& property, uint64_t* out) { - std::string value; - db_->GetProperty(handles_[0], property, &value); - *out = std::strtoull(value.c_str(), nullptr, 10); - db_->GetProperty(handles_[1], property, &value); - *out += std::strtoull(value.c_str(), nullptr, 10); - return Status::OK(); -} - -Status RedisStreams::ScanKeyNum(KeyInfo* key_info) { +Status Redis::ScanStreamsKeyNum(KeyInfo* key_info) { uint64_t keys = 0; uint64_t expires = 0; uint64_t ttl_sum = 0; @@ -402,7 +342,7 @@ 
Status RedisStreams::ScanKeyNum(KeyInfo* key_info) { int64_t curtime; rocksdb::Env::Default()->GetCurrentTime(&curtime); - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kStreamsMetaCF]); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ParsedStreamMetaValue parsed_stream_meta_value(iter->value()); if (parsed_stream_meta_value.length() == 0) { @@ -418,51 +358,30 @@ Status RedisStreams::ScanKeyNum(KeyInfo* key_info) { return Status::OK(); } -Status RedisStreams::ScanKeys(const std::string& pattern, std::vector* keys) { - std::string key; +Status Redis::StreamsPKPatternMatchDel(const std::string& pattern, int32_t* ret) { rocksdb::ReadOptions iterator_options; const rocksdb::Snapshot* snapshot; ScopeSnapshot ss(db_, &snapshot); iterator_options.snapshot = snapshot; iterator_options.fill_cache = false; - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); - for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { - ParsedStreamMetaValue parsed_stream_meta_value(iter->value()); - if (parsed_stream_meta_value.length() != 0) { - key = iter->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - } - } - delete iter; - return Status::OK(); -} - -Status RedisStreams::PKPatternMatchDel(const std::string& pattern, int32_t* ret) { - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - std::string key; + std::string encoded_key; std::string meta_value; int32_t total_delete = 0; Status s; rocksdb::WriteBatch batch; - rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[0]); + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kStreamsMetaCF]); iter->SeekToFirst(); while (iter->Valid()) { - key = 
iter->key().ToString(); + encoded_key = iter->key().ToString(); meta_value = iter->value().ToString(); + ParsedBaseMetaKey parsed_meta_key(iter->key()); StreamMetaValue stream_meta_value; stream_meta_value.ParseFrom(meta_value); if ((stream_meta_value.length() != 0) && - (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0)) { + (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) { stream_meta_value.InitMetaValue(); - batch.Put(handles_[0], key, stream_meta_value.value()); + batch.Put(handles_[kStreamsMetaCF], encoded_key, stream_meta_value.value()); } if (static_cast(batch.Count()) >= BATCH_DELETE_LIMIT) { s = db_->Write(default_write_options_, &batch); @@ -488,115 +407,10 @@ Status RedisStreams::PKPatternMatchDel(const std::string& pattern, int32_t* ret) return s; } -rocksdb::Status RedisStreams::PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, - int32_t limit, std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) > 0)) { - return rocksdb::Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToFirst(); - } else { - it->Seek(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedStreamMetaValue parsed_meta_value(it->value()); - if (parsed_meta_value.length() == 0) { - it->Next(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), 
pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Next(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) <= 0)) { - ParsedStreamMetaValue parsed_meta_value(it->value()); - if (parsed_meta_value.length() == 0) { - it->Next(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return rocksdb::Status::OK(); -} - -Status RedisStreams::PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key) { - next_key->clear(); - - std::string key; - int32_t remain = limit; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - bool start_no_limit = key_start.compare("") == 0; - bool end_no_limit = key_end.compare("") == 0; - - if (!start_no_limit && !end_no_limit && (key_start.compare(key_end) < 0)) { - return Status::InvalidArgument("error in given range"); - } - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - if (start_no_limit) { - it->SeekToLast(); - } else { - it->SeekForPrev(key_start); - } - - while (it->Valid() && remain > 0 && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedStreamMetaValue parsed_streams_meta_value(it->value()); - if (parsed_streams_meta_value.length() == 0) { - it->Prev(); - } else { - key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), key.data(), key.size(), 0) != 0) { - keys->push_back(key); - } - remain--; - it->Prev(); - } - } - - while (it->Valid() && (end_no_limit || it->key().compare(key_end) >= 0)) { - ParsedStreamMetaValue parsed_streams_meta_value(it->value()); - if (parsed_streams_meta_value.length() == 0) { - it->Prev(); - } else { - *next_key = it->key().ToString(); - break; - } - } - delete it; - return Status::OK(); -} - -Status 
RedisStreams::Del(const Slice& key) { +Status Redis::StreamsDel(const Slice& key) { std::string meta_value; - Status s = db_->Get(default_read_options_, handles_[0], key, &meta_value); + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kStreamsMetaCF], base_meta_key.Encode(), &meta_value); if (s.ok()) { StreamMetaValue stream_meta_value; stream_meta_value.ParseFrom(meta_value); @@ -605,82 +419,18 @@ Status RedisStreams::Del(const Slice& key) { } else { uint32_t statistic = stream_meta_value.length(); stream_meta_value.InitMetaValue(); - s = db_->Put(default_write_options_, handles_[0], key, stream_meta_value.value()); - UpdateSpecificKeyStatistics(key.ToString(), statistic); + s = db_->Put(default_write_options_, handles_[kStreamsMetaCF], base_meta_key.Encode(), stream_meta_value.value()); + UpdateSpecificKeyStatistics(DataType::kStreams, key.ToString(), statistic); } } return s; } -bool RedisStreams::Scan(const std::string& start_key, const std::string& pattern, std::vector* keys, - int64_t* count, std::string* next_key) { - std::string meta_key; - bool is_finish = true; - rocksdb::ReadOptions iterator_options; - const rocksdb::Snapshot* snapshot; - ScopeSnapshot ss(db_, &snapshot); - iterator_options.snapshot = snapshot; - iterator_options.fill_cache = false; - - rocksdb::Iterator* it = db_->NewIterator(iterator_options, handles_[0]); - - it->Seek(start_key); - while (it->Valid() && (*count) > 0) { - ParsedStreamMetaValue parsed_stream_meta_value(it->value()); - if (parsed_stream_meta_value.length() == 0) { - it->Next(); - continue; - } else { - meta_key = it->key().ToString(); - if (StringMatch(pattern.data(), pattern.size(), meta_key.data(), meta_key.size(), 0) != 0) { - keys->push_back(meta_key); - } - (*count)--; - it->Next(); - } - } - - std::string prefix = isTailWildcard(pattern) ? 
pattern.substr(0, pattern.size() - 1) : ""; - if (it->Valid() && (it->key().compare(prefix) <= 0 || it->key().starts_with(prefix))) { - *next_key = it->key().ToString(); - is_finish = false; - } else { - *next_key = ""; - } - delete it; - return is_finish; -} - -Status RedisStreams::Expire(const Slice& key, int32_t ttl) { - rocksdb::Status s(rocksdb::Status::NotSupported("RedisStreams::Expire not supported by stream")); - return Status::Corruption(s.ToString()); -} - -bool RedisStreams::PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) { - TRACE("RedisStreams::PKExpireScan not supported by stream"); - return false; -} - -Status RedisStreams::Expireat(const Slice& key, int32_t timestamp) { - rocksdb::Status s(rocksdb::Status::NotSupported("RedisStreams::Expireat not supported by stream")); - return Status::Corruption(s.ToString()); -} - -Status RedisStreams::Persist(const Slice& key) { - rocksdb::Status s(rocksdb::Status::NotSupported("RedisStreams::Persist not supported by stream")); - return Status::Corruption(s.ToString()); -} - -Status RedisStreams::TTL(const Slice& key, int64_t* timestamp) { - rocksdb::Status s(rocksdb::Status::NotSupported("RedisStreams::TTL not supported by stream")); - return Status::Corruption(s.ToString()); -} - -Status RedisStreams::GetStreamMeta(StreamMetaValue& stream_meta, const rocksdb::Slice& key, - rocksdb::ReadOptions& read_options) { +Status Redis::GetStreamMeta(StreamMetaValue& stream_meta, const rocksdb::Slice& key, + rocksdb::ReadOptions& read_options) { std::string value; - auto s = db_->Get(read_options, handles_[0], key, &value); + BaseMetaKey base_meta_key(key); + auto s = db_->Get(read_options, handles_[kStreamsMetaCF], base_meta_key.Encode(), &value); if (s.ok()) { stream_meta.ParseFrom(value); return Status::OK(); @@ -688,8 +438,8 @@ Status RedisStreams::GetStreamMeta(StreamMetaValue& stream_meta, const rocksdb:: 
return s; } -Status RedisStreams::TrimStream(int32_t& count, StreamMetaValue& stream_meta, const rocksdb::Slice& key, - StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options) { +Status Redis::TrimStream(int32_t& count, StreamMetaValue& stream_meta, const rocksdb::Slice& key, + StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options) { count = 0; // 1 do the trim TrimRet trim_ret; @@ -731,8 +481,8 @@ Status RedisStreams::TrimStream(int32_t& count, StreamMetaValue& stream_meta, co return Status::OK(); } -Status RedisStreams::ScanStream(const ScanStreamOptions& op, std::vector& field_values, - std::string& next_field, rocksdb::ReadOptions& read_options) { +Status Redis::ScanStream(const ScanStreamOptions& op, std::vector& field_values, + std::string& next_field, rocksdb::ReadOptions& read_options) { std::string start_field; std::string end_field; Slice pattern = "*"; // match all the fields from start_field to end_field @@ -746,8 +496,8 @@ Status RedisStreams::ScanStream(const ScanStreamOptions& op, std::vector args.maxlen) { @@ -834,9 +584,9 @@ Status RedisStreams::TrimByMaxlen(TrimRet& trim_ret, StreamMetaValue& stream_met (std::min(static_cast(stream_meta.length() - trim_ret.count - args.maxlen), kDEFAULT_TRIM_BATCH_SIZE)); std::vector id_messages; - RedisStreams::ScanStreamOptions options(key, stream_meta.version(), stream_meta.first_id(), kSTREAMID_MAX, - cur_batch, false, false, false); - s = RedisStreams::ScanStream(options, id_messages, trim_ret.next_field, read_options); + ScanStreamOptions options(key, stream_meta.version(), stream_meta.first_id(), kSTREAMID_MAX, + cur_batch, false, false, false); + s = ScanStream(options, id_messages, trim_ret.next_field, read_options); if (!s.ok()) { assert(!s.IsNotFound()); return s; @@ -862,8 +612,8 @@ Status RedisStreams::TrimByMaxlen(TrimRet& trim_ret, StreamMetaValue& stream_met return s; } -Status RedisStreams::TrimByMinid(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key, - const 
StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options) { +Status Redis::TrimByMinid(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key, + const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options) { Status s; std::string serialized_min_id; trim_ret.next_field = stream_meta.first_id().Serialize(); @@ -875,9 +625,9 @@ Status RedisStreams::TrimByMinid(TrimRet& trim_ret, StreamMetaValue& stream_meta std::min(static_cast(stream_meta.length() - trim_ret.count), kDEFAULT_TRIM_BATCH_SIZE)); std::vector id_messages; - RedisStreams::ScanStreamOptions options(key, stream_meta.version(), stream_meta.first_id(), args.minid, cur_batch, - false, false, false); - s = RedisStreams::ScanStream(options, id_messages, trim_ret.next_field, read_options); + ScanStreamOptions options(key, stream_meta.version(), stream_meta.first_id(), args.minid, cur_batch, + false, false, false); + s = ScanStream(options, id_messages, trim_ret.next_field, read_options); if (!s.ok()) { assert(!s.IsNotFound()); return s; @@ -915,10 +665,10 @@ Status RedisStreams::TrimByMinid(TrimRet& trim_ret, StreamMetaValue& stream_meta return s; } -Status RedisStreams::ScanRange(const Slice& key, const int32_t version, const Slice& id_start, - const std::string& id_end, const Slice& pattern, int32_t limit, - std::vector& id_messages, std::string& next_id, - rocksdb::ReadOptions& read_options) { +Status Redis::StreamScanRange(const Slice& key, const uint64_t version, const Slice& id_start, + const std::string& id_end, const Slice& pattern, int32_t limit, + std::vector& id_messages, std::string& next_id, + rocksdb::ReadOptions& read_options) { next_id.clear(); id_messages.clear(); @@ -934,8 +684,8 @@ Status RedisStreams::ScanRange(const Slice& key, const int32_t version, const Sl StreamDataKey streams_data_prefix(key, version, Slice()); StreamDataKey streams_start_data_key(key, version, id_start); - std::string prefix = streams_data_prefix.Encode().ToString(); - rocksdb::Iterator* 
iter = db_->NewIterator(read_options, handles_[1]); + std::string prefix = streams_data_prefix.EncodeSeekKey().ToString(); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kStreamsDataCF]); for (iter->Seek(start_no_limit ? prefix : streams_start_data_key.Encode()); iter->Valid() && remain > 0 && iter->key().starts_with(prefix); iter->Next()) { ParsedStreamDataKey parsed_streams_data_key(iter->key()); @@ -960,10 +710,10 @@ Status RedisStreams::ScanRange(const Slice& key, const int32_t version, const Sl return Status::OK(); } -Status RedisStreams::ReScanRange(const Slice& key, const int32_t version, const Slice& id_start, - const std::string& id_end, const Slice& pattern, int32_t limit, - std::vector& id_messages, std::string& next_id, - rocksdb::ReadOptions& read_options) { +Status Redis::StreamReScanRange(const Slice& key, const uint64_t version, const Slice& id_start, + const std::string& id_end, const Slice& pattern, int32_t limit, + std::vector& id_messages, std::string& next_id, + rocksdb::ReadOptions& read_options) { next_id.clear(); id_messages.clear(); @@ -977,12 +727,12 @@ Status RedisStreams::ReScanRange(const Slice& key, const int32_t version, const return Status::InvalidArgument("error in given range"); } - int32_t start_key_version = start_no_limit ? version + 1 : version; + uint64_t start_key_version = start_no_limit ? version + 1 : version; std::string start_key_id = start_no_limit ? 
"" : id_start.ToString(); StreamDataKey streams_data_prefix(key, version, Slice()); StreamDataKey streams_start_data_key(key, start_key_version, start_key_id); - std::string prefix = streams_data_prefix.Encode().ToString(); - rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[1]); + std::string prefix = streams_data_prefix.EncodeSeekKey().ToString(); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kStreamsDataCF]); for (iter->SeekForPrev(streams_start_data_key.Encode().ToString()); iter->Valid() && remain > 0 && iter->key().starts_with(prefix); iter->Prev()) { ParsedStreamDataKey parsed_streams_data_key(iter->key()); @@ -1007,8 +757,8 @@ Status RedisStreams::ReScanRange(const Slice& key, const int32_t version, const return Status::OK(); } -Status RedisStreams::DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, - const std::vector& ids, rocksdb::ReadOptions& read_options) { +Status Redis::DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, + const std::vector& ids, rocksdb::ReadOptions& read_options) { std::vector serialized_ids; serialized_ids.reserve(ids.size()); for (const auto& id : ids) { @@ -1017,29 +767,29 @@ Status RedisStreams::DeleteStreamMessages(const rocksdb::Slice& key, const Strea return DeleteStreamMessages(key, stream_meta, serialized_ids, read_options); } -Status RedisStreams::DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, - const std::vector& serialized_ids, - rocksdb::ReadOptions& read_options) { +Status Redis::DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, + const std::vector& serialized_ids, + rocksdb::ReadOptions& read_options) { rocksdb::WriteBatch batch; for (auto& sid : serialized_ids) { StreamDataKey stream_data_key(key, stream_meta.version(), sid); - batch.Delete(handles_[1], stream_data_key.Encode()); + batch.Delete(handles_[kStreamsDataCF], 
stream_data_key.Encode()); } return db_->Write(default_write_options_, &batch); } -inline Status RedisStreams::SetFirstID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, - rocksdb::ReadOptions& read_options) { +inline Status Redis::SetFirstID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, + rocksdb::ReadOptions& read_options) { return SetFirstOrLastID(key, stream_meta, true, read_options); } -inline Status RedisStreams::SetLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, - rocksdb::ReadOptions& read_options) { +inline Status Redis::SetLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, + rocksdb::ReadOptions& read_options) { return SetFirstOrLastID(key, stream_meta, false, read_options); } -inline Status RedisStreams::SetFirstOrLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, bool is_set_first, - rocksdb::ReadOptions& read_options) { +inline Status Redis::SetFirstOrLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, bool is_set_first, + rocksdb::ReadOptions& read_options) { if (stream_meta.length() == 0) { stream_meta.set_first_id(kSTREAMID_MIN); return Status::OK(); diff --git a/src/storage/src/redis_streams.h b/src/storage/src/redis_streams.h index c964efef7f..848fe94900 100644 --- a/src/storage/src/redis_streams.h +++ b/src/storage/src/redis_streams.h @@ -14,8 +14,6 @@ #include "rocksdb/options.h" #include "rocksdb/slice.h" #include "rocksdb/status.h" -#include "src/redis.h" -#include "storage/storage.h" namespace storage { @@ -121,127 +119,25 @@ class StreamUtils { static bool StreamParseIntervalId(const std::string& var, streamID& id, bool* exclude, uint64_t missing_seq); }; -class RedisStreams : public Redis { - public: - RedisStreams(Storage* const s, const DataType& type) : Redis(s, type) {} - ~RedisStreams() override = default; - - //===--------------------------------------------------------------------===// - // Commands - 
//===--------------------------------------------------------------------===// - Status XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args); - Status XDel(const Slice& key, const std::vector& ids, int32_t& count); - Status XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count); - Status XRange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); - Status XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& id_messages); - Status XLen(const Slice& key, int32_t& len); - Status XRead(const StreamReadGroupReadArgs& args, std::vector>& results, - std::vector& reserved_keys); - Status XInfo(const Slice& key, StreamInfoResult& result); - - //===--------------------------------------------------------------------===// - // Common Commands - //===--------------------------------------------------------------------===// - Status Open(const StorageOptions& storage_options, const std::string& db_path) override; - Status CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end, - const ColumnFamilyType& type = kMetaAndData) override; - Status GetProperty(const std::string& property, uint64_t* out) override; - Status ScanKeyNum(KeyInfo* keyinfo) override; - Status ScanKeys(const std::string& pattern, std::vector* keys) override; - Status PKPatternMatchDel(const std::string& pattern, int32_t* ret) override; - Status PKScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - Status PKRScanRange(const Slice& key_start, const Slice& key_end, const Slice& pattern, int32_t limit, - std::vector* keys, std::string* next_key); - - //===--------------------------------------------------------------------===// - // Keys Commands - //===--------------------------------------------------------------------===// - Status Del(const Slice& key) override; - bool Scan(const std::string& start_key, const std::string& pattern, 
std::vector* keys, int64_t* count, - std::string* next_key) override; - - //===--------------------------------------------------------------------===// - // Not needed for streams - //===--------------------------------------------------------------------===// - Status Expire(const Slice& key, int32_t ttl) override; - bool PKExpireScan(const std::string& start_key, int32_t min_timestamp, int32_t max_timestamp, - std::vector* keys, int64_t* leftover_visits, std::string* next_key) override; - Status Expireat(const Slice& key, int32_t timestamp) override; - Status Persist(const Slice& key) override; - Status TTL(const Slice& key, int64_t* timestamp) override; - - //===--------------------------------------------------------------------===// - // Storage API - //===--------------------------------------------------------------------===// - struct ScanStreamOptions { - const rocksdb::Slice key; // the key of the stream - int32_t version; // the version of the stream - streamID start_sid; - streamID end_sid; - int32_t limit; - bool start_ex; // exclude first message - bool end_ex; // exclude last message - bool is_reverse; // scan in reverse order - ScanStreamOptions(const rocksdb::Slice skey, int32_t version, streamID start_sid, streamID end_sid, int32_t count, - bool start_ex = false, bool end_ex = false, bool is_reverse = false) - : key(skey), - version(version), - start_sid(start_sid), - end_sid(end_sid), - limit(count), - start_ex(start_ex), - end_ex(end_ex), - is_reverse(is_reverse) {} - }; - - Status ScanStream(const ScanStreamOptions& option, std::vector& id_messages, std::string& next_field, - rocksdb::ReadOptions& read_options); - // get and parse the stream meta if found - // @return ok only when the stream meta exists - Status GetStreamMeta(StreamMetaValue& tream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options); - - // Before calling this function, the caller should ensure that the ids are valid - Status DeleteStreamMessages(const 
rocksdb::Slice& key, const StreamMetaValue& stream_meta, - const std::vector& ids, rocksdb::ReadOptions& read_options); - - // Before calling this function, the caller should ensure that the ids are valid - Status DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta, - const std::vector& serialized_ids, rocksdb::ReadOptions& read_options); - - Status TrimStream(int32_t& count, StreamMetaValue& stream_meta, const rocksdb::Slice& key, StreamAddTrimArgs& args, - rocksdb::ReadOptions& read_options); - - private: - Status GenerateStreamID(const StreamMetaValue& stream_meta, StreamAddTrimArgs& args); - - Status ScanRange(const Slice& key, const int32_t version, const Slice& id_start, const std::string& id_end, - const Slice& pattern, int32_t limit, std::vector& id_messages, std::string& next_id, - rocksdb::ReadOptions& read_options); - Status ReScanRange(const Slice& key, const int32_t version, const Slice& id_start, const std::string& id_end, - const Slice& pattern, int32_t limit, std::vector& id_values, std::string& next_id, - rocksdb::ReadOptions& read_options); - - struct TrimRet { - // the count of deleted messages - int32_t count{0}; - // the next field after trim - std::string next_field; - // the max deleted field, will be empty if no message is deleted - std::string max_deleted_field; - }; - - Status TrimByMaxlen(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key, - const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options); - - Status TrimByMinid(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key, - const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options); - - inline Status SetFirstID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, rocksdb::ReadOptions& read_options); - - inline Status SetLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, rocksdb::ReadOptions& read_options); - - inline Status SetFirstOrLastID(const rocksdb::Slice& key, 
StreamMetaValue& stream_meta, bool is_set_first, - rocksdb::ReadOptions& read_options); +struct ScanStreamOptions { + const rocksdb::Slice key; // the key of the stream + uint64_t version; // the version of the stream + streamID start_sid; + streamID end_sid; + int32_t limit; + bool start_ex; // exclude first message + bool end_ex; // exclude last message + bool is_reverse; // scan in reverse order + ScanStreamOptions(const rocksdb::Slice skey, uint64_t version, streamID start_sid, streamID end_sid, int32_t count, + bool start_ex = false, bool end_ex = false, bool is_reverse = false) + : key(skey), + version(version), + start_sid(start_sid), + end_sid(end_sid), + limit(count), + start_ex(start_ex), + end_ex(end_ex), + is_reverse(is_reverse) {} }; -} // namespace storage +} + diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 20d729a2cb..94427afb42 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -1134,8 +1134,23 @@ Status Storage::XLen(const Slice& key, int32_t& len) { Status Storage::XRead(const StreamReadGroupReadArgs& args, std::vector>& results, std::vector& reserved_keys) { - auto& inst = GetDBInstance(key); - return inst->XRead(args, results, reserved_keys); + Status s; + for (int i = 0; i < args.unparsed_ids.size(); i++) { + StreamReadGroupReadArgs single_args; + single_args.keys.push_back(args.keys[i]); + single_args.unparsed_ids.push_back(args.unparsed_ids[i]); + single_args.count = args.count; + single_args.block = args.block; + single_args.group_name = args.group_name; + single_args.consumer_name = args.consumer_name; + single_args.noack_ = args.noack_; + auto& inst = GetDBInstance(args.keys[i]); + s = inst->XRead(single_args, results, reserved_keys); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + } + return s; } Status Storage::XInfo(const Slice& key, StreamInfoResult &result) { @@ -1255,7 +1270,7 @@ int64_t Storage::Del(const std::vector& keys, std::mapDel(key); + s = inst->StreamsDel(key); if 
(s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1331,7 +1346,7 @@ int64_t Storage::DelByType(const std::vector& keys, const DataType& } // Stream case DataType::kStreams: { - s = streams_db_->Del(key); + s = inst->StreamsDel(key); if (s.ok()) { count++; } else if (!s.IsNotFound()) { @@ -1892,6 +1907,7 @@ Status Storage::Keys(const DataType& data_type, const std::string& pattern, std: types.push_back(DataType::kLists); types.push_back(DataType::kZSets); types.push_back(DataType::kSets); + types.push_back(DataType::kStreams); } else { types.push_back(data_type); } @@ -1911,10 +1927,6 @@ Status Storage::Keys(const DataType& data_type, const std::string& pattern, std: keys->push_back(miter.Key()); miter.Next(); } - s = streams_db_->ScanKeys(pattern, keys); - if (!s.ok()) { - return s; - } } return Status::OK(); @@ -1938,6 +1950,9 @@ void Storage::ScanDatabase(const DataType& type) { case kLists: inst->ScanLists(); break; + case kStreams: + // do noting + break; case kAll: inst->ScanStrings(); inst->ScanHashes(); @@ -2269,10 +2284,6 @@ uint64_t Storage::GetProperty(const std::string& property) { s = inst->GetProperty(property, &out); result += out; } - if (db_type == ALL_DB || db_type == STREAMS_DB) { - streams_db_->GetProperty(property, &out); - result += out; - } return result; } diff --git a/src/storage/src/type_iterator.h b/src/storage/src/type_iterator.h index 254dcfb632..a0be2b8abf 100644 --- a/src/storage/src/type_iterator.h +++ b/src/storage/src/type_iterator.h @@ -25,6 +25,7 @@ #include "src/base_meta_value_format.h" #include "src/strings_value_format.h" #include "src/lists_meta_value_format.h" +#include "src/pika_stream_meta_value.h" #include "storage/storage_define.h" namespace storage { @@ -237,6 +238,35 @@ class ZsetsIterator : public TypeIterator { std::string pattern_; }; +class StreamsIterator : public TypeIterator { +public: + StreamsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& 
pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~StreamsIterator() {} + + bool ShouldSkip() override { + ParsedStreamMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.length() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + // multiple class members defined in StreamMetaValue, + // so user_value_ just return rocksdb raw value + user_value_ = raw_iter_->value().ToString(); + return false; + } +private: + std::string pattern_; +}; + using IterSptr = std::shared_ptr; From d53c6ee8b544d73b502a70cae7ac9459cfef6791 Mon Sep 17 00:00:00 2001 From: wangshaoyi Date: Fri, 23 Feb 2024 18:46:49 +0800 Subject: [PATCH 4/4] fix by review comments --- src/net/include/thread_pool.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/net/include/thread_pool.h b/src/net/include/thread_pool.h index bdced7d3bd..0ec3d1bcb1 100644 --- a/src/net/include/thread_pool.h +++ b/src/net/include/thread_pool.h @@ -20,8 +20,8 @@ using TaskFunc = void (*)(void *); struct Task { Task() = default; - TaskFunc func; - void* arg; + TaskFunc func = nullptr; + void* arg = nullptr; Task(TaskFunc _func, void* _arg) : func(_func), arg(_arg) {} };